xref: /xnu-8792.41.9/osfmk/vm/vm_object.c (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_object.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Virtual memory object module.
63  */
64 
65 #include <debug.h>
66 
67 #include <mach/mach_types.h>
68 #include <mach/memory_object.h>
69 #include <mach/vm_param.h>
70 
71 #include <mach/sdt.h>
72 
73 #include <ipc/ipc_types.h>
74 #include <ipc/ipc_port.h>
75 
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/queue.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <kern/misc_protos.h>
85 #include <kern/policy_internal.h>
86 
87 #include <sys/kdebug_triage.h>
88 
89 #include <vm/memory_object.h>
90 #include <vm/vm_compressor_pager.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_protos.h>
97 #include <vm/vm_purgeable_internal.h>
98 
99 #include <vm/vm_compressor.h>
100 
101 #if CONFIG_PHANTOM_CACHE
102 #include <vm/vm_phantom_cache.h>
103 #endif
104 
105 #if VM_OBJECT_ACCESS_TRACKING
106 uint64_t vm_object_access_tracking_reads = 0;
107 uint64_t vm_object_access_tracking_writes = 0;
108 #endif /* VM_OBJECT_ACCESS_TRACKING */
109 
110 boolean_t vm_object_collapse_compressor_allowed = TRUE;
111 
112 struct vm_counters vm_counters;
113 
114 #if DEVELOPMENT || DEBUG
115 extern struct memory_object_pager_ops shared_region_pager_ops;
116 extern unsigned int shared_region_pagers_resident_count;
117 extern unsigned int shared_region_pagers_resident_peak;
118 #endif /* DEVELOPMENT || DEBUG */
119 
120 #if VM_OBJECT_TRACKING
121 btlog_t vm_object_tracking_btlog;
122 
123 void
124 vm_object_tracking_init(void)
125 {
126 	int vm_object_tracking;
127 
128 	vm_object_tracking = 1;
129 	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
130 	    sizeof(vm_object_tracking));
131 
132 	if (vm_object_tracking) {
133 		vm_object_tracking_btlog = btlog_create(BTLOG_HASH,
134 		    VM_OBJECT_TRACKING_NUM_RECORDS);
135 		assert(vm_object_tracking_btlog);
136 	}
137 }
138 #endif /* VM_OBJECT_TRACKING */
139 
140 /*
141  *	Virtual memory objects maintain the actual data
142  *	associated with allocated virtual memory.  A given
143  *	page of memory exists within exactly one object.
144  *
145  *	An object is only deallocated when all "references"
146  *	are given up.
147  *
148  *	Associated with each object is a list of all resident
149  *	memory pages belonging to that object; this list is
150  *	maintained by the "vm_page" module, but locked by the object's
151  *	lock.
152  *
153  *	Each object also records the memory object reference
154  *	that is used by the kernel to request and write
155  *	back data (the memory object, field "pager"), etc...
156  *
157  *	Virtual memory objects are allocated to provide
158  *	zero-filled memory (vm_allocate) or map a user-defined
159  *	memory object into a virtual address space (vm_map).
160  *
161  *	Virtual memory objects that refer to a user-defined
162  *	memory object are called "permanent", because all changes
163  *	made in virtual memory are reflected back to the
164  *	memory manager, which may then store it permanently.
165  *	Other virtual memory objects are called "temporary",
166  *	meaning that changes need be written back only when
167  *	necessary to reclaim pages, and that storage associated
168  *	with the object can be discarded once it is no longer
169  *	mapped.
170  *
171  *	A permanent memory object may be mapped into more
172  *	than one virtual address space.  Moreover, two threads
173  *	may attempt to make the first mapping of a memory
174  *	object concurrently.  Only one thread is allowed to
175  *	complete this mapping; all others wait until the
176  *	"pager_initialized" field is asserted, indicating
177  *	that the first thread has initialized all of the
178  *	necessary fields in the virtual memory object structure.
179  *
180  *	The kernel relies on a *default memory manager* to
181  *	provide backing storage for the zero-filled virtual
182  *	memory objects.  The pager memory objects associated
183  *	with these temporary virtual memory objects are only
184  *	requested from the default memory manager when it
185  *	becomes necessary.  Virtual memory objects
186  *	that depend on the default memory manager are called
187  *	"internal".  The "pager_created" field is provided to
188  *	indicate whether these ports have ever been allocated.
189  *
190  *	The kernel may also create virtual memory objects to
191  *	hold changed pages after a copy-on-write operation.
192  *	In this case, the virtual memory object (and its
193  *	backing storage -- its memory object) only contain
194  *	those pages that have been changed.  The "shadow"
195  *	field refers to the virtual memory object that contains
196  *	the remainder of the contents.  The "shadow_offset"
197  *	field indicates where in the "shadow" these contents begin.
198  *	The "copy" field refers to a virtual memory object
199  *	to which changed pages must be copied before changing
200  *	this object, in order to implement another form
201  *	of copy-on-write optimization.
202  *
203  *	The virtual memory object structure also records
204  *	the attributes associated with its memory object.
205  *	The "pager_ready", "can_persist" and "copy_strategy"
206  *	fields represent those attributes.  The "cached_list"
207  *	field is used in the implementation of the persistence
208  *	attribute.
209  *
210  * ZZZ Continue this comment.
211  */
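/*
 * Editor's note: the block below is an illustrative sketch, not part of the
 * original source.  It shows, in simplified form, what the "shadow" and
 * "vo_shadow_offset" fields described above mean for a page lookup: if an
 * object does not hold a resident page, the search continues in its shadow
 * at a translated offset.  Locking and fault handling are omitted and the
 * function name is hypothetical.
 */
#if 0 /* illustrative example only */
static vm_page_t
shadow_chain_lookup_example(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t m;

	while (object != VM_OBJECT_NULL) {
		/* look for a resident page at this offset */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			return m;
		}
		/* descend into the shadow, translating the offset */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif /* illustrative example only */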
212 
213 /* Forward declarations for internal functions. */
214 static kern_return_t    vm_object_terminate(
215 	vm_object_t     object);
216 
217 static kern_return_t    vm_object_copy_call(
218 	vm_object_t             src_object,
219 	vm_object_offset_t      src_offset,
220 	vm_object_size_t        size,
221 	vm_object_t             *_result_object);
222 
223 static void             vm_object_do_collapse(
224 	vm_object_t     object,
225 	vm_object_t     backing_object);
226 
227 static void             vm_object_do_bypass(
228 	vm_object_t     object,
229 	vm_object_t     backing_object);
230 
231 static void             vm_object_release_pager(
232 	memory_object_t pager);
233 
234 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
235 
236 /*
237  *	All wired-down kernel memory belongs to a single virtual
238  *	memory object (kernel_object) to avoid wasting data structures.
239  */
240 static struct vm_object                 kernel_object_store VM_PAGE_PACKED_ALIGNED;
241 const vm_object_t                       kernel_object = &kernel_object_store;
242 
243 static struct vm_object                 compressor_object_store VM_PAGE_PACKED_ALIGNED;
244 const vm_object_t                       compressor_object = &compressor_object_store;
245 
246 /*
247  * This object holds all pages that have been retired due to errors like ECC.
248  * The system should never use the page or look at its contents. The offset
249  * in this object is the same as the page's physical address.
250  */
251 static struct vm_object                 retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
252 const vm_object_t                       retired_pages_object = &retired_pages_object_store;
253 
254 
255 /*
256  *	Virtual memory objects are initialized from
257  *	a template (see vm_object_allocate).
258  *
259  *	When adding a new field to the virtual memory
260  *	object structure, be sure to add initialization
261  *	(see _vm_object_allocate()).
262  */
263 static const struct vm_object vm_object_template = {
264 	.memq.prev = 0,
265 	.memq.next = 0,
266 	/*
267 	 * The lock will be initialized for each allocated object in
268 	 * _vm_object_allocate(), so we don't need to initialize it in
269 	 * the vm_object_template.
270 	 */
271 	.vo_size = 0,
272 	.memq_hint = VM_PAGE_NULL,
273 	.ref_count = 1,
274 	.resident_page_count = 0,
275 	.wired_page_count = 0,
276 	.reusable_page_count = 0,
277 	.copy = VM_OBJECT_NULL,
278 	.shadow = VM_OBJECT_NULL,
279 	.vo_shadow_offset = (vm_object_offset_t) 0,
280 	.pager = MEMORY_OBJECT_NULL,
281 	.paging_offset = 0,
282 	.pager_control = MEMORY_OBJECT_CONTROL_NULL,
283 	.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
284 	.paging_in_progress = 0,
285 #if __LP64__
286 	.__object1_unused_bits = 0,
287 #endif /* __LP64__ */
288 	.activity_in_progress = 0,
289 
290 	/* Begin bitfields */
291 	.all_wanted = 0, /* all bits FALSE */
292 	.pager_created = FALSE,
293 	.pager_initialized = FALSE,
294 	.pager_ready = FALSE,
295 	.pager_trusted = FALSE,
296 	.can_persist = FALSE,
297 	.internal = TRUE,
298 	.private = FALSE,
299 	.pageout = FALSE,
300 	.alive = TRUE,
301 	.purgable = VM_PURGABLE_DENY,
302 	.purgeable_when_ripe = FALSE,
303 	.purgeable_only_by_kernel = FALSE,
304 	.shadowed = FALSE,
305 	.true_share = FALSE,
306 	.terminating = FALSE,
307 	.named = FALSE,
308 	.shadow_severed = FALSE,
309 	.phys_contiguous = FALSE,
310 	.nophyscache = FALSE,
311 	/* End bitfields */
312 
313 	.cached_list.prev = NULL,
314 	.cached_list.next = NULL,
315 
316 	.last_alloc = (vm_object_offset_t) 0,
317 	.sequential = (vm_object_offset_t) 0,
318 	.pages_created = 0,
319 	.pages_used = 0,
320 	.scan_collisions = 0,
321 #if CONFIG_PHANTOM_CACHE
322 	.phantom_object_id = 0,
323 #endif
324 	.cow_hint = ~(vm_offset_t)0,
325 
326 	/* cache bitfields */
327 	.wimg_bits = VM_WIMG_USE_DEFAULT,
328 	.set_cache_attr = FALSE,
329 	.object_is_shared_cache = FALSE,
330 	.code_signed = FALSE,
331 	.transposed = FALSE,
332 	.mapping_in_progress = FALSE,
333 	.phantom_isssd = FALSE,
334 	.volatile_empty = FALSE,
335 	.volatile_fault = FALSE,
336 	.all_reusable = FALSE,
337 	.blocked_access = FALSE,
338 	.vo_ledger_tag = VM_LEDGER_TAG_NONE,
339 	.vo_no_footprint = FALSE,
340 #if CONFIG_IOSCHED || UPL_DEBUG
341 	.uplq.prev = NULL,
342 	.uplq.next = NULL,
343 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
344 #ifdef VM_PIP_DEBUG
345 	.pip_holders = {0},
346 #endif /* VM_PIP_DEBUG */
347 
348 	.objq.next = NULL,
349 	.objq.prev = NULL,
350 	.task_objq.next = NULL,
351 	.task_objq.prev = NULL,
352 
353 	.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
354 	.purgeable_queue_group = 0,
355 
356 	.wire_tag = VM_KERN_MEMORY_NONE,
357 #if !VM_TAG_ACTIVE_UPDATE
358 	.wired_objq.next = NULL,
359 	.wired_objq.prev = NULL,
360 #endif /* ! VM_TAG_ACTIVE_UPDATE */
361 
362 	.io_tracking = FALSE,
363 
364 #if CONFIG_SECLUDED_MEMORY
365 	.eligible_for_secluded = FALSE,
366 	.can_grab_secluded = FALSE,
367 #else /* CONFIG_SECLUDED_MEMORY */
368 	.__object3_unused_bits = 0,
369 #endif /* CONFIG_SECLUDED_MEMORY */
370 
371 #if VM_OBJECT_ACCESS_TRACKING
372 	.access_tracking = FALSE,
373 	.access_tracking_reads = 0,
374 	.access_tracking_writes = 0,
375 #endif /* VM_OBJECT_ACCESS_TRACKING */
376 
377 #if DEBUG
378 	.purgeable_owner_bt = {0},
379 	.vo_purgeable_volatilizer = NULL,
380 	.purgeable_volatilizer_bt = {0},
381 #endif /* DEBUG */
382 };
383 
384 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
385 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
386 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
387 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
388 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
389 
390 unsigned int vm_page_purged_wired = 0;
391 unsigned int vm_page_purged_busy = 0;
392 unsigned int vm_page_purged_others = 0;
393 
394 static queue_head_t     vm_object_cached_list;
395 static uint32_t         vm_object_cache_pages_freed = 0;
396 static uint32_t         vm_object_cache_pages_moved = 0;
397 static uint32_t         vm_object_cache_pages_skipped = 0;
398 static uint32_t         vm_object_cache_adds = 0;
399 static uint32_t         vm_object_cached_count = 0;
400 static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data,
401     &vm_object_cache_lck_grp, &vm_object_lck_attr);
402 
403 static uint32_t         vm_object_page_grab_failed = 0;
404 static uint32_t         vm_object_page_grab_skipped = 0;
405 static uint32_t         vm_object_page_grab_returned = 0;
406 static uint32_t         vm_object_page_grab_pmapped = 0;
407 static uint32_t         vm_object_page_grab_reactivations = 0;
408 
409 #define vm_object_cache_lock_spin()             \
410 	        lck_mtx_lock_spin(&vm_object_cached_lock_data)
411 #define vm_object_cache_unlock()        \
412 	        lck_mtx_unlock(&vm_object_cached_lock_data)
413 
414 static void     vm_object_cache_remove_locked(vm_object_t);
415 
416 
417 static void vm_object_reap(vm_object_t object);
418 static void vm_object_reap_async(vm_object_t object);
419 static void vm_object_reaper_thread(void);
420 
421 static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data,
422     &vm_object_lck_grp, &vm_object_lck_attr);
423 
424 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
425 unsigned int vm_object_reap_count = 0;
426 unsigned int vm_object_reap_count_async = 0;
427 
428 #define vm_object_reaper_lock()         \
429 	        lck_mtx_lock(&vm_object_reaper_lock_data)
430 #define vm_object_reaper_lock_spin()            \
431 	        lck_mtx_lock_spin(&vm_object_reaper_lock_data)
432 #define vm_object_reaper_unlock()       \
433 	        lck_mtx_unlock(&vm_object_reaper_lock_data)
434 
435 #if CONFIG_IOSCHED
436 /* I/O Re-prioritization request list */
437 queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list);
438 
439 LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock,
440     &vm_object_lck_grp, &vm_object_lck_attr);
441 
442 #define IO_REPRIORITIZE_LIST_LOCK()     \
443 	        lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
444 #define IO_REPRIORITIZE_LIST_UNLOCK()   \
445 	        lck_spin_unlock(&io_reprioritize_list_lock)
446 
447 #define MAX_IO_REPRIORITIZE_REQS        8192
448 ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
449     struct io_reprioritize_req, ZC_NOGC);
450 
451 /* I/O Re-prioritization thread */
452 int io_reprioritize_wakeup = 0;
453 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
454 
455 #define IO_REPRIO_THREAD_WAKEUP()       thread_wakeup((event_t)&io_reprioritize_wakeup)
456 #define IO_REPRIO_THREAD_CONTINUATION()                                 \
457 {                                                               \
458 	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);     \
459 	thread_block(io_reprioritize_thread);                   \
460 }
461 
462 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
463 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
464 void vm_decmp_upl_reprioritize(upl_t, int);
465 #endif
466 
467 #if 0
468 #undef KERNEL_DEBUG
469 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
470 #endif
471 
472 
473 /*
474  *	vm_object_allocate:
475  *
476  *	Returns a new object with the given size.
477  */
478 
479 __private_extern__ void
480 _vm_object_allocate(
481 	vm_object_size_t        size,
482 	vm_object_t             object)
483 {
484 	*object = vm_object_template;
485 	vm_page_queue_init(&object->memq);
486 #if UPL_DEBUG || CONFIG_IOSCHED
487 	queue_init(&object->uplq);
488 #endif
489 	vm_object_lock_init(object);
490 	object->vo_size = vm_object_round_page(size);
491 
492 #if VM_OBJECT_TRACKING_OP_CREATED
493 	if (vm_object_tracking_btlog) {
494 		btlog_record(vm_object_tracking_btlog, object,
495 		    VM_OBJECT_TRACKING_OP_CREATED,
496 		    btref_get(__builtin_frame_address(0), 0));
497 	}
498 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
499 }
500 
501 __private_extern__ vm_object_t
502 vm_object_allocate(
503 	vm_object_size_t        size)
504 {
505 	vm_object_t object;
506 
507 	object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL);
508 	_vm_object_allocate(size, object);
509 
510 	return object;
511 }
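/*
 * Editor's note: a minimal usage sketch, not part of the original source.
 * It assumes the usual pairing of vm_object_allocate() with
 * vm_object_deallocate(); the caller function name is hypothetical.
 */
#if 0 /* illustrative example only */
static void
vm_object_allocate_usage_example(void)
{
	vm_object_t object;

	/* a new object starts with ref_count == 1 (see vm_object_template) */
	object = vm_object_allocate(PAGE_SIZE);

	/* ... map the object or populate it with pages ... */

	/* drop our reference; with no other references the object is reaped */
	vm_object_deallocate(object);
}
#endif /* illustrative example only */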
512 
513 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
514 
515 /*
516  *	vm_object_bootstrap:
517  *
518  *	Initialize the VM objects module.
519  */
520 __startup_func
521 void
522 vm_object_bootstrap(void)
523 {
524 	vm_size_t       vm_object_size;
525 
526 	assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
527 
528 	vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
529 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
530 
531 	vm_object_zone = zone_create("vm objects", vm_object_size,
532 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM_LP64 | ZC_NOTBITAG);
533 
534 	queue_init(&vm_object_cached_list);
535 
536 	queue_init(&vm_object_reaper_queue);
537 
538 	/*
539 	 *	Initialize the "kernel object"
540 	 */
541 
542 	/*
543 	 * Note that in the following size specifications, we need to add 1 because
544 	 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
545 	 */
546 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object);
547 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
548 	kernel_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
549 	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
550 	kernel_object->no_tag_update = TRUE;
551 
552 	/*
553 	 * The object to hold retired VM pages.
554 	 */
555 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
556 	retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
557 }
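/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * The zone element size computed in vm_object_bootstrap() uses the standard
 * power-of-two round-up idiom, generalized below.  The helper name is
 * hypothetical and "align" is assumed to be a power of two.
 */
#if 0 /* illustrative example only */
static vm_size_t
round_up_to_alignment_example(vm_size_t size, vm_size_t align)
{
	/*
	 * e.g. round_up_to_alignment_example(sizeof(struct vm_object),
	 *          VM_PAGE_PACKED_PTR_ALIGNMENT)
	 */
	return (size + (align - 1)) & ~(align - 1);
}
#endif /* illustrative example only */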
558 
559 #if CONFIG_IOSCHED
560 void
561 vm_io_reprioritize_init(void)
562 {
563 	kern_return_t   result;
564 	thread_t        thread = THREAD_NULL;
565 
566 	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
567 	if (result == KERN_SUCCESS) {
568 		thread_set_thread_name(thread, "VM_io_reprioritize_thread");
569 		thread_deallocate(thread);
570 	} else {
571 		panic("Could not create io_reprioritize_thread");
572 	}
573 }
574 #endif
575 
576 void
577 vm_object_reaper_init(void)
578 {
579 	kern_return_t   kr;
580 	thread_t        thread;
581 
582 	kr = kernel_thread_start_priority(
583 		(thread_continue_t) vm_object_reaper_thread,
584 		NULL,
585 		BASEPRI_VM,
586 		&thread);
587 	if (kr != KERN_SUCCESS) {
588 		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
589 	}
590 	thread_set_thread_name(thread, "VM_object_reaper_thread");
591 	thread_deallocate(thread);
592 }
593 
594 
595 /*
596  *	vm_object_deallocate:
597  *
598  *	Release a reference to the specified object,
599  *	gained either through a vm_object_allocate
600  *	or a vm_object_reference call.  When all references
601  *	are gone, storage associated with this object
602  *	may be relinquished.
603  *
604  *	No object may be locked.
605  */
606 unsigned long vm_object_deallocate_shared_successes = 0;
607 unsigned long vm_object_deallocate_shared_failures = 0;
608 unsigned long vm_object_deallocate_shared_swap_failures = 0;
609 
610 __private_extern__ void
611 vm_object_deallocate(
612 	vm_object_t     object)
613 {
614 	vm_object_t     shadow = VM_OBJECT_NULL;
615 
616 //	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
617 //	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */
618 
619 	if (object == VM_OBJECT_NULL) {
620 		return;
621 	}
622 
623 	if (object == kernel_object || object == compressor_object || object == retired_pages_object) {
624 		vm_object_lock_shared(object);
625 
626 		OSAddAtomic(-1, &object->ref_count);
627 
628 		if (object->ref_count == 0) {
629 			if (object == kernel_object) {
630 				panic("vm_object_deallocate: losing kernel_object");
631 			} else if (object == retired_pages_object) {
632 				panic("vm_object_deallocate: losing retired_pages_object");
633 			} else {
634 				panic("vm_object_deallocate: losing compressor_object");
635 			}
636 		}
637 		vm_object_unlock(object);
638 		return;
639 	}
640 
641 	if (object->ref_count == 2 &&
642 	    object->named) {
643 		/*
644 		 * This "named" object's reference count is about to
645 		 * drop from 2 to 1:
646 		 * we'll need to call memory_object_last_unmap().
647 		 */
648 	} else if (object->ref_count == 2 &&
649 	    object->internal &&
650 	    object->shadow != VM_OBJECT_NULL) {
651 		/*
652 		 * This internal object's reference count is about to
653 		 * drop from 2 to 1 and it has a shadow object:
654 		 * we'll want to try and collapse this object with its
655 		 * shadow.
656 		 */
657 	} else if (object->ref_count >= 2) {
658 		UInt32          original_ref_count;
659 		volatile UInt32 *ref_count_p;
660 		Boolean         atomic_swap;
661 
662 		/*
663 		 * The object currently looks like it is not being
664 		 * kept alive solely by the reference we're about to release.
665 		 * Let's try and release our reference without taking
666 		 * all the locks we would need if we had to terminate the
667 		 * object (cache lock + exclusive object lock).
668 		 * Lock the object "shared" to make sure we don't race with
669 		 * anyone holding it "exclusive".
670 		 */
671 		vm_object_lock_shared(object);
672 		ref_count_p = (volatile UInt32 *) &object->ref_count;
673 		original_ref_count = object->ref_count;
674 		/*
675 		 * Test again as "ref_count" could have changed.
676 		 * "named" shouldn't change.
677 		 */
678 		if (original_ref_count == 2 &&
679 		    object->named) {
680 			/* need to take slow path for m_o_last_unmap() */
681 			atomic_swap = FALSE;
682 		} else if (original_ref_count == 2 &&
683 		    object->internal &&
684 		    object->shadow != VM_OBJECT_NULL) {
685 			/* need to take slow path for vm_object_collapse() */
686 			atomic_swap = FALSE;
687 		} else if (original_ref_count < 2) {
688 			/* need to take slow path for vm_object_terminate() */
689 			atomic_swap = FALSE;
690 		} else {
691 			/* try an atomic update with the shared lock */
692 			atomic_swap = OSCompareAndSwap(
693 				original_ref_count,
694 				original_ref_count - 1,
695 				(UInt32 *) &object->ref_count);
696 			if (atomic_swap == FALSE) {
697 				vm_object_deallocate_shared_swap_failures++;
698 				/* fall back to the slow path... */
699 			}
700 		}
701 
702 		vm_object_unlock(object);
703 
704 		if (atomic_swap) {
705 			/*
706 			 * ref_count was updated atomically !
707 			 */
708 			vm_object_deallocate_shared_successes++;
709 			return;
710 		}
711 
712 		/*
713 		 * Someone else updated the ref_count at the same
714 		 * time and we lost the race.  Fall back to the usual
715 		 * slow but safe path...
716 		 */
717 		vm_object_deallocate_shared_failures++;
718 	}
719 
720 	while (object != VM_OBJECT_NULL) {
721 		vm_object_lock(object);
722 
723 		assert(object->ref_count > 0);
724 
725 		/*
726 		 *	If the object has a named reference, and only
727 		 *	that reference would remain, inform the pager
728 		 *	about the last "mapping" reference going away.
729 		 */
730 		if ((object->ref_count == 2) && (object->named)) {
731 			memory_object_t pager = object->pager;
732 
733 			/* Notify the Pager that there are no */
734 			/* more mappers for this object */
735 
736 			if (pager != MEMORY_OBJECT_NULL) {
737 				vm_object_mapping_wait(object, THREAD_UNINT);
738 				vm_object_mapping_begin(object);
739 				vm_object_unlock(object);
740 
741 				memory_object_last_unmap(pager);
742 
743 				vm_object_lock(object);
744 				vm_object_mapping_end(object);
745 			}
746 			assert(object->ref_count > 0);
747 		}
748 
749 		/*
750 		 *	Lose the reference. If other references
751 		 *	remain, then we are done, unless we need
752 		 *	to retry a cache trim.
753 		 *	If it is the last reference, then keep it
754 		 *	until any pending initialization is completed.
755 		 */
756 
757 		/* if the object is terminating, it cannot go into */
758 		/* the cache and we obviously should not call      */
759 		/* terminate again.  */
760 
761 		if ((object->ref_count > 1) || object->terminating) {
762 			vm_object_lock_assert_exclusive(object);
763 			object->ref_count--;
764 
765 			if (object->ref_count == 1 &&
766 			    object->shadow != VM_OBJECT_NULL) {
767 				/*
768 				 * There's only one reference left on this
769 				 * VM object.  We can't tell if it's a valid
770 				 * one (from a mapping for example) or if this
771 				 * object is just part of a possibly stale and
772 				 * useless shadow chain.
773 				 * We would like to try and collapse it into
774 				 * its parent, but we don't have any pointers
775 				 * back to this parent object.
776 				 * But we can try and collapse this object with
777 				 * its own shadows, in case these are useless
778 				 * too...
779 				 * We can't bypass this object though, since we
780 				 * don't know if this last reference on it is
781 				 * meaningful or not.
782 				 */
783 				vm_object_collapse(object, 0, FALSE);
784 			}
785 			vm_object_unlock(object);
786 			return;
787 		}
788 
789 		/*
790 		 *	We have to wait for initialization
791 		 *	before destroying or caching the object.
792 		 */
793 
794 		if (object->pager_created && !object->pager_initialized) {
795 			assert(!object->can_persist);
796 			vm_object_assert_wait(object,
797 			    VM_OBJECT_EVENT_INITIALIZED,
798 			    THREAD_UNINT);
799 			vm_object_unlock(object);
800 
801 			thread_block(THREAD_CONTINUE_NULL);
802 			continue;
803 		}
804 
805 		/*
806 		 *	Terminate this object. If it had a shadow,
807 		 *	then deallocate it; otherwise, if we need
808 		 *	to retry a cache trim, do so now; otherwise,
809 		 *	we are done. "pageout" objects have a shadow,
810 		 *	but maintain a "paging reference" rather than
811 		 *	a normal reference.
812 		 */
813 		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
814 
815 		if (vm_object_terminate(object) != KERN_SUCCESS) {
816 			return;
817 		}
818 		if (shadow != VM_OBJECT_NULL) {
819 			object = shadow;
820 			continue;
821 		}
822 		return;
823 	}
824 }
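/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * The "shared lock" fast path in vm_object_deallocate() reduces to this
 * pattern: with the object locked shared, try to move ref_count from N to
 * N-1 with a compare-and-swap; on failure, or when the count is low enough
 * that termination or collapse may be needed, fall back to the slow path
 * under the exclusive lock.  This sketch is more conservative than the real
 * code (it omits the "named" and internal-with-shadow cases and always takes
 * the slow path below a count of 3); the function name is hypothetical.
 */
#if 0 /* illustrative example only */
static boolean_t
ref_count_fast_release_example(vm_object_t object)
{
	UInt32 old_count;

	vm_object_lock_shared(object);
	old_count = object->ref_count;
	if (old_count < 3) {
		/* might be the last meaningful reference: take the slow path */
		vm_object_unlock(object);
		return FALSE;
	}
	if (!OSCompareAndSwap(old_count, old_count - 1,
	    (UInt32 *)&object->ref_count)) {
		/* lost the race with another update: take the slow path */
		vm_object_unlock(object);
		return FALSE;
	}
	vm_object_unlock(object);
	return TRUE;
}
#endif /* illustrative example only */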
825 
826 
827 
828 vm_page_t
829 vm_object_page_grab(
830 	vm_object_t     object)
831 {
832 	vm_page_t       p, next_p;
833 	int             p_limit = 0;
834 	int             p_skipped = 0;
835 
836 	vm_object_lock_assert_exclusive(object);
837 
838 	next_p = (vm_page_t)vm_page_queue_first(&object->memq);
839 	p_limit = MIN(50, object->resident_page_count);
840 
841 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
842 		p = next_p;
843 		next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
844 
845 		if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
846 			goto move_page_in_obj;
847 		}
848 
849 		if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
850 			vm_page_lockspin_queues();
851 
852 			if (p->vmp_pmapped) {
853 				int refmod_state;
854 
855 				vm_object_page_grab_pmapped++;
856 
857 				if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
858 					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
859 
860 					if (refmod_state & VM_MEM_REFERENCED) {
861 						p->vmp_reference = TRUE;
862 					}
863 					if (refmod_state & VM_MEM_MODIFIED) {
864 						SET_PAGE_DIRTY(p, FALSE);
865 					}
866 				}
867 				if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
868 					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
869 
870 					if (refmod_state & VM_MEM_REFERENCED) {
871 						p->vmp_reference = TRUE;
872 					}
873 					if (refmod_state & VM_MEM_MODIFIED) {
874 						SET_PAGE_DIRTY(p, FALSE);
875 					}
876 
877 					if (p->vmp_dirty == FALSE) {
878 						goto take_page;
879 					}
880 				}
881 			}
882 			if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
883 				vm_page_activate(p);
884 
885 				counter_inc(&vm_statistics_reactivations);
886 				vm_object_page_grab_reactivations++;
887 			}
888 			vm_page_unlock_queues();
889 move_page_in_obj:
890 			vm_page_queue_remove(&object->memq, p, vmp_listq);
891 			vm_page_queue_enter(&object->memq, p, vmp_listq);
892 
893 			p_skipped++;
894 			continue;
895 		}
896 		vm_page_lockspin_queues();
897 take_page:
898 		vm_page_free_prepare_queues(p);
899 		vm_object_page_grab_returned++;
900 		vm_object_page_grab_skipped += p_skipped;
901 
902 		vm_page_unlock_queues();
903 
904 		vm_page_free_prepare_object(p, TRUE);
905 
906 		return p;
907 	}
908 	vm_object_page_grab_skipped += p_skipped;
909 	vm_object_page_grab_failed++;
910 
911 	return NULL;
912 }
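/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * vm_object_page_grab() consults the pmap reference/modify bits before
 * deciding whether a mapped page can be stolen without losing data.  The
 * simplified predicate below captures that check; the helper name is
 * hypothetical and locking is ignored.
 */
#if 0 /* illustrative example only */
static boolean_t
page_is_clean_in_pmap_example(vm_page_t p)
{
	int refmod;

	refmod = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));

	/* a page modified through some mapping is not safe to steal */
	return (refmod & VM_MEM_MODIFIED) == 0;
}
#endif /* illustrative example only */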
913 
914 
915 
916 #define EVICT_PREPARE_LIMIT     64
917 #define EVICT_AGE               10
918 
919 static  clock_sec_t     vm_object_cache_aging_ts = 0;
920 
921 static void
922 vm_object_cache_remove_locked(
923 	vm_object_t     object)
924 {
925 	assert(object->purgable == VM_PURGABLE_DENY);
926 
927 	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
928 	object->cached_list.next = NULL;
929 	object->cached_list.prev = NULL;
930 
931 	vm_object_cached_count--;
932 }
933 
934 void
935 vm_object_cache_remove(
936 	vm_object_t     object)
937 {
938 	vm_object_cache_lock_spin();
939 
940 	if (object->cached_list.next &&
941 	    object->cached_list.prev) {
942 		vm_object_cache_remove_locked(object);
943 	}
944 
945 	vm_object_cache_unlock();
946 }
947 
948 void
949 vm_object_cache_add(
950 	vm_object_t     object)
951 {
952 	clock_sec_t sec;
953 	clock_nsec_t nsec;
954 
955 	assert(object->purgable == VM_PURGABLE_DENY);
956 
957 	if (object->resident_page_count == 0) {
958 		return;
959 	}
960 	clock_get_system_nanotime(&sec, &nsec);
961 
962 	vm_object_cache_lock_spin();
963 
964 	if (object->cached_list.next == NULL &&
965 	    object->cached_list.prev == NULL) {
966 		queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
967 		object->vo_cache_ts = sec + EVICT_AGE;
968 		object->vo_cache_pages_to_scan = object->resident_page_count;
969 
970 		vm_object_cached_count++;
971 		vm_object_cache_adds++;
972 	}
973 	vm_object_cache_unlock();
974 }
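/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * vm_object_cache_add() and vm_object_cache_remove() above use the queue
 * linkage itself as the membership test: an object is on the cached list
 * iff both of its cached_list pointers are non-NULL.  The helper name below
 * is hypothetical.
 */
#if 0 /* illustrative example only */
static boolean_t
vm_object_is_cached_example(vm_object_t object)
{
	return object->cached_list.next != NULL &&
	       object->cached_list.prev != NULL;
}
#endif /* illustrative example only */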
975 
976 int
977 vm_object_cache_evict(
978 	int     num_to_evict,
979 	int     max_objects_to_examine)
980 {
981 	vm_object_t     object = VM_OBJECT_NULL;
982 	vm_object_t     next_obj = VM_OBJECT_NULL;
983 	vm_page_t       local_free_q = VM_PAGE_NULL;
984 	vm_page_t       p;
985 	vm_page_t       next_p;
986 	int             object_cnt = 0;
987 	vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
988 	int             ep_count;
989 	int             ep_limit;
990 	int             ep_index;
991 	int             ep_freed = 0;
992 	int             ep_moved = 0;
993 	uint32_t        ep_skipped = 0;
994 	clock_sec_t     sec;
995 	clock_nsec_t    nsec;
996 
997 	KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
998 	/*
999 	 * do a couple of quick checks to see if it's
1000 	 * worthwhile grabbing the lock
1001 	 */
1002 	if (queue_empty(&vm_object_cached_list)) {
1003 		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1004 		return 0;
1005 	}
1006 	clock_get_system_nanotime(&sec, &nsec);
1007 
1008 	/*
1009 	 * the object on the head of the queue has not
1010 	 * yet sufficiently aged
1011 	 */
1012 	if (sec < vm_object_cache_aging_ts) {
1013 		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1014 		return 0;
1015 	}
1016 	/*
1017 	 * don't need the queue lock to find
1018 	 * and lock an object on the cached list
1019 	 */
1020 	vm_page_unlock_queues();
1021 
1022 	vm_object_cache_lock_spin();
1023 
1024 	for (;;) {
1025 		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1026 
1027 		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1028 			object = next_obj;
1029 			next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1030 
1031 			assert(object->purgable == VM_PURGABLE_DENY);
1032 
1033 			if (sec < object->vo_cache_ts) {
1034 				KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1035 
1036 				vm_object_cache_aging_ts = object->vo_cache_ts;
1037 				object = VM_OBJECT_NULL;
1038 				break;
1039 			}
1040 			if (!vm_object_lock_try_scan(object)) {
1041 				/*
1042 				 * just skip over this guy for now... if we find
1043 				 * an object to steal pages from, we'll revisit in a bit...
1044 				 * hopefully, the lock will have cleared
1045 				 */
1046 				KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1047 
1048 				object = VM_OBJECT_NULL;
1049 				continue;
1050 			}
1051 			if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1052 				/*
1053 				 * this case really shouldn't happen, but it's not fatal
1054 				 * so deal with it... if we don't remove the object from
1055 				 * the list, we'll never move past it.
1056 				 */
1057 				KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1058 
1059 				vm_object_cache_remove_locked(object);
1060 				vm_object_unlock(object);
1061 				object = VM_OBJECT_NULL;
1062 				continue;
1063 			}
1064 			/*
1065 			 * we have a locked object with pages...
1066 			 * time to start harvesting
1067 			 */
1068 			break;
1069 		}
1070 		vm_object_cache_unlock();
1071 
1072 		if (object == VM_OBJECT_NULL) {
1073 			break;
1074 		}
1075 
1076 		/*
1077 		 * object is locked at this point and
1078 		 * has resident pages
1079 		 */
1080 		next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1081 
1082 		/*
1083 		 * break the page scan into 2 pieces to minimize the time spent
1084 		 * behind the page queue lock...
1085 		 * the list of pages on these unused objects is likely to be cold
1086 		 * w/r to the cpu cache which increases the time to scan the list
1087 		 * tenfold...  and we may have a 'run' of pages we can't utilize that
1088 		 * needs to be skipped over...
1089 		 */
1090 		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1091 			ep_limit = EVICT_PREPARE_LIMIT;
1092 		}
1093 		ep_count = 0;
1094 
1095 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1096 			p = next_p;
1097 			next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1098 
1099 			object->vo_cache_pages_to_scan--;
1100 
1101 			if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1102 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1103 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1104 
1105 				ep_skipped++;
1106 				continue;
1107 			}
1108 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1109 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1110 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1111 
1112 				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1113 			}
1114 			ep_array[ep_count++] = p;
1115 		}
1116 		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1117 
1118 		vm_page_lockspin_queues();
1119 
1120 		for (ep_index = 0; ep_index < ep_count; ep_index++) {
1121 			p = ep_array[ep_index];
1122 
1123 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1124 				p->vmp_reference = FALSE;
1125 				p->vmp_no_cache = FALSE;
1126 
1127 				/*
1128 				 * we've already filtered out pages that are in the laundry
1129 				 * so if we get here, this page can't be on the pageout queue
1130 				 */
1131 				vm_page_queues_remove(p, FALSE);
1132 				vm_page_enqueue_inactive(p, TRUE);
1133 
1134 				ep_moved++;
1135 			} else {
1136 #if CONFIG_PHANTOM_CACHE
1137 				vm_phantom_cache_add_ghost(p);
1138 #endif
1139 				vm_page_free_prepare_queues(p);
1140 
1141 				assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1142 				/*
1143 				 * Add this page to our list of reclaimed pages,
1144 				 * to be freed later.
1145 				 */
1146 				p->vmp_snext = local_free_q;
1147 				local_free_q = p;
1148 
1149 				ep_freed++;
1150 			}
1151 		}
1152 		vm_page_unlock_queues();
1153 
1154 		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1155 
1156 		if (local_free_q) {
1157 			vm_page_free_list(local_free_q, TRUE);
1158 			local_free_q = VM_PAGE_NULL;
1159 		}
1160 		if (object->vo_cache_pages_to_scan == 0) {
1161 			KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1162 
1163 			vm_object_cache_remove(object);
1164 
1165 			KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1166 		}
1167 		/*
1168 		 * done with this object
1169 		 */
1170 		vm_object_unlock(object);
1171 		object = VM_OBJECT_NULL;
1172 
1173 		/*
1174 		 * at this point, we are not holding any locks
1175 		 */
1176 		if ((ep_freed + ep_moved) >= num_to_evict) {
1177 			/*
1178 			 * we've reached our target for the
1179 			 * number of pages to evict
1180 			 */
1181 			break;
1182 		}
1183 		vm_object_cache_lock_spin();
1184 	}
1185 	/*
1186 	 * put the page queues lock back to the caller's
1187 	 * idea of it
1188 	 */
1189 	vm_page_lock_queues();
1190 
1191 	vm_object_cache_pages_freed += ep_freed;
1192 	vm_object_cache_pages_moved += ep_moved;
1193 	vm_object_cache_pages_skipped += ep_skipped;
1194 
1195 	KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
1196 	return ep_freed;
1197 }
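/*
 * Editor's note: an illustrative sketch, not part of the original source.
 * vm_object_cache_evict() expects the page queues lock to be held on entry
 * (it drops and retakes it internally) and returns the number of pages it
 * actually freed.  A hypothetical caller asking for up to 100 pages while
 * examining at most 10 cached objects:
 */
#if 0 /* illustrative example only */
static int
cache_evict_usage_example(void)
{
	int freed;

	vm_page_lock_queues();
	freed = vm_object_cache_evict(100, 10);
	vm_page_unlock_queues();

	return freed;
}
#endif /* illustrative example only */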
1198 
1199 /*
1200  *	Routine:	vm_object_terminate
1201  *	Purpose:
1202  *		Free all resources associated with a vm_object.
1203  *	In/out conditions:
1204  *		Upon entry, the object must be locked,
1205  *		and the object must have exactly one reference.
1206  *
1207  *		The shadow object reference is left alone.
1208  *
1209  *		The object must be unlocked if it is found that pages
1210  *		must be flushed to a backing object.  If someone
1211  *		manages to map the object while it is being flushed
1212  *		the object is returned unlocked and unchanged.  Otherwise,
1213  *		upon exit, the cache will be unlocked, and the
1214  *		object will cease to exist.
1215  */
1216 static kern_return_t
1217 vm_object_terminate(
1218 	vm_object_t     object)
1219 {
1220 	vm_object_t     shadow_object;
1221 
1222 	vm_object_lock_assert_exclusive(object);
1223 
1224 	if (!object->pageout && (!object->internal && object->can_persist) &&
1225 	    (object->pager != NULL || object->shadow_severed)) {
1226 		/*
1227 		 * Clear pager_trusted bit so that the pages get yanked
1228 		 * out of the object instead of cleaned in place.  This
1229 		 * prevents a deadlock in XMM and makes more sense anyway.
1230 		 */
1231 		object->pager_trusted = FALSE;
1232 
1233 		vm_object_reap_pages(object, REAP_TERMINATE);
1234 	}
1235 	/*
1236 	 *	Make sure the object isn't already being terminated
1237 	 */
1238 	if (object->terminating) {
1239 		vm_object_lock_assert_exclusive(object);
1240 		object->ref_count--;
1241 		assert(object->ref_count > 0);
1242 		vm_object_unlock(object);
1243 		return KERN_FAILURE;
1244 	}
1245 
1246 	/*
1247 	 * Did somebody get a reference to the object while we were
1248 	 * cleaning it?
1249 	 */
1250 	if (object->ref_count != 1) {
1251 		vm_object_lock_assert_exclusive(object);
1252 		object->ref_count--;
1253 		assert(object->ref_count > 0);
1254 		vm_object_unlock(object);
1255 		return KERN_FAILURE;
1256 	}
1257 
1258 	/*
1259 	 *	Make sure no one can look us up now.
1260 	 */
1261 
1262 	object->terminating = TRUE;
1263 	object->alive = FALSE;
1264 
1265 	if (!object->internal &&
1266 	    object->cached_list.next &&
1267 	    object->cached_list.prev) {
1268 		vm_object_cache_remove(object);
1269 	}
1270 
1271 	/*
1272 	 *	Detach the object from its shadow if we are the shadow's
1273 	 *	copy. The reference we hold on the shadow must be dropped
1274 	 *	by our caller.
1275 	 */
1276 	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1277 	    !(object->pageout)) {
1278 		vm_object_lock(shadow_object);
1279 		if (shadow_object->copy == object) {
1280 			shadow_object->copy = VM_OBJECT_NULL;
1281 		}
1282 		vm_object_unlock(shadow_object);
1283 	}
1284 
1285 	if (object->paging_in_progress != 0 ||
1286 	    object->activity_in_progress != 0) {
1287 		/*
1288 		 * There are still some paging_in_progress references
1289 		 * on this object, meaning that there are some paging
1290 		 * or other I/O operations in progress for this VM object.
1291 		 * Such operations take some paging_in_progress references
1292 		 * up front to ensure that the object doesn't go away, but
1293 		 * they may also need to acquire a reference on the VM object,
1294 		 * to map it in kernel space, for example.  That means that
1295 		 * they may end up releasing the last reference on the VM
1296 		 * object, triggering its termination, while still holding
1297 		 * paging_in_progress references.  Waiting for these
1298 		 * pending paging_in_progress references to go away here would
1299 		 * deadlock.
1300 		 *
1301 		 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1302 		 * complete the VM object termination if it still holds
1303 		 * paging_in_progress references at this point.
1304 		 *
1305 		 * No new paging_in_progress should appear now that the
1306 		 * VM object is "terminating" and not "alive".
1307 		 */
1308 		vm_object_reap_async(object);
1309 		vm_object_unlock(object);
1310 		/*
1311 		 * Return KERN_FAILURE to let the caller know that we
1312 		 * haven't completed the termination and it can't drop this
1313 		 * object's reference on its shadow object yet.
1314 		 * The reaper thread will take care of that once it has
1315 		 * completed this object's termination.
1316 		 */
1317 		return KERN_FAILURE;
1318 	}
1319 	/*
1320 	 * complete the VM object termination
1321 	 */
1322 	vm_object_reap(object);
1323 	object = VM_OBJECT_NULL;
1324 
1325 	/*
1326 	 * the object lock was released by vm_object_reap()
1327 	 *
1328 	 * KERN_SUCCESS means that this object has been terminated
1329 	 * and no longer needs its shadow object but still holds a
1330 	 * reference on it.
1331 	 * The caller is responsible for dropping that reference.
1332 	 * We can't call vm_object_deallocate() here because that
1333 	 * would create a recursion.
1334 	 */
1335 	return KERN_SUCCESS;
1336 }
1337 
1338 
1339 /*
1340  * vm_object_reap():
1341  *
1342  * Complete the termination of a VM object after it's been marked
1343  * as "terminating" and "!alive" by vm_object_terminate().
1344  *
1345  * The VM object must be locked by caller.
1346  * The lock will be released on return and the VM object is no longer valid.
1347  */
1348 
1349 void
1350 vm_object_reap(
1351 	vm_object_t object)
1352 {
1353 	memory_object_t         pager;
1354 
1355 	vm_object_lock_assert_exclusive(object);
1356 	assert(object->paging_in_progress == 0);
1357 	assert(object->activity_in_progress == 0);
1358 
1359 	vm_object_reap_count++;
1360 
1361 	/*
1362 	 * Disown this purgeable object to cleanup its owner's purgeable
1363 	 * ledgers.  We need to do this before disconnecting the object
1364 	 * from its pager, to properly account for compressed pages.
1365 	 */
1366 	if (object->internal &&
1367 	    (object->purgable != VM_PURGABLE_DENY ||
1368 	    object->vo_ledger_tag)) {
1369 		int ledger_flags;
1370 		kern_return_t kr;
1371 
1372 		ledger_flags = 0;
1373 		if (object->vo_no_footprint) {
1374 			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
1375 		}
1376 		assert(!object->alive);
1377 		assert(object->terminating);
1378 		kr = vm_object_ownership_change(object,
1379 		    object->vo_ledger_tag,   /* unchanged */
1380 		    NULL,                    /* no owner */
1381 		    ledger_flags,
1382 		    FALSE);                  /* task_objq not locked */
1383 		assert(kr == KERN_SUCCESS);
1384 		assert(object->vo_owner == NULL);
1385 	}
1386 
1387 #if DEVELOPMENT || DEBUG
1388 	if (object->object_is_shared_cache &&
1389 	    object->pager != NULL &&
1390 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1391 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1392 	}
1393 #endif /* DEVELOPMENT || DEBUG */
1394 
1395 	pager = object->pager;
1396 	object->pager = MEMORY_OBJECT_NULL;
1397 
1398 	if (pager != MEMORY_OBJECT_NULL) {
1399 		memory_object_control_disable(&object->pager_control);
1400 	}
1401 
1402 	object->ref_count--;
1403 	assert(object->ref_count == 0);
1404 
1405 	/*
1406 	 * remove from purgeable queue if it's on
1407 	 */
1408 	if (object->internal) {
1409 		assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1410 
1411 		VM_OBJECT_UNWIRED(object);
1412 
1413 		if (object->purgable == VM_PURGABLE_DENY) {
1414 			/* not purgeable: nothing to do */
1415 		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
1416 			purgeable_q_t queue;
1417 
1418 			queue = vm_purgeable_object_remove(object);
1419 			assert(queue);
1420 
1421 			if (object->purgeable_when_ripe) {
1422 				/*
1423 				 * Must take page lock for this -
1424 				 * using it to protect token queue
1425 				 */
1426 				vm_page_lock_queues();
1427 				vm_purgeable_token_delete_first(queue);
1428 
1429 				assert(queue->debug_count_objects >= 0);
1430 				vm_page_unlock_queues();
1431 			}
1432 
1433 			/*
1434 			 * Update "vm_page_purgeable_count" in bulk and mark
1435 			 * object as VM_PURGABLE_EMPTY to avoid updating
1436 			 * "vm_page_purgeable_count" again in vm_page_remove()
1437 			 * when reaping the pages.
1438 			 */
1439 			unsigned int delta;
1440 			assert(object->resident_page_count >=
1441 			    object->wired_page_count);
1442 			delta = (object->resident_page_count -
1443 			    object->wired_page_count);
1444 			if (delta != 0) {
1445 				assert(vm_page_purgeable_count >= delta);
1446 				OSAddAtomic(-delta,
1447 				    (SInt32 *)&vm_page_purgeable_count);
1448 			}
1449 			if (object->wired_page_count != 0) {
1450 				assert(vm_page_purgeable_wired_count >=
1451 				    object->wired_page_count);
1452 				OSAddAtomic(-object->wired_page_count,
1453 				    (SInt32 *)&vm_page_purgeable_wired_count);
1454 			}
1455 			object->purgable = VM_PURGABLE_EMPTY;
1456 		} else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1457 		    object->purgable == VM_PURGABLE_EMPTY) {
1458 			/* remove from nonvolatile queue */
1459 			vm_purgeable_nonvolatile_dequeue(object);
1460 		} else {
1461 			panic("object %p in unexpected purgeable state 0x%x",
1462 			    object, object->purgable);
1463 		}
1464 		if (object->transposed &&
1465 		    object->cached_list.next != NULL &&
1466 		    object->cached_list.prev == NULL) {
1467 			/*
1468 			 * object->cached_list.next "points" to the
1469 			 * object that was transposed with this object.
1470 			 */
1471 		} else {
1472 			assert(object->cached_list.next == NULL);
1473 		}
1474 		assert(object->cached_list.prev == NULL);
1475 	}
1476 
1477 	if (object->pageout) {
1478 		/*
1479 		 * free all remaining pages tabled on
1480 		 * this object
1481 		 * clean up its shadow
1482 		 */
1483 		assert(object->shadow != VM_OBJECT_NULL);
1484 
1485 		vm_pageout_object_terminate(object);
1486 	} else if (object->resident_page_count) {
1487 		/*
1488 		 * free all remaining pages tabled on
1489 		 * this object
1490 		 */
1491 		vm_object_reap_pages(object, REAP_REAP);
1492 	}
1493 	assert(vm_page_queue_empty(&object->memq));
1494 	assert(object->paging_in_progress == 0);
1495 	assert(object->activity_in_progress == 0);
1496 	assert(object->ref_count == 0);
1497 
1498 	/*
1499 	 * If the pager has not already been released by
1500 	 * vm_object_destroy, we need to terminate it and
1501 	 * release our reference to it here.
1502 	 */
1503 	if (pager != MEMORY_OBJECT_NULL) {
1504 		vm_object_unlock(object);
1505 		vm_object_release_pager(pager);
1506 		vm_object_lock(object);
1507 	}
1508 
1509 	/* kick off anyone waiting on terminating */
1510 	object->terminating = FALSE;
1511 	vm_object_paging_begin(object);
1512 	vm_object_paging_end(object);
1513 	vm_object_unlock(object);
1514 
1515 	object->shadow = VM_OBJECT_NULL;
1516 
1517 #if VM_OBJECT_TRACKING
1518 	if (vm_object_tracking_btlog) {
1519 		btlog_erase(vm_object_tracking_btlog, object);
1520 	}
1521 #endif /* VM_OBJECT_TRACKING */
1522 
1523 	vm_object_lock_destroy(object);
1524 	/*
1525 	 *	Free the space for the object.
1526 	 */
1527 	zfree(vm_object_zone, object);
1528 	object = VM_OBJECT_NULL;
1529 }
1530 
1531 
1532 unsigned int vm_max_batch = 256;
1533 
1534 #define V_O_R_MAX_BATCH 128
1535 
1536 #define BATCH_LIMIT(max)        (vm_max_batch >= max ? max : vm_max_batch)
1537 
1538 
1539 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)              \
1540 	MACRO_BEGIN                                                     \
1541 	if (_local_free_q) {                                            \
1542 	        if (do_disconnect) {                                    \
1543 	                vm_page_t m;                                    \
1544 	                for (m = _local_free_q;                         \
1545 	                     m != VM_PAGE_NULL;                         \
1546 	                     m = m->vmp_snext) {                        \
1547 	                        if (m->vmp_pmapped) {                   \
1548 	                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
1549 	                        }                                       \
1550 	                }                                               \
1551 	        }                                                       \
1552 	        vm_page_free_list(_local_free_q, TRUE);                 \
1553 	        _local_free_q = VM_PAGE_NULL;                           \
1554 	}                                                               \
1555 	MACRO_END
1556 
1557 
1558 void
1559 vm_object_reap_pages(
1560 	vm_object_t     object,
1561 	int             reap_type)
1562 {
1563 	vm_page_t       p;
1564 	vm_page_t       next;
1565 	vm_page_t       local_free_q = VM_PAGE_NULL;
1566 	int             loop_count;
1567 	boolean_t       disconnect_on_release;
1568 	pmap_flush_context      pmap_flush_context_storage;
1569 
1570 	if (reap_type == REAP_DATA_FLUSH) {
1571 		/*
1572 		 * We need to disconnect pages from all pmaps before
1573 		 * releasing them to the free list
1574 		 */
1575 		disconnect_on_release = TRUE;
1576 	} else {
1577 		/*
1578 		 * Either the caller has already disconnected the pages
1579 		 * from all pmaps, or we disconnect them here as we add
1580 		 * them to our local list of pages to be released.
1581 		 * No need to re-disconnect them when we release the pages
1582 		 * to the free list.
1583 		 */
1584 		disconnect_on_release = FALSE;
1585 	}
1586 
1587 restart_after_sleep:
1588 	if (vm_page_queue_empty(&object->memq)) {
1589 		return;
1590 	}
1591 	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1592 
1593 	if (reap_type == REAP_PURGEABLE) {
1594 		pmap_flush_context_init(&pmap_flush_context_storage);
1595 	}
1596 
1597 	vm_page_lock_queues();
1598 
1599 	next = (vm_page_t)vm_page_queue_first(&object->memq);
1600 
1601 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1602 		p = next;
1603 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1604 
1605 		if (--loop_count == 0) {
1606 			vm_page_unlock_queues();
1607 
1608 			if (local_free_q) {
1609 				if (reap_type == REAP_PURGEABLE) {
1610 					pmap_flush(&pmap_flush_context_storage);
1611 					pmap_flush_context_init(&pmap_flush_context_storage);
1612 				}
1613 				/*
1614 				 * Free the pages we reclaimed so far
1615 				 * and take a little break to avoid
1616 				 * hogging the page queue lock too long
1617 				 */
1618 				VM_OBJ_REAP_FREELIST(local_free_q,
1619 				    disconnect_on_release);
1620 			} else {
1621 				mutex_pause(0);
1622 			}
1623 
1624 			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1625 
1626 			vm_page_lock_queues();
1627 		}
1628 		if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1629 			if (p->vmp_busy || p->vmp_cleaning) {
1630 				vm_page_unlock_queues();
1631 				/*
1632 				 * free the pages reclaimed so far
1633 				 */
1634 				VM_OBJ_REAP_FREELIST(local_free_q,
1635 				    disconnect_on_release);
1636 
1637 				PAGE_SLEEP(object, p, THREAD_UNINT);
1638 
1639 				goto restart_after_sleep;
1640 			}
1641 			if (p->vmp_laundry) {
1642 				vm_pageout_steal_laundry(p, TRUE);
1643 			}
1644 		}
1645 		switch (reap_type) {
1646 		case REAP_DATA_FLUSH:
1647 			if (VM_PAGE_WIRED(p)) {
1648 				/*
1649 				 * this is an odd case... perhaps we should
1650 				 * zero-fill this page since we're conceptually
1651 				 * tossing its data at this point, but leaving
1652 				 * it on the object to honor the 'wire' contract
1653 				 */
1654 				continue;
1655 			}
1656 			break;
1657 
1658 		case REAP_PURGEABLE:
1659 			if (VM_PAGE_WIRED(p)) {
1660 				/*
1661 				 * can't purge a wired page
1662 				 */
1663 				vm_page_purged_wired++;
1664 				continue;
1665 			}
1666 			if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1667 				vm_pageout_steal_laundry(p, TRUE);
1668 			}
1669 
1670 			if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1671 				/*
1672 				 * page is being acted upon,
1673 				 * so don't mess with it
1674 				 */
1675 				vm_page_purged_others++;
1676 				continue;
1677 			}
1678 			if (p->vmp_busy) {
1679 				/*
1680 				 * We can't reclaim a busy page but we can
1681 				 * make it more likely to be paged (it's not wired) to make
1682 				 * sure that it gets considered by
1683 				 * vm_pageout_scan() later.
1684 				 */
1685 				if (VM_PAGE_PAGEABLE(p)) {
1686 					vm_page_deactivate(p);
1687 				}
1688 				vm_page_purged_busy++;
1689 				continue;
1690 			}
1691 
1692 			assert(VM_PAGE_OBJECT(p) != kernel_object);
1693 
1694 			/*
1695 			 * we can discard this page...
1696 			 */
1697 			if (p->vmp_pmapped == TRUE) {
1698 				/*
1699 				 * unmap the page
1700 				 */
1701 				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1702 			}
1703 			vm_page_purged_count++;
1704 
1705 			break;
1706 
1707 		case REAP_TERMINATE:
1708 			if (p->vmp_absent || p->vmp_private) {
1709 				/*
1710 				 *	For private pages, VM_PAGE_FREE just
1711 				 *	leaves the page structure around for
1712 				 *	its owner to clean up.  For absent
1713 				 *	pages, the structure is returned to
1714 				 *	the appropriate pool.
1715 				 */
1716 				break;
1717 			}
1718 			if (p->vmp_fictitious) {
1719 				assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
1720 				break;
1721 			}
1722 			if (!p->vmp_dirty && p->vmp_wpmapped) {
1723 				p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1724 			}
1725 
1726 			if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) {
1727 				assert(!object->internal);
1728 
1729 				p->vmp_free_when_done = TRUE;
1730 
1731 				if (!p->vmp_laundry) {
1732 					vm_page_queues_remove(p, TRUE);
1733 					/*
1734 					 * flush page... page will be freed
1735 					 * upon completion of I/O
1736 					 */
1737 					vm_pageout_cluster(p);
1738 				}
1739 				vm_page_unlock_queues();
1740 				/*
1741 				 * free the pages reclaimed so far
1742 				 */
1743 				VM_OBJ_REAP_FREELIST(local_free_q,
1744 				    disconnect_on_release);
1745 
1746 				vm_object_paging_wait(object, THREAD_UNINT);
1747 
1748 				goto restart_after_sleep;
1749 			}
1750 			break;
1751 
1752 		case REAP_REAP:
1753 			break;
1754 		}
1755 		vm_page_free_prepare_queues(p);
1756 		assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1757 		/*
1758 		 * Add this page to our list of reclaimed pages,
1759 		 * to be freed later.
1760 		 */
1761 		p->vmp_snext = local_free_q;
1762 		local_free_q = p;
1763 	}
1764 	vm_page_unlock_queues();
1765 
1766 	/*
1767 	 * Free the remaining reclaimed pages
1768 	 */
1769 	if (reap_type == REAP_PURGEABLE) {
1770 		pmap_flush(&pmap_flush_context_storage);
1771 	}
1772 
1773 	VM_OBJ_REAP_FREELIST(local_free_q,
1774 	    disconnect_on_release);
1775 }
1776 
1777 
1778 void
1779 vm_object_reap_async(
1780 	vm_object_t     object)
1781 {
1782 	vm_object_lock_assert_exclusive(object);
1783 
1784 	vm_object_reaper_lock_spin();
1785 
1786 	vm_object_reap_count_async++;
1787 
1788 	/* enqueue the VM object... */
1789 	queue_enter(&vm_object_reaper_queue, object,
1790 	    vm_object_t, cached_list);
1791 
1792 	vm_object_reaper_unlock();
1793 
1794 	/* ... and wake up the reaper thread */
1795 	thread_wakeup((event_t) &vm_object_reaper_queue);
1796 }
1797 
1798 
1799 void
1800 vm_object_reaper_thread(void)
1801 {
1802 	vm_object_t     object, shadow_object;
1803 
1804 	vm_object_reaper_lock_spin();
1805 
1806 	while (!queue_empty(&vm_object_reaper_queue)) {
1807 		queue_remove_first(&vm_object_reaper_queue,
1808 		    object,
1809 		    vm_object_t,
1810 		    cached_list);
1811 
1812 		vm_object_reaper_unlock();
1813 		vm_object_lock(object);
1814 
1815 		assert(object->terminating);
1816 		assert(!object->alive);
1817 
1818 		/*
1819 		 * The pageout daemon might be playing with our pages.
1820 		 * Now that the object is dead, it won't touch any more
1821 		 * pages, but some pages might already be on their way out.
1822 		 * Hence, we wait until the active paging activities have
1823 		 * ceased before we break the association with the pager
1824 		 * itself.
1825 		 */
1826 		while (object->paging_in_progress != 0 ||
1827 		    object->activity_in_progress != 0) {
1828 			vm_object_wait(object,
1829 			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1830 			    THREAD_UNINT);
1831 			vm_object_lock(object);
1832 		}
1833 
1834 		shadow_object =
1835 		    object->pageout ? VM_OBJECT_NULL : object->shadow;
1836 
1837 		vm_object_reap(object);
1838 		/* cache is unlocked and object is no longer valid */
1839 		object = VM_OBJECT_NULL;
1840 
1841 		if (shadow_object != VM_OBJECT_NULL) {
1842 			/*
1843 			 * Drop the reference "object" was holding on
1844 			 * its shadow object.
1845 			 */
1846 			vm_object_deallocate(shadow_object);
1847 			shadow_object = VM_OBJECT_NULL;
1848 		}
1849 		vm_object_reaper_lock_spin();
1850 	}
1851 
1852 	/* wait for more work... */
1853 	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1854 
1855 	vm_object_reaper_unlock();
1856 
1857 	thread_block((thread_continue_t) vm_object_reaper_thread);
1858 	/*NOTREACHED*/
1859 }
1860 
1861 /*
1862  *	Routine:	vm_object_release_pager
1863  *	Purpose:	Terminate the pager and, upon completion,
1864  *			release our last reference to it.
1865  */
1866 static void
1867 vm_object_release_pager(
1868 	memory_object_t pager)
1869 {
1870 	/*
1871 	 *	Terminate the pager.
1872 	 */
1873 
1874 	(void) memory_object_terminate(pager);
1875 
1876 	/*
1877 	 *	Release reference to pager.
1878 	 */
1879 	memory_object_deallocate(pager);
1880 }
1881 
1882 /*
1883  *	Routine:	vm_object_destroy
1884  *	Purpose:
1885  *		Shut down a VM object, despite the
1886  *		presence of address map (or other) references
1887  *		to the vm_object.
1888  */
1889 #if MACH_ASSERT
1890 extern vm_object_t fbdp_object;
1891 extern memory_object_t fbdp_moc;
1892 struct vnode;
1893 extern struct vnode *fbdp_vp;
1894 extern uint32_t system_inshutdown;
1895 int fbdp_no_panic = 0;
1896 #endif /* MACH_ASSERT */
1897 kern_return_t
1898 vm_object_destroy(
1899 	vm_object_t             object,
1900 	__unused kern_return_t          reason)
1901 {
1902 	memory_object_t         old_pager;
1903 
1904 	if (object == VM_OBJECT_NULL) {
1905 		return KERN_SUCCESS;
1906 	}
1907 
1908 	/*
1909 	 *	Remove the pager association immediately.
1910 	 *
1911 	 *	This will prevent the memory manager from further
1912 	 *	meddling.  [If it wanted to flush data or make
1913 	 *	other changes, it should have done so before performing
1914 	 *	the destroy call.]
1915 	 */
1916 
1917 #if MACH_ASSERT
1918 	if (object == fbdp_object) {
1919 		if (object->ref_count > 1 && !system_inshutdown) {
1920 			PE_parse_boot_argn("fbdp_no_panic2", &fbdp_no_panic, sizeof(fbdp_no_panic));
1921 			if (!fbdp_no_panic) {
1922 				panic("FBDP %s:%d object %p refs %d moc %p vp %p\n", __FUNCTION__, __LINE__, fbdp_object, fbdp_object->ref_count, fbdp_moc, fbdp_vp);
1923 			}
1924 		}
1925 		fbdp_object = NULL;
1926 		fbdp_moc = NULL;
1927 		fbdp_vp = NULL;
1928 	}
1929 #endif /* MACH_ASSERT */
1930 
1931 	vm_object_lock(object);
1932 	object->can_persist = FALSE;
1933 	object->named = FALSE;
1934 #if 00
1935 	object->alive = FALSE;
1936 #endif /* 00 */
1937 
1938 #if DEVELOPMENT || DEBUG
1939 	if (object->object_is_shared_cache &&
1940 	    object->pager != NULL &&
1941 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1942 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1943 	}
1944 #endif /* DEVELOPMENT || DEBUG */
1945 
1946 	old_pager = object->pager;
1947 	object->pager = MEMORY_OBJECT_NULL;
1948 	if (old_pager != MEMORY_OBJECT_NULL) {
1949 		memory_object_control_disable(&object->pager_control);
1950 	}
1951 
1952 	/*
1953 	 * Wait for the existing paging activity (that got
1954 	 * through before we nulled out the pager) to subside.
1955 	 */
1956 
1957 	vm_object_paging_wait(object, THREAD_UNINT);
1958 	vm_object_unlock(object);
1959 
1960 	/*
1961 	 *	Terminate the object now.
1962 	 */
1963 	if (old_pager != MEMORY_OBJECT_NULL) {
1964 		vm_object_release_pager(old_pager);
1965 
1966 		/*
1967 		 * JMM - Release the caller's reference.  This assumes the
1968 		 * caller had a reference to release, which is a big (but
1969 		 * currently valid) assumption if this is driven from the
1970 		 * vnode pager (it is holding a named reference when making
1971 		 * this call)..
1972 		 */
1973 		vm_object_deallocate(object);
1974 	}
1975 	return KERN_SUCCESS;
1976 }
1977 
1978 /*
1979  * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
1980  * exist because of the need to handle shadow chains.  When deactivating pages, we only
1981  * want to deactive the ones at the top most level in the object chain.  In order to do
1982  * this efficiently, the specified address range is divided up into "chunks" and we use
1983  * a bit map to keep track of which pages have already been processed as we descend down
1984  * the shadow chain.  These chunk macros hide the details of the bit map implementation
1985  * as much as we can.
1986  *
1987  * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
1988  * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
1989  * order bit represents page 0 in the current range and highest order bit represents
1990  * page 63.
1991  *
1992  * For further convenience, we also use negative logic for the page state in the bit map.
1993  * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
1994  * been processed.  This way we can simply test the 64-bit long word to see if it's zero
1995  * to easily tell if the whole range has been processed.  Therefore, the bit map starts
1996  * out with all the bits set.  The macros below hide all these details from the caller.
1997  */
1998 
1999 #define PAGES_IN_A_CHUNK        64      /* The number of pages in the chunk must */
2000                                         /* be the same as the number of bits in  */
2001                                         /* the chunk_state_t type. We use 64     */
2002                                         /* just for convenience.		 */
2003 
2004 #define CHUNK_SIZE      (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */
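/* e.g. 64 pages * 4 KB = 256 KB per chunk, or 1 MB with 16 KB pages */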
2005 
2006 typedef uint64_t        chunk_state_t;
2007 
2008 /*
2009  * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2010  * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
2011  * then we mark pages beyond the len as having been "processed" so that we don't waste time
2012  * looking at pages in that range.  This can save us from unnecessarily chasing down the
2013  * shadow chain.
2014  */
2015 
2016 #define CHUNK_INIT(c, len)                                              \
2017 	MACRO_BEGIN                                                     \
2018 	uint64_t p;                                                     \
2019                                                                         \
2020 	(c) = 0xffffffffffffffffLL;                                     \
2021                                                                         \
2022 	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
2023 	        MARK_PAGE_HANDLED(c, p);                                \
2024 	MACRO_END
2025 
2026 
2027 /*
2028  * Return true if all pages in the chunk have not yet been processed.
2029  */
2030 
2031 #define CHUNK_NOT_COMPLETE(c)   ((c) != 0)
2032 
2033 /*
2034  * Return true if the page at offset 'p' in the bit map has already been handled
2035  * while processing a higher level object in the shadow chain.
2036  */
2037 
2038 #define PAGE_ALREADY_HANDLED(c, p)      (((c) & (1ULL << (p))) == 0)
2039 
2040 /*
2041  * Mark the page at offset 'p' in the bit map as having been processed.
2042  */
2043 
2044 #define MARK_PAGE_HANDLED(c, p) \
2045 MACRO_BEGIN \
2046 	(c) = (c) & ~(1ULL << (p)); \
2047 MACRO_END
2048 
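/*
 * A minimal sketch (kept out of the build) of how these macros compose,
 * assuming a range of 3 pages: CHUNK_INIT leaves only bits 0..2 set, each
 * level of the shadow chain clears the bits for the pages it handles, and
 * the chunk is complete once the whole word reaches zero.
 */
#if 0
static void
chunk_state_example(void)
{
	chunk_state_t   chunk;

	CHUNK_INIT(chunk, 3 * PAGE_SIZE_64);    /* bits 0..2 set, 3..63 pre-"handled" */
	assert(CHUNK_NOT_COMPLETE(chunk));
	assert(!PAGE_ALREADY_HANDLED(chunk, 0));

	MARK_PAGE_HANDLED(chunk, 0);            /* page 0 done at this level */
	MARK_PAGE_HANDLED(chunk, 1);
	MARK_PAGE_HANDLED(chunk, 2);

	assert(PAGE_ALREADY_HANDLED(chunk, 1));
	assert(!CHUNK_NOT_COMPLETE(chunk));     /* whole range processed */
}
#endif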
2049 
2050 /*
2051  * Return true if the page at the given offset has been paged out.  Object is
2052  * locked upon entry and returned locked.
2053  */
2054 
2055 static boolean_t
2056 page_is_paged_out(
2057 	vm_object_t             object,
2058 	vm_object_offset_t      offset)
2059 {
2060 	if (object->internal &&
2061 	    object->alive &&
2062 	    !object->terminating &&
2063 	    object->pager_ready) {
2064 		if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2065 		    == VM_EXTERNAL_STATE_EXISTS) {
2066 			return TRUE;
2067 		}
2068 	}
2069 	return FALSE;
2070 }
2071 
2072 
2073 
2074 /*
2075  * madvise_free_debug
2076  *
2077  * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2078  * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2079  * simulate the loss of the page's contents as if the page had been
2080  * reclaimed and then re-faulted.
2081  */
2082 #if DEVELOPMENT || DEBUG
2083 int madvise_free_debug = 1;
2084 #else /* DEVELOPMENT || DEBUG */
2085 int madvise_free_debug = 0;
2086 #endif /* DEVELOPMENT || DEBUG */
2087 
2088 __options_decl(deactivate_flags_t, uint32_t, {
2089 	DEACTIVATE_KILL         = 0x1,
2090 	DEACTIVATE_REUSABLE     = 0x2,
2091 	DEACTIVATE_ALL_REUSABLE = 0x4,
2092 	DEACTIVATE_CLEAR_REFMOD = 0x8
2093 });
2094 
2095 /*
2096  * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
2097  * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
2098  * a size that is less than or equal to the CHUNK_SIZE.
2099  */
2100 
2101 static void
2102 deactivate_pages_in_object(
2103 	vm_object_t             object,
2104 	vm_object_offset_t      offset,
2105 	vm_object_size_t        size,
2106 	deactivate_flags_t      flags,
2107 	chunk_state_t           *chunk_state,
2108 	pmap_flush_context      *pfc,
2109 	struct pmap             *pmap,
2110 	vm_map_offset_t         pmap_offset)
2111 {
2112 	vm_page_t       m;
2113 	int             p;
2114 	struct  vm_page_delayed_work    dw_array;
2115 	struct  vm_page_delayed_work    *dwp, *dwp_start;
2116 	bool            dwp_finish_ctx = TRUE;
2117 	int             dw_count;
2118 	int             dw_limit;
2119 	unsigned int    reusable = 0;
2120 
2121 	/*
2122 	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
2123 	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
2124 	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
2125 	 * all the pages in the chunk.
2126 	 */
2127 
2128 	dwp_start = dwp = NULL;
2129 	dw_count = 0;
2130 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2131 	dwp_start = vm_page_delayed_work_get_ctx();
2132 	if (dwp_start == NULL) {
2133 		dwp_start = &dw_array;
2134 		dw_limit = 1;
2135 		dwp_finish_ctx = FALSE;
2136 	}
2137 
2138 	dwp = dwp_start;
2139 
2140 	for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2141 		/*
2142 		 * If this offset has already been found and handled in a higher level object, then don't
2143 		 * do anything with it in the current shadow object.
2144 		 */
2145 
2146 		if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2147 			continue;
2148 		}
2149 
2150 		/*
2151 		 * See if the page at this offset is around.  First check to see if the page is resident,
2152 		 * then if not, check the existence map or with the pager.
2153 		 */
2154 
2155 		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2156 			/*
2157 			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
2158 			 * so that we won't bother looking for a page at this offset again if there are more
2159 			 * shadow objects.  Then deactivate the page.
2160 			 */
2161 
2162 			MARK_PAGE_HANDLED(*chunk_state, p);
2163 
2164 			if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2165 			    (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2166 				int     clear_refmod_mask;
2167 				int     pmap_options;
2168 				dwp->dw_mask = 0;
2169 
2170 				pmap_options = 0;
2171 				clear_refmod_mask = VM_MEM_REFERENCED;
2172 				dwp->dw_mask |= DW_clear_reference;
2173 
2174 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2175 					if (madvise_free_debug) {
2176 						/*
2177 						 * zero-fill the page now
2178 						 * to simulate it being
2179 						 * reclaimed and re-faulted.
2180 						 */
2181 						pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2182 					}
2183 					m->vmp_precious = FALSE;
2184 					m->vmp_dirty = FALSE;
2185 
2186 					clear_refmod_mask |= VM_MEM_MODIFIED;
2187 					if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2188 						/*
2189 						 * This page is now clean and
2190 						 * reclaimable.  Move it out
2191 						 * of the throttled queue, so
2192 						 * that vm_pageout_scan() can
2193 						 * find it.
2194 						 */
2195 						dwp->dw_mask |= DW_move_page;
2196 					}
2197 
2198 					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2199 
2200 					if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2201 						assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2202 						assert(!object->all_reusable);
2203 						m->vmp_reusable = TRUE;
2204 						object->reusable_page_count++;
2205 						assert(object->resident_page_count >= object->reusable_page_count);
2206 						reusable++;
2207 						/*
2208 						 * Tell pmap this page is now
2209 						 * "reusable" (to update pmap
2210 						 * stats for all mappings).
2211 						 */
2212 						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2213 					}
2214 				}
2215 				if (flags & DEACTIVATE_CLEAR_REFMOD) {
2216 					/*
2217 					 * The caller didn't clear the refmod bits in advance.
2218 					 * Clear them for this page now.
2219 					 */
2220 					pmap_options |= PMAP_OPTIONS_NOFLUSH;
2221 					pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2222 					    clear_refmod_mask,
2223 					    pmap_options,
2224 					    (void *)pfc);
2225 				}
2226 
2227 				if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2228 				    !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2229 					dwp->dw_mask |= DW_move_page;
2230 				}
2231 
2232 				if (dwp->dw_mask) {
2233 					VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2234 					    dw_count);
2235 				}
2236 
2237 				if (dw_count >= dw_limit) {
2238 					if (reusable) {
2239 						OSAddAtomic(reusable,
2240 						    &vm_page_stats_reusable.reusable_count);
2241 						vm_page_stats_reusable.reusable += reusable;
2242 						reusable = 0;
2243 					}
2244 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2245 
2246 					dwp = dwp_start;
2247 					dw_count = 0;
2248 				}
2249 			}
2250 		} else {
2251 			/*
2252 			 * The page at this offset isn't memory resident, check to see if it's
2253 			 * been paged out.  If so, mark it as handled so we don't bother looking
2254 			 * for it in the shadow chain.
2255 			 */
2256 
2257 			if (page_is_paged_out(object, offset)) {
2258 				MARK_PAGE_HANDLED(*chunk_state, p);
2259 
2260 				/*
2261 				 * If we're killing a non-resident page, then clear the page in the existence
2262 				 * map so we don't bother paging it back in if it's touched again in the future.
2263 				 */
2264 
2265 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2266 					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2267 
2268 					if (pmap != PMAP_NULL) {
2269 						/*
2270 						 * Tell pmap that this page
2271 						 * is no longer mapped, to
2272 						 * adjust the footprint ledger
2273 						 * because this page is no
2274 						 * longer compressed.
2275 						 */
2276 						pmap_remove_options(
2277 							pmap,
2278 							pmap_offset,
2279 							(pmap_offset +
2280 							PAGE_SIZE),
2281 							PMAP_OPTIONS_REMOVE);
2282 					}
2283 				}
2284 			}
2285 		}
2286 	}
2287 
2288 	if (reusable) {
2289 		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2290 		vm_page_stats_reusable.reusable += reusable;
2291 		reusable = 0;
2292 	}
2293 
2294 	if (dw_count) {
2295 		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2296 		dwp = dwp_start;
2297 		dw_count = 0;
2298 	}
2299 
2300 	if (dwp_start && dwp_finish_ctx) {
2301 		vm_page_delayed_work_finish_ctx(dwp_start);
2302 		dwp_start = dwp = NULL;
2303 	}
2304 }
2305 
2306 
2307 /*
2308  * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
2309  * will always be less than or equal to the given size.  The total range is divided up
2310  * into chunks for efficiency and performance related to the locks and handling the shadow
2311  * chain.  This routine returns how much of the given "size" it actually processed.  It's
2312  * up to the caler to loop and keep calling this routine until the entire range they want
2313  * to process has been done.
2314  * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range.
2315  */
2316 
2317 static vm_object_size_t
2318 deactivate_a_chunk(
2319 	vm_object_t             orig_object,
2320 	vm_object_offset_t      offset,
2321 	vm_object_size_t        size,
2322 	deactivate_flags_t      flags,
2323 	pmap_flush_context      *pfc,
2324 	struct pmap             *pmap,
2325 	vm_map_offset_t         pmap_offset)
2326 {
2327 	vm_object_t             object;
2328 	vm_object_t             tmp_object;
2329 	vm_object_size_t        length;
2330 	chunk_state_t           chunk_state;
2331 
2332 
2333 	/*
2334 	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
2335 	 * remaining size the caller asked for.
2336 	 */
2337 
2338 	length = MIN(size, CHUNK_SIZE);
2339 
2340 	/*
2341 	 * The chunk_state keeps track of which pages we've already processed if there's
2342 	 * a shadow chain on this object.  At this point, we haven't done anything with this
2343 	 * range of pages yet, so initialize the state to indicate no pages processed yet.
2344 	 */
2345 
2346 	CHUNK_INIT(chunk_state, length);
2347 	object = orig_object;
2348 
2349 	/*
2350 	 * Start at the top level object and iterate around the loop once for each object
2351 	 * in the shadow chain.  We stop processing early if we've already found all the pages
2352 	 * in the range.  Otherwise we stop when we run out of shadow objects.
2353 	 */
2354 
2355 	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2356 		vm_object_paging_begin(object);
2357 
2358 		deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2359 
2360 		vm_object_paging_end(object);
2361 
2362 		/*
2363 		 * We've finished with this object, see if there's a shadow object.  If
2364 		 * there is, update the offset and lock the new object.  We also turn off
2365 		 * kill_page at this point since we only kill pages in the top most object.
2366 		 */
2367 
2368 		tmp_object = object->shadow;
2369 
2370 		if (tmp_object) {
2371 			assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2372 			flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2373 			offset += object->vo_shadow_offset;
2374 			vm_object_lock(tmp_object);
2375 		}
2376 
2377 		if (object != orig_object) {
2378 			vm_object_unlock(object);
2379 		}
2380 
2381 		object = tmp_object;
2382 	}
2383 
2384 	if (object && object != orig_object) {
2385 		vm_object_unlock(object);
2386 	}
2387 
2388 	return length;
2389 }
2390 
2391 
2392 
2393 /*
2394  * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
2395  * we also clear the modified status of the page and "forget" any changes that have been made
2396  * to the page.
2397  */
2398 
2399 __private_extern__ void
2400 vm_object_deactivate_pages(
2401 	vm_object_t             object,
2402 	vm_object_offset_t      offset,
2403 	vm_object_size_t        size,
2404 	boolean_t               kill_page,
2405 	boolean_t               reusable_page,
2406 	struct pmap             *pmap,
2407 	vm_map_offset_t         pmap_offset)
2408 {
2409 	vm_object_size_t        length;
2410 	boolean_t               all_reusable;
2411 	pmap_flush_context      pmap_flush_context_storage;
2412 	unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2413 	unsigned int pmap_clear_refmod_options = 0;
2414 	deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2415 	bool refmod_cleared = false;
2416 	if (kill_page) {
2417 		flags |= DEACTIVATE_KILL;
2418 	}
2419 	if (reusable_page) {
2420 		flags |= DEACTIVATE_REUSABLE;
2421 	}
2422 
2423 	/*
2424 	 * We break the range up into chunks and do one chunk at a time.  This is for
2425 	 * efficiency and performance while handling the shadow chains and the locks.
2426 	 * The deactivate_a_chunk() function returns how much of the range it processed.
2427 	 * We keep calling this routine until the given size is exhausted.
2428 	 */
2429 
2430 
2431 	all_reusable = FALSE;
2432 #if 11
2433 	/*
2434 	 * For the sake of accurate "reusable" pmap stats, we need
2435 	 * to tell pmap about each page that is no longer "reusable",
2436 	 * so we can't do the "all_reusable" optimization.
2437 	 *
2438 	 * Since we don't use the all_reusable optimization, we can safely
2439 	 * return early when size is 0 ("all_reusable" can't be TRUE here),
2440 	 * which saves the overhead of doing the pmap_flush_context
2441 	 * work.
2442 	 */
2443 	if (size == 0) {
2444 		return;
2445 	}
2446 #else
2447 	if (reusable_page &&
2448 	    object->internal &&
2449 	    object->vo_size != 0 &&
2450 	    object->vo_size == size &&
2451 	    object->reusable_page_count == 0) {
2452 		all_reusable = TRUE;
2453 		reusable_page = FALSE;
2454 		flags |= DEACTIVATE_ALL_REUSABLE;
2455 	}
2456 #endif
2457 
2458 	if ((reusable_page || all_reusable) && object->all_reusable) {
2459 		/* This means MADV_FREE_REUSABLE has been called twice, which
2460 		 * is probably illegal. */
2461 		return;
2462 	}
2463 
2464 
2465 	pmap_flush_context_init(&pmap_flush_context_storage);
2466 
2467 	/*
2468 	 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2469 	 * We can't do this if we're killing pages and there's a shadow chain as
2470 	 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2471 	 * safe to kill).
2472 	 * And we can only do this on hardware that supports it.
2473 	 */
2474 	if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2475 		if (kill_page && object->internal) {
2476 			pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2477 		}
2478 		if (reusable_page) {
2479 			pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2480 		}
2481 
2482 		refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2483 		if (refmod_cleared) {
2484 			// We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2485 			flags &= ~DEACTIVATE_CLEAR_REFMOD;
2486 		}
2487 	}
2488 
2489 	while (size) {
2490 		length = deactivate_a_chunk(object, offset, size, flags,
2491 		    &pmap_flush_context_storage, pmap, pmap_offset);
2492 
2493 		size -= length;
2494 		offset += length;
2495 		pmap_offset += length;
2496 	}
2497 	pmap_flush(&pmap_flush_context_storage);
2498 
2499 	if (all_reusable) {
2500 		if (!object->all_reusable) {
2501 			unsigned int reusable;
2502 
2503 			object->all_reusable = TRUE;
2504 			assert(object->reusable_page_count == 0);
2505 			/* update global stats */
2506 			reusable = object->resident_page_count;
2507 			OSAddAtomic(reusable,
2508 			    &vm_page_stats_reusable.reusable_count);
2509 			vm_page_stats_reusable.reusable += reusable;
2510 			vm_page_stats_reusable.all_reusable_calls++;
2511 		}
2512 	} else if (reusable_page) {
2513 		vm_page_stats_reusable.partial_reusable_calls++;
2514 	}
2515 }
2516 
2517 void
2518 vm_object_reuse_pages(
2519 	vm_object_t             object,
2520 	vm_object_offset_t      start_offset,
2521 	vm_object_offset_t      end_offset,
2522 	boolean_t               allow_partial_reuse)
2523 {
2524 	vm_object_offset_t      cur_offset;
2525 	vm_page_t               m;
2526 	unsigned int            reused, reusable;
2527 
2528 #define VM_OBJECT_REUSE_PAGE(object, m, reused)                         \
2529 	MACRO_BEGIN                                                     \
2530 	        if ((m) != VM_PAGE_NULL &&                              \
2531 	            (m)->vmp_reusable) {                                \
2532 	                assert((object)->reusable_page_count <=         \
2533 	                       (object)->resident_page_count);          \
2534 	                assert((object)->reusable_page_count > 0);      \
2535 	                (object)->reusable_page_count--;                \
2536 	                (m)->vmp_reusable = FALSE;                      \
2537 	                (reused)++;                                     \
2538 	/* \
2539 	 * Tell pmap that this page is no longer \
2540 	 * "reusable", to update the "reusable" stats \
2541 	 * for all the pmaps that have mapped this \
2542 	 * page. \
2543 	 */                                                             \
2544 	                pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2545 	                                          0, /* refmod */       \
2546 	                                          (PMAP_OPTIONS_CLEAR_REUSABLE \
2547 	                                           | PMAP_OPTIONS_NOFLUSH), \
2548 	                                          NULL);                \
2549 	        }                                                       \
2550 	MACRO_END
2551 
2552 	reused = 0;
2553 	reusable = 0;
2554 
2555 	vm_object_lock_assert_exclusive(object);
2556 
2557 	if (object->all_reusable) {
2558 		panic("object %p all_reusable: can't update pmap stats",
2559 		    object);
2560 		assert(object->reusable_page_count == 0);
2561 		object->all_reusable = FALSE;
2562 		if (end_offset - start_offset == object->vo_size ||
2563 		    !allow_partial_reuse) {
2564 			vm_page_stats_reusable.all_reuse_calls++;
2565 			reused = object->resident_page_count;
2566 		} else {
2567 			vm_page_stats_reusable.partial_reuse_calls++;
2568 			vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2569 				if (m->vmp_offset < start_offset ||
2570 				    m->vmp_offset >= end_offset) {
2571 					m->vmp_reusable = TRUE;
2572 					object->reusable_page_count++;
2573 					assert(object->resident_page_count >= object->reusable_page_count);
2574 					continue;
2575 				} else {
2576 					assert(!m->vmp_reusable);
2577 					reused++;
2578 				}
2579 			}
2580 		}
2581 	} else if (object->resident_page_count >
2582 	    ((end_offset - start_offset) >> PAGE_SHIFT)) {
2583 		vm_page_stats_reusable.partial_reuse_calls++;
2584 		for (cur_offset = start_offset;
2585 		    cur_offset < end_offset;
2586 		    cur_offset += PAGE_SIZE_64) {
2587 			if (object->reusable_page_count == 0) {
2588 				break;
2589 			}
2590 			m = vm_page_lookup(object, cur_offset);
2591 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2592 		}
2593 	} else {
2594 		vm_page_stats_reusable.partial_reuse_calls++;
2595 		vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2596 			if (object->reusable_page_count == 0) {
2597 				break;
2598 			}
2599 			if (m->vmp_offset < start_offset ||
2600 			    m->vmp_offset >= end_offset) {
2601 				continue;
2602 			}
2603 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2604 		}
2605 	}
2606 
2607 	/* update global stats */
2608 	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2609 	vm_page_stats_reusable.reused += reused;
2610 	vm_page_stats_reusable.reusable += reusable;
2611 }
2612 
2613 /*
2614  *	Routine:	vm_object_pmap_protect
2615  *
2616  *	Purpose:
2617  *		Reduces the permission for all physical
2618  *		pages in the specified object range.
2619  *
2620  *		If removing write permission only, it is
2621  *		sufficient to protect only the pages in
2622  *		the top-level object; only those pages may
2623  *		have write permission.
2624  *
2625  *		If removing all access, we must follow the
2626  *		shadow chain from the top-level object to
2627  *		remove access to all pages in shadowed objects.
2628  *
2629  *		The object must *not* be locked.  The object must
2630  *		be internal.
2631  *
2632  *              If pmap is not NULL, this routine assumes that
2633  *              the only mappings for the pages are in that
2634  *              pmap.
2635  */
2636 
2637 __private_extern__ void
2638 vm_object_pmap_protect(
2639 	vm_object_t                     object,
2640 	vm_object_offset_t              offset,
2641 	vm_object_size_t                size,
2642 	pmap_t                          pmap,
2643 	vm_map_size_t                   pmap_page_size,
2644 	vm_map_offset_t                 pmap_start,
2645 	vm_prot_t                       prot)
2646 {
2647 	vm_object_pmap_protect_options(object, offset, size, pmap,
2648 	    pmap_page_size,
2649 	    pmap_start, prot, 0);
2650 }
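
/*
 * A minimal usage sketch, assuming a hypothetical caller that wants to
 * remove write permission from one object page at "offset" whose only
 * mappings are known to live in "pmap", at virtual address "start"
 * (native page size on both sides):
 *
 *	vm_object_pmap_protect(object, offset, PAGE_SIZE,
 *	    pmap, PAGE_SIZE, start, VM_PROT_READ);
 *
 * Since only write permission is being removed, only the top-level
 * object needs to be processed; the shadow chain is not walked.
 */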
2651 
2652 __private_extern__ void
2653 vm_object_pmap_protect_options(
2654 	vm_object_t                     object,
2655 	vm_object_offset_t              offset,
2656 	vm_object_size_t                size,
2657 	pmap_t                          pmap,
2658 	vm_map_size_t                   pmap_page_size,
2659 	vm_map_offset_t                 pmap_start,
2660 	vm_prot_t                       prot,
2661 	int                             options)
2662 {
2663 	pmap_flush_context      pmap_flush_context_storage;
2664 	boolean_t               delayed_pmap_flush = FALSE;
2665 	vm_object_offset_t      offset_in_object;
2666 	vm_object_size_t        size_in_object;
2667 
2668 	if (object == VM_OBJECT_NULL) {
2669 		return;
2670 	}
2671 	if (pmap_page_size > PAGE_SIZE) {
2672 		/* for 16K map on 4K device... */
2673 		pmap_page_size = PAGE_SIZE;
2674 	}
2675 	/*
2676 	 * If we decide to work on the object itself, extend the range to
2677 	 * cover a full number of native pages.
2678 	 */
2679 	size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
2680 	offset_in_object = vm_object_trunc_page(offset);
2681 	/*
2682 	 * If we decide to work on the pmap, use the exact range specified,
2683 	 * so no rounding/truncating offset and size.  They should already
2684 	 * be aligned to pmap_page_size.
2685 	 */
2686 	assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
2687 	    "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
2688 	    offset, size, (uint64_t)pmap_page_size);
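
	/*
	 * A worked example of the two roundings above, assuming a 4K map on
	 * a 16K-page kernel (pmap_page_size = 4K, PAGE_SIZE = 16K), with
	 * offset = 0x1000 and size = 0x2000:
	 *   - object side: offset_in_object = 0, size_in_object = 0x4000
	 *     (extended to whole 16K object pages);
	 *   - pmap side: the exact [pmap_start, pmap_start + 0x2000) range
	 *     is used, since it is already 4K-aligned.
	 */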
2689 
2690 	vm_object_lock(object);
2691 
2692 	if (object->phys_contiguous) {
2693 		if (pmap != NULL) {
2694 			vm_object_unlock(object);
2695 			pmap_protect_options(pmap,
2696 			    pmap_start,
2697 			    pmap_start + size,
2698 			    prot,
2699 			    options & ~PMAP_OPTIONS_NOFLUSH,
2700 			    NULL);
2701 		} else {
2702 			vm_object_offset_t phys_start, phys_end, phys_addr;
2703 
2704 			phys_start = object->vo_shadow_offset + offset_in_object;
2705 			phys_end = phys_start + size_in_object;
2706 			assert(phys_start <= phys_end);
2707 			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
2708 			vm_object_unlock(object);
2709 
2710 			pmap_flush_context_init(&pmap_flush_context_storage);
2711 			delayed_pmap_flush = FALSE;
2712 
2713 			for (phys_addr = phys_start;
2714 			    phys_addr < phys_end;
2715 			    phys_addr += PAGE_SIZE_64) {
2716 				pmap_page_protect_options(
2717 					(ppnum_t) (phys_addr >> PAGE_SHIFT),
2718 					prot,
2719 					options | PMAP_OPTIONS_NOFLUSH,
2720 					(void *)&pmap_flush_context_storage);
2721 				delayed_pmap_flush = TRUE;
2722 			}
2723 			if (delayed_pmap_flush == TRUE) {
2724 				pmap_flush(&pmap_flush_context_storage);
2725 			}
2726 		}
2727 		return;
2728 	}
2729 
2730 	assert(object->internal);
2731 
2732 	while (TRUE) {
2733 		if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
2734 			vm_object_unlock(object);
2735 			if (pmap_page_size < PAGE_SIZE) {
2736 				DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
2737 			}
2738 			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
2739 			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
2740 			return;
2741 		}
2742 
2743 		if (pmap_page_size < PAGE_SIZE) {
2744 			DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
2745 		}
2746 
2747 		pmap_flush_context_init(&pmap_flush_context_storage);
2748 		delayed_pmap_flush = FALSE;
2749 
2750 		/*
2751 		 * If we are doing large ranges with respect to the resident
2752 		 * page count, then we should iterate over the resident pages;
2753 		 * otherwise, inverse page look-up will be faster.
2754 		 */
2755 		if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
2756 			vm_page_t               p;
2757 			vm_object_offset_t      end;
2758 
2759 			end = offset_in_object + size_in_object;
2760 
2761 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
2762 				if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) {
2763 					vm_map_offset_t start;
2764 
2765 					/*
2766 					 * XXX FBDP 4K: intentionally using "offset" here instead
2767 					 * of "offset_in_object", since "start" is a pmap address.
2768 					 */
2769 					start = pmap_start + p->vmp_offset - offset;
2770 
2771 					if (pmap != PMAP_NULL) {
2772 						vm_map_offset_t curr;
2773 						for (curr = start;
2774 						    curr < start + PAGE_SIZE_64;
2775 						    curr += pmap_page_size) {
2776 							if (curr < pmap_start) {
2777 								continue;
2778 							}
2779 							if (curr >= pmap_start + size) {
2780 								break;
2781 							}
2782 							pmap_protect_options(
2783 								pmap,
2784 								curr,
2785 								curr + pmap_page_size,
2786 								prot,
2787 								options | PMAP_OPTIONS_NOFLUSH,
2788 								&pmap_flush_context_storage);
2789 						}
2790 					} else {
2791 						pmap_page_protect_options(
2792 							VM_PAGE_GET_PHYS_PAGE(p),
2793 							prot,
2794 							options | PMAP_OPTIONS_NOFLUSH,
2795 							&pmap_flush_context_storage);
2796 					}
2797 					delayed_pmap_flush = TRUE;
2798 				}
2799 			}
2800 		} else {
2801 			vm_page_t               p;
2802 			vm_object_offset_t      end;
2803 			vm_object_offset_t      target_off;
2804 
2805 			end = offset_in_object + size_in_object;
2806 
2807 			for (target_off = offset_in_object;
2808 			    target_off < end; target_off += PAGE_SIZE) {
2809 				p = vm_page_lookup(object, target_off);
2810 
2811 				if (p != VM_PAGE_NULL) {
2812 					vm_object_offset_t start;
2813 
2814 					/*
2815 					 * XXX FBDP 4K: intentionally using "offset" here instead
2816 					 * of "offset_in_object", since "start" is a pmap address.
2817 					 */
2818 					start = pmap_start + (p->vmp_offset - offset);
2819 
2820 					if (pmap != PMAP_NULL) {
2821 						vm_map_offset_t curr;
2822 						for (curr = start;
2823 						    curr < start + PAGE_SIZE;
2824 						    curr += pmap_page_size) {
2825 							if (curr < pmap_start) {
2826 								continue;
2827 							}
2828 							if (curr >= pmap_start + size) {
2829 								break;
2830 							}
2831 							pmap_protect_options(
2832 								pmap,
2833 								curr,
2834 								curr + pmap_page_size,
2835 								prot,
2836 								options | PMAP_OPTIONS_NOFLUSH,
2837 								&pmap_flush_context_storage);
2838 						}
2839 					} else {
2840 						pmap_page_protect_options(
2841 							VM_PAGE_GET_PHYS_PAGE(p),
2842 							prot,
2843 							options | PMAP_OPTIONS_NOFLUSH,
2844 							&pmap_flush_context_storage);
2845 					}
2846 					delayed_pmap_flush = TRUE;
2847 				}
2848 			}
2849 		}
2850 		if (delayed_pmap_flush == TRUE) {
2851 			pmap_flush(&pmap_flush_context_storage);
2852 		}
2853 
2854 		if (prot == VM_PROT_NONE) {
2855 			/*
2856 			 * Must follow shadow chain to remove access
2857 			 * to pages in shadowed objects.
2858 			 */
2859 			vm_object_t     next_object;
2860 
2861 			next_object = object->shadow;
2862 			if (next_object != VM_OBJECT_NULL) {
2863 				offset_in_object += object->vo_shadow_offset;
2864 				offset += object->vo_shadow_offset;
2865 				vm_object_lock(next_object);
2866 				vm_object_unlock(object);
2867 				object = next_object;
2868 			} else {
2869 				/*
2870 				 * End of chain - we are done.
2871 				 */
2872 				break;
2873 			}
2874 		} else {
2875 			/*
2876 			 * Pages in shadowed objects may never have
2877 			 * write permission - we may stop here.
2878 			 */
2879 			break;
2880 		}
2881 	}
2882 
2883 	vm_object_unlock(object);
2884 }
2885 
2886 uint32_t vm_page_busy_absent_skipped = 0;
2887 
2888 /*
2889  *	Routine:	vm_object_copy_slowly
2890  *
2891  *	Description:
2892  *		Copy the specified range of the source
2893  *		virtual memory object without using
2894  *		protection-based optimizations (such
2895  *		as copy-on-write).  The pages in the
2896  *		region are actually copied.
2897  *
2898  *	In/out conditions:
2899  *		The caller must hold a reference and a lock
2900  *		for the source virtual memory object.  The source
2901  *		object will be returned *unlocked*.
2902  *
2903  *	Results:
2904  *		If the copy is completed successfully, KERN_SUCCESS is
2905  *		returned.  If the caller asserted the interruptible
2906  *		argument, and an interruption occurred while waiting
2907  *		for a user-generated event, MACH_SEND_INTERRUPTED is
2908  *		returned.  Other values may be returned to indicate
2909  *		hard errors during the copy operation.
2910  *
2911  *		A new virtual memory object is returned in a
2912  *		parameter (_result_object).  The contents of this
2913  *		new object, starting at a zero offset, are a copy
2914  *		of the source memory region.  In the event of
2915  *		an error, this parameter will contain the value
2916  *		VM_OBJECT_NULL.
2917  */
2918 __private_extern__ kern_return_t
2919 vm_object_copy_slowly(
2920 	vm_object_t             src_object,
2921 	vm_object_offset_t      src_offset,
2922 	vm_object_size_t        size,
2923 	boolean_t               interruptible,
2924 	vm_object_t             *_result_object)        /* OUT */
2925 {
2926 	vm_object_t             new_object;
2927 	vm_object_offset_t      new_offset;
2928 
2929 	struct vm_object_fault_info fault_info = {};
2930 
2931 	if (size == 0) {
2932 		vm_object_unlock(src_object);
2933 		*_result_object = VM_OBJECT_NULL;
2934 		return KERN_INVALID_ARGUMENT;
2935 	}
2936 
2937 	/*
2938 	 *	Prevent destruction of the source object while we copy.
2939 	 */
2940 
2941 	vm_object_reference_locked(src_object);
2942 	vm_object_unlock(src_object);
2943 
2944 	/*
2945 	 *	Create a new object to hold the copied pages.
2946 	 *	A few notes:
2947 	 *		We fill the new object starting at offset 0,
2948 	 *		 regardless of the input offset.
2949 	 *		We don't bother to lock the new object within
2950 	 *		 this routine, since we have the only reference.
2951 	 */
2952 
2953 	size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
2954 	src_offset = vm_object_trunc_page(src_offset);
2955 	new_object = vm_object_allocate(size);
2956 	new_offset = 0;
2957 
2958 	assert(size == trunc_page_64(size));    /* Will the loop terminate? */
2959 
2960 	fault_info.interruptible = interruptible;
2961 	fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
2962 	fault_info.lo_offset = src_offset;
2963 	fault_info.hi_offset = src_offset + size;
2964 	fault_info.stealth = TRUE;
2965 
2966 	for (;
2967 	    size != 0;
2968 	    src_offset += PAGE_SIZE_64,
2969 	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
2970 	    ) {
2971 		vm_page_t       new_page;
2972 		vm_fault_return_t result;
2973 
2974 		vm_object_lock(new_object);
2975 
2976 		while ((new_page = vm_page_alloc(new_object, new_offset))
2977 		    == VM_PAGE_NULL) {
2978 			vm_object_unlock(new_object);
2979 
2980 			if (!vm_page_wait(interruptible)) {
2981 				vm_object_deallocate(new_object);
2982 				vm_object_deallocate(src_object);
2983 				*_result_object = VM_OBJECT_NULL;
2984 				return MACH_SEND_INTERRUPTED;
2985 			}
2986 			vm_object_lock(new_object);
2987 		}
2988 		vm_object_unlock(new_object);
2989 
2990 		do {
2991 			vm_prot_t       prot = VM_PROT_READ;
2992 			vm_page_t       _result_page;
2993 			vm_page_t       top_page;
2994 			vm_page_t       result_page;
2995 			kern_return_t   error_code;
2996 			vm_object_t     result_page_object;
2997 
2998 
2999 			vm_object_lock(src_object);
3000 
3001 			if (src_object->internal &&
3002 			    src_object->shadow == VM_OBJECT_NULL &&
3003 			    (src_object->pager == NULL ||
3004 			    (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3005 			    src_offset) ==
3006 			    VM_EXTERNAL_STATE_ABSENT))) {
3007 				boolean_t can_skip_page;
3008 
3009 				_result_page = vm_page_lookup(src_object,
3010 				    src_offset);
3011 				if (_result_page == VM_PAGE_NULL) {
3012 					/*
3013 					 * This page is neither resident nor
3014 					 * compressed and there's no shadow
3015 					 * object below "src_object", so this
3016 					 * page is really missing.
3017 					 * There's no need to zero-fill it just
3018 					 * to copy it:  let's leave it missing
3019 					 * in "new_object" and get zero-filled
3020 					 * on demand.
3021 					 */
3022 					can_skip_page = TRUE;
3023 				} else if (workaround_41447923 &&
3024 				    src_object->pager == NULL &&
3025 				    _result_page != VM_PAGE_NULL &&
3026 				    _result_page->vmp_busy &&
3027 				    _result_page->vmp_absent &&
3028 				    src_object->purgable == VM_PURGABLE_DENY &&
3029 				    !src_object->blocked_access) {
3030 					/*
3031 					 * This page is "busy" and "absent"
3032 					 * but not because we're waiting for
3033 					 * it to be decompressed.  It must
3034 					 * be because it's a "no zero fill"
3035 					 * page that is currently not
3036 					 * accessible until it gets overwritten
3037 					 * by a device driver.
3038 					 * Since its initial state would have
3039 					 * been "zero-filled", let's leave the
3040 					 * copy page missing and get zero-filled
3041 					 * on demand.
3042 					 */
3043 					assert(src_object->internal);
3044 					assert(src_object->shadow == NULL);
3045 					assert(src_object->pager == NULL);
3046 					can_skip_page = TRUE;
3047 					vm_page_busy_absent_skipped++;
3048 				} else {
3049 					can_skip_page = FALSE;
3050 				}
3051 				if (can_skip_page) {
3052 					vm_object_unlock(src_object);
3053 					/* free the unused "new_page"... */
3054 					vm_object_lock(new_object);
3055 					VM_PAGE_FREE(new_page);
3056 					new_page = VM_PAGE_NULL;
3057 					vm_object_unlock(new_object);
3058 					/* ...and go to next page in "src_object" */
3059 					result = VM_FAULT_SUCCESS;
3060 					break;
3061 				}
3062 			}
3063 
3064 			vm_object_paging_begin(src_object);
3065 
3066 			/* cap size at maximum UPL size */
3067 			upl_size_t cluster_size;
3068 			if (os_convert_overflow(size, &cluster_size)) {
3069 				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3070 			}
3071 			fault_info.cluster_size = cluster_size;
3072 
3073 			_result_page = VM_PAGE_NULL;
3074 			result = vm_fault_page(src_object, src_offset,
3075 			    VM_PROT_READ, FALSE,
3076 			    FALSE,     /* page not looked up */
3077 			    &prot, &_result_page, &top_page,
3078 			    (int *)0,
3079 			    &error_code, FALSE, &fault_info);
3080 
3081 			switch (result) {
3082 			case VM_FAULT_SUCCESS:
3083 				result_page = _result_page;
3084 				result_page_object = VM_PAGE_OBJECT(result_page);
3085 
3086 				/*
3087 				 *	Copy the page to the new object.
3088 				 *
3089 				 *	POLICY DECISION:
3090 				 *		If result_page is clean,
3091 				 *		we could steal it instead
3092 				 *		of copying.
3093 				 */
3094 
3095 				vm_page_copy(result_page, new_page);
3096 				vm_object_unlock(result_page_object);
3097 
3098 				/*
3099 				 *	Let go of both pages (make them
3100 				 *	not busy, perform wakeup, activate).
3101 				 */
3102 				vm_object_lock(new_object);
3103 				SET_PAGE_DIRTY(new_page, FALSE);
3104 				PAGE_WAKEUP_DONE(new_page);
3105 				vm_object_unlock(new_object);
3106 
3107 				vm_object_lock(result_page_object);
3108 				PAGE_WAKEUP_DONE(result_page);
3109 
3110 				vm_page_lockspin_queues();
3111 				if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3112 				    (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3113 					vm_page_activate(result_page);
3114 				}
3115 				vm_page_activate(new_page);
3116 				vm_page_unlock_queues();
3117 
3118 				/*
3119 				 *	Release paging references and
3120 				 *	top-level placeholder page, if any.
3121 				 */
3122 
3123 				vm_fault_cleanup(result_page_object,
3124 				    top_page);
3125 
3126 				break;
3127 
3128 			case VM_FAULT_RETRY:
3129 				break;
3130 
3131 			case VM_FAULT_MEMORY_SHORTAGE:
3132 				if (vm_page_wait(interruptible)) {
3133 					break;
3134 				}
3135 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3136 				OS_FALLTHROUGH;
3137 
3138 			case VM_FAULT_INTERRUPTED:
3139 				vm_object_lock(new_object);
3140 				VM_PAGE_FREE(new_page);
3141 				vm_object_unlock(new_object);
3142 
3143 				vm_object_deallocate(new_object);
3144 				vm_object_deallocate(src_object);
3145 				*_result_object = VM_OBJECT_NULL;
3146 				return MACH_SEND_INTERRUPTED;
3147 
3148 			case VM_FAULT_SUCCESS_NO_VM_PAGE:
3149 				/* success but no VM page: fail */
3150 				vm_object_paging_end(src_object);
3151 				vm_object_unlock(src_object);
3152 				OS_FALLTHROUGH;
3153 			case VM_FAULT_MEMORY_ERROR:
3154 				/*
3155 				 * A policy choice:
3156 				 *	(a) ignore pages that we can't
3157 				 *	    copy
3158 				 *	(b) return the null object if
3159 				 *	    any page fails [chosen]
3160 				 */
3161 
3162 				vm_object_lock(new_object);
3163 				VM_PAGE_FREE(new_page);
3164 				vm_object_unlock(new_object);
3165 
3166 				vm_object_deallocate(new_object);
3167 				vm_object_deallocate(src_object);
3168 				*_result_object = VM_OBJECT_NULL;
3169 				return error_code ? error_code:
3170 				       KERN_MEMORY_ERROR;
3171 
3172 			default:
3173 				panic("vm_object_copy_slowly: unexpected error"
3174 				    " 0x%x from vm_fault_page()\n", result);
3175 			}
3176 		} while (result != VM_FAULT_SUCCESS);
3177 	}
3178 
3179 	/*
3180 	 *	Lose the extra reference, and return our object.
3181 	 */
3182 	vm_object_deallocate(src_object);
3183 	*_result_object = new_object;
3184 	return KERN_SUCCESS;
3185 }
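
/*
 * A minimal usage sketch, assuming a hypothetical caller that already
 * holds a reference on "src_object" and has it locked (the routine
 * returns the source object unlocked):
 *
 *	vm_object_t     new_copy;
 *	kern_return_t   kr;
 *
 *	vm_object_lock(src_object);
 *	kr = vm_object_copy_slowly(src_object, 0, src_object->vo_size,
 *	    FALSE, &new_copy);
 *	if (kr == KERN_SUCCESS) {
 *	        new_copy's contents, starting at offset 0, are a
 *	        private copy of the source range
 *	}
 */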
3186 
3187 /*
3188  *	Routine:	vm_object_copy_quickly
3189  *
3190  *	Purpose:
3191  *		Copy the specified range of the source virtual
3192  *		memory object, if it can be done without waiting
3193  *		for user-generated events.
3194  *
3195  *	Results:
3196  *		If the copy is successful, the copy is returned in
3197  *		the arguments; otherwise, the arguments are not
3198  *		affected.
3199  *
3200  *	In/out conditions:
3201  *		The object should be unlocked on entry and exit.
3202  */
3203 
3204 /*ARGSUSED*/
3205 __private_extern__ boolean_t
3206 vm_object_copy_quickly(
3207 	vm_object_t             object,               /* IN */
3208 	__unused vm_object_offset_t     offset, /* IN */
3209 	__unused vm_object_size_t       size,   /* IN */
3210 	boolean_t               *_src_needs_copy,       /* OUT */
3211 	boolean_t               *_dst_needs_copy)       /* OUT */
3212 {
3213 	memory_object_copy_strategy_t copy_strategy;
3214 
3215 	if (object == VM_OBJECT_NULL) {
3216 		*_src_needs_copy = FALSE;
3217 		*_dst_needs_copy = FALSE;
3218 		return TRUE;
3219 	}
3220 
3221 	vm_object_lock(object);
3222 
3223 	copy_strategy = object->copy_strategy;
3224 
3225 	switch (copy_strategy) {
3226 	case MEMORY_OBJECT_COPY_SYMMETRIC:
3227 
3228 		/*
3229 		 *	Symmetric copy strategy.
3230 		 *	Make another reference to the object.
3231 		 *	Leave object/offset unchanged.
3232 		 */
3233 
3234 		vm_object_reference_locked(object);
3235 		object->shadowed = TRUE;
3236 		vm_object_unlock(object);
3237 
3238 		/*
3239 		 *	Both source and destination must make
3240 		 *	shadows, and the source must be made
3241 		 *	read-only if not already.
3242 		 */
3243 
3244 		*_src_needs_copy = TRUE;
3245 		*_dst_needs_copy = TRUE;
3246 
3247 		break;
3248 
3249 	case MEMORY_OBJECT_COPY_DELAY:
3250 		vm_object_unlock(object);
3251 		return FALSE;
3252 
3253 	default:
3254 		vm_object_unlock(object);
3255 		return FALSE;
3256 	}
3257 	return TRUE;
3258 }
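
/*
 * A minimal usage sketch, assuming a hypothetical caller deciding how to
 * copy a range of "object":
 *
 *	boolean_t src_needs_copy, dst_needs_copy;
 *
 *	if (vm_object_copy_quickly(object, offset, size,
 *	        &src_needs_copy, &dst_needs_copy)) {
 *	        if (src_needs_copy) {
 *	                the source mapping must be made copy-on-write,
 *	                e.g. by write-protecting its pages
 *	        }
 *	        the symmetric copy shares "object" via the extra
 *	        reference taken by the routine
 *	} else {
 *	        fall back to another strategy, e.g.
 *	        vm_object_copy_delayed() or vm_object_copy_slowly()
 *	}
 */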
3259 
3260 static int copy_call_count = 0;
3261 static int copy_call_sleep_count = 0;
3262 static int copy_call_restart_count = 0;
3263 
3264 /*
3265  *	Routine:	vm_object_copy_call [internal]
3266  *
3267  *	Description:
3268  *		Copy the source object (src_object), using the
3269  *		user-managed copy algorithm.
3270  *
3271  *	In/out conditions:
3272  *		The source object must be locked on entry.  It
3273  *		will be *unlocked* on exit.
3274  *
3275  *	Results:
3276  *		If the copy is successful, KERN_SUCCESS is returned.
3277  *		A new object that represents the copied virtual
3278  *		memory is returned in a parameter (*_result_object).
3279  *		If the return value indicates an error, this parameter
3280  *		is not valid.
3281  */
3282 static kern_return_t
3283 vm_object_copy_call(
3284 	vm_object_t             src_object,
3285 	vm_object_offset_t      src_offset,
3286 	vm_object_size_t        size,
3287 	vm_object_t             *_result_object)        /* OUT */
3288 {
3289 	kern_return_t   kr;
3290 	vm_object_t     copy;
3291 	boolean_t       check_ready = FALSE;
3292 	uint32_t        try_failed_count = 0;
3293 
3294 	/*
3295 	 *	If a copy is already in progress, wait and retry.
3296 	 *
3297 	 *	XXX
3298 	 *	Consider making this call interruptible, as Mike
3299 	 *	intended it to be.
3300 	 *
3301 	 *	XXXO
3302 	 *	Need a counter or version or something to allow
3303 	 *	us to use the copy that the currently requesting
3304 	 *	thread is obtaining -- is it worth adding to the
3305 	 *	vm object structure? Depends on how common this case is.
3306 	 */
3307 	copy_call_count++;
3308 	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3309 		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3310 		    THREAD_UNINT);
3311 		copy_call_restart_count++;
3312 	}
3313 
3314 	/*
3315 	 *	Indicate (for the benefit of memory_object_create_copy)
3316 	 *	that we want a copy for src_object. (Note that we cannot
3317 	 *	do a real assert_wait before calling memory_object_copy,
3318 	 *	so we simply set the flag.)
3319 	 */
3320 
3321 	vm_object_set_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL);
3322 	vm_object_unlock(src_object);
3323 
3324 	/*
3325 	 *	Ask the memory manager to give us a memory object
3326 	 *	which represents a copy of the src object.
3327 	 *	The memory manager may give us a memory object
3328 	 *	which we already have, or it may give us a
3329 	 *	new memory object. This memory object will arrive
3330 	 *	via memory_object_create_copy.
3331 	 */
3332 
3333 	kr = KERN_FAILURE;      /* XXX need to change memory_object.defs */
3334 	if (kr != KERN_SUCCESS) {
3335 		return kr;
3336 	}
3337 
3338 	/*
3339 	 *	Wait for the copy to arrive.
3340 	 */
3341 	vm_object_lock(src_object);
3342 	while (vm_object_wanted(src_object, VM_OBJECT_EVENT_COPY_CALL)) {
3343 		vm_object_sleep(src_object, VM_OBJECT_EVENT_COPY_CALL,
3344 		    THREAD_UNINT);
3345 		copy_call_sleep_count++;
3346 	}
3347 Retry:
3348 	assert(src_object->copy != VM_OBJECT_NULL);
3349 	copy = src_object->copy;
3350 	if (!vm_object_lock_try(copy)) {
3351 		vm_object_unlock(src_object);
3352 
3353 		try_failed_count++;
3354 		mutex_pause(try_failed_count);  /* wait a bit */
3355 
3356 		vm_object_lock(src_object);
3357 		goto Retry;
3358 	}
3359 	if (copy->vo_size < src_offset + size) {
3360 		assertf(page_aligned(src_offset + size),
3361 		    "object %p size 0x%llx",
3362 		    copy, (uint64_t)(src_offset + size));
3363 		copy->vo_size = src_offset + size;
3364 	}
3365 
3366 	if (!copy->pager_ready) {
3367 		check_ready = TRUE;
3368 	}
3369 
3370 	/*
3371 	 *	Return the copy.
3372 	 */
3373 	*_result_object = copy;
3374 	vm_object_unlock(copy);
3375 	vm_object_unlock(src_object);
3376 
3377 	/* Wait for the copy to be ready. */
3378 	if (check_ready == TRUE) {
3379 		vm_object_lock(copy);
3380 		while (!copy->pager_ready) {
3381 			vm_object_sleep(copy, VM_OBJECT_EVENT_PAGER_READY, THREAD_UNINT);
3382 		}
3383 		vm_object_unlock(copy);
3384 	}
3385 
3386 	return KERN_SUCCESS;
3387 }
3388 
3389 static uint32_t copy_delayed_lock_collisions;
3390 static uint32_t copy_delayed_max_collisions;
3391 static uint32_t copy_delayed_lock_contention;
3392 static uint32_t copy_delayed_protect_iterate;
3393 
3394 /*
3395  *	Routine:	vm_object_copy_delayed [internal]
3396  *
3397  *	Description:
3398  *		Copy the specified virtual memory object, using
3399  *		the asymmetric copy-on-write algorithm.
3400  *
3401  *	In/out conditions:
3402  *		The src_object must be locked on entry.  It will be unlocked
3403  *		on exit - so the caller must also hold a reference to it.
3404  *
3405  *		This routine will not block waiting for user-generated
3406  *		events.  It is not interruptible.
3407  */
3408 __private_extern__ vm_object_t
3409 vm_object_copy_delayed(
3410 	vm_object_t             src_object,
3411 	vm_object_offset_t      src_offset,
3412 	vm_object_size_t        size,
3413 	boolean_t               src_object_shared)
3414 {
3415 	vm_object_t             new_copy = VM_OBJECT_NULL;
3416 	vm_object_t             old_copy;
3417 	vm_page_t               p;
3418 	vm_object_size_t        copy_size = src_offset + size;
3419 	pmap_flush_context      pmap_flush_context_storage;
3420 	boolean_t               delayed_pmap_flush = FALSE;
3421 
3422 
3423 	uint32_t collisions = 0;
3424 	/*
3425 	 *	The user-level memory manager wants to see all of the changes
3426 	 *	to this object, but it has promised not to make any changes on
3427 	 *	its own.
3428 	 *
3429 	 *	Perform an asymmetric copy-on-write, as follows:
3430 	 *		Create a new object, called a "copy object" to hold
3431 	 *		 pages modified by the new mapping  (i.e., the copy,
3432 	 *		 not the original mapping).
3433 	 *		Record the original object as the backing object for
3434 	 *		 the copy object.  If the original mapping does not
3435 	 *		 change a page, it may be used read-only by the copy.
3436 	 *		Record the copy object in the original object.
3437 	 *		 When the original mapping causes a page to be modified,
3438 	 *		 it must be copied to a new page that is "pushed" to
3439 	 *		 the copy object.
3440 	 *		Mark the new mapping (the copy object) copy-on-write.
3441 	 *		 This makes the copy object itself read-only, allowing
3442 	 *		 it to be reused if the original mapping makes no
3443 	 *		 changes, and simplifying the synchronization required
3444 	 *		 in the "push" operation described above.
3445 	 *
3446 	 *	The copy-on-write is said to be asymmetric because the original
3447 	 *	object is *not* marked copy-on-write. A copied page is pushed
3448 	 *	to the copy object, regardless of which party attempted to modify
3449 	 *	the page.
3450 	 *
3451 	 *	Repeated asymmetric copy operations may be done. If the
3452 	 *	original object has not been changed since the last copy, its
3453 	 *	copy object can be reused. Otherwise, a new copy object can be
3454 	 *	inserted between the original object and its previous copy
3455 	 *	object.  Since any copy object is read-only, this cannot
3456 	 *	affect the contents of the previous copy object.
3457 	 *
3458 	 *	Note that a copy object is higher in the object tree than the
3459 	 *	original object; therefore, use of the copy object recorded in
3460 	 *	the original object must be done carefully, to avoid deadlock.
3461 	 */
3462 
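	/*
	 * Illustrative sketch (not part of the algorithm description above):
	 * after repeated delayed copies, each taken after the source was
	 * modified since the previous copy, the objects form a chain
	 * oldest-copy -> ... -> newest-copy -> src_object, linked through
	 * the "shadow" field with zero shadow offsets, while
	 * src_object->copy points at the newest copy.  Walking down from
	 * the oldest copy ("oldest_copy" is a hypothetical name):
	 *
	 *	vm_object_t obj = oldest_copy;
	 *	while (obj != src_object) {
	 *		assert(obj->vo_shadow_offset == 0);
	 *		obj = obj->shadow;	-- a newer copy, then src_object
	 *	}
	 */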
3463 	copy_size = vm_object_round_page(copy_size);
3464 Retry:
3465 
3466 	/*
3467 	 * Wait for paging in progress.
3468 	 */
3469 	if (!src_object->true_share &&
3470 	    (src_object->paging_in_progress != 0 ||
3471 	    src_object->activity_in_progress != 0)) {
3472 		if (src_object_shared == TRUE) {
3473 			vm_object_unlock(src_object);
3474 			vm_object_lock(src_object);
3475 			src_object_shared = FALSE;
3476 			goto Retry;
3477 		}
3478 		vm_object_paging_wait(src_object, THREAD_UNINT);
3479 	}
3480 	/*
3481 	 *	See whether we can reuse the result of a previous
3482 	 *	copy operation.
3483 	 */
3484 
3485 	old_copy = src_object->copy;
3486 	if (old_copy != VM_OBJECT_NULL) {
3487 		int lock_granted;
3488 
3489 		/*
3490 		 *	Try to get the locks (out of order)
3491 		 */
3492 		if (src_object_shared == TRUE) {
3493 			lock_granted = vm_object_lock_try_shared(old_copy);
3494 		} else {
3495 			lock_granted = vm_object_lock_try(old_copy);
3496 		}
3497 
3498 		if (!lock_granted) {
3499 			vm_object_unlock(src_object);
3500 
3501 			if (collisions++ == 0) {
3502 				copy_delayed_lock_contention++;
3503 			}
3504 			mutex_pause(collisions);
3505 
3506 			/* Heisenberg Rules */
3507 			copy_delayed_lock_collisions++;
3508 
3509 			if (collisions > copy_delayed_max_collisions) {
3510 				copy_delayed_max_collisions = collisions;
3511 			}
3512 
3513 			if (src_object_shared == TRUE) {
3514 				vm_object_lock_shared(src_object);
3515 			} else {
3516 				vm_object_lock(src_object);
3517 			}
3518 
3519 			goto Retry;
3520 		}
3521 
3522 		/*
3523 		 *	Determine whether the old copy object has
3524 		 *	been modified.
3525 		 */
3526 
3527 		if (old_copy->resident_page_count == 0 &&
3528 		    !old_copy->pager_created) {
3529 			/*
3530 			 *	It has not been modified.
3531 			 *
3532 			 *	Return another reference to
3533 			 *	the existing copy-object if
3534 			 *	we can safely grow it (if
3535 			 *	needed).
3536 			 */
3537 
3538 			if (old_copy->vo_size < copy_size) {
3539 				if (src_object_shared == TRUE) {
3540 					vm_object_unlock(old_copy);
3541 					vm_object_unlock(src_object);
3542 
3543 					vm_object_lock(src_object);
3544 					src_object_shared = FALSE;
3545 					goto Retry;
3546 				}
3547 				/*
3548 				 * We can't perform a delayed copy if any of the
3549 				 * pages in the extended range are wired (because
3550 				 * we can't safely take write permission away from
3551 				 * wired pages).  If the pages aren't wired, then
3552 				 * go ahead and protect them.
3553 				 */
3554 				copy_delayed_protect_iterate++;
3555 
3556 				pmap_flush_context_init(&pmap_flush_context_storage);
3557 				delayed_pmap_flush = FALSE;
3558 
3559 				vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3560 					if (!p->vmp_fictitious &&
3561 					    p->vmp_offset >= old_copy->vo_size &&
3562 					    p->vmp_offset < copy_size) {
3563 						if (VM_PAGE_WIRED(p)) {
3564 							vm_object_unlock(old_copy);
3565 							vm_object_unlock(src_object);
3566 
3567 							if (new_copy != VM_OBJECT_NULL) {
3568 								vm_object_unlock(new_copy);
3569 								vm_object_deallocate(new_copy);
3570 							}
3571 							if (delayed_pmap_flush == TRUE) {
3572 								pmap_flush(&pmap_flush_context_storage);
3573 							}
3574 
3575 							return VM_OBJECT_NULL;
3576 						} else {
3577 							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
3578 							    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3579 							delayed_pmap_flush = TRUE;
3580 						}
3581 					}
3582 				}
3583 				if (delayed_pmap_flush == TRUE) {
3584 					pmap_flush(&pmap_flush_context_storage);
3585 				}
3586 
3587 				assertf(page_aligned(copy_size),
3588 				    "object %p size 0x%llx",
3589 				    old_copy, (uint64_t)copy_size);
3590 				old_copy->vo_size = copy_size;
3591 			}
3592 			if (src_object_shared == TRUE) {
3593 				vm_object_reference_shared(old_copy);
3594 			} else {
3595 				vm_object_reference_locked(old_copy);
3596 			}
3597 			vm_object_unlock(old_copy);
3598 			vm_object_unlock(src_object);
3599 
3600 			if (new_copy != VM_OBJECT_NULL) {
3601 				vm_object_unlock(new_copy);
3602 				vm_object_deallocate(new_copy);
3603 			}
3604 			return old_copy;
3605 		}
3606 
3607 
3608 
3609 		/*
3610 		 * Adjust the size argument so that the newly-created
3611 		 * copy object will be large enough to back either the
3612 		 * old copy object or the new mapping.
3613 		 */
3614 		if (old_copy->vo_size > copy_size) {
3615 			copy_size = old_copy->vo_size;
3616 		}
3617 
3618 		if (new_copy == VM_OBJECT_NULL) {
3619 			vm_object_unlock(old_copy);
3620 			vm_object_unlock(src_object);
3621 			new_copy = vm_object_allocate(copy_size);
3622 			vm_object_lock(src_object);
3623 			vm_object_lock(new_copy);
3624 
3625 			src_object_shared = FALSE;
3626 			goto Retry;
3627 		}
3628 		assertf(page_aligned(copy_size),
3629 		    "object %p size 0x%llx",
3630 		    new_copy, (uint64_t)copy_size);
3631 		new_copy->vo_size = copy_size;
3632 
3633 		/*
3634 		 *	The copy-object is always made large enough to
3635 		 *	completely shadow the original object, since
3636 		 *	it may have several users who want to shadow
3637 		 *	the original object at different points.
3638 		 */
3639 
3640 		assert((old_copy->shadow == src_object) &&
3641 		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
3642 	} else if (new_copy == VM_OBJECT_NULL) {
3643 		vm_object_unlock(src_object);
3644 		new_copy = vm_object_allocate(copy_size);
3645 		vm_object_lock(src_object);
3646 		vm_object_lock(new_copy);
3647 
3648 		src_object_shared = FALSE;
3649 		goto Retry;
3650 	}
3651 
3652 	/*
3653 	 * We now have the src object locked, and the new copy object
3654 	 * allocated and locked (and potentially the old copy locked).
3655 	 * Before we go any further, make sure we can still perform
3656 	 * a delayed copy, as the situation may have changed.
3657 	 *
3658 	 * Specifically, we can't perform a delayed copy if any of the
3659 	 * pages in the range are wired (because we can't safely take
3660 	 * write permission away from wired pages).  If the pages aren't
3661 	 * wired, then go ahead and protect them.
3662 	 */
3663 	copy_delayed_protect_iterate++;
3664 
3665 	pmap_flush_context_init(&pmap_flush_context_storage);
3666 	delayed_pmap_flush = FALSE;
3667 
3668 	vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3669 		if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
3670 			if (VM_PAGE_WIRED(p)) {
3671 				if (old_copy) {
3672 					vm_object_unlock(old_copy);
3673 				}
3674 				vm_object_unlock(src_object);
3675 				vm_object_unlock(new_copy);
3676 				vm_object_deallocate(new_copy);
3677 
3678 				if (delayed_pmap_flush == TRUE) {
3679 					pmap_flush(&pmap_flush_context_storage);
3680 				}
3681 
3682 				return VM_OBJECT_NULL;
3683 			} else {
3684 				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p), (VM_PROT_ALL & ~VM_PROT_WRITE),
3685 				    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3686 				delayed_pmap_flush = TRUE;
3687 			}
3688 		}
3689 	}
3690 	if (delayed_pmap_flush == TRUE) {
3691 		pmap_flush(&pmap_flush_context_storage);
3692 	}
3693 
3694 	if (old_copy != VM_OBJECT_NULL) {
3695 		/*
3696 		 *	Make the old copy-object shadow the new one.
3697 		 *	It will receive no more pages from the original
3698 		 *	object.
3699 		 */
3700 
3701 		/* remove ref. from old_copy */
3702 		vm_object_lock_assert_exclusive(src_object);
3703 		src_object->ref_count--;
3704 		assert(src_object->ref_count > 0);
3705 		vm_object_lock_assert_exclusive(old_copy);
3706 		old_copy->shadow = new_copy;
3707 		vm_object_lock_assert_exclusive(new_copy);
3708 		assert(new_copy->ref_count > 0);
3709 		new_copy->ref_count++;          /* for old_copy->shadow ref. */
3710 
3711 		vm_object_unlock(old_copy);     /* done with old_copy */
3712 	}
3713 
3714 	/*
3715 	 *	Point the new copy at the existing object.
3716 	 */
3717 	vm_object_lock_assert_exclusive(new_copy);
3718 	new_copy->shadow = src_object;
3719 	new_copy->vo_shadow_offset = 0;
3720 	new_copy->shadowed = TRUE;      /* caller must set needs_copy */
3721 
3722 	vm_object_lock_assert_exclusive(src_object);
3723 	vm_object_reference_locked(src_object);
3724 	src_object->copy = new_copy;
3725 	vm_object_unlock(src_object);
3726 	vm_object_unlock(new_copy);
3727 
3728 	return new_copy;
3729 }
3730 
3731 /*
3732  *	Routine:	vm_object_copy_strategically
3733  *
3734  *	Purpose:
3735  *		Perform a copy according to the source object's
3736  *		declared strategy.  This operation may block,
3737  *		and may be interrupted.
3738  */
3739 __private_extern__ kern_return_t
3740 vm_object_copy_strategically(
3741 	vm_object_t             src_object,
3742 	vm_object_offset_t      src_offset,
3743 	vm_object_size_t        size,
3744 	vm_object_t             *dst_object,    /* OUT */
3745 	vm_object_offset_t      *dst_offset,    /* OUT */
3746 	boolean_t               *dst_needs_copy) /* OUT */
3747 {
3748 	boolean_t       result;
3749 	boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
3750 	boolean_t       object_lock_shared = FALSE;
3751 	memory_object_copy_strategy_t copy_strategy;
3752 
3753 	assert(src_object != VM_OBJECT_NULL);
3754 
3755 	copy_strategy = src_object->copy_strategy;
3756 
3757 	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
3758 		vm_object_lock_shared(src_object);
3759 		object_lock_shared = TRUE;
3760 	} else {
3761 		vm_object_lock(src_object);
3762 	}
3763 
3764 	/*
3765 	 *	The copy strategy is only valid if the memory manager
3766 	 *	is "ready". Internal objects are always ready.
3767 	 */
3768 
3769 	while (!src_object->internal && !src_object->pager_ready) {
3770 		wait_result_t wait_result;
3771 
3772 		if (object_lock_shared == TRUE) {
3773 			vm_object_unlock(src_object);
3774 			vm_object_lock(src_object);
3775 			object_lock_shared = FALSE;
3776 			continue;
3777 		}
3778 		wait_result = vm_object_sleep(  src_object,
3779 		    VM_OBJECT_EVENT_PAGER_READY,
3780 		    interruptible);
3781 		if (wait_result != THREAD_AWAKENED) {
3782 			vm_object_unlock(src_object);
3783 			*dst_object = VM_OBJECT_NULL;
3784 			*dst_offset = 0;
3785 			*dst_needs_copy = FALSE;
3786 			return MACH_SEND_INTERRUPTED;
3787 		}
3788 	}
3789 
3790 	/*
3791 	 *	Use the appropriate copy strategy.
3792 	 */
3793 
3794 	switch (copy_strategy) {
3795 	case MEMORY_OBJECT_COPY_DELAY:
3796 		*dst_object = vm_object_copy_delayed(src_object,
3797 		    src_offset, size, object_lock_shared);
3798 		if (*dst_object != VM_OBJECT_NULL) {
3799 			*dst_offset = src_offset;
3800 			*dst_needs_copy = TRUE;
3801 			result = KERN_SUCCESS;
3802 			break;
3803 		}
3804 		vm_object_lock(src_object);
3805 		OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
3806 
3807 	case MEMORY_OBJECT_COPY_NONE:
3808 		result = vm_object_copy_slowly(src_object, src_offset, size,
3809 		    interruptible, dst_object);
3810 		if (result == KERN_SUCCESS) {
3811 			*dst_offset = src_offset - vm_object_trunc_page(src_offset);
3812 			*dst_needs_copy = FALSE;
3813 		}
3814 		break;
3815 
3816 	case MEMORY_OBJECT_COPY_CALL:
3817 		result = vm_object_copy_call(src_object, src_offset, size,
3818 		    dst_object);
3819 		if (result == KERN_SUCCESS) {
3820 			*dst_offset = src_offset;
3821 			*dst_needs_copy = TRUE;
3822 		}
3823 		break;
3824 
3825 	case MEMORY_OBJECT_COPY_SYMMETRIC:
3826 		vm_object_unlock(src_object);
3827 		result = KERN_MEMORY_RESTART_COPY;
3828 		break;
3829 
3830 	default:
3831 		panic("copy_strategically: bad strategy");
3832 		result = KERN_INVALID_ARGUMENT;
3833 	}
3834 	return result;
3835 }
3836 
3837 /*
3838  *	vm_object_shadow:
3839  *
3840  *	Create a new object which is backed by the
3841  *	specified existing object range.  The source
3842  *	object reference is deallocated.
3843  *
3844  *	The new object and offset into that object
3845  *	are returned in the source parameters.
3846  */
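/*
 *	Caller-side sketch (illustrative only; "entry_object" and
 *	"entry_offset" are hypothetical names standing in for a map
 *	entry's object/offset pair):
 *
 *		vm_object_t        entry_object = ...;	-- caller holds a ref
 *		vm_object_offset_t entry_offset = ...;
 *		if (vm_object_shadow(&entry_object, &entry_offset,
 *			length, FALSE)) {
 *			-- entry_object/entry_offset now name the new shadow;
 *			-- the reference previously held on the source now
 *			-- backs the shadow's "shadow" pointer, so the net
 *			-- reference count is unchanged.
 *		}
 */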
3847 boolean_t vm_object_shadow_check = TRUE;
3848 uint64_t vm_object_shadow_forced = 0;
3849 uint64_t vm_object_shadow_skipped = 0;
3850 
3851 __private_extern__ boolean_t
3852 vm_object_shadow(
3853 	vm_object_t             *object,        /* IN/OUT */
3854 	vm_object_offset_t      *offset,        /* IN/OUT */
3855 	vm_object_size_t        length,
3856 	boolean_t               always_shadow)
3857 {
3858 	vm_object_t     source;
3859 	vm_object_t     result;
3860 
3861 	source = *object;
3862 	assert(source != VM_OBJECT_NULL);
3863 	if (source == VM_OBJECT_NULL) {
3864 		return FALSE;
3865 	}
3866 
3867 	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
3868 
3869 	/*
3870 	 *	Determine if we really need a shadow.
3871 	 *
3872 	 *	If the source object is larger than what we are trying
3873 	 *	to create, then force the shadow creation even if the
3874 	 *	ref count is 1.  This will allow us to [potentially]
3875 	 *	collapse the underlying object away in the future
3876 	 *	(freeing up the extra data it might contain and that
3877 	 *	we don't need).
3878 	 */
3879 
3880 	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
3881 
3882 	/*
3883 	 * The following optimization does not work in the context of submaps
3884 	 * (the shared region, in particular).
3885 	 * This object might have only 1 reference (in the submap) but that
3886 	 * submap can itself be mapped multiple times, so the object is
3887 	 * actually indirectly referenced more than once...
3888 	 * The caller can specify "always_shadow" to bypass the optimization.
3889 	 */
3890 	if (vm_object_shadow_check &&
3891 	    source->vo_size == length &&
3892 	    source->ref_count == 1) {
3893 		if (always_shadow) {
3894 			vm_object_shadow_forced++;
3895 		} else {
3896 			/*
3897 			 * Lock the object and check again.
3898 			 * We also check to see if there's
3899 			 * a shadow or copy object involved.
3900 			 * We can't do that earlier because
3901 			 * without the object locked, there
3902 			 * could be a collapse and the chain
3903 			 * gets modified leaving us with an
3904 			 * invalid pointer.
3905 			 */
3906 			vm_object_lock(source);
3907 			if (source->vo_size == length &&
3908 			    source->ref_count == 1 &&
3909 			    (source->shadow == VM_OBJECT_NULL ||
3910 			    source->shadow->copy == VM_OBJECT_NULL)) {
3911 				source->shadowed = FALSE;
3912 				vm_object_unlock(source);
3913 				vm_object_shadow_skipped++;
3914 				return FALSE;
3915 			}
3916 			/* things changed while we were locking "source"... */
3917 			vm_object_unlock(source);
3918 		}
3919 	}
3920 
3921 	/*
3922 	 * *offset is the map entry's offset into the VM object and
3923 	 * is aligned to the map's page size.
3924 	 * VM objects need to be aligned to the system's page size.
3925 	 * Record the necessary adjustment and re-align the offset so
3926 	 * that result->vo_shadow_offset is properly page-aligned.
3927 	 */
3928 	vm_object_offset_t offset_adjustment;
3929 	offset_adjustment = *offset - vm_object_trunc_page(*offset);
3930 	length = vm_object_round_page(length + offset_adjustment);
3931 	*offset = vm_object_trunc_page(*offset);
3932 
3933 	/*
3934 	 *	Allocate a new object with the given length
3935 	 */
3936 
3937 	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
3938 		panic("vm_object_shadow: no object for shadowing");
3939 	}
3940 
3941 	/*
3942 	 *	The new object shadows the source object, adding
3943 	 *	a reference to it.  Our caller changes his reference
3944 	 *	to point to the new object, removing a reference to
3945 	 *	the source object.  Net result: no change of reference
3946 	 *	count.
3947 	 */
3948 	result->shadow = source;
3949 
3950 	/*
3951 	 *	Store the offset into the source object,
3952 	 *	and fix up the offset into the new object.
3953 	 */
3954 
3955 	result->vo_shadow_offset = *offset;
3956 	assertf(page_aligned(result->vo_shadow_offset),
3957 	    "result %p shadow offset 0x%llx",
3958 	    result, result->vo_shadow_offset);
3959 
3960 	/*
3961 	 *	Return the new object and offset.
3962 	 */
3963 
3964 	*offset = 0;
3965 	if (offset_adjustment) {
3966 		/*
3967 		 * Make the map entry point to the equivalent offset
3968 		 * in the new object.
3969 		 */
3970 		DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
3971 		*offset += offset_adjustment;
3972 	}
3973 	*object = result;
3974 	return TRUE;
3975 }
3976 
3977 /*
3978  *	The relationship between vm_object structures and
3979  *	the memory_object requires careful synchronization.
3980  *
3981  *	All associations are created by memory_object_create_named
3982  *  for external pagers and vm_object_compressor_pager_create for internal
3983  *  objects as follows:
3984  *
3985  *		pager:	the memory_object itself, supplied by
3986  *			the user requesting a mapping (or the kernel,
3987  *			when initializing internal objects); the
3988  *			kernel simulates holding send rights by keeping
3989  *			a port reference;
3990  *
3991  *		pager_request:
3992  *			the memory object control port,
3993  *			created by the kernel; the kernel holds
3994  *			receive (and ownership) rights to this
3995  *			port, but no other references.
3996  *
3997  *	When initialization is complete, the "initialized" field
3998  *	is asserted.  Other mappings using a particular memory object,
3999  *	and any references to the vm_object gained through the
4000  *	port association must wait for this initialization to occur.
4001  *
4002  *	In order to allow the memory manager to set attributes before
4003  *	requests (notably virtual copy operations, but also data or
4004  *	unlock requests) are made, a "ready" attribute is made available.
4005  *	Only the memory manager may affect the value of this attribute.
4006  *	Its value does not affect critical kernel functions, such as
4007  *	internal object initialization or destruction.  [Furthermore,
4008  *	memory objects created by the kernel are assumed to be ready
4009  *	immediately; the default memory manager need not explicitly
4010  *	set the "ready" attribute.]
4011  *
4012  *	[Both the "initialized" and "ready" attribute wait conditions
4013  *	use the "pager" field as the wait event.]
4014  *
4015  *	The port associations can be broken down by any of the
4016  *	following routines:
4017  *		vm_object_terminate:
4018  *			No references to the vm_object remain, and
4019  *			the object cannot (or will not) be cached.
4020  *			This is the normal case, and is done even
4021  *			though one of the other cases has already been
4022  *			done.
4023  *		memory_object_destroy:
4024  *			The memory manager has requested that the
4025  *			kernel relinquish references to the memory
4026  *			object. [The memory manager may not want to
4027  *			destroy the memory object, but may wish to
4028  *			refuse or tear down existing memory mappings.]
4029  *
4030  *	Each routine that breaks an association must break all of
4031  *	them at once.  At some later time, that routine must clear
4032  *	the pager field and release the memory object references.
4033  *	[Furthermore, each routine must cope with the simultaneous
4034  *	or previous operations of the others.]
4035  *
4036  *	Because the pager field may be cleared spontaneously, it
4037  *	cannot be used to determine whether a memory object has
4038  *	ever been associated with a particular vm_object.  [This
4039  *	knowledge is important to the shadow object mechanism.]
4040  *	For this reason, an additional "created" attribute is
4041  *	provided.
4042  *
4043  *	During various paging operations, the pager reference found in the
4044  *	vm_object must be valid.  To prevent this from being released,
4045  *	(other than being removed, i.e., made null), routines may use
4046  *	the vm_object_paging_begin/end routines [actually, macros].
4047  *	The implementation uses the "paging_in_progress" and "wanted" fields.
4048  *	[Operations that alter the validity of the pager values include the
4049  *	termination routines and vm_object_collapse.]
4050  */
4051 
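/*
 *	Illustrative sketch of the "ready" wait protocol described above
 *	(it mirrors the wait loops used elsewhere in this file; it is not
 *	a new mechanism):
 *
 *		vm_object_lock(object);
 *		while (!object->pager_ready) {
 *			vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *			    THREAD_UNINT);
 *		}
 *		-- ... safe to issue requests to the pager ...
 *		vm_object_unlock(object);
 *
 *	"pager_ready" is set (with the matching vm_object_wakeup on
 *	VM_OBJECT_EVENT_PAGER_READY) either directly for internal objects,
 *	or once the memory manager has declared its attributes.
 */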
4052 
4053 /*
4054  *	Routine:	vm_object_memory_object_associate
4055  *	Purpose:
4056  *		Associate a VM object to the given pager.
4057  *		If a VM object is not provided, create one.
4058  *		Initialize the pager.
4059  */
4060 vm_object_t
4061 vm_object_memory_object_associate(
4062 	memory_object_t         pager,
4063 	vm_object_t             object,
4064 	vm_object_size_t        size,
4065 	boolean_t               named)
4066 {
4067 	memory_object_control_t control;
4068 
4069 	assert(pager != MEMORY_OBJECT_NULL);
4070 
4071 	if (object != VM_OBJECT_NULL) {
4072 		assert(object->internal);
4073 		assert(object->pager_created);
4074 		assert(!object->pager_initialized);
4075 		assert(!object->pager_ready);
4076 		assert(object->pager_trusted);
4077 	} else {
4078 		object = vm_object_allocate(size);
4079 		assert(object != VM_OBJECT_NULL);
4080 		object->internal = FALSE;
4081 		object->pager_trusted = FALSE;
4082 		/* copy strategy invalid until set by memory manager */
4083 		object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4084 	}
4085 
4086 	/*
4087 	 *	Allocate request port.
4088 	 */
4089 
4090 	control = memory_object_control_allocate(object);
4091 	assert(control != MEMORY_OBJECT_CONTROL_NULL);
4092 
4093 	vm_object_lock(object);
4094 
4095 	assert(!object->pager_ready);
4096 	assert(!object->pager_initialized);
4097 	assert(object->pager == NULL);
4098 	assert(object->pager_control == NULL);
4099 
4100 	/*
4101 	 *	Copy the reference we were given.
4102 	 */
4103 
4104 	memory_object_reference(pager);
4105 	object->pager_created = TRUE;
4106 	object->pager = pager;
4107 	object->pager_control = control;
4108 	object->pager_ready = FALSE;
4109 
4110 	vm_object_unlock(object);
4111 
4112 	/*
4113 	 *	Let the pager know we're using it.
4114 	 */
4115 
4116 	(void) memory_object_init(pager,
4117 	    object->pager_control,
4118 	    PAGE_SIZE);
4119 
4120 	vm_object_lock(object);
4121 	if (named) {
4122 		object->named = TRUE;
4123 	}
4124 	if (object->internal) {
4125 		object->pager_ready = TRUE;
4126 		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4127 	}
4128 
4129 	object->pager_initialized = TRUE;
4130 	vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4131 
4132 	vm_object_unlock(object);
4133 
4134 	return object;
4135 }
4136 
4137 /*
4138  *	Routine:	vm_object_compressor_pager_create
4139  *	Purpose:
4140  *		Create a memory object for an internal object.
4141  *	In/out conditions:
4142  *		The object is locked on entry and exit;
4143  *		it may be unlocked within this call.
4144  *	Limitations:
4145  *		Only one thread may be performing a
4146  *		vm_object_compressor_pager_create on an object at
4147  *		a time.  Presumably, only the pageout
4148  *		daemon will be using this routine.
4149  */
4150 
4151 void
4152 vm_object_compressor_pager_create(
4153 	vm_object_t     object)
4154 {
4155 	memory_object_t         pager;
4156 	vm_object_t             pager_object = VM_OBJECT_NULL;
4157 
4158 	assert(object != kernel_object);
4159 
4160 	/*
4161 	 *	Prevent collapse or termination by holding a paging reference
4162 	 */
4163 
4164 	vm_object_paging_begin(object);
4165 	if (object->pager_created) {
4166 		/*
4167 		 *	Someone else got to it first...
4168 		 *	wait for them to finish initializing the ports
4169 		 */
4170 		while (!object->pager_initialized) {
4171 			vm_object_sleep(object,
4172 			    VM_OBJECT_EVENT_INITIALIZED,
4173 			    THREAD_UNINT);
4174 		}
4175 		vm_object_paging_end(object);
4176 		return;
4177 	}
4178 
4179 	if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4180 	    (object->vo_size / PAGE_SIZE)) {
4181 #if DEVELOPMENT || DEBUG
4182 		printf("vm_object_compressor_pager_create(%p): "
4183 		    "object size 0x%llx >= 0x%llx\n",
4184 		    object,
4185 		    (uint64_t) object->vo_size,
4186 		    0x0FFFFFFFFULL * PAGE_SIZE);
4187 #endif /* DEVELOPMENT || DEBUG */
4188 		vm_object_paging_end(object);
4189 		return;
4190 	}
4191 
4192 	/*
4193 	 *	Indicate that a memory object has been assigned
4194 	 *	before dropping the lock, to prevent a race.
4195 	 */
4196 
4197 	object->pager_created = TRUE;
4198 	object->pager_trusted = TRUE;
4199 	object->paging_offset = 0;
4200 
4201 	vm_object_unlock(object);
4202 
4203 	/*
4204 	 *	Create the [internal] pager, and associate it with this object.
4205 	 *
4206 	 *	We make the association here so that vm_object_enter()
4207 	 *      can look up the object to complete initializing it.  No
4208 	 *	user will ever map this object.
4209 	 */
4210 	{
4211 		/* create our new memory object */
4212 		assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4213 		    (object->vo_size / PAGE_SIZE));
4214 		(void) compressor_memory_object_create(
4215 			(memory_object_size_t) object->vo_size,
4216 			&pager);
4217 		if (pager == NULL) {
4218 			panic("vm_object_compressor_pager_create(): "
4219 			    "no pager for object %p size 0x%llx\n",
4220 			    object, (uint64_t) object->vo_size);
4221 		}
4222 	}
4223 
4224 	/*
4225 	 *	A reference was returned by
4226 	 *	memory_object_create(), and it is
4227 	 *	copied by vm_object_memory_object_associate().
4228 	 */
4229 
4230 	pager_object = vm_object_memory_object_associate(pager,
4231 	    object,
4232 	    object->vo_size,
4233 	    FALSE);
4234 	if (pager_object != object) {
4235 		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4236 	}
4237 
4238 	/*
4239 	 *	Drop the reference we were passed.
4240 	 */
4241 	memory_object_deallocate(pager);
4242 
4243 	vm_object_lock(object);
4244 
4245 	/*
4246 	 *	Release the paging reference
4247 	 */
4248 	vm_object_paging_end(object);
4249 }
4250 
4251 /*
4252  *	Global variables for vm_object_collapse():
4253  *
4254  *		Counts for normal collapses and bypasses.
4255  *		Debugging variables, to watch or disable collapse.
4256  */
4257 static long     object_collapses = 0;
4258 static long     object_bypasses  = 0;
4259 
4260 static boolean_t        vm_object_collapse_allowed = TRUE;
4261 static boolean_t        vm_object_bypass_allowed = TRUE;
4262 
4263 void vm_object_do_collapse_compressor(vm_object_t object,
4264     vm_object_t backing_object);
4265 void
4266 vm_object_do_collapse_compressor(
4267 	vm_object_t object,
4268 	vm_object_t backing_object)
4269 {
4270 	vm_object_offset_t new_offset, backing_offset;
4271 	vm_object_size_t size;
4272 
4273 	vm_counters.do_collapse_compressor++;
4274 
4275 	vm_object_lock_assert_exclusive(object);
4276 	vm_object_lock_assert_exclusive(backing_object);
4277 
4278 	size = object->vo_size;
4279 
4280 	/*
4281 	 *	Move all compressed pages from backing_object
4282 	 *	to the parent.
4283 	 */
4284 
4285 	for (backing_offset = object->vo_shadow_offset;
4286 	    backing_offset < object->vo_shadow_offset + object->vo_size;
4287 	    backing_offset += PAGE_SIZE) {
4288 		memory_object_offset_t backing_pager_offset;
4289 
4290 		/* find the next compressed page at or after this offset */
4291 		backing_pager_offset = (backing_offset +
4292 		    backing_object->paging_offset);
4293 		backing_pager_offset = vm_compressor_pager_next_compressed(
4294 			backing_object->pager,
4295 			backing_pager_offset);
4296 		if (backing_pager_offset == (memory_object_offset_t) -1) {
4297 			/* no more compressed pages */
4298 			break;
4299 		}
4300 		backing_offset = (backing_pager_offset -
4301 		    backing_object->paging_offset);
4302 
4303 		new_offset = backing_offset - object->vo_shadow_offset;
4304 
4305 		if (new_offset >= object->vo_size) {
4306 			/* we're out of the scope of "object": done */
4307 			break;
4308 		}
4309 
4310 		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4311 		    (vm_compressor_pager_state_get(object->pager,
4312 		    (new_offset +
4313 		    object->paging_offset)) ==
4314 		    VM_EXTERNAL_STATE_EXISTS)) {
4315 			/*
4316 			 * This page already exists in object, resident or
4317 			 * compressed.
4318 			 * We don't need this compressed page in backing_object
4319 			 * and it will be reclaimed when we release
4320 			 * backing_object.
4321 			 */
4322 			continue;
4323 		}
4324 
4325 		/*
4326 		 * backing_object has this page in the VM compressor and
4327 		 * we need to transfer it to object.
4328 		 */
4329 		vm_counters.do_collapse_compressor_pages++;
4330 		vm_compressor_pager_transfer(
4331 			/* destination: */
4332 			object->pager,
4333 			(new_offset + object->paging_offset),
4334 			/* source: */
4335 			backing_object->pager,
4336 			(backing_offset + backing_object->paging_offset));
4337 	}
4338 }
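/*
 *	Worked example of the offset translation above (illustrative values
 *	only): with object->vo_shadow_offset == 0x4000 and
 *	backing_object->paging_offset == 0x2000, a compressed page that the
 *	backing pager reports at pager offset 0x7000 corresponds to
 *
 *		backing_offset = 0x7000 - 0x2000 = 0x5000
 *		new_offset     = 0x5000 - 0x4000 = 0x1000
 *
 *	and, provided new_offset is within object->vo_size and "object" has
 *	no page (resident or compressed) there, it is transferred to
 *	object's pager at new_offset + object->paging_offset.
 */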
4339 
4340 /*
4341  *	Routine:	vm_object_do_collapse
4342  *	Purpose:
4343  *		Collapse an object with the object backing it.
4344  *		Pages in the backing object are moved into the
4345  *		parent, and the backing object is deallocated.
4346  *	Conditions:
4347  *		Both objects and the cache are locked; the page
4348  *		queues are unlocked.
4349  *
4350  */
4351 static void
4352 vm_object_do_collapse(
4353 	vm_object_t object,
4354 	vm_object_t backing_object)
4355 {
4356 	vm_page_t p, pp;
4357 	vm_object_offset_t new_offset, backing_offset;
4358 	vm_object_size_t size;
4359 
4360 	vm_object_lock_assert_exclusive(object);
4361 	vm_object_lock_assert_exclusive(backing_object);
4362 
4363 	assert(object->purgable == VM_PURGABLE_DENY);
4364 	assert(backing_object->purgable == VM_PURGABLE_DENY);
4365 
4366 	backing_offset = object->vo_shadow_offset;
4367 	size = object->vo_size;
4368 
4369 	/*
4370 	 *	Move all in-memory pages from backing_object
4371 	 *	to the parent.  Pages that have been paged out
4372 	 *	will be overwritten by any of the parent's
4373 	 *	pages that shadow them.
4374 	 */
4375 
4376 	while (!vm_page_queue_empty(&backing_object->memq)) {
4377 		p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4378 
4379 		new_offset = (p->vmp_offset - backing_offset);
4380 
4381 		assert(!p->vmp_busy || p->vmp_absent);
4382 
4383 		/*
4384 		 *	If the parent has a page here, or if
4385 		 *	this page falls outside the parent,
4386 		 *	dispose of it.
4387 		 *
4388 		 *	Otherwise, move it as planned.
4389 		 */
4390 
4391 		if (p->vmp_offset < backing_offset || new_offset >= size) {
4392 			VM_PAGE_FREE(p);
4393 		} else {
4394 			pp = vm_page_lookup(object, new_offset);
4395 			if (pp == VM_PAGE_NULL) {
4396 				if (VM_COMPRESSOR_PAGER_STATE_GET(object,
4397 				    new_offset)
4398 				    == VM_EXTERNAL_STATE_EXISTS) {
4399 					/*
4400 					 * Parent object has this page
4401 					 * in the VM compressor.
4402 					 * Throw away the backing
4403 					 * object's page.
4404 					 */
4405 					VM_PAGE_FREE(p);
4406 				} else {
4407 					/*
4408 					 *	Parent now has no page.
4409 					 *	Move the backing object's page
4410 					 *      up.
4411 					 */
4412 					vm_page_rename(p, object, new_offset);
4413 				}
4414 			} else {
4415 				assert(!pp->vmp_absent);
4416 
4417 				/*
4418 				 *	Parent object has a real page.
4419 				 *	Throw away the backing object's
4420 				 *	page.
4421 				 */
4422 				VM_PAGE_FREE(p);
4423 			}
4424 		}
4425 	}
4426 
4427 	if (vm_object_collapse_compressor_allowed &&
4428 	    object->pager != MEMORY_OBJECT_NULL &&
4429 	    backing_object->pager != MEMORY_OBJECT_NULL) {
4430 		/* move compressed pages from backing_object to object */
4431 		vm_object_do_collapse_compressor(object, backing_object);
4432 	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4433 		assert((!object->pager_created &&
4434 		    (object->pager == MEMORY_OBJECT_NULL)) ||
4435 		    (!backing_object->pager_created &&
4436 		    (backing_object->pager == MEMORY_OBJECT_NULL)));
4437 		/*
4438 		 *	Move the pager from backing_object to object.
4439 		 *
4440 		 *	XXX We're only using part of the paging space
4441 		 *	for keeps now... we ought to discard the
4442 		 *	unused portion.
4443 		 */
4444 
4445 		assert(!object->paging_in_progress);
4446 		assert(!object->activity_in_progress);
4447 		assert(!object->pager_created);
4448 		assert(object->pager == NULL);
4449 		object->pager = backing_object->pager;
4450 
4451 		object->pager_created = backing_object->pager_created;
4452 		object->pager_control = backing_object->pager_control;
4453 		object->pager_ready = backing_object->pager_ready;
4454 		object->pager_initialized = backing_object->pager_initialized;
4455 		object->paging_offset =
4456 		    backing_object->paging_offset + backing_offset;
4457 		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4458 			memory_object_control_collapse(&object->pager_control,
4459 			    object);
4460 		}
4461 		/* the backing_object has lost its pager: reset all fields */
4462 		backing_object->pager_created = FALSE;
4463 		backing_object->pager_control = NULL;
4464 		backing_object->pager_ready = FALSE;
4465 		backing_object->paging_offset = 0;
4466 		backing_object->pager = NULL;
4467 	}
4468 	/*
4469 	 *	Object now shadows whatever backing_object did.
4470 	 *	Note that the reference to backing_object->shadow
4471 	 *	moves from within backing_object to within object.
4472 	 */
4473 
4474 	assert(!object->phys_contiguous);
4475 	assert(!backing_object->phys_contiguous);
4476 	object->shadow = backing_object->shadow;
4477 	if (object->shadow) {
4478 		assertf(page_aligned(object->vo_shadow_offset),
4479 		    "object %p shadow_offset 0x%llx",
4480 		    object, object->vo_shadow_offset);
4481 		assertf(page_aligned(backing_object->vo_shadow_offset),
4482 		    "backing_object %p shadow_offset 0x%llx",
4483 		    backing_object, backing_object->vo_shadow_offset);
4484 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
4485 		/* "backing_object" gave its shadow to "object" */
4486 		backing_object->shadow = VM_OBJECT_NULL;
4487 		backing_object->vo_shadow_offset = 0;
4488 	} else {
4489 		/* no shadow, therefore no shadow offset... */
4490 		object->vo_shadow_offset = 0;
4491 	}
4492 	assert((object->shadow == VM_OBJECT_NULL) ||
4493 	    (object->shadow->copy != backing_object));
4494 
4495 	/*
4496 	 *	Discard backing_object.
4497 	 *
4498 	 *	Since the backing object has no pages, no
4499 	 *	pager left, and no object references within it,
4500 	 *	all that is necessary is to dispose of it.
4501 	 */
4502 	object_collapses++;
4503 
4504 	assert(backing_object->ref_count == 1);
4505 	assert(backing_object->resident_page_count == 0);
4506 	assert(backing_object->paging_in_progress == 0);
4507 	assert(backing_object->activity_in_progress == 0);
4508 	assert(backing_object->shadow == VM_OBJECT_NULL);
4509 	assert(backing_object->vo_shadow_offset == 0);
4510 
4511 	if (backing_object->pager != MEMORY_OBJECT_NULL) {
4512 		/* ... unless it has a pager; need to terminate pager too */
4513 		vm_counters.do_collapse_terminate++;
4514 		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4515 			vm_counters.do_collapse_terminate_failure++;
4516 		}
4517 		return;
4518 	}
4519 
4520 	assert(backing_object->pager == NULL);
4521 
4522 	backing_object->alive = FALSE;
4523 	vm_object_unlock(backing_object);
4524 
4525 #if VM_OBJECT_TRACKING
4526 	if (vm_object_tracking_btlog) {
4527 		btlog_erase(vm_object_tracking_btlog, backing_object);
4528 	}
4529 #endif /* VM_OBJECT_TRACKING */
4530 
4531 	vm_object_lock_destroy(backing_object);
4532 
4533 	zfree(vm_object_zone, backing_object);
4534 }
4535 
4536 static void
4537 vm_object_do_bypass(
4538 	vm_object_t object,
4539 	vm_object_t backing_object)
4540 {
4541 	/*
4542 	 *	Make the parent shadow the next object
4543 	 *	in the chain.
4544 	 */
4545 
4546 	vm_object_lock_assert_exclusive(object);
4547 	vm_object_lock_assert_exclusive(backing_object);
4548 
4549 	vm_object_reference(backing_object->shadow);
4550 
4551 	assert(!object->phys_contiguous);
4552 	assert(!backing_object->phys_contiguous);
4553 	object->shadow = backing_object->shadow;
4554 	if (object->shadow) {
4555 		assertf(page_aligned(object->vo_shadow_offset),
4556 		    "object %p shadow_offset 0x%llx",
4557 		    object, object->vo_shadow_offset);
4558 		assertf(page_aligned(backing_object->vo_shadow_offset),
4559 		    "backing_object %p shadow_offset 0x%llx",
4560 		    backing_object, backing_object->vo_shadow_offset);
4561 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
4562 	} else {
4563 		/* no shadow, therefore no shadow offset... */
4564 		object->vo_shadow_offset = 0;
4565 	}
4566 
4567 	/*
4568 	 *	Backing object might have had a copy pointer
4569 	 *	to us.  If it did, clear it.
4570 	 */
4571 	if (backing_object->copy == object) {
4572 		backing_object->copy = VM_OBJECT_NULL;
4573 	}
4574 
4575 	/*
4576 	 *	Drop the reference count on backing_object.
4577 	 #if	TASK_SWAPPER
4578 	 *	Since its ref_count was at least 2, it
4579 	 *	will not vanish; so we don't need to call
4580 	 *	vm_object_deallocate.
4581 	 *	[with a caveat for "named" objects]
4582 	 *
4583 	 *	The res_count on the backing object is
4584 	 *	conditionally decremented.  It's possible
4585 	 *	(via vm_pageout_scan) to get here with
4586 	 *	a "swapped" object, which has a 0 res_count,
4587 	 *	in which case, the backing object res_count
4588 	 *	is already down by one.
4589 	 #else
4590 	 *	Don't call vm_object_deallocate unless
4591 	 *	ref_count drops to zero.
4592 	 *
4593 	 *	The ref_count can drop to zero here if the
4594 	 *	backing object could be bypassed but not
4595 	 *	collapsed, such as when the backing object
4596 	 *	is temporary and cacheable.
4597 	 #endif
4598 	 */
4599 	if (backing_object->ref_count > 2 ||
4600 	    (!backing_object->named && backing_object->ref_count > 1)) {
4601 		vm_object_lock_assert_exclusive(backing_object);
4602 		backing_object->ref_count--;
4603 		vm_object_unlock(backing_object);
4604 	} else {
4605 		/*
4606 		 *	Drop locks so that we can deallocate
4607 		 *	the backing object.
4608 		 */
4609 
4610 		/*
4611 		 * vm_object_collapse (the caller of this function) is
4612 		 * now called from contexts that may not guarantee that a
4613 		 * valid reference is held on the object... w/o a valid
4614 		 * reference, it is unsafe and unwise (you will definitely
4615 		 * regret it) to unlock the object and then retake the lock
4616 		 * since the object may be terminated and recycled in between.
4617 		 * The "activity_in_progress" reference will keep the object
4618 		 * 'stable'.
4619 		 */
4620 		vm_object_activity_begin(object);
4621 		vm_object_unlock(object);
4622 
4623 		vm_object_unlock(backing_object);
4624 		vm_object_deallocate(backing_object);
4625 
4626 		/*
4627 		 *	Relock object. We don't have to reverify
4628 		 *	its state since vm_object_collapse will
4629 		 *	do that for us as it starts at the
4630 		 *	top of its loop.
4631 		 */
4632 
4633 		vm_object_lock(object);
4634 		vm_object_activity_end(object);
4635 	}
4636 
4637 	object_bypasses++;
4638 }
4639 
4640 
4641 /*
4642  *	vm_object_collapse:
4643  *
4644  *	Perform an object collapse or an object bypass if appropriate.
4645  *	The real work of collapsing and bypassing is performed in
4646  *	the routines vm_object_do_collapse and vm_object_do_bypass.
4647  *
4648  *	Requires that the object be locked and the page queues be unlocked.
4649  *
4650  */
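/*
 *	Caller-side sketch (illustrative): the caller already holds the
 *	object lock exclusively and may pass a page-aligned hint offset
 *	(0 is always acceptable) to seed the cow_hint check below:
 *
 *		vm_object_lock(object);
 *		...
 *		vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
 *		...
 *		vm_object_unlock(object);
 */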
4651 static unsigned long vm_object_collapse_calls = 0;
4652 static unsigned long vm_object_collapse_objects = 0;
4653 static unsigned long vm_object_collapse_do_collapse = 0;
4654 static unsigned long vm_object_collapse_do_bypass = 0;
4655 
4656 __private_extern__ void
4657 vm_object_collapse(
4658 	vm_object_t                             object,
4659 	vm_object_offset_t                      hint_offset,
4660 	boolean_t                               can_bypass)
4661 {
4662 	vm_object_t                             backing_object;
4663 	vm_object_size_t                        object_vcount, object_rcount;
4664 	vm_object_t                             original_object;
4665 	int                                     object_lock_type;
4666 	int                                     backing_object_lock_type;
4667 
4668 	vm_object_collapse_calls++;
4669 
4670 	assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
4671 
4672 	if (!vm_object_collapse_allowed &&
4673 	    !(can_bypass && vm_object_bypass_allowed)) {
4674 		return;
4675 	}
4676 
4677 	if (object == VM_OBJECT_NULL) {
4678 		return;
4679 	}
4680 
4681 	original_object = object;
4682 
4683 	/*
4684 	 * The top object was locked "exclusive" by the caller.
4685 	 * In the first pass, to determine if we can collapse the shadow chain,
4686 	 * take a "shared" lock on the shadow objects.  If we can collapse,
4687 	 * we'll have to go down the chain again with exclusive locks.
4688 	 */
4689 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4690 	backing_object_lock_type = OBJECT_LOCK_SHARED;
4691 
4692 retry:
4693 	object = original_object;
4694 	vm_object_lock_assert_exclusive(object);
4695 
4696 	while (TRUE) {
4697 		vm_object_collapse_objects++;
4698 		/*
4699 		 *	Verify that the conditions are right for either
4700 		 *	collapse or bypass:
4701 		 */
4702 
4703 		/*
4704 		 *	There is a backing object, and
4705 		 */
4706 
4707 		backing_object = object->shadow;
4708 		if (backing_object == VM_OBJECT_NULL) {
4709 			if (object != original_object) {
4710 				vm_object_unlock(object);
4711 			}
4712 			return;
4713 		}
4714 		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
4715 			vm_object_lock_shared(backing_object);
4716 		} else {
4717 			vm_object_lock(backing_object);
4718 		}
4719 
4720 		/*
4721 		 *	No pages in the object are currently
4722 		 *	being paged out, and
4723 		 */
4724 		if (object->paging_in_progress != 0 ||
4725 		    object->activity_in_progress != 0) {
4726 			/* try and collapse the rest of the shadow chain */
4727 			if (object != original_object) {
4728 				vm_object_unlock(object);
4729 			}
4730 			object = backing_object;
4731 			object_lock_type = backing_object_lock_type;
4732 			continue;
4733 		}
4734 
4735 		/*
4736 		 *	...
4737 		 *		The backing object is not read_only,
4738 		 *		and no pages in the backing object are
4739 		 *		currently being paged out.
4740 		 *		The backing object is internal.
4741 		 *
4742 		 */
4743 
4744 		if (!backing_object->internal ||
4745 		    backing_object->paging_in_progress != 0 ||
4746 		    backing_object->activity_in_progress != 0) {
4747 			/* try and collapse the rest of the shadow chain */
4748 			if (object != original_object) {
4749 				vm_object_unlock(object);
4750 			}
4751 			object = backing_object;
4752 			object_lock_type = backing_object_lock_type;
4753 			continue;
4754 		}
4755 
4756 		/*
4757 		 * Purgeable objects are not supposed to engage in
4758 		 * copy-on-write activities, so should not have
4759 		 * any shadow objects or be a shadow object to another
4760 		 * object.
4761 		 * Collapsing a purgeable object would require some
4762 		 * updates to the purgeable compressed ledgers.
4763 		 */
4764 		if (object->purgable != VM_PURGABLE_DENY ||
4765 		    backing_object->purgable != VM_PURGABLE_DENY) {
4766 			panic("vm_object_collapse() attempting to collapse "
4767 			    "purgeable object: %p(%d) %p(%d)\n",
4768 			    object, object->purgable,
4769 			    backing_object, backing_object->purgable);
4770 			/* try and collapse the rest of the shadow chain */
4771 			if (object != original_object) {
4772 				vm_object_unlock(object);
4773 			}
4774 			object = backing_object;
4775 			object_lock_type = backing_object_lock_type;
4776 			continue;
4777 		}
4778 
4779 		/*
4780 		 *	The backing object can't be a copy-object:
4781 		 *	the shadow_offset for the copy-object must stay
4782 		 *	as 0.  Furthermore (for the 'we have all the
4783 		 *	pages' case), if we bypass backing_object and
4784 		 *	just shadow the next object in the chain, old
4785 		 *	pages from that object would then have to be copied
4786 		 *	BOTH into the (former) backing_object and into the
4787 		 *	parent object.
4788 		 */
4789 		if (backing_object->shadow != VM_OBJECT_NULL &&
4790 		    backing_object->shadow->copy == backing_object) {
4791 			/* try and collapse the rest of the shadow chain */
4792 			if (object != original_object) {
4793 				vm_object_unlock(object);
4794 			}
4795 			object = backing_object;
4796 			object_lock_type = backing_object_lock_type;
4797 			continue;
4798 		}
4799 
4800 		/*
4801 		 *	We can now try to either collapse the backing
4802 		 *	object (if the parent is the only reference to
4803 		 *	it) or (perhaps) remove the parent's reference
4804 		 *	to it.
4805 		 *
4806 		 *	If there is exactly one reference to the backing
4807 		 *	object, we may be able to collapse it into the
4808 		 *	parent.
4809 		 *
4810 		 *	As long as one of the objects is still not known
4811 		 *	to the pager, we can collapse them.
4812 		 */
4813 		if (backing_object->ref_count == 1 &&
4814 		    (vm_object_collapse_compressor_allowed ||
4815 		    !object->pager_created
4816 		    || (!backing_object->pager_created)
4817 		    ) && vm_object_collapse_allowed) {
4818 			/*
4819 			 * We need the exclusive lock on the VM objects.
4820 			 */
4821 			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4822 				/*
4823 				 * We have an object and its shadow locked
4824 				 * "shared".  We can't just upgrade the locks
4825 				 * to "exclusive", as some other thread might
4826 				 * also have these objects locked "shared" and
4827 				 * attempt to upgrade one or the other to
4828 				 * "exclusive".  The upgrades would block
4829 				 * forever waiting for the other "shared" locks
4830 				 * to get released.
4831 				 * So we have to release the locks and go
4832 				 * down the shadow chain again (since it could
4833 				 * have changed) with "exclusive" locking.
4834 				 */
4835 				vm_object_unlock(backing_object);
4836 				if (object != original_object) {
4837 					vm_object_unlock(object);
4838 				}
4839 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4840 				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4841 				goto retry;
4842 			}
4843 
4844 			/*
4845 			 *	Collapse the object with its backing
4846 			 *	object, and try again with the object's
4847 			 *	new backing object.
4848 			 */
4849 
4850 			vm_object_do_collapse(object, backing_object);
4851 			vm_object_collapse_do_collapse++;
4852 			continue;
4853 		}
4854 
4855 		/*
4856 		 *	Collapsing the backing object was not possible
4857 		 *	or permitted, so let's try bypassing it.
4858 		 */
4859 
4860 		if (!(can_bypass && vm_object_bypass_allowed)) {
4861 			/* try and collapse the rest of the shadow chain */
4862 			if (object != original_object) {
4863 				vm_object_unlock(object);
4864 			}
4865 			object = backing_object;
4866 			object_lock_type = backing_object_lock_type;
4867 			continue;
4868 		}
4869 
4870 
4871 		/*
4872 		 *	If the object doesn't have all its pages present,
4873 		 *	we have to make sure no pages in the backing object
4874 		 *	"show through" before bypassing it.
4875 		 */
4876 		object_vcount = object->vo_size >> PAGE_SHIFT;
4877 		object_rcount = (vm_object_size_t)object->resident_page_count;
4878 
4879 		if (object_rcount != object_vcount) {
4880 			vm_object_offset_t      offset;
4881 			vm_object_offset_t      backing_offset;
4882 			vm_object_size_t        backing_rcount, backing_vcount;
4883 
4884 			/*
4885 			 *	If the backing object has a pager but no pagemap,
4886 			 *	then we cannot bypass it, because we don't know
4887 			 *	what pages it has.
4888 			 */
4889 			if (backing_object->pager_created) {
4890 				/* try and collapse the rest of the shadow chain */
4891 				if (object != original_object) {
4892 					vm_object_unlock(object);
4893 				}
4894 				object = backing_object;
4895 				object_lock_type = backing_object_lock_type;
4896 				continue;
4897 			}
4898 
4899 			/*
4900 			 *	If the object has a pager but no pagemap,
4901 			 *	then we cannot bypass it, because we don't know
4902 			 *	what pages it has.
4903 			 */
4904 			if (object->pager_created) {
4905 				/* try and collapse the rest of the shadow chain */
4906 				if (object != original_object) {
4907 					vm_object_unlock(object);
4908 				}
4909 				object = backing_object;
4910 				object_lock_type = backing_object_lock_type;
4911 				continue;
4912 			}
4913 
4914 			backing_offset = object->vo_shadow_offset;
4915 			backing_vcount = backing_object->vo_size >> PAGE_SHIFT;
4916 			backing_rcount = (vm_object_size_t)backing_object->resident_page_count;
4917 			assert(backing_vcount >= object_vcount);
4918 
4919 			if (backing_rcount > (backing_vcount - object_vcount) &&
4920 			    backing_rcount - (backing_vcount - object_vcount) > object_rcount) {
4921 				/*
4922 				 * we have enough pages in the backing object to guarantee that
4923 				 * at least 1 of them must be 'uncovered' by a resident page
4924 				 * in the object we're evaluating, so move on and
4925 				 * try to collapse the rest of the shadow chain
4926 				 */
4927 				if (object != original_object) {
4928 					vm_object_unlock(object);
4929 				}
4930 				object = backing_object;
4931 				object_lock_type = backing_object_lock_type;
4932 				continue;
4933 			}
4934 
4935 			/*
4936 			 *	If all of the pages in the backing object are
4937 			 *	shadowed by the parent object, the parent
4938 			 *	object no longer has to shadow the backing
4939 			 *	object; it can shadow the next one in the
4940 			 *	chain.
4941 			 *
4942 			 *	If the backing object has existence info,
4943 			 *	we must examine its existence info
4944 			 *	as well.
4945 			 *
4946 			 */
4947 
4948 #define EXISTS_IN_OBJECT(obj, off, rc)                  \
4949 	((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
4950 	  == VM_EXTERNAL_STATE_EXISTS) ||               \
4951 	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
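			/*
			 * EXISTS_IN_OBJECT(obj, off, rc) is true if the page
			 * at "off" is either recorded in the compressor or
			 * resident in "obj".  As a side effect it decrements
			 * "rc" each time it finds a resident page; once "rc"
			 * reaches zero the vm_page_lookup() is skipped
			 * entirely, since all resident pages have already
			 * been accounted for.
			 */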
4952 
4953 			/*
4954 			 * Check the hint location first
4955 			 * (since it is often the quickest way out of here).
4956 			 */
4957 			if (object->cow_hint != ~(vm_offset_t)0) {
4958 				hint_offset = (vm_object_offset_t)object->cow_hint;
4959 			} else {
4960 				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
4961 				    (hint_offset - 8 * PAGE_SIZE_64) : 0;
4962 			}
4963 
4964 			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
4965 			    backing_offset, backing_rcount) &&
4966 			    !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) {
4967 				/* dependency right at the hint */
4968 				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
4969 				/* try and collapse the rest of the shadow chain */
4970 				if (object != original_object) {
4971 					vm_object_unlock(object);
4972 				}
4973 				object = backing_object;
4974 				object_lock_type = backing_object_lock_type;
4975 				continue;
4976 			}
4977 
4978 			/*
4979 			 * If the object's window onto the backing_object
4980 			 * is large compared to the number of resident
4981 			 * pages in the backing object, it makes sense to
4982 			 * walk the backing_object's resident pages first.
4983 			 *
4984 			 * NOTE: Pages may be in both the existence map and/or
4985 			 * resident, so if we don't find a dependency while
4986 			 * walking the backing object's resident page list
4987 			 * directly, and there is an existence map, we'll have
4988 			 * to run the offset based 2nd pass.  Because we may
4989 			 * have to run both passes, we need to be careful
4990 			 * not to decrement 'rcount' in the 1st pass
4991 			 */
4992 			if (backing_rcount && backing_rcount < (object_vcount / 8)) {
4993 				vm_object_size_t rc = object_rcount;
4994 				vm_page_t p;
4995 
4996 				backing_rcount = backing_object->resident_page_count;
4997 				p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
4998 				do {
4999 					offset = (p->vmp_offset - backing_offset);
5000 
5001 					if (offset < object->vo_size &&
5002 					    offset != hint_offset &&
5003 					    !EXISTS_IN_OBJECT(object, offset, rc)) {
5004 						/* found a dependency */
5005 						object->cow_hint = (vm_offset_t) offset; /* atomic */
5006 
5007 						break;
5008 					}
5009 					p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5010 				} while (--backing_rcount);
5011 				if (backing_rcount != 0) {
5012 					/* try and collapse the rest of the shadow chain */
5013 					if (object != original_object) {
5014 						vm_object_unlock(object);
5015 					}
5016 					object = backing_object;
5017 					object_lock_type = backing_object_lock_type;
5018 					continue;
5019 				}
5020 			}
5021 
5022 			/*
5023 			 * Walk through the offsets looking for pages in the
5024 			 * backing object that show through to the object.
5025 			 */
5026 			if (backing_rcount) {
5027 				offset = hint_offset;
5028 
5029 				while ((offset =
5030 				    (offset + PAGE_SIZE_64 < object->vo_size) ?
5031 				    (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5032 					if (EXISTS_IN_OBJECT(backing_object, offset +
5033 					    backing_offset, backing_rcount) &&
5034 					    !EXISTS_IN_OBJECT(object, offset, object_rcount)) {
5035 						/* found a dependency */
5036 						object->cow_hint = (vm_offset_t) offset; /* atomic */
5037 						break;
5038 					}
5039 				}
5040 				if (offset != hint_offset) {
5041 					/* try and collapse the rest of the shadow chain */
5042 					if (object != original_object) {
5043 						vm_object_unlock(object);
5044 					}
5045 					object = backing_object;
5046 					object_lock_type = backing_object_lock_type;
5047 					continue;
5048 				}
5049 			}
5050 		}
5051 
5052 		/*
5053 		 * We need "exclusive" locks on the 2 VM objects.
5054 		 */
5055 		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5056 			vm_object_unlock(backing_object);
5057 			if (object != original_object) {
5058 				vm_object_unlock(object);
5059 			}
5060 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5061 			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5062 			goto retry;
5063 		}
5064 
5065 		/* reset the offset hint for any objects deeper in the chain */
5066 		object->cow_hint = (vm_offset_t)0;
5067 
5068 		/*
5069 		 *	All interesting pages in the backing object
5070 		 *	already live in the parent or its pager.
5071 		 *	Thus we can bypass the backing object.
5072 		 */
5073 
5074 		vm_object_do_bypass(object, backing_object);
5075 		vm_object_collapse_do_bypass++;
5076 
5077 		/*
5078 		 *	Try again with this object's new backing object.
5079 		 */
5080 
5081 		continue;
5082 	}
5083 
5084 	/* NOT REACHED */
5085 	/*
5086 	 *  if (object != original_object) {
5087 	 *       vm_object_unlock(object);
5088 	 *  }
5089 	 */
5090 }
5091 
5092 /*
5093  *	Routine:	vm_object_page_remove: [internal]
5094  *	Purpose:
5095  *		Removes all physical pages in the specified
5096  *		object range from the object's list of pages.
5097  *
5098  *	In/out conditions:
5099  *		The object must be locked.
5100  *		The object must not have paging_in_progress, usually
5101  *		guaranteed by not having a pager.
5102  */
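/*
 * Illustrative sketch (not part of the original source): a typical caller
 * holds the object lock and removes a page-aligned range, for example the
 * tail left over after shrinking an object (offsets here are hypothetical):
 *
 *	vm_object_lock(object);
 *	assert(object->paging_in_progress == 0);
 *	vm_object_page_remove(object,
 *	    vm_object_trunc_page(new_size),
 *	    vm_object_round_page(old_size));
 *	vm_object_unlock(object);
 */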
5103 unsigned int vm_object_page_remove_lookup = 0;
5104 unsigned int vm_object_page_remove_iterate = 0;
5105 
5106 __private_extern__ void
5107 vm_object_page_remove(
5108 	vm_object_t             object,
5109 	vm_object_offset_t      start,
5110 	vm_object_offset_t      end)
5111 {
5112 	vm_page_t       p, next;
5113 
5114 	/*
5115 	 *	One and two page removals are most popular.
5116 	 *	The factor of 16 here is somewhat arbitrary.
5117 	 *	It balances vm_object_lookup vs iteration.
5118 	 */
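	/*
	 * Worked example (illustrative): with 160 resident pages, a removal
	 * covering fewer than 10 pages takes the per-page lookup path below;
	 * anything larger walks the object's resident page list once instead.
	 */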
5119 
5120 	if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5121 		vm_object_page_remove_lookup++;
5122 
5123 		for (; start < end; start += PAGE_SIZE_64) {
5124 			p = vm_page_lookup(object, start);
5125 			if (p != VM_PAGE_NULL) {
5126 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5127 				if (!p->vmp_fictitious && p->vmp_pmapped) {
5128 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5129 				}
5130 				VM_PAGE_FREE(p);
5131 			}
5132 		}
5133 	} else {
5134 		vm_object_page_remove_iterate++;
5135 
5136 		p = (vm_page_t) vm_page_queue_first(&object->memq);
5137 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5138 			next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5139 			if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5140 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5141 				if (!p->vmp_fictitious && p->vmp_pmapped) {
5142 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5143 				}
5144 				VM_PAGE_FREE(p);
5145 			}
5146 			p = next;
5147 		}
5148 	}
5149 }
5150 
5151 
5152 /*
5153  *	Routine:	vm_object_coalesce
5154  *	Function:	Coalesces two objects backing up adjoining
5155  *			regions of memory into a single object.
5156  *
5157  *	returns TRUE if objects were combined.
5158  *
5159  *	NOTE:	Only works at the moment if the second object is NULL -
5160  *		if it's not, which object do we lock first?
5161  *
5162  *	Parameters:
5163  *		prev_object	First object to coalesce
5164  *		prev_offset	Offset into prev_object
5165  *		next_object	Second object to coalesce
5166  *		next_offset	Offset into next_object
5167  *
5168  *		prev_size	Size of reference to prev_object
5169  *		next_size	Size of reference to next_object
5170  *
5171  *	Conditions:
5172  *	The object(s) must *not* be locked. The map must be locked
5173  *	to preserve the reference to the object(s).
5174  */
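/*
 * Illustrative sketch (not part of the original source): the typical caller
 * is the vm_map layer, asking whether the object behind an existing mapping
 * can simply be extended to cover a newly allocated, adjacent region:
 *
 *	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
 *	        prev_offset, (vm_object_offset_t) 0,
 *	        prev_size, next_size)) {
 *		... grow the previous map entry instead of creating a new
 *		    object for the new region ...
 *	}
 *
 * The caller holds the map lock (not the object lock); a FALSE return just
 * means a separate object must be used.
 */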
5175 static int vm_object_coalesce_count = 0;
5176 
5177 __private_extern__ boolean_t
5178 vm_object_coalesce(
5179 	vm_object_t                     prev_object,
5180 	vm_object_t                     next_object,
5181 	vm_object_offset_t              prev_offset,
5182 	__unused vm_object_offset_t next_offset,
5183 	vm_object_size_t                prev_size,
5184 	vm_object_size_t                next_size)
5185 {
5186 	vm_object_size_t        newsize;
5187 
5188 #ifdef  lint
5189 	next_offset++;
5190 #endif  /* lint */
5191 
5192 	if (next_object != VM_OBJECT_NULL) {
5193 		return FALSE;
5194 	}
5195 
5196 	if (prev_object == VM_OBJECT_NULL) {
5197 		return TRUE;
5198 	}
5199 
5200 	vm_object_lock(prev_object);
5201 
5202 	/*
5203 	 *	Try to collapse the object first
5204 	 */
5205 	vm_object_collapse(prev_object, prev_offset, TRUE);
5206 
5207 	/*
5208 	 *	Can't coalesce if pages not mapped to
5209 	 *	prev_entry may be in use in any way:
5210 	 *	. more than one reference
5211 	 *	. paged out
5212 	 *	. shadows another object
5213 	 *	. has a copy elsewhere
5214 	 *	. is purgeable
5215 	 *	. paging references (pages might be in page-list)
5216 	 */
5217 
5218 	if ((prev_object->ref_count > 1) ||
5219 	    prev_object->pager_created ||
5220 	    (prev_object->shadow != VM_OBJECT_NULL) ||
5221 	    (prev_object->copy != VM_OBJECT_NULL) ||
5222 	    (prev_object->true_share != FALSE) ||
5223 	    (prev_object->purgable != VM_PURGABLE_DENY) ||
5224 	    (prev_object->paging_in_progress != 0) ||
5225 	    (prev_object->activity_in_progress != 0)) {
5226 		vm_object_unlock(prev_object);
5227 		return FALSE;
5228 	}
5229 
5230 	vm_object_coalesce_count++;
5231 
5232 	/*
5233 	 *	Remove any pages that may still be in the object from
5234 	 *	a previous deallocation.
5235 	 */
5236 	vm_object_page_remove(prev_object,
5237 	    prev_offset + prev_size,
5238 	    prev_offset + prev_size + next_size);
5239 
5240 	/*
5241 	 *	Extend the object if necessary.
5242 	 */
5243 	newsize = prev_offset + prev_size + next_size;
5244 	if (newsize > prev_object->vo_size) {
5245 		assertf(page_aligned(newsize),
5246 		    "object %p size 0x%llx",
5247 		    prev_object, (uint64_t)newsize);
5248 		prev_object->vo_size = newsize;
5249 	}
5250 
5251 	vm_object_unlock(prev_object);
5252 	return TRUE;
5253 }
5254 
5255 kern_return_t
5256 vm_object_populate_with_private(
5257 	vm_object_t             object,
5258 	vm_object_offset_t      offset,
5259 	ppnum_t                 phys_page,
5260 	vm_size_t               size)
5261 {
5262 	ppnum_t                 base_page;
5263 	vm_object_offset_t      base_offset;
5264 
5265 
5266 	if (!object->private) {
5267 		return KERN_FAILURE;
5268 	}
5269 
5270 	base_page = phys_page;
5271 
5272 	vm_object_lock(object);
5273 
5274 	if (!object->phys_contiguous) {
5275 		vm_page_t       m;
5276 
5277 		if ((base_offset = trunc_page_64(offset)) != offset) {
5278 			vm_object_unlock(object);
5279 			return KERN_FAILURE;
5280 		}
5281 		base_offset += object->paging_offset;
5282 
5283 		while (size) {
5284 			m = vm_page_lookup(object, base_offset);
5285 
5286 			if (m != VM_PAGE_NULL) {
5287 				if (m->vmp_fictitious) {
5288 					if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
5289 						vm_page_lockspin_queues();
5290 						m->vmp_private = TRUE;
5291 						vm_page_unlock_queues();
5292 
5293 						m->vmp_fictitious = FALSE;
5294 						VM_PAGE_SET_PHYS_PAGE(m, base_page);
5295 					}
5296 				} else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
5297 					if (!m->vmp_private) {
5298 						/*
5299 						 * we'd leak a real page... that can't be right
5300 						 */
5301 						panic("vm_object_populate_with_private - %p not private", m);
5302 					}
5303 					if (m->vmp_pmapped) {
5304 						/*
5305 						 * pmap call to clear old mapping
5306 						 */
5307 						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5308 					}
5309 					VM_PAGE_SET_PHYS_PAGE(m, base_page);
5310 				}
5311 			} else {
5312 				m = vm_page_grab_fictitious(TRUE);
5313 
5314 				/*
5315 				 * private normally requires lock_queues but since we
5316 				 * are initializing the page, its not necessary here
5317 				 * are initializing the page, it's not necessary here
5318 				m->vmp_private = TRUE;
5319 				m->vmp_fictitious = FALSE;
5320 				VM_PAGE_SET_PHYS_PAGE(m, base_page);
5321 				m->vmp_unusual = TRUE;
5322 				m->vmp_busy = FALSE;
5323 
5324 				vm_page_insert(m, object, base_offset);
5325 			}
5326 			base_page++;                                                                    /* Go to the next physical page */
5327 			base_offset += PAGE_SIZE;
5328 			size -= PAGE_SIZE;
5329 		}
5330 	} else {
5331 		/* NOTE: we should check the original settings here */
5332 		/* if we have a size > zero a pmap call should be made */
5333 		/* to disable the range */
5334 
5335 		/* pmap_? */
5336 
5337 		/* shadows on contiguous memory are not allowed */
5338 		/* we therefore can use the offset field */
5339 		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5340 		assertf(page_aligned(size),
5341 		    "object %p size 0x%llx",
5342 		    object, (uint64_t)size);
5343 		object->vo_size = size;
5344 	}
5345 	vm_object_unlock(object);
5346 
5347 	return KERN_SUCCESS;
5348 }
5349 
5350 
5351 kern_return_t
5352 memory_object_create_named(
5353 	memory_object_t pager,
5354 	memory_object_offset_t  size,
5355 	memory_object_control_t         *control)
5356 {
5357 	vm_object_t             object;
5358 
5359 	*control = MEMORY_OBJECT_CONTROL_NULL;
5360 	if (pager == MEMORY_OBJECT_NULL) {
5361 		return KERN_INVALID_ARGUMENT;
5362 	}
5363 
5364 	object = vm_object_memory_object_associate(pager,
5365 	    VM_OBJECT_NULL,
5366 	    size,
5367 	    TRUE);
5368 	if (object == VM_OBJECT_NULL) {
5369 		return KERN_INVALID_OBJECT;
5370 	}
5371 
5372 	/* wait for object (if any) to be ready */
5373 	if (object != VM_OBJECT_NULL) {
5374 		vm_object_lock(object);
5375 		object->named = TRUE;
5376 		while (!object->pager_ready) {
5377 			vm_object_sleep(object,
5378 			    VM_OBJECT_EVENT_PAGER_READY,
5379 			    THREAD_UNINT);
5380 		}
5381 		*control = object->pager_control;
5382 		vm_object_unlock(object);
5383 	}
5384 	return KERN_SUCCESS;
5385 }
5386 
5387 
5388 __private_extern__ kern_return_t
5389 vm_object_lock_request(
5390 	vm_object_t                     object,
5391 	vm_object_offset_t              offset,
5392 	vm_object_size_t                size,
5393 	memory_object_return_t          should_return,
5394 	int                             flags,
5395 	vm_prot_t                       prot)
5396 {
5397 	__unused boolean_t      should_flush;
5398 
5399 	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5400 
5401 	/*
5402 	 *	Check for bogus arguments.
5403 	 */
5404 	if (object == VM_OBJECT_NULL) {
5405 		return KERN_INVALID_ARGUMENT;
5406 	}
5407 
5408 	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5409 		return KERN_INVALID_ARGUMENT;
5410 	}
5411 
5412 	/*
5413 	 * XXX TODO4K
5414 	 * extend range for conservative operations (copy-on-write, sync, ...)
5415 	 * truncate range for destructive operations (purge, ...)
5416 	 */
5417 	size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5418 	offset = vm_object_trunc_page(offset);
5419 
5420 	/*
5421 	 *	Lock the object, and acquire a paging reference to
5422 	 *	prevent the memory_object reference from being released.
5423 	 */
5424 	vm_object_lock(object);
5425 	vm_object_paging_begin(object);
5426 
5427 	(void)vm_object_update(object,
5428 	    offset, size, NULL, NULL, should_return, flags, prot);
5429 
5430 	vm_object_paging_end(object);
5431 	vm_object_unlock(object);
5432 
5433 	return KERN_SUCCESS;
5434 }
5435 
5436 /*
5437  * Empty a purgeable object by grabbing the physical pages assigned to it and
5438  * putting them on the free queue without writing them to backing store, etc.
5439  * When the pages are next touched they will be demand zero-fill pages.  We
5440  * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
5441  * skip referenced/dirty pages, pages on the active queue, etc.  We're more
5442  * than happy to grab these since this is a purgeable object.  We mark the
5443  * object as "empty" after reaping its pages.
5444  *
5445  * On entry the object must be locked and it must be
5446  * purgeable with no delayed copies pending.
5447  */
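/*
 * Illustrative sketch (not part of the original source): a caller that wants
 * a purgeable object's memory back immediately takes the object lock
 * exclusively and purges it:
 *
 *	vm_object_lock(object);
 *	if (object->purgable != VM_PURGABLE_DENY &&
 *	    object->copy == VM_OBJECT_NULL) {
 *		purged_pages = vm_object_purge(object, 0);
 *	}
 *	vm_object_unlock(object);
 *
 * In practice this path is reached through vm_object_purgable_control()
 * with VM_PURGABLE_EMPTY (see below), which performs the ledger and queue
 * transitions first.
 */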
5448 uint64_t
5449 vm_object_purge(vm_object_t object, int flags)
5450 {
5451 	unsigned int    object_page_count = 0, pgcount = 0;
5452 	uint64_t        total_purged_pgcount = 0;
5453 	boolean_t       skipped_object = FALSE;
5454 
5455 	vm_object_lock_assert_exclusive(object);
5456 
5457 	if (object->purgable == VM_PURGABLE_DENY) {
5458 		return 0;
5459 	}
5460 
5461 	assert(object->copy == VM_OBJECT_NULL);
5462 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5463 
5464 	/*
5465 	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5466 	 * reaping its pages.  We update vm_page_purgeable_count in bulk
5467 	 * and we don't want vm_page_remove() to update it again for each
5468 	 * page we reap later.
5469 	 *
5470 	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5471 	 * are all accounted for in the "volatile" ledgers, so this does not
5472 	 * make any difference.
5473 	 * If we transitioned directly from NONVOLATILE to EMPTY,
5474 	 * vm_page_purgeable_count must have been updated when the object
5475 	 * was dequeued from its volatile queue and the purgeable ledgers
5476 	 * must have also been updated accordingly at that time (in
5477 	 * vm_object_purgable_control()).
5478 	 */
5479 	if (object->purgable == VM_PURGABLE_VOLATILE) {
5480 		unsigned int delta;
5481 		assert(object->resident_page_count >=
5482 		    object->wired_page_count);
5483 		delta = (object->resident_page_count -
5484 		    object->wired_page_count);
5485 		if (delta != 0) {
5486 			assert(vm_page_purgeable_count >=
5487 			    delta);
5488 			OSAddAtomic(-delta,
5489 			    (SInt32 *)&vm_page_purgeable_count);
5490 		}
5491 		if (object->wired_page_count != 0) {
5492 			assert(vm_page_purgeable_wired_count >=
5493 			    object->wired_page_count);
5494 			OSAddAtomic(-object->wired_page_count,
5495 			    (SInt32 *)&vm_page_purgeable_wired_count);
5496 		}
5497 		object->purgable = VM_PURGABLE_EMPTY;
5498 	}
5499 	assert(object->purgable == VM_PURGABLE_EMPTY);
5500 
5501 	object_page_count = object->resident_page_count;
5502 
5503 	vm_object_reap_pages(object, REAP_PURGEABLE);
5504 
5505 	if (object->resident_page_count >= object_page_count) {
5506 		total_purged_pgcount = 0;
5507 	} else {
5508 		total_purged_pgcount = object_page_count - object->resident_page_count;
5509 	}
5510 
5511 	if (object->pager != NULL) {
5512 		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5513 
5514 		if (object->activity_in_progress == 0 &&
5515 		    object->paging_in_progress == 0) {
5516 			/*
5517 			 * Also reap any memory coming from this object
5518 			 * in the VM compressor.
5519 			 *
5520 			 * There are no operations in progress on the VM object
5521 			 * and no operation can start while we're holding the
5522 			 * VM object lock, so it's safe to reap the compressed
5523 			 * pages and update the page counts.
5524 			 */
5525 			pgcount = vm_compressor_pager_get_count(object->pager);
5526 			if (pgcount) {
5527 				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
5528 				vm_compressor_pager_count(object->pager,
5529 				    -pgcount,
5530 				    FALSE,                       /* shared */
5531 				    object);
5532 				vm_object_owner_compressed_update(object,
5533 				    -pgcount);
5534 			}
5535 			if (!(flags & C_DONT_BLOCK)) {
5536 				assert(vm_compressor_pager_get_count(object->pager)
5537 				    == 0);
5538 			}
5539 		} else {
5540 			/*
5541 			 * There's some kind of paging activity in progress
5542 			 * for this object, which could result in a page
5543 			 * being compressed or decompressed, possibly while
5544 			 * the VM object is not locked, so it could race
5545 			 * with us.
5546 			 *
5547 			 * We can't really synchronize this without possibly
5548 			 * causing a deadlock when the compressor needs to
5549 			 * allocate or free memory while compressing or
5550 			 * decompressing a page from a purgeable object
5551 			 * mapped in the kernel_map...
5552 			 *
5553 			 * So let's not attempt to purge the compressor
5554 			 * pager if there's any kind of operation in
5555 			 * progress on the VM object.
5556 			 */
5557 			skipped_object = TRUE;
5558 		}
5559 	}
5560 
5561 	vm_object_lock_assert_exclusive(object);
5562 
5563 	total_purged_pgcount += pgcount;
5564 
5565 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
5566 	    VM_KERNEL_UNSLIDE_OR_PERM(object),                   /* purged object */
5567 	    object_page_count,
5568 	    total_purged_pgcount,
5569 	    skipped_object,
5570 	    0);
5571 
5572 	return total_purged_pgcount;
5573 }
5574 
5575 
5576 /*
5577  * vm_object_purgable_control() allows the caller to control and investigate the
5578  * state of a purgeable object.  A purgeable object is created via a call to
5579  * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
5580  * never be coalesced with any other object -- even other purgeable objects --
5581  * and will thus always remain a distinct object.  A purgeable object has
5582  * special semantics when its reference count is exactly 1.  If its reference
5583  * count is greater than 1, then a purgeable object will behave like a normal
5584  * object and attempts to use this interface will result in an error return
5585  * of KERN_INVALID_ARGUMENT.
5586  *
5587  * A purgeable object may be put into a "volatile" state which will make the
5588  * object's pages eligible for being reclaimed without paging to backing
5589  * store if the system runs low on memory.  If the pages in a volatile
5590  * purgeable object are reclaimed, the purgeable object is said to have been
5591  * "emptied."  When a purgeable object is emptied the system will reclaim as
5592  * many pages from the object as it can in a convenient manner (pages already
5593  * en route to backing store or busy for other reasons are left as is).  When
5594  * a purgeable object is made volatile, its pages will generally be reclaimed
5595  * before other pages in the application's working set.  This semantic is
5596  * generally used by applications which can recreate the data in the object
5597  * faster than it can be paged in.  One such example might be media assets
5598  * which can be reread from a much faster RAID volume.
5599  *
5600  * A purgeable object may be designated as "non-volatile" which means it will
5601  * behave like all other objects in the system with pages being written to and
5602  * read from backing store as needed to satisfy system memory needs.  If the
5603  * object was emptied before the object was made non-volatile, that fact will
5604  * be returned as the old state of the purgeable object (see
5605  * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
5606  * were reclaimed as part of emptying the object will be refaulted in as
5607  * zero-fill on demand.  It is up to the application to note that an object
5608  * was emptied and recreate the object's contents if necessary.  When a
5609  * purgeable object is made non-volatile, its pages will generally not be paged
5610  * out to backing store in the immediate future.  A purgeable object may also
5611  * be manually emptied.
5612  *
5613  * Finally, the current state (non-volatile, volatile, volatile & empty) of a
5614  * volatile purgeable object may be queried at any time.  This information may
5615  * be used as a control input to let the application know when the system is
5616  * experiencing memory pressure and is reclaiming memory.
5617  *
5618  * The specified address may be any address within the purgeable object.  If
5619  * the specified address does not represent any object in the target task's
5620  * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
5621  * object containing the specified address is not a purgeable object, then
5622  * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
5623  * returned.
5624  *
5625  * The control parameter may be any one of VM_PURGABLE_SET_STATE or
5626  * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
5627  * state is used to set the new state of the purgeable object and return its
5628  * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
5629  * object is returned in the parameter state.
5630  *
5631  * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
5632  * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
5633  * the non-volatile, volatile and volatile/empty states described above.
5634  * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
5635  * immediately reclaim as many pages in the object as can be conveniently
5636  * collected (some may have already been written to backing store or be
5637  * otherwise busy).
5638  *
5639  * The process of making a purgeable object non-volatile and determining its
5640  * previous state is atomic.  Thus, if a purgeable object is made
5641  * VM_PURGABLE_NONVOLATILE and the old state is returned as
5642  * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
5643  * completely intact and will remain so until the object is made volatile
5644  * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
5645  * was reclaimed while it was in a volatile state and its previous contents
5646  * have been lost.
5647  */
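/*
 * Illustrative user-level sketch (added commentary, not in the original
 * source), showing how the states described above are typically driven from
 * a task through the Mach interface; vm_allocate() is named above, and
 * vm_purgable_control() is assumed here to be the corresponding user-level
 * control call:
 *
 *	vm_address_t	addr = 0;
 *	int		state;
 *
 *	vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *	...
 *	state = VM_PURGABLE_VOLATILE;
 *	vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		... contents were reclaimed while volatile; recreate them ...
 *	}
 */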
5648 /*
5649  * The object must be locked.
5650  */
5651 kern_return_t
5652 vm_object_purgable_control(
5653 	vm_object_t     object,
5654 	vm_purgable_t   control,
5655 	int             *state)
5656 {
5657 	int             old_state;
5658 	int             new_state;
5659 
5660 	if (object == VM_OBJECT_NULL) {
5661 		/*
5662 		 * Object must already be present or it can't be purgeable.
5663 		 */
5664 		return KERN_INVALID_ARGUMENT;
5665 	}
5666 
5667 	vm_object_lock_assert_exclusive(object);
5668 
5669 	/*
5670 	 * Get current state of the purgeable object.
5671 	 */
5672 	old_state = object->purgable;
5673 	if (old_state == VM_PURGABLE_DENY) {
5674 		return KERN_INVALID_ARGUMENT;
5675 	}
5676 
5677 	/* purgeable can't have delayed copies - now or in the future */
5678 	assert(object->copy == VM_OBJECT_NULL);
5679 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5680 
5681 	/*
5682 	 * Execute the desired operation.
5683 	 */
5684 	if (control == VM_PURGABLE_GET_STATE) {
5685 		*state = old_state;
5686 		return KERN_SUCCESS;
5687 	}
5688 
5689 	if (control == VM_PURGABLE_SET_STATE &&
5690 	    object->purgeable_only_by_kernel) {
5691 		return KERN_PROTECTION_FAILURE;
5692 	}
5693 
5694 	if (control != VM_PURGABLE_SET_STATE &&
5695 	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
5696 		return KERN_INVALID_ARGUMENT;
5697 	}
5698 
5699 	if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
5700 		object->volatile_empty = TRUE;
5701 	}
5702 	if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
5703 		object->volatile_fault = TRUE;
5704 	}
5705 
5706 	new_state = *state & VM_PURGABLE_STATE_MASK;
5707 	if (new_state == VM_PURGABLE_VOLATILE) {
5708 		if (old_state == VM_PURGABLE_EMPTY) {
5709 			/* what's been emptied must stay empty */
5710 			new_state = VM_PURGABLE_EMPTY;
5711 		}
5712 		if (object->volatile_empty) {
5713 			/* debugging mode: go straight to empty */
5714 			new_state = VM_PURGABLE_EMPTY;
5715 		}
5716 	}
5717 
5718 	switch (new_state) {
5719 	case VM_PURGABLE_DENY:
5720 		/*
5721 		 * Attempting to convert purgeable memory to non-purgeable:
5722 		 * not allowed.
5723 		 */
5724 		return KERN_INVALID_ARGUMENT;
5725 	case VM_PURGABLE_NONVOLATILE:
5726 		object->purgable = new_state;
5727 
5728 		if (old_state == VM_PURGABLE_VOLATILE) {
5729 			unsigned int delta;
5730 
5731 			assert(object->resident_page_count >=
5732 			    object->wired_page_count);
5733 			delta = (object->resident_page_count -
5734 			    object->wired_page_count);
5735 
5736 			assert(vm_page_purgeable_count >= delta);
5737 
5738 			if (delta != 0) {
5739 				OSAddAtomic(-delta,
5740 				    (SInt32 *)&vm_page_purgeable_count);
5741 			}
5742 			if (object->wired_page_count != 0) {
5743 				assert(vm_page_purgeable_wired_count >=
5744 				    object->wired_page_count);
5745 				OSAddAtomic(-object->wired_page_count,
5746 				    (SInt32 *)&vm_page_purgeable_wired_count);
5747 			}
5748 
5749 			vm_page_lock_queues();
5750 
5751 			/* object should be on a queue */
5752 			assert(object->objq.next != NULL &&
5753 			    object->objq.prev != NULL);
5754 			purgeable_q_t queue;
5755 
5756 			/*
5757 			 * Move object from its volatile queue to the
5758 			 * non-volatile queue...
5759 			 */
5760 			queue = vm_purgeable_object_remove(object);
5761 			assert(queue);
5762 
5763 			if (object->purgeable_when_ripe) {
5764 				vm_purgeable_token_delete_last(queue);
5765 			}
5766 			assert(queue->debug_count_objects >= 0);
5767 
5768 			vm_page_unlock_queues();
5769 		}
5770 		if (old_state == VM_PURGABLE_VOLATILE ||
5771 		    old_state == VM_PURGABLE_EMPTY) {
5772 			/*
5773 			 * Transfer the object's pages from the volatile to
5774 			 * non-volatile ledgers.
5775 			 */
5776 			vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
5777 		}
5778 
5779 		break;
5780 
5781 	case VM_PURGABLE_VOLATILE:
5782 		if (object->volatile_fault) {
5783 			vm_page_t       p;
5784 			int             refmod;
5785 
5786 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5787 				if (p->vmp_busy ||
5788 				    VM_PAGE_WIRED(p) ||
5789 				    p->vmp_fictitious) {
5790 					continue;
5791 				}
5792 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5793 				if ((refmod & VM_MEM_MODIFIED) &&
5794 				    !p->vmp_dirty) {
5795 					SET_PAGE_DIRTY(p, FALSE);
5796 				}
5797 			}
5798 		}
5799 
5800 		assert(old_state != VM_PURGABLE_EMPTY);
5801 
5802 		purgeable_q_t queue;
5803 
5804 		/* find the correct queue */
5805 		if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
5806 			queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
5807 		} else {
5808 			if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
5809 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
5810 			} else {
5811 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
5812 			}
5813 		}
5814 
5815 		if (old_state == VM_PURGABLE_NONVOLATILE ||
5816 		    old_state == VM_PURGABLE_EMPTY) {
5817 			unsigned int delta;
5818 
5819 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5820 			    VM_PURGABLE_NO_AGING) {
5821 				object->purgeable_when_ripe = FALSE;
5822 			} else {
5823 				object->purgeable_when_ripe = TRUE;
5824 			}
5825 
5826 			if (object->purgeable_when_ripe) {
5827 				kern_return_t result;
5828 
5829 				/* try to add token... this can fail */
5830 				vm_page_lock_queues();
5831 
5832 				result = vm_purgeable_token_add(queue);
5833 				if (result != KERN_SUCCESS) {
5834 					vm_page_unlock_queues();
5835 					return result;
5836 				}
5837 				vm_page_unlock_queues();
5838 			}
5839 
5840 			assert(object->resident_page_count >=
5841 			    object->wired_page_count);
5842 			delta = (object->resident_page_count -
5843 			    object->wired_page_count);
5844 
5845 			if (delta != 0) {
5846 				OSAddAtomic(delta,
5847 				    &vm_page_purgeable_count);
5848 			}
5849 			if (object->wired_page_count != 0) {
5850 				OSAddAtomic(object->wired_page_count,
5851 				    &vm_page_purgeable_wired_count);
5852 			}
5853 
5854 			object->purgable = new_state;
5855 
5856 			/* object should be on "non-volatile" queue */
5857 			assert(object->objq.next != NULL);
5858 			assert(object->objq.prev != NULL);
5859 		} else if (old_state == VM_PURGABLE_VOLATILE) {
5860 			purgeable_q_t   old_queue;
5861 			boolean_t       purgeable_when_ripe;
5862 
5863 			/*
5864 			 * if reassigning priorities / purgeable groups, we don't change the
5865 			 * token queue. So moving priorities will not make pages stay around longer.
5866 			 * Reasoning is that the algorithm gives most priority to the most important
5867 			 * object. If a new token is added, the most important object' priority is boosted.
5868 			 * object. If a new token is added, the most important object's priority is boosted.
5869 			 * It doesn't seem more biasing is neccessary in this case, where no new object is added.
5870 			 * It doesn't seem more biasing is necessary in this case, where no new object is added.
5871 			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5872 
5873 			old_queue = vm_purgeable_object_remove(object);
5874 			assert(old_queue);
5875 
5876 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5877 			    VM_PURGABLE_NO_AGING) {
5878 				purgeable_when_ripe = FALSE;
5879 			} else {
5880 				purgeable_when_ripe = TRUE;
5881 			}
5882 
5883 			if (old_queue != queue ||
5884 			    (purgeable_when_ripe !=
5885 			    object->purgeable_when_ripe)) {
5886 				kern_return_t result;
5887 
5888 				/* Changing queue. Have to move token. */
5889 				vm_page_lock_queues();
5890 				if (object->purgeable_when_ripe) {
5891 					vm_purgeable_token_delete_last(old_queue);
5892 				}
5893 				object->purgeable_when_ripe = purgeable_when_ripe;
5894 				if (object->purgeable_when_ripe) {
5895 					result = vm_purgeable_token_add(queue);
5896 					assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
5897 				}
5898 				vm_page_unlock_queues();
5899 			}
5900 		}
5901 		;
5902 		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT );
5903 		if (old_state == VM_PURGABLE_NONVOLATILE) {
5904 			vm_purgeable_accounting(object,
5905 			    VM_PURGABLE_NONVOLATILE);
5906 		}
5907 
5908 		assert(queue->debug_count_objects >= 0);
5909 
5910 		break;
5911 
5912 
5913 	case VM_PURGABLE_EMPTY:
5914 		if (object->volatile_fault) {
5915 			vm_page_t       p;
5916 			int             refmod;
5917 
5918 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5919 				if (p->vmp_busy ||
5920 				    VM_PAGE_WIRED(p) ||
5921 				    p->vmp_fictitious) {
5922 					continue;
5923 				}
5924 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5925 				if ((refmod & VM_MEM_MODIFIED) &&
5926 				    !p->vmp_dirty) {
5927 					SET_PAGE_DIRTY(p, FALSE);
5928 				}
5929 			}
5930 		}
5931 
5932 		if (old_state == VM_PURGABLE_VOLATILE) {
5933 			purgeable_q_t old_queue;
5934 
5935 			/* object should be on a queue */
5936 			assert(object->objq.next != NULL &&
5937 			    object->objq.prev != NULL);
5938 
5939 			old_queue = vm_purgeable_object_remove(object);
5940 			assert(old_queue);
5941 			if (object->purgeable_when_ripe) {
5942 				vm_page_lock_queues();
5943 				vm_purgeable_token_delete_first(old_queue);
5944 				vm_page_unlock_queues();
5945 			}
5946 		}
5947 
5948 		if (old_state == VM_PURGABLE_NONVOLATILE) {
5949 			/*
5950 			 * This object's pages were previously accounted as
5951 			 * "non-volatile" and now need to be accounted as
5952 			 * "volatile".
5953 			 */
5954 			vm_purgeable_accounting(object,
5955 			    VM_PURGABLE_NONVOLATILE);
5956 			/*
5957 			 * Set to VM_PURGABLE_EMPTY because the pages are no
5958 			 * longer accounted in the "non-volatile" ledger
5959 			 * and are also not accounted for in
5960 			 * "vm_page_purgeable_count".
5961 			 */
5962 			object->purgable = VM_PURGABLE_EMPTY;
5963 		}
5964 
5965 		(void) vm_object_purge(object, 0);
5966 		assert(object->purgable == VM_PURGABLE_EMPTY);
5967 
5968 		break;
5969 	}
5970 
5971 	*state = old_state;
5972 
5973 	vm_object_lock_assert_exclusive(object);
5974 
5975 	return KERN_SUCCESS;
5976 }
5977 
5978 kern_return_t
5979 vm_object_get_page_counts(
5980 	vm_object_t             object,
5981 	vm_object_offset_t      offset,
5982 	vm_object_size_t        size,
5983 	unsigned int            *resident_page_count,
5984 	unsigned int            *dirty_page_count)
5985 {
5986 	kern_return_t           kr = KERN_SUCCESS;
5987 	boolean_t               count_dirty_pages = FALSE;
5988 	vm_page_t               p = VM_PAGE_NULL;
5989 	unsigned int            local_resident_count = 0;
5990 	unsigned int            local_dirty_count = 0;
5991 	vm_object_offset_t      cur_offset = 0;
5992 	vm_object_offset_t      end_offset = 0;
5993 
5994 	if (object == VM_OBJECT_NULL) {
5995 		return KERN_INVALID_ARGUMENT;
5996 	}
5997 
5998 
5999 	cur_offset = offset;
6000 
6001 	end_offset = offset + size;
6002 
6003 	vm_object_lock_assert_exclusive(object);
6004 
6005 	if (dirty_page_count != NULL) {
6006 		count_dirty_pages = TRUE;
6007 	}
6008 
6009 	if (resident_page_count != NULL && count_dirty_pages == FALSE) {
6010 		/*
6011 		 * Fast path when:
6012 		 * - we only want the resident page count, and,
6013 		 * - the entire object is exactly covered by the request.
6014 		 */
6015 		if (offset == 0 && (object->vo_size == size)) {
6016 			*resident_page_count = object->resident_page_count;
6017 			goto out;
6018 		}
6019 	}
6020 
6021 	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
6022 		vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6023 			if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
6024 				local_resident_count++;
6025 
6026 				if (count_dirty_pages) {
6027 					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6028 						local_dirty_count++;
6029 					}
6030 				}
6031 			}
6032 		}
6033 	} else {
6034 		for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6035 			p = vm_page_lookup(object, cur_offset);
6036 
6037 			if (p != VM_PAGE_NULL) {
6038 				local_resident_count++;
6039 
6040 				if (count_dirty_pages) {
6041 					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6042 						local_dirty_count++;
6043 					}
6044 				}
6045 			}
6046 		}
6047 	}
6048 
6049 	if (resident_page_count != NULL) {
6050 		*resident_page_count = local_resident_count;
6051 	}
6052 
6053 	if (dirty_page_count != NULL) {
6054 		*dirty_page_count = local_dirty_count;
6055 	}
6056 
6057 out:
6058 	return kr;
6059 }
6060 
6061 
6062 /*
6063  *	vm_object_reference:
6064  *
6065  *	Gets another reference to the given object.
6066  */
6067 #ifdef vm_object_reference
6068 #undef vm_object_reference
6069 #endif
6070 __private_extern__ void
6071 vm_object_reference(
6072 	vm_object_t     object)
6073 {
6074 	if (object == VM_OBJECT_NULL) {
6075 		return;
6076 	}
6077 
6078 	vm_object_lock(object);
6079 	assert(object->ref_count > 0);
6080 	vm_object_reference_locked(object);
6081 	vm_object_unlock(object);
6082 }
6083 
6084 /*
6085  * vm_object_transpose
6086  *
6087  * This routine takes two VM objects of the same size and exchanges
6088  * their backing store.
6089  * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6090  * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6091  *
6092  * The VM objects must not be locked by caller.
6093  */
6094 unsigned int vm_object_transpose_count = 0;
6095 kern_return_t
6096 vm_object_transpose(
6097 	vm_object_t             object1,
6098 	vm_object_t             object2,
6099 	vm_object_size_t        transpose_size)
6100 {
6101 	vm_object_t             tmp_object;
6102 	kern_return_t           retval;
6103 	boolean_t               object1_locked, object2_locked;
6104 	vm_page_t               page;
6105 	vm_object_offset_t      page_offset;
6106 
6107 	tmp_object = VM_OBJECT_NULL;
6108 	object1_locked = FALSE; object2_locked = FALSE;
6109 
6110 	if (object1 == object2 ||
6111 	    object1 == VM_OBJECT_NULL ||
6112 	    object2 == VM_OBJECT_NULL) {
6113 		/*
6114 		 * If the 2 VM objects are the same, there's
6115 		 * no point in exchanging their backing store.
6116 		 */
6117 		retval = KERN_INVALID_VALUE;
6118 		goto done;
6119 	}
6120 
6121 	/*
6122 	 * Since we need to lock both objects at the same time,
6123 	 * make sure we always lock them in the same order to
6124 	 * avoid deadlocks.
6125 	 */
6126 	if (object1 > object2) {
6127 		tmp_object = object1;
6128 		object1 = object2;
6129 		object2 = tmp_object;
6130 	}
6131 
6132 	/*
6133 	 * Allocate a temporary VM object to hold object1's contents
6134 	 * while we copy object2 to object1.
6135 	 */
6136 	tmp_object = vm_object_allocate(transpose_size);
6137 	vm_object_lock(tmp_object);
6138 	tmp_object->can_persist = FALSE;
6139 
6140 
6141 	/*
6142 	 * Grab control of the 1st VM object.
6143 	 */
6144 	vm_object_lock(object1);
6145 	object1_locked = TRUE;
6146 	if (!object1->alive || object1->terminating ||
6147 	    object1->copy || object1->shadow || object1->shadowed ||
6148 	    object1->purgable != VM_PURGABLE_DENY) {
6149 		/*
6150 		 * We don't deal with copy or shadow objects (yet).
6151 		 */
6152 		retval = KERN_INVALID_VALUE;
6153 		goto done;
6154 	}
6155 	/*
6156 	 * We're about to mess with the object's backing store and
6157 	 * taking a "paging_in_progress" reference wouldn't be enough
6158 	 * to prevent any paging activity on this object, so the caller should
6159 	 * have "quiesced" the objects beforehand, via a UPL operation with
6160 	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6161 	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6162 	 *
6163 	 * Wait for any paging operation to complete (but only paging, not
6164 	 * other kind of activities not linked to the pager).  After we're
6165 	 * statisfied that there's no more paging in progress, we keep the
6166 	 * satisfied that there's no more paging in progress, we keep the
6167 	 */
6168 	vm_object_paging_only_wait(object1, THREAD_UNINT);
6169 
6170 	/*
6171 	 * Same as above for the 2nd object...
6172 	 */
6173 	vm_object_lock(object2);
6174 	object2_locked = TRUE;
6175 	if (!object2->alive || object2->terminating ||
6176 	    object2->copy || object2->shadow || object2->shadowed ||
6177 	    object2->purgable != VM_PURGABLE_DENY) {
6178 		retval = KERN_INVALID_VALUE;
6179 		goto done;
6180 	}
6181 	vm_object_paging_only_wait(object2, THREAD_UNINT);
6182 
6183 
6184 	if (object1->vo_size != object2->vo_size ||
6185 	    object1->vo_size != transpose_size) {
6186 		/*
6187 		 * If the 2 objects don't have the same size, we can't
6188 		 * exchange their backing stores or one would overflow.
6189 		 * If their size doesn't match the caller's
6190 		 * "transpose_size", we can't do it either because the
6191 		 * transpose operation will affect the entire span of
6192 		 * the objects.
6193 		 */
6194 		retval = KERN_INVALID_VALUE;
6195 		goto done;
6196 	}
6197 
6198 
6199 	/*
6200 	 * Transpose the lists of resident pages.
6201 	 * This also updates the resident_page_count and the memq_hint.
6202 	 */
6203 	if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6204 		/*
6205 		 * No pages in object1, just transfer pages
6206 		 * from object2 to object1.  No need to go through
6207 		 * an intermediate object.
6208 		 */
6209 		while (!vm_page_queue_empty(&object2->memq)) {
6210 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6211 			vm_page_rename(page, object1, page->vmp_offset);
6212 		}
6213 		assert(vm_page_queue_empty(&object2->memq));
6214 	} else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6215 		/*
6216 		 * No pages in object2, just transfer pages
6217 		 * from object1 to object2.  No need to go through
6218 		 * an intermediate object.
6219 		 */
6220 		while (!vm_page_queue_empty(&object1->memq)) {
6221 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6222 			vm_page_rename(page, object2, page->vmp_offset);
6223 		}
6224 		assert(vm_page_queue_empty(&object1->memq));
6225 	} else {
6226 		/* transfer object1's pages to tmp_object */
6227 		while (!vm_page_queue_empty(&object1->memq)) {
6228 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6229 			page_offset = page->vmp_offset;
6230 			vm_page_remove(page, TRUE);
6231 			page->vmp_offset = page_offset;
6232 			vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6233 		}
6234 		assert(vm_page_queue_empty(&object1->memq));
6235 		/* transfer object2's pages to object1 */
6236 		while (!vm_page_queue_empty(&object2->memq)) {
6237 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6238 			vm_page_rename(page, object1, page->vmp_offset);
6239 		}
6240 		assert(vm_page_queue_empty(&object2->memq));
6241 		/* transfer tmp_object's pages to object2 */
6242 		while (!vm_page_queue_empty(&tmp_object->memq)) {
6243 			page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6244 			vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6245 			vm_page_insert(page, object2, page->vmp_offset);
6246 		}
6247 		assert(vm_page_queue_empty(&tmp_object->memq));
6248 	}
6249 
6250 #define __TRANSPOSE_FIELD(field)                                \
6251 MACRO_BEGIN                                                     \
6252 	tmp_object->field = object1->field;                     \
6253 	object1->field = object2->field;                        \
6254 	object2->field = tmp_object->field;                     \
6255 MACRO_END
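/*
 * Added note (not in the original source): __TRANSPOSE_FIELD(pager), for
 * example, expands to a three-assignment swap that uses tmp_object purely
 * as scratch space:
 *
 *	tmp_object->pager = object1->pager;
 *	object1->pager = object2->pager;
 *	object2->pager = tmp_object->pager;
 *
 * tmp_object is re-initialized and deallocated in the cleanup path below,
 * so the stale field values it accumulates here are never used.
 */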
6256 
6257 	/* "Lock" refers to the object not its contents */
6258 	/* "size" should be identical */
6259 	assert(object1->vo_size == object2->vo_size);
6260 	/* "memq_hint" was updated above when transposing pages */
6261 	/* "ref_count" refers to the object not its contents */
6262 	assert(object1->ref_count >= 1);
6263 	assert(object2->ref_count >= 1);
6264 	/* "resident_page_count" was updated above when transposing pages */
6265 	/* "wired_page_count" was updated above when transposing pages */
6266 #if !VM_TAG_ACTIVE_UPDATE
6267 	/* "wired_objq" was dealt with along with "wired_page_count" */
6268 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6269 	/* "reusable_page_count" was updated above when transposing pages */
6270 	/* there should be no "copy" */
6271 	assert(!object1->copy);
6272 	assert(!object2->copy);
6273 	/* there should be no "shadow" */
6274 	assert(!object1->shadow);
6275 	assert(!object2->shadow);
6276 	__TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6277 	__TRANSPOSE_FIELD(pager);
6278 	__TRANSPOSE_FIELD(paging_offset);
6279 	__TRANSPOSE_FIELD(pager_control);
6280 	/* update the memory_objects' pointers back to the VM objects */
6281 	if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6282 		memory_object_control_collapse(&object1->pager_control,
6283 		    object1);
6284 	}
6285 	if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6286 		memory_object_control_collapse(&object2->pager_control,
6287 		    object2);
6288 	}
6289 	__TRANSPOSE_FIELD(copy_strategy);
6290 	/* "paging_in_progress" refers to the object not its contents */
6291 	assert(!object1->paging_in_progress);
6292 	assert(!object2->paging_in_progress);
6293 	assert(object1->activity_in_progress);
6294 	assert(object2->activity_in_progress);
6295 	/* "all_wanted" refers to the object not its contents */
6296 	__TRANSPOSE_FIELD(pager_created);
6297 	__TRANSPOSE_FIELD(pager_initialized);
6298 	__TRANSPOSE_FIELD(pager_ready);
6299 	__TRANSPOSE_FIELD(pager_trusted);
6300 	__TRANSPOSE_FIELD(can_persist);
6301 	__TRANSPOSE_FIELD(internal);
6302 	__TRANSPOSE_FIELD(private);
6303 	__TRANSPOSE_FIELD(pageout);
6304 	/* "alive" should be set */
6305 	assert(object1->alive);
6306 	assert(object2->alive);
6307 	/* "purgeable" should be non-purgeable */
6308 	assert(object1->purgable == VM_PURGABLE_DENY);
6309 	assert(object2->purgable == VM_PURGABLE_DENY);
6310 	/* "shadowed" refers to the object not its contents */
6311 	__TRANSPOSE_FIELD(purgeable_when_ripe);
6312 	__TRANSPOSE_FIELD(true_share);
6313 	/* "terminating" should not be set */
6314 	assert(!object1->terminating);
6315 	assert(!object2->terminating);
6316 	/* transfer "named" reference if needed */
6317 	if (object1->named && !object2->named) {
6318 		assert(object1->ref_count >= 2);
6319 		assert(object2->ref_count >= 1);
6320 		object1->ref_count--;
6321 		object2->ref_count++;
6322 	} else if (!object1->named && object2->named) {
6323 		assert(object1->ref_count >= 1);
6324 		assert(object2->ref_count >= 2);
6325 		object1->ref_count++;
6326 		object2->ref_count--;
6327 	}
6328 	__TRANSPOSE_FIELD(named);
6329 	/* "shadow_severed" refers to the object not its contents */
6330 	__TRANSPOSE_FIELD(phys_contiguous);
6331 	__TRANSPOSE_FIELD(nophyscache);
6332 	/* "cached_list.next" points to transposed object */
6333 	object1->cached_list.next = (queue_entry_t) object2;
6334 	object2->cached_list.next = (queue_entry_t) object1;
6335 	/* "cached_list.prev" should be NULL */
6336 	assert(object1->cached_list.prev == NULL);
6337 	assert(object2->cached_list.prev == NULL);
6338 	__TRANSPOSE_FIELD(last_alloc);
6339 	__TRANSPOSE_FIELD(sequential);
6340 	__TRANSPOSE_FIELD(pages_created);
6341 	__TRANSPOSE_FIELD(pages_used);
6342 	__TRANSPOSE_FIELD(scan_collisions);
6343 	__TRANSPOSE_FIELD(cow_hint);
6344 	__TRANSPOSE_FIELD(wimg_bits);
6345 	__TRANSPOSE_FIELD(set_cache_attr);
6346 	__TRANSPOSE_FIELD(code_signed);
6347 	object1->transposed = TRUE;
6348 	object2->transposed = TRUE;
6349 	__TRANSPOSE_FIELD(mapping_in_progress);
6350 	__TRANSPOSE_FIELD(volatile_empty);
6351 	__TRANSPOSE_FIELD(volatile_fault);
6352 	__TRANSPOSE_FIELD(all_reusable);
6353 	assert(object1->blocked_access);
6354 	assert(object2->blocked_access);
6355 	__TRANSPOSE_FIELD(set_cache_attr);
6356 	assert(!object1->object_is_shared_cache);
6357 	assert(!object2->object_is_shared_cache);
6358 	/* ignore purgeable_queue_type and purgeable_queue_group */
6359 	assert(!object1->io_tracking);
6360 	assert(!object2->io_tracking);
6361 #if VM_OBJECT_ACCESS_TRACKING
6362 	assert(!object1->access_tracking);
6363 	assert(!object2->access_tracking);
6364 #endif /* VM_OBJECT_ACCESS_TRACKING */
6365 	__TRANSPOSE_FIELD(no_tag_update);
6366 #if CONFIG_SECLUDED_MEMORY
6367 	assert(!object1->eligible_for_secluded);
6368 	assert(!object2->eligible_for_secluded);
6369 	assert(!object1->can_grab_secluded);
6370 	assert(!object2->can_grab_secluded);
6371 #else /* CONFIG_SECLUDED_MEMORY */
6372 	assert(object1->__object3_unused_bits == 0);
6373 	assert(object2->__object3_unused_bits == 0);
6374 #endif /* CONFIG_SECLUDED_MEMORY */
6375 #if UPL_DEBUG
6376 	/* "uplq" refers to the object not its contents (see upl_transpose()) */
6377 #endif
6378 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6379 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6380 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6381 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6382 
6383 #undef __TRANSPOSE_FIELD
6384 
6385 	retval = KERN_SUCCESS;
6386 
6387 done:
6388 	/*
6389 	 * Cleanup.
6390 	 */
6391 	if (tmp_object != VM_OBJECT_NULL) {
6392 		vm_object_unlock(tmp_object);
6393 		/*
6394 		 * Re-initialize the temporary object to avoid
6395 		 * deallocating a real pager.
6396 		 */
6397 		_vm_object_allocate(transpose_size, tmp_object);
6398 		vm_object_deallocate(tmp_object);
6399 		tmp_object = VM_OBJECT_NULL;
6400 	}
6401 
6402 	if (object1_locked) {
6403 		vm_object_unlock(object1);
6404 		object1_locked = FALSE;
6405 	}
6406 	if (object2_locked) {
6407 		vm_object_unlock(object2);
6408 		object2_locked = FALSE;
6409 	}
6410 
6411 	vm_object_transpose_count++;
6412 
6413 	return retval;
6414 }
6415 
6416 
6417 /*
6418  *      vm_object_cluster_size
6419  *
6420  *      Determine how big a cluster we should issue an I/O for...
6421  *
6422  *	Inputs:   *start == offset of page needed
6423  *		  *length == maximum cluster pager can handle
6424  *	Outputs:  *start == beginning offset of cluster
6425  *		  *length == length of cluster to try
6426  *
6427  *	The original *start will be encompassed by the cluster
6428  *
6429  */
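/*
 * Illustrative sketch (not part of the original source): a page-fault path
 * would size its pager request around the faulting offset roughly like this
 * (variable names are hypothetical):
 *
 *	vm_object_offset_t	cluster_start = vm_object_trunc_page(fault_offset);
 *	vm_size_t		cluster_length = MAX_UPL_TRANSFER_BYTES;
 *	uint32_t		io_streaming = 0;
 *
 *	vm_object_cluster_size(object, &cluster_start, &cluster_length,
 *	    fault_info, &io_streaming);
 *
 * On return cluster_start <= fault_offset < cluster_start + cluster_length,
 * and cluster_length is at least one page even when speculation is disabled.
 */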
6430 extern int speculative_reads_disabled;
6431 
6432 /*
6433  * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
6434  * to derive min_ph_bytes and max_ph_bytes (IMP: bytes not # of pages) and expect those values to
6435  * always be page-aligned. The derivation could involve operations (e.g. division)
6436  * that could give us non-page-size aligned values if we start out with values that
6437  * are odd multiples of PAGE_SIZE.
6438  */
6439 #if !XNU_TARGET_OS_OSX
6440 unsigned int preheat_max_bytes = (1024 * 512);
6441 #else /* !XNU_TARGET_OS_OSX */
6442 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6443 #endif /* !XNU_TARGET_OS_OSX */
6444 unsigned int preheat_min_bytes = (1024 * 32);
6445 
6446 
6447 __private_extern__ void
6448 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6449     vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6450 {
6451 	vm_size_t               pre_heat_size;
6452 	vm_size_t               tail_size;
6453 	vm_size_t               head_size;
6454 	vm_size_t               max_length;
6455 	vm_size_t               cluster_size;
6456 	vm_object_offset_t      object_size;
6457 	vm_object_offset_t      orig_start;
6458 	vm_object_offset_t      target_start;
6459 	vm_object_offset_t      offset;
6460 	vm_behavior_t           behavior;
6461 	boolean_t               look_behind = TRUE;
6462 	boolean_t               look_ahead  = TRUE;
6463 	boolean_t               isSSD = FALSE;
6464 	uint32_t                throttle_limit;
6465 	int                     sequential_run;
6466 	int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6467 	vm_size_t               max_ph_size;
6468 	vm_size_t               min_ph_size;
6469 
6470 	assert( !(*length & PAGE_MASK));
6471 	assert( !(*start & PAGE_MASK_64));
6472 
6473 	/*
6474 	 * remember maxiumum length of run requested
6475 	 * remember maximum length of run requested
6476 	max_length = *length;
6477 	/*
6478 	 * we'll always return a cluster size of at least
6479 	 * 1 page, since the original fault must always
6480 	 * be processed
6481 	 */
6482 	*length = PAGE_SIZE;
6483 	*io_streaming = 0;
6484 
6485 	if (speculative_reads_disabled || fault_info == NULL) {
6486 		/*
6487 		 * no cluster... just fault the page in
6488 		 */
6489 		return;
6490 	}
6491 	orig_start = *start;
6492 	target_start = orig_start;
6493 	cluster_size = round_page(fault_info->cluster_size);
6494 	behavior = fault_info->behavior;
6495 
6496 	vm_object_lock(object);
6497 
6498 	if (object->pager == MEMORY_OBJECT_NULL) {
6499 		goto out;       /* pager is gone for this object, nothing more to do */
6500 	}
6501 	vnode_pager_get_isSSD(object->pager, &isSSD);
6502 
6503 	min_ph_size = round_page(preheat_min_bytes);
6504 	max_ph_size = round_page(preheat_max_bytes);
6505 
6506 #if XNU_TARGET_OS_OSX
6507 	if (isSSD) {
6508 		min_ph_size /= 2;
6509 		max_ph_size /= 8;
6510 
6511 		if (min_ph_size & PAGE_MASK_64) {
6512 			min_ph_size = trunc_page(min_ph_size);
6513 		}
6514 
6515 		if (max_ph_size & PAGE_MASK_64) {
6516 			max_ph_size = trunc_page(max_ph_size);
6517 		}
6518 	}
6519 #endif /* XNU_TARGET_OS_OSX */
6520 
6521 	if (min_ph_size < PAGE_SIZE) {
6522 		min_ph_size = PAGE_SIZE;
6523 	}
6524 
6525 	if (max_ph_size < PAGE_SIZE) {
6526 		max_ph_size = PAGE_SIZE;
6527 	} else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
6528 		max_ph_size = MAX_UPL_TRANSFER_BYTES;
6529 	}
6530 
6531 	if (max_length > max_ph_size) {
6532 		max_length = max_ph_size;
6533 	}
6534 
6535 	if (max_length <= PAGE_SIZE) {
6536 		goto out;
6537 	}
6538 
6539 	if (object->internal) {
6540 		object_size = object->vo_size;
6541 	} else {
6542 		vnode_pager_get_object_size(object->pager, &object_size);
6543 	}
6544 
6545 	object_size = round_page_64(object_size);
6546 
6547 	if (orig_start >= object_size) {
6548 		/*
6549 		 * fault occurred beyond the EOF...
6550 		 * we need to punt w/o changing the
6551 		 * starting offset
6552 		 */
6553 		goto out;
6554 	}
6555 	if (object->pages_used > object->pages_created) {
6556 		/*
6557 		 * must have wrapped our 32 bit counters
6558 		 * so reset
6559 		 */
6560 		object->pages_used = object->pages_created = 0;
6561 	}
6562 	if ((sequential_run = object->sequential)) {
6563 		if (sequential_run < 0) {
6564 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
6565 			sequential_run = 0 - sequential_run;
6566 		} else {
6567 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6568 		}
6569 	}
6570 	switch (behavior) {
6571 	default:
6572 		behavior = VM_BEHAVIOR_DEFAULT;
6573 		OS_FALLTHROUGH;
6574 
6575 	case VM_BEHAVIOR_DEFAULT:
6576 		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
6577 			goto out;
6578 		}
6579 
6580 		if (sequential_run >= (3 * PAGE_SIZE)) {
6581 			pre_heat_size = sequential_run + PAGE_SIZE;
6582 
6583 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
6584 				look_behind = FALSE;
6585 			} else {
6586 				look_ahead = FALSE;
6587 			}
6588 
6589 			*io_streaming = 1;
6590 		} else {
6591 			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
6592 				/*
6593 				 * prime the pump
6594 				 */
6595 				pre_heat_size = min_ph_size;
6596 			} else {
6597 				/*
6598 				 * Linear growth in PH size: The maximum size is max_length...
6599 				 * this cacluation will result in a size that is neither a
6600 				 * power of 2 nor a multiple of PAGE_SIZE... so round
6601 				 * it up to the nearest PAGE_SIZE boundary
6602 				 */
6603 				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
6604 
6605 				if (pre_heat_size < min_ph_size) {
6606 					pre_heat_size = min_ph_size;
6607 				} else {
6608 					pre_heat_size = round_page(pre_heat_size);
6609 				}
6610 			}
6611 		}
6612 		break;
6613 
6614 	case VM_BEHAVIOR_RANDOM:
6615 		if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
6616 			goto out;
6617 		}
6618 		break;
6619 
6620 	case VM_BEHAVIOR_SEQUENTIAL:
6621 		if ((pre_heat_size = cluster_size) == 0) {
6622 			pre_heat_size = sequential_run + PAGE_SIZE;
6623 		}
6624 		look_behind = FALSE;
6625 		*io_streaming = 1;
6626 
6627 		break;
6628 
6629 	case VM_BEHAVIOR_RSEQNTL:
6630 		if ((pre_heat_size = cluster_size) == 0) {
6631 			pre_heat_size = sequential_run + PAGE_SIZE;
6632 		}
6633 		look_ahead = FALSE;
6634 		*io_streaming = 1;
6635 
6636 		break;
6637 	}
6638 	throttle_limit = (uint32_t) max_length;
6639 	assert(throttle_limit == max_length);
6640 
6641 	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
6642 		if (max_length > throttle_limit) {
6643 			max_length = throttle_limit;
6644 		}
6645 	}
6646 	if (pre_heat_size > max_length) {
6647 		pre_heat_size = max_length;
6648 	}
6649 
6650 	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
6651 		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
6652 
6653 		if (consider_free < vm_page_throttle_limit) {
6654 			pre_heat_size = trunc_page(pre_heat_size / 16);
6655 		} else if (consider_free < vm_page_free_target) {
6656 			pre_heat_size = trunc_page(pre_heat_size / 4);
6657 		}
6658 
6659 		if (pre_heat_size < min_ph_size) {
6660 			pre_heat_size = min_ph_size;
6661 		}
6662 	}
6663 	if (look_ahead == TRUE) {
6664 		if (look_behind == TRUE) {
6665 			/*
6666 			 * if we get here it's due to a random access...
6667 			 * so we want to center the original fault address
6668 			 * within the cluster we will issue... make sure
6669 			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
6670 			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
6671 			 * necessarily an even number of pages so we need to truncate
6672 			 * the result to a PAGE_SIZE boundary
6673 			 */
6674 			head_size = trunc_page(pre_heat_size / 2);
6675 
6676 			if (target_start > head_size) {
6677 				target_start -= head_size;
6678 			} else {
6679 				target_start = 0;
6680 			}
6681 
6682 			/*
6683 			 * 'target_start' at this point represents the beginning offset
6684 			 * of the cluster we are considering... 'orig_start' will be in
6685 			 * the center of this cluster if we didn't have to clip the start
6686 			 * due to running into the start of the file
6687 			 */
6688 		}
6689 		if ((target_start + pre_heat_size) > object_size) {
6690 			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
6691 		}
6692 		/*
6693 		 * at this point calculate the number of pages beyond the original fault
6694 		 * address that we want to consider... this is guaranteed not to extend beyond
6695 		 * the current EOF...
6696 		 */
6697 		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
6698 		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
6699 	} else {
6700 		if (pre_heat_size > target_start) {
6701 			/*
6702 			 * since pre_heat_size is always smaller than 2^32,
6703 			 * if it is larger than target_start (a 64 bit value)
6704 			 * it is safe to clip target_start to 32 bits
6705 			 */
6706 			pre_heat_size = (vm_size_t) target_start;
6707 		}
6708 		tail_size = 0;
6709 	}
6710 	assert( !(target_start & PAGE_MASK_64));
6711 	assert( !(pre_heat_size & PAGE_MASK_64));
6712 
6713 	if (pre_heat_size <= PAGE_SIZE) {
6714 		goto out;
6715 	}
6716 
6717 	if (look_behind == TRUE) {
6718 		/*
6719 		 * take a look at the pages before the original
6720 		 * faulting offset... recalculate this in case
6721 		 * we had to clip 'pre_heat_size' above to keep
6722 		 * from running past the EOF.
6723 		 */
6724 		head_size = pre_heat_size - tail_size - PAGE_SIZE;
6725 
6726 		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
6727 			/*
6728 			 * don't poke below the lowest offset
6729 			 */
6730 			if (offset < fault_info->lo_offset) {
6731 				break;
6732 			}
6733 			/*
6734 			 * for external objects or internal objects w/o a pager,
6735 			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6736 			 */
6737 			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6738 				break;
6739 			}
6740 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6741 				/*
6742 				 * don't bridge resident pages
6743 				 */
6744 				break;
6745 			}
6746 			*start = offset;
6747 			*length += PAGE_SIZE;
6748 		}
6749 	}
6750 	if (look_ahead == TRUE) {
6751 		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
6752 			/*
6753 			 * don't poke above the highest offset
6754 			 */
6755 			if (offset >= fault_info->hi_offset) {
6756 				break;
6757 			}
6758 			assert(offset < object_size);
6759 
6760 			/*
6761 			 * for external objects or internal objects w/o a pager,
6762 			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6763 			 */
6764 			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6765 				break;
6766 			}
6767 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6768 				/*
6769 				 * don't bridge resident pages
6770 				 */
6771 				break;
6772 			}
6773 			*length += PAGE_SIZE;
6774 		}
6775 	}
6776 out:
6777 	if (*length > max_length) {
6778 		*length = max_length;
6779 	}
6780 
6781 	vm_object_unlock(object);
6782 
6783 	DTRACE_VM1(clustersize, vm_size_t, *length);
6784 }
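/*
 * Illustrative sketch (not part of this file): the random-access centering
 * arithmetic used above, restated with plain C types so it can be read in
 * isolation.  EX_PAGE_SIZE/EX_TRUNC_PAGE are local stand-ins for the
 * kernel's PAGE_SIZE/trunc_page; the function and its names are made up
 * for the example.
 */
#if 0   /* example only */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SIZE      4096ULL
#define EX_TRUNC_PAGE(x)  ((x) & ~(EX_PAGE_SIZE - 1))

static void
example_center_cluster(uint64_t orig_start, uint64_t pre_heat_size)
{
	/* half of the cluster (rounded down to a page boundary) goes before the faulting page */
	uint64_t head_size = EX_TRUNC_PAGE(pre_heat_size / 2);
	/* ... unless that would run past offset 0 */
	uint64_t target_start = (orig_start > head_size) ? (orig_start - head_size) : 0;
	/* the remainder, minus the faulting page itself, goes after it */
	uint64_t tail_size = pre_heat_size - (orig_start - target_start) - EX_PAGE_SIZE;

	printf("cluster start 0x%llx head 0x%llx tail 0x%llx\n",
	    (unsigned long long)target_start,
	    (unsigned long long)head_size,
	    (unsigned long long)tail_size);
}
#endif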
6785 
6786 
6787 /*
6788  * Allow manipulation of individual page state.  This is actually part of
6789  * the UPL regimen but takes place on the VM object rather than on a UPL
6790  */
6791 
6792 kern_return_t
6793 vm_object_page_op(
6794 	vm_object_t             object,
6795 	vm_object_offset_t      offset,
6796 	int                     ops,
6797 	ppnum_t                 *phys_entry,
6798 	int                     *flags)
6799 {
6800 	vm_page_t               dst_page;
6801 
6802 	vm_object_lock(object);
6803 
6804 	if (ops & UPL_POP_PHYSICAL) {
6805 		if (object->phys_contiguous) {
6806 			if (phys_entry) {
6807 				*phys_entry = (ppnum_t)
6808 				    (object->vo_shadow_offset >> PAGE_SHIFT);
6809 			}
6810 			vm_object_unlock(object);
6811 			return KERN_SUCCESS;
6812 		} else {
6813 			vm_object_unlock(object);
6814 			return KERN_INVALID_OBJECT;
6815 		}
6816 	}
6817 	if (object->phys_contiguous) {
6818 		vm_object_unlock(object);
6819 		return KERN_INVALID_OBJECT;
6820 	}
6821 
6822 	while (TRUE) {
6823 		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
6824 			vm_object_unlock(object);
6825 			return KERN_FAILURE;
6826 		}
6827 
6828 		/* Sync up on getting the busy bit */
6829 		if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
6830 		    (((ops & UPL_POP_SET) &&
6831 		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
6832 			/* someone else is playing with the page, we will */
6833 			/* have to wait */
6834 			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6835 			continue;
6836 		}
6837 
6838 		if (ops & UPL_POP_DUMP) {
6839 			if (dst_page->vmp_pmapped == TRUE) {
6840 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6841 			}
6842 
6843 			VM_PAGE_FREE(dst_page);
6844 			break;
6845 		}
6846 
6847 		if (flags) {
6848 			*flags = 0;
6849 
6850 			/* Get the condition of flags before requested ops */
6851 			/* are undertaken */
6852 
6853 			if (dst_page->vmp_dirty) {
6854 				*flags |= UPL_POP_DIRTY;
6855 			}
6856 			if (dst_page->vmp_free_when_done) {
6857 				*flags |= UPL_POP_PAGEOUT;
6858 			}
6859 			if (dst_page->vmp_precious) {
6860 				*flags |= UPL_POP_PRECIOUS;
6861 			}
6862 			if (dst_page->vmp_absent) {
6863 				*flags |= UPL_POP_ABSENT;
6864 			}
6865 			if (dst_page->vmp_busy) {
6866 				*flags |= UPL_POP_BUSY;
6867 			}
6868 		}
6869 
6870 		/* The caller should have made a call either contingent with */
6871 		/* or prior to this call to set UPL_POP_BUSY */
6872 		if (ops & UPL_POP_SET) {
6873 			/* The protection granted with this assert will */
6874 			/* not be complete.  If the caller violates the */
6875 			/* convention and attempts to change page state */
6876 			/* without first setting busy we may not see it */
6877 			/* because the page may already be busy.  However */
6878 			/* if such violations occur we will assert sooner */
6879 			/* or later. */
6880 			assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
6881 			if (ops & UPL_POP_DIRTY) {
6882 				SET_PAGE_DIRTY(dst_page, FALSE);
6883 			}
6884 			if (ops & UPL_POP_PAGEOUT) {
6885 				dst_page->vmp_free_when_done = TRUE;
6886 			}
6887 			if (ops & UPL_POP_PRECIOUS) {
6888 				dst_page->vmp_precious = TRUE;
6889 			}
6890 			if (ops & UPL_POP_ABSENT) {
6891 				dst_page->vmp_absent = TRUE;
6892 			}
6893 			if (ops & UPL_POP_BUSY) {
6894 				dst_page->vmp_busy = TRUE;
6895 			}
6896 		}
6897 
6898 		if (ops & UPL_POP_CLR) {
6899 			assert(dst_page->vmp_busy);
6900 			if (ops & UPL_POP_DIRTY) {
6901 				dst_page->vmp_dirty = FALSE;
6902 			}
6903 			if (ops & UPL_POP_PAGEOUT) {
6904 				dst_page->vmp_free_when_done = FALSE;
6905 			}
6906 			if (ops & UPL_POP_PRECIOUS) {
6907 				dst_page->vmp_precious = FALSE;
6908 			}
6909 			if (ops & UPL_POP_ABSENT) {
6910 				dst_page->vmp_absent = FALSE;
6911 			}
6912 			if (ops & UPL_POP_BUSY) {
6913 				dst_page->vmp_busy = FALSE;
6914 				PAGE_WAKEUP(dst_page);
6915 			}
6916 		}
6917 		if (phys_entry) {
6918 			/*
6919 			 * The physical page number will remain valid
6920 			 * only if the page is kept busy.
6921 			 */
6922 			assert(dst_page->vmp_busy);
6923 			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
6924 		}
6925 
6926 		break;
6927 	}
6928 
6929 	vm_object_unlock(object);
6930 	return KERN_SUCCESS;
6931 }
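/*
 * Illustrative sketch (not part of this file): one way a caller could use
 * the flags handled above -- set UPL_POP_BUSY while retrieving the physical
 * page number so it stays valid, then clear it again.  The function name
 * and the "obj"/"off" parameters are hypothetical.
 */
#if 0   /* example only */
static kern_return_t
example_lookup_phys(vm_object_t obj, vm_object_offset_t off, ppnum_t *ppnum)
{
	kern_return_t kr;

	/* mark the page busy; the returned physical page remains valid while busy */
	kr = vm_object_page_op(obj, off, UPL_POP_SET | UPL_POP_BUSY, ppnum, NULL);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... use *ppnum while the page is busy ... */

	/* clear the busy bit (this also wakes up any waiters) */
	return vm_object_page_op(obj, off, UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
}
#endif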
6932 
6933 /*
6934  * vm_object_range_op offers performance enhancement over
6935  * vm_object_page_op for page_op functions which do not require page
6936  * level state to be returned from the call.  Page_op was created to provide
6937  * a low-cost alternative to page manipulation via UPLs when only a single
6938  * page was involved.  The range_op call establishes the ability in the _op
6939  * family of functions to work on multiple pages where the lack of page level
6940  * state handling allows the caller to avoid the overhead of the upl structures.
6941  */
6942 
6943 kern_return_t
6944 vm_object_range_op(
6945 	vm_object_t             object,
6946 	vm_object_offset_t      offset_beg,
6947 	vm_object_offset_t      offset_end,
6948 	int                     ops,
6949 	uint32_t                *range)
6950 {
6951 	vm_object_offset_t      offset;
6952 	vm_page_t               dst_page;
6953 
6954 	if (offset_end - offset_beg > (uint32_t) -1) {
6955 		/* range is too big and would overflow "*range" */
6956 		return KERN_INVALID_ARGUMENT;
6957 	}
6958 	if (object->resident_page_count == 0) {
6959 		if (range) {
6960 			if (ops & UPL_ROP_PRESENT) {
6961 				*range = 0;
6962 			} else {
6963 				*range = (uint32_t) (offset_end - offset_beg);
6964 				assert(*range == (offset_end - offset_beg));
6965 			}
6966 		}
6967 		return KERN_SUCCESS;
6968 	}
6969 	vm_object_lock(object);
6970 
6971 	if (object->phys_contiguous) {
6972 		vm_object_unlock(object);
6973 		return KERN_INVALID_OBJECT;
6974 	}
6975 
6976 	offset = offset_beg & ~PAGE_MASK_64;
6977 
6978 	while (offset < offset_end) {
6979 		dst_page = vm_page_lookup(object, offset);
6980 		if (dst_page != VM_PAGE_NULL) {
6981 			if (ops & UPL_ROP_DUMP) {
6982 				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6983 					/*
6984 					 * someone else is playing with the
6985 					 * page, we will have to wait
6986 					 */
6987 					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6988 					/*
6989 					 * need to look the page up again since its
6990 					 * state may have changed while we slept
6991 					 * it might even belong to a different object
6992 					 * at this point
6993 					 */
6994 					continue;
6995 				}
6996 				if (dst_page->vmp_laundry) {
6997 					vm_pageout_steal_laundry(dst_page, FALSE);
6998 				}
6999 
7000 				if (dst_page->vmp_pmapped == TRUE) {
7001 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7002 				}
7003 
7004 				VM_PAGE_FREE(dst_page);
7005 			} else if ((ops & UPL_ROP_ABSENT)
7006 			    && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
7007 				break;
7008 			}
7009 		} else if (ops & UPL_ROP_PRESENT) {
7010 			break;
7011 		}
7012 
7013 		offset += PAGE_SIZE;
7014 	}
7015 	vm_object_unlock(object);
7016 
7017 	if (range) {
7018 		if (offset > offset_end) {
7019 			offset = offset_end;
7020 		}
7021 		if (offset > offset_beg) {
7022 			*range = (uint32_t) (offset - offset_beg);
7023 			assert(*range == (offset - offset_beg));
7024 		} else {
7025 			*range = 0;
7026 		}
7027 	}
7028 	return KERN_SUCCESS;
7029 }
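/*
 * Illustrative sketch (not part of this file): using UPL_ROP_PRESENT to
 * measure how many bytes of resident pages start at a given offset; the
 * walk above stops at the first non-resident page.  The function name and
 * parameters are hypothetical.
 */
#if 0   /* example only */
static uint32_t
example_resident_run(vm_object_t obj, vm_object_offset_t start, vm_object_offset_t end)
{
	uint32_t resident_bytes = 0;

	if (vm_object_range_op(obj, start, end, UPL_ROP_PRESENT, &resident_bytes) != KERN_SUCCESS) {
		return 0;
	}
	return resident_bytes;
}
#endif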
7030 
7031 /*
7032  * Used to point a pager directly to a range of memory (when the pager may be associated
7033  *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
7034  *   expect that the virtual address will denote the start of a range that is physically contiguous.
7035  */
7036 kern_return_t
7037 pager_map_to_phys_contiguous(
7038 	memory_object_control_t object,
7039 	memory_object_offset_t  offset,
7040 	addr64_t                base_vaddr,
7041 	vm_size_t               size)
7042 {
7043 	ppnum_t page_num;
7044 	boolean_t clobbered_private;
7045 	kern_return_t retval;
7046 	vm_object_t pager_object;
7047 
7048 	page_num = pmap_find_phys(kernel_pmap, base_vaddr);
7049 
7050 	if (!page_num) {
7051 		retval = KERN_FAILURE;
7052 		goto out;
7053 	}
7054 
7055 	pager_object = memory_object_control_to_vm_object(object);
7056 
7057 	if (!pager_object) {
7058 		retval = KERN_FAILURE;
7059 		goto out;
7060 	}
7061 
7062 	clobbered_private = pager_object->private;
7063 	if (pager_object->private != TRUE) {
7064 		vm_object_lock(pager_object);
7065 		pager_object->private = TRUE;
7066 		vm_object_unlock(pager_object);
7067 	}
7068 	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7069 
7070 	if (retval != KERN_SUCCESS) {
7071 		if (pager_object->private != clobbered_private) {
7072 			vm_object_lock(pager_object);
7073 			pager_object->private = clobbered_private;
7074 			vm_object_unlock(pager_object);
7075 		}
7076 	}
7077 
7078 out:
7079 	return retval;
7080 }
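/*
 * Illustrative sketch (not part of this file): a hypothetical caller pointing
 * a pager at a physically contiguous kernel buffer starting at object offset
 * 0.  "control", "kva" and "len" are assumptions made up for the example.
 */
#if 0   /* example only */
static kern_return_t
example_expose_contiguous_buffer(memory_object_control_t control, addr64_t kva, vm_size_t len)
{
	/* the buffer must be physically contiguous, as noted above */
	return pager_map_to_phys_contiguous(control, 0, kva, len);
}
#endif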
7081 
7082 uint32_t scan_object_collision = 0;
7083 
7084 void
7085 vm_object_lock(vm_object_t object)
7086 {
7087 	if (object == vm_pageout_scan_wants_object) {
7088 		scan_object_collision++;
7089 		mutex_pause(2);
7090 	}
7091 	DTRACE_VM(vm_object_lock_w);
7092 	lck_rw_lock_exclusive(&object->Lock);
7093 }
7094 
7095 boolean_t
7096 vm_object_lock_avoid(vm_object_t object)
7097 {
7098 	if (object == vm_pageout_scan_wants_object) {
7099 		scan_object_collision++;
7100 		return TRUE;
7101 	}
7102 	return FALSE;
7103 }
7104 
7105 boolean_t
7106 _vm_object_lock_try(vm_object_t object)
7107 {
7108 	boolean_t       retval;
7109 
7110 	retval = lck_rw_try_lock_exclusive(&object->Lock);
7111 #if DEVELOPMENT || DEBUG
7112 	if (retval == TRUE) {
7113 		DTRACE_VM(vm_object_lock_w);
7114 	}
7115 #endif
7116 	return retval;
7117 }
7118 
7119 boolean_t
7120 vm_object_lock_try(vm_object_t object)
7121 {
7122 	/*
7123 	 * Called from hibernate path so check before blocking.
7124 	 */
7125 	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7126 		mutex_pause(2);
7127 	}
7128 	return _vm_object_lock_try(object);
7129 }
7130 
7131 /*
7132  * Lock the object exclusive.
7133  *
7134  * Returns true iff the thread had to spin or block before
7135  * acquiring the lock.
7136  */
7137 bool
7138 vm_object_lock_check_contended(vm_object_t object)
7139 {
7140 	if (object == vm_pageout_scan_wants_object) {
7141 		scan_object_collision++;
7142 		mutex_pause(2);
7143 	}
7144 	DTRACE_VM(vm_object_lock_w);
7145 	return lck_rw_lock_exclusive_check_contended(&object->Lock);
7146 }
7147 
7148 void
7149 vm_object_lock_shared(vm_object_t object)
7150 {
7151 	if (vm_object_lock_avoid(object)) {
7152 		mutex_pause(2);
7153 	}
7154 	DTRACE_VM(vm_object_lock_r);
7155 	lck_rw_lock_shared(&object->Lock);
7156 }
7157 
7158 boolean_t
7159 vm_object_lock_yield_shared(vm_object_t object)
7160 {
7161 	boolean_t retval = FALSE, force_yield = FALSE;
7162 
7163 	vm_object_lock_assert_shared(object);
7164 
7165 	force_yield = vm_object_lock_avoid(object);
7166 
7167 	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7168 	if (retval) {
7169 		DTRACE_VM(vm_object_lock_yield);
7170 	}
7171 
7172 	return retval;
7173 }
7174 
7175 boolean_t
7176 vm_object_lock_try_shared(vm_object_t object)
7177 {
7178 	boolean_t retval;
7179 
7180 	if (vm_object_lock_avoid(object)) {
7181 		mutex_pause(2);
7182 	}
7183 	retval = lck_rw_try_lock_shared(&object->Lock);
7184 	if (retval) {
7185 		DTRACE_VM(vm_object_lock_r);
7186 	}
7187 	return retval;
7188 }
7189 
7190 boolean_t
7191 vm_object_lock_upgrade(vm_object_t object)
7192 {
7193 	boolean_t       retval;
7194 
7195 	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7196 #if DEVELOPMENT || DEBUG
7197 	if (retval == TRUE) {
7198 		DTRACE_VM(vm_object_lock_w);
7199 	}
7200 #endif
7201 	return retval;
7202 }
7203 
7204 void
7205 vm_object_unlock(vm_object_t object)
7206 {
7207 #if DEVELOPMENT || DEBUG
7208 	DTRACE_VM(vm_object_unlock);
7209 #endif
7210 	lck_rw_done(&object->Lock);
7211 }
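/*
 * Illustrative sketch (not part of this file): a common way the wrappers
 * above are combined -- take a shared hold first and upgrade only when a
 * modification turns out to be needed.  If the upgrade fails, the shared
 * hold has already been dropped, so the lock is re-taken exclusively.
 * The function name is hypothetical.
 */
#if 0   /* example only */
static void
example_lock_for_update(vm_object_t object)
{
	if (!vm_object_lock_try_shared(object)) {
		vm_object_lock_shared(object);  /* blocking shared acquire */
	}

	/* ... read-only inspection under the shared lock ... */

	if (!vm_object_lock_upgrade(object)) {
		/* upgrade failed and dropped the shared hold: go exclusive from scratch */
		vm_object_lock(object);
	}

	/* ... modify the object under the exclusive lock ... */

	vm_object_unlock(object);
}
#endif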
7212 
7213 
7214 unsigned int vm_object_change_wimg_mode_count = 0;
7215 
7216 /*
7217  * The object must be locked
7218  */
7219 void
7220 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7221 {
7222 	vm_page_t p;
7223 
7224 	vm_object_lock_assert_exclusive(object);
7225 
7226 	vm_object_paging_only_wait(object, THREAD_UNINT);
7227 
7228 	vm_page_queue_iterate(&object->memq, p, vmp_listq) {
7229 		if (!p->vmp_fictitious) {
7230 			pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
7231 		}
7232 	}
7233 	if (wimg_mode == VM_WIMG_USE_DEFAULT) {
7234 		object->set_cache_attr = FALSE;
7235 	} else {
7236 		object->set_cache_attr = TRUE;
7237 	}
7238 
7239 	object->wimg_bits = wimg_mode;
7240 
7241 	vm_object_change_wimg_mode_count++;
7242 }
7243 
7244 #if CONFIG_FREEZE
7245 
7246 extern struct freezer_context   freezer_context_global;
7247 
7248 /*
7249  * This routine does the "relocation" of previously
7250  * compressed pages belonging to this object that are
7251  * residing in a number of compressed segments into
7252  * a set of compressed segments dedicated to hold
7253  * compressed pages belonging to this object.
7254  */
7255 
7256 extern AbsoluteTime c_freezer_last_yield_ts;
7257 
7258 #define MAX_FREE_BATCH  32
7259 #define FREEZER_DUTY_CYCLE_ON_MS        5
7260 #define FREEZER_DUTY_CYCLE_OFF_MS       5
7261 
7262 static int c_freezer_should_yield(void);
7263 
7264 
7265 static int
7266 c_freezer_should_yield()
7267 {
7268 	AbsoluteTime    cur_time;
7269 	uint64_t        nsecs;
7270 
7271 	assert(c_freezer_last_yield_ts);
7272 	clock_get_uptime(&cur_time);
7273 
7274 	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7275 	absolutetime_to_nanoseconds(cur_time, &nsecs);
7276 
7277 	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7278 		return 1;
7279 	}
7280 	return 0;
7281 }
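/*
 * Illustrative sketch (not part of this file): the duty-cycle test above
 * restated with plain types -- yield once more than FREEZER_DUTY_CYCLE_ON_MS
 * milliseconds of work have elapsed since the last yield.  The names below
 * are made up for the example.
 */
#if 0   /* example only */
#include <stdbool.h>
#include <stdint.h>

#define EX_DUTY_CYCLE_ON_MS  5ULL
#define EX_NSEC_PER_MSEC     (1000ULL * 1000ULL)

static bool
example_should_yield(uint64_t now_ns, uint64_t last_yield_ns)
{
	/* nanoseconds of freezer work since the last yield */
	uint64_t elapsed_ns = now_ns - last_yield_ns;

	return elapsed_ns > EX_DUTY_CYCLE_ON_MS * EX_NSEC_PER_MSEC;
}
#endif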
7282 
7283 
7284 void
7285 vm_object_compressed_freezer_done()
7286 {
7287 	vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7288 }
7289 
7290 
7291 uint32_t
7292 vm_object_compressed_freezer_pageout(
7293 	vm_object_t object, uint32_t dirty_budget)
7294 {
7295 	vm_page_t                       p;
7296 	vm_page_t                       local_freeq = NULL;
7297 	int                             local_freed = 0;
7298 	kern_return_t                   retval = KERN_SUCCESS;
7299 	int                             obj_resident_page_count_snapshot = 0;
7300 	uint32_t                        paged_out_count = 0;
7301 
7302 	assert(object != VM_OBJECT_NULL);
7303 	assert(object->internal);
7304 
7305 	vm_object_lock(object);
7306 
7307 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7308 		if (!object->pager_initialized) {
7309 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7310 
7311 			if (!object->pager_initialized) {
7312 				vm_object_compressor_pager_create(object);
7313 			}
7314 		}
7315 
7316 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7317 			vm_object_unlock(object);
7318 			return paged_out_count;
7319 		}
7320 	}
7321 
7322 	/*
7323 	 * We could be freezing a shared internal object that might
7324 	 * be part of some other thread's current VM operations.
7325 	 * We skip it if there's a paging-in-progress or activity-in-progress
7326 	 * because we could be here a long time with the map lock held.
7327 	 *
7328 	 * Note: We are holding the map locked while we wait.
7329 	 * This is fine in the freezer path because the task
7330 	 * is suspended and so this latency is acceptable.
7331 	 */
7332 	if (object->paging_in_progress || object->activity_in_progress) {
7333 		vm_object_unlock(object);
7334 		return paged_out_count;
7335 	}
7336 
7337 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7338 		vm_object_offset_t      curr_offset = 0;
7339 
7340 		/*
7341 		 * Go through the object and make sure that any
7342 		 * previously compressed pages are relocated into
7343 		 * a compressed segment associated with our "freezer_chead".
7344 		 */
7345 		while (curr_offset < object->vo_size) {
7346 			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7347 
7348 			if (curr_offset == (vm_object_offset_t) -1) {
7349 				break;
7350 			}
7351 
7352 			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7353 
7354 			if (retval != KERN_SUCCESS) {
7355 				break;
7356 			}
7357 
7358 			curr_offset += PAGE_SIZE_64;
7359 		}
7360 	}
7361 
7362 	/*
7363 	 * We can't hold the object lock while heading down into the compressed pager
7364 	 * layer because we might need the kernel map lock down there to allocate new
7365 	 * compressor data structures. And if this same object is mapped in the kernel
7366 	 * and there's a fault on it, then that thread will want the object lock while
7367 	 * holding the kernel map lock.
7368 	 *
7369 	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7370 	 * we won't be stuck in an infinite loop if the same page(s) keep getting
7371 	 * decompressed. So we grab a snapshot of the number of pages in the object and
7372 	 * we won't process any more than that number of pages.
7373 	 */
7374 
7375 	obj_resident_page_count_snapshot = object->resident_page_count;
7376 
7377 	vm_object_activity_begin(object);
7378 
7379 	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7380 		p = (vm_page_t)vm_page_queue_first(&object->memq);
7381 
7382 		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
7383 
7384 		vm_page_lockspin_queues();
7385 
7386 		if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) {
7387 			vm_page_unlock_queues();
7388 
7389 			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
7390 
7391 			vm_page_queue_remove(&object->memq, p, vmp_listq);
7392 			vm_page_queue_enter(&object->memq, p, vmp_listq);
7393 
7394 			continue;
7395 		}
7396 
7397 		if (p->vmp_pmapped == TRUE) {
7398 			int refmod_state, pmap_flags;
7399 
7400 			if (p->vmp_dirty || p->vmp_precious) {
7401 				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7402 			} else {
7403 				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7404 			}
7405 
7406 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7407 			if (refmod_state & VM_MEM_MODIFIED) {
7408 				SET_PAGE_DIRTY(p, FALSE);
7409 			}
7410 		}
7411 
7412 		if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7413 			/*
7414 			 * Clean and non-precious page.
7415 			 */
7416 			vm_page_unlock_queues();
7417 			VM_PAGE_FREE(p);
7418 
7419 			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
7420 			continue;
7421 		}
7422 
7423 		if (p->vmp_laundry) {
7424 			vm_pageout_steal_laundry(p, TRUE);
7425 		}
7426 
7427 		vm_page_queues_remove(p, TRUE);
7428 
7429 		vm_page_unlock_queues();
7430 
7431 
7432 		/*
7433 		 * In case the compressor fails to compress this page, we need it at
7434 		 * the back of the object memq so that we don't keep trying to process it.
7435 		 * Make the move here while we have the object lock held.
7436 		 */
7437 
7438 		vm_page_queue_remove(&object->memq, p, vmp_listq);
7439 		vm_page_queue_enter(&object->memq, p, vmp_listq);
7440 
7441 		/*
7442 		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7443 		 *
7444 		 * Mark the page busy so no one messes with it while we have the object lock dropped.
7445 		 */
7446 		p->vmp_busy = TRUE;
7447 
7448 		vm_object_activity_begin(object);
7449 
7450 		vm_object_unlock(object);
7451 
7452 		if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7453 		    (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7454 		    p) == KERN_SUCCESS) {
7455 			/*
7456 			 * page has already been un-tabled from the object via 'vm_page_remove'
7457 			 */
7458 			p->vmp_snext = local_freeq;
7459 			local_freeq = p;
7460 			local_freed++;
7461 			paged_out_count++;
7462 
7463 			if (local_freed >= MAX_FREE_BATCH) {
7464 				OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7465 
7466 				vm_page_free_list(local_freeq, TRUE);
7467 
7468 				local_freeq = NULL;
7469 				local_freed = 0;
7470 			}
7471 			freezer_context_global.freezer_ctx_uncompressed_pages++;
7472 		}
7473 		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
7474 
7475 		if (local_freed == 0 && c_freezer_should_yield()) {
7476 			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7477 			clock_get_uptime(&c_freezer_last_yield_ts);
7478 		}
7479 
7480 		vm_object_lock(object);
7481 	}
7482 
7483 	if (local_freeq) {
7484 		OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7485 
7486 		vm_page_free_list(local_freeq, TRUE);
7487 
7488 		local_freeq = NULL;
7489 		local_freed = 0;
7490 	}
7491 
7492 	vm_object_activity_end(object);
7493 
7494 	vm_object_unlock(object);
7495 
7496 	if (c_freezer_should_yield()) {
7497 		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7498 		clock_get_uptime(&c_freezer_last_yield_ts);
7499 	}
7500 	return paged_out_count;
7501 }
7502 
7503 #endif /* CONFIG_FREEZE */
7504 
7505 
7506 void
7507 vm_object_pageout(
7508 	vm_object_t object)
7509 {
7510 	vm_page_t                       p, next;
7511 	struct  vm_pageout_queue        *iq;
7512 
7513 	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
7514 		return;
7515 	}
7516 
7517 	iq = &vm_pageout_queue_internal;
7518 
7519 	assert(object != VM_OBJECT_NULL );
7520 
7521 	vm_object_lock(object);
7522 
7523 	if (!object->internal ||
7524 	    object->terminating ||
7525 	    !object->alive) {
7526 		vm_object_unlock(object);
7527 		return;
7528 	}
7529 
7530 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7531 		if (!object->pager_initialized) {
7532 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7533 
7534 			if (!object->pager_initialized) {
7535 				vm_object_compressor_pager_create(object);
7536 			}
7537 		}
7538 
7539 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7540 			vm_object_unlock(object);
7541 			return;
7542 		}
7543 	}
7544 
7545 ReScan:
7546 	next = (vm_page_t)vm_page_queue_first(&object->memq);
7547 
7548 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
7549 		p = next;
7550 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
7551 
7552 		assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
7553 
7554 		if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
7555 		    p->vmp_cleaning ||
7556 		    p->vmp_laundry ||
7557 		    p->vmp_busy ||
7558 		    p->vmp_absent ||
7559 		    VMP_ERROR_GET(p) ||
7560 		    p->vmp_fictitious ||
7561 		    VM_PAGE_WIRED(p)) {
7562 			/*
7563 			 * Page is already being cleaned or can't be cleaned.
7564 			 */
7565 			continue;
7566 		}
7567 		if (vm_compressor_low_on_space()) {
7568 			break;
7569 		}
7570 
7571 		/* Throw to the pageout queue */
7572 
7573 		vm_page_lockspin_queues();
7574 
7575 		if (VM_PAGE_Q_THROTTLED(iq)) {
7576 			iq->pgo_draining = TRUE;
7577 
7578 			assert_wait((event_t) (&iq->pgo_laundry + 1),
7579 			    THREAD_INTERRUPTIBLE);
7580 			vm_page_unlock_queues();
7581 			vm_object_unlock(object);
7582 
7583 			thread_block(THREAD_CONTINUE_NULL);
7584 
7585 			vm_object_lock(object);
7586 			goto ReScan;
7587 		}
7588 
7589 		assert(!p->vmp_fictitious);
7590 		assert(!p->vmp_busy);
7591 		assert(!p->vmp_absent);
7592 		assert(!p->vmp_unusual);
7593 		assert(!VMP_ERROR_GET(p));      /* XXX there's a window here where we could have an ECC error! */
7594 		assert(!VM_PAGE_WIRED(p));
7595 		assert(!p->vmp_cleaning);
7596 
7597 		if (p->vmp_pmapped == TRUE) {
7598 			int refmod_state;
7599 			int pmap_options;
7600 
7601 			/*
7602 			 * Tell pmap the page should be accounted
7603 			 * for as "compressed" if it's been modified.
7604 			 */
7605 			pmap_options =
7606 			    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7607 			if (p->vmp_dirty || p->vmp_precious) {
7608 				/*
7609 				 * We already know it's been modified,
7610 				 * so tell pmap to account for it
7611 				 * as "compressed".
7612 				 */
7613 				pmap_options = PMAP_OPTIONS_COMPRESSOR;
7614 			}
7615 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
7616 			    pmap_options,
7617 			    NULL);
7618 			if (refmod_state & VM_MEM_MODIFIED) {
7619 				SET_PAGE_DIRTY(p, FALSE);
7620 			}
7621 		}
7622 
7623 		if (!p->vmp_dirty && !p->vmp_precious) {
7624 			vm_page_unlock_queues();
7625 			VM_PAGE_FREE(p);
7626 			continue;
7627 		}
7628 		vm_page_queues_remove(p, TRUE);
7629 
7630 		vm_pageout_cluster(p);
7631 
7632 		vm_page_unlock_queues();
7633 	}
7634 	vm_object_unlock(object);
7635 }
7636 
7637 
7638 #if CONFIG_IOSCHED
7639 void
7640 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
7641 {
7642 	io_reprioritize_req_t   req;
7643 	struct vnode            *devvp = NULL;
7644 
7645 	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7646 		return;
7647 	}
7648 
7649 	/*
7650 	 * Create the request for I/O reprioritization.
7651 	 * We use the noblock variant of zalloc because we're holding the object
7652 	 * lock here and we could cause a deadlock in low memory conditions.
7653 	 */
7654 	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
7655 	if (req == NULL) {
7656 		return;
7657 	}
7658 	req->blkno = blkno;
7659 	req->len = len;
7660 	req->priority = prio;
7661 	req->devvp = devvp;
7662 
7663 	/* Insert request into the reprioritization list */
7664 	IO_REPRIORITIZE_LIST_LOCK();
7665 	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7666 	IO_REPRIORITIZE_LIST_UNLOCK();
7667 
7668 	/* Wakeup reprioritize thread */
7669 	IO_REPRIO_THREAD_WAKEUP();
7670 
7671 	return;
7672 }
7673 
7674 void
7675 vm_decmp_upl_reprioritize(upl_t upl, int prio)
7676 {
7677 	int offset;
7678 	vm_object_t object;
7679 	io_reprioritize_req_t   req;
7680 	struct vnode            *devvp = NULL;
7681 	uint64_t                blkno;
7682 	uint32_t                len;
7683 	upl_t                   io_upl;
7684 	uint64_t                *io_upl_reprio_info;
7685 	int                     io_upl_size;
7686 
7687 	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
7688 		return;
7689 	}
7690 
7691 	/*
7692 	 * We don't want to perform any allocations with the upl lock held since that might
7693 	 * result in a deadlock. If the system is low on memory, the pageout thread would
7694 	 * try to pageout stuff and might wait on this lock. If we are waiting for the memory to
7695 	 * be freed up by the pageout thread, it would be a deadlock.
7696 	 */
7697 
7698 
7699 	/* First step is just to get the size of the upl to find out how big the reprio info is */
7700 	if (!upl_try_lock(upl)) {
7701 		return;
7702 	}
7703 
7704 	if (upl->decmp_io_upl == NULL) {
7705 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7706 		upl_unlock(upl);
7707 		return;
7708 	}
7709 
7710 	io_upl = upl->decmp_io_upl;
7711 	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
7712 	assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
7713 	    "upl %p offset 0x%llx size 0x%x\n",
7714 	    io_upl, io_upl->u_offset, io_upl->u_size);
7715 	io_upl_size = io_upl->u_size;
7716 	upl_unlock(upl);
7717 
7718 	/* Now perform the allocation */
7719 	io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
7720 	if (io_upl_reprio_info == NULL) {
7721 		return;
7722 	}
7723 
7724 	/* Now again take the lock, recheck the state and grab out the required info */
7725 	if (!upl_try_lock(upl)) {
7726 		goto out;
7727 	}
7728 
7729 	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
7730 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7731 		upl_unlock(upl);
7732 		goto out;
7733 	}
7734 	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
7735 	    sizeof(uint64_t) * atop(io_upl_size));
7736 
7737 	/* Get the VM object for this UPL */
7738 	if (io_upl->flags & UPL_SHADOWED) {
7739 		object = io_upl->map_object->shadow;
7740 	} else {
7741 		object = io_upl->map_object;
7742 	}
7743 
7744 	/* Get the dev vnode ptr for this object */
7745 	if (!object || !object->pager ||
7746 	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7747 		upl_unlock(upl);
7748 		goto out;
7749 	}
7750 
7751 	upl_unlock(upl);
7752 
7753 	/* Now we have all the information needed to do the expedite */
7754 
7755 	offset = 0;
7756 	while (offset < io_upl_size) {
7757 		blkno   = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
7758 		len     = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
7759 
7760 		/*
7761 		 * This implementation may cause some spurious expedites due to the
7762 		 * fact that we don't clean up the blkno & len from the upl_reprio_info
7763 		 * even after the I/O is complete.
7764 		 */
7765 
7766 		if (blkno != 0 && len != 0) {
7767 			/* Create the request for I/O reprioritization */
7768 			req = zalloc_flags(io_reprioritize_req_zone,
7769 			    Z_WAITOK | Z_NOFAIL);
7770 			req->blkno = blkno;
7771 			req->len = len;
7772 			req->priority = prio;
7773 			req->devvp = devvp;
7774 
7775 			/* Insert request into the reprioritization list */
7776 			IO_REPRIORITIZE_LIST_LOCK();
7777 			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7778 			IO_REPRIORITIZE_LIST_UNLOCK();
7779 
7780 			offset += len;
7781 		} else {
7782 			offset += PAGE_SIZE;
7783 		}
7784 	}
7785 
7786 	/* Wakeup reprioritize thread */
7787 	IO_REPRIO_THREAD_WAKEUP();
7788 
7789 out:
7790 	kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size));
7791 }
7792 
7793 void
7794 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
7795 {
7796 	upl_t upl;
7797 	upl_page_info_t *pl;
7798 	unsigned int i, num_pages;
7799 	int cur_tier;
7800 
7801 	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
7802 
7803 	/*
7804 	 *  Scan through all UPLs associated with the object to find the
7805 	 *  UPL containing the contended page.
7806 	 */
7807 	queue_iterate(&o->uplq, upl, upl_t, uplq) {
7808 		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
7809 			continue;
7810 		}
7811 		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
7812 		assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
7813 		    "upl %p offset 0x%llx size 0x%x\n",
7814 		    upl, upl->u_offset, upl->u_size);
7815 		num_pages = (upl->u_size / PAGE_SIZE);
7816 
7817 		/*
7818 		 *  For each page in the UPL page list, see if it matches the contended
7819 		 *  page and was issued as a low prio I/O.
7820 		 */
7821 		for (i = 0; i < num_pages; i++) {
7822 			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
7823 				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
7824 					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7825 					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
7826 					vm_decmp_upl_reprioritize(upl, cur_tier);
7827 					break;
7828 				}
7829 				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7830 				    upl->upl_reprio_info[i], upl->upl_priority, 0);
7831 				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
7832 					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
7833 				}
7834 				break;
7835 			}
7836 		}
7837 		/* Check if we found any hits */
7838 		if (i != num_pages) {
7839 			break;
7840 		}
7841 	}
7842 
7843 	return;
7844 }
7845 
7846 wait_result_t
7847 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
7848 {
7849 	wait_result_t ret;
7850 
7851 	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
7852 
7853 	if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
7854 		/*
7855 		 *  Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
7856 		 */
7857 		vm_page_handle_prio_inversion(o, m);
7858 	}
7859 	m->vmp_wanted = TRUE;
7860 	ret = thread_sleep_vm_object(o, m, interruptible);
7861 	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
7862 	return ret;
7863 }
7864 
7865 static void
7866 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
7867 {
7868 	io_reprioritize_req_t   req = NULL;
7869 
7870 	while (1) {
7871 		IO_REPRIORITIZE_LIST_LOCK();
7872 		if (queue_empty(&io_reprioritize_list)) {
7873 			IO_REPRIORITIZE_LIST_UNLOCK();
7874 			break;
7875 		}
7876 
7877 		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7878 		IO_REPRIORITIZE_LIST_UNLOCK();
7879 
7880 		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
7881 		zfree(io_reprioritize_req_zone, req);
7882 	}
7883 
7884 	IO_REPRIO_THREAD_CONTINUATION();
7885 }
7886 #endif
7887 
7888 #if VM_OBJECT_ACCESS_TRACKING
7889 void
7890 vm_object_access_tracking(
7891 	vm_object_t     object,
7892 	int             *access_tracking_p,
7893 	uint32_t        *access_tracking_reads_p,
7894 	uint32_t        *access_tracking_writes_p)
7895 {
7896 	int     access_tracking;
7897 
7898 	access_tracking = !!*access_tracking_p;
7899 
7900 	vm_object_lock(object);
7901 	*access_tracking_p = object->access_tracking;
7902 	if (access_tracking_reads_p) {
7903 		*access_tracking_reads_p = object->access_tracking_reads;
7904 	}
7905 	if (access_tracking_writes_p) {
7906 		*access_tracking_writes_p = object->access_tracking_writes;
7907 	}
7908 	object->access_tracking = access_tracking;
7909 	object->access_tracking_reads = 0;
7910 	object->access_tracking_writes = 0;
7911 	vm_object_unlock(object);
7912 
7913 	if (access_tracking) {
7914 		vm_object_pmap_protect_options(object,
7915 		    0,
7916 		    object->vo_size,
7917 		    PMAP_NULL,
7918 		    PAGE_SIZE,
7919 		    0,
7920 		    VM_PROT_NONE,
7921 		    0);
7922 	}
7923 }
7924 #endif /* VM_OBJECT_ACCESS_TRACKING */
7925 
7926 void
7927 vm_object_ledger_tag_ledgers(
7928 	vm_object_t     object,
7929 	int             *ledger_idx_volatile,
7930 	int             *ledger_idx_nonvolatile,
7931 	int             *ledger_idx_volatile_compressed,
7932 	int             *ledger_idx_nonvolatile_compressed,
7933 	boolean_t       *do_footprint)
7934 {
7935 	assert(object->shadow == VM_OBJECT_NULL);
7936 
7937 	*do_footprint = !object->vo_no_footprint;
7938 
7939 	switch (object->vo_ledger_tag) {
7940 	case VM_LEDGER_TAG_NONE:
7941 		/*
7942 		 * Regular purgeable memory:
7943 		 * counts in footprint only when nonvolatile.
7944 		 */
7945 		*do_footprint = TRUE;
7946 		assert(object->purgable != VM_PURGABLE_DENY);
7947 		*ledger_idx_volatile = task_ledgers.purgeable_volatile;
7948 		*ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
7949 		*ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
7950 		*ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
7951 		break;
7952 	case VM_LEDGER_TAG_DEFAULT:
7953 		/*
7954 		 * "default" tagged memory:
7955 		 * counts in footprint only when nonvolatile and not marked
7956 		 * as "no_footprint".
7957 		 */
7958 		*ledger_idx_volatile = task_ledgers.tagged_nofootprint;
7959 		*ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7960 		if (*do_footprint) {
7961 			*ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
7962 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
7963 		} else {
7964 			*ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
7965 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7966 		}
7967 		break;
7968 	case VM_LEDGER_TAG_NETWORK:
7969 		/*
7970 		 * "network" tagged memory:
7971 		 * never counts in footprint.
7972 		 */
7973 		*do_footprint = FALSE;
7974 		*ledger_idx_volatile = task_ledgers.network_volatile;
7975 		*ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
7976 		*ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
7977 		*ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
7978 		break;
7979 	case VM_LEDGER_TAG_MEDIA:
7980 		/*
7981 		 * "media" tagged memory:
7982 		 * counts in footprint only when nonvolatile and not marked
7983 		 * as "no footprint".
7984 		 */
7985 		*ledger_idx_volatile = task_ledgers.media_nofootprint;
7986 		*ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
7987 		if (*do_footprint) {
7988 			*ledger_idx_nonvolatile = task_ledgers.media_footprint;
7989 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
7990 		} else {
7991 			*ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
7992 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
7993 		}
7994 		break;
7995 	case VM_LEDGER_TAG_GRAPHICS:
7996 		/*
7997 		 * "graphics" tagged memory:
7998 		 * counts in footprint only when nonvolatile and not marked
7999 		 * as "no footprint".
8000 		 */
8001 		*ledger_idx_volatile = task_ledgers.graphics_nofootprint;
8002 		*ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8003 		if (*do_footprint) {
8004 			*ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
8005 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
8006 		} else {
8007 			*ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
8008 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8009 		}
8010 		break;
8011 	case VM_LEDGER_TAG_NEURAL:
8012 		/*
8013 		 * "neural" tagged memory:
8014 		 * counts in footprint only when nonvolatile and not marked
8015 		 * as "no footprint".
8016 		 */
8017 		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
8018 		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
8019 		if (*do_footprint) {
8020 			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
8021 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
8022 		} else {
8023 			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
8024 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
8025 		}
8026 		break;
8027 	default:
8028 		panic("%s: object %p has unsupported ledger_tag %d",
8029 		    __FUNCTION__, object, object->vo_ledger_tag);
8030 	}
8031 }
8032 
8033 kern_return_t
8034 vm_object_ownership_change(
8035 	vm_object_t     object,
8036 	int             new_ledger_tag,
8037 	task_t          new_owner,
8038 	int             new_ledger_flags,
8039 	boolean_t       old_task_objq_locked)
8040 {
8041 	int             old_ledger_tag;
8042 	task_t          old_owner;
8043 	int             resident_count, wired_count;
8044 	unsigned int    compressed_count;
8045 	int             ledger_idx_volatile;
8046 	int             ledger_idx_nonvolatile;
8047 	int             ledger_idx_volatile_compressed;
8048 	int             ledger_idx_nonvolatile_compressed;
8049 	int             ledger_idx;
8050 	int             ledger_idx_compressed;
8051 	boolean_t       do_footprint, old_no_footprint, new_no_footprint;
8052 	boolean_t       new_task_objq_locked;
8053 
8054 	vm_object_lock_assert_exclusive(object);
8055 
8056 	if (!object->internal) {
8057 		return KERN_INVALID_ARGUMENT;
8058 	}
8059 	if (new_owner == VM_OBJECT_OWNER_UNCHANGED) {
8060 		/* leave owner unchanged */
8061 		new_owner = VM_OBJECT_OWNER(object);
8062 	}
8063 	if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
8064 		/* leave ledger_tag unchanged */
8065 		new_ledger_tag = object->vo_ledger_tag;
8066 	}
8067 	if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
8068 	    object->purgable == VM_PURGABLE_DENY) {
8069 		/* non-purgeable memory must have a valid non-zero ledger tag */
8070 		return KERN_INVALID_ARGUMENT;
8071 	}
8072 	if (new_ledger_tag < 0 ||
8073 	    new_ledger_tag > VM_LEDGER_TAG_MAX) {
8074 		return KERN_INVALID_ARGUMENT;
8075 	}
8076 	if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
8077 		return KERN_INVALID_ARGUMENT;
8078 	}
8079 	if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
8080 	    object->purgable == VM_PURGABLE_DENY) {
8081 		/*
8082 		 * This VM object is neither ledger-tagged nor purgeable.
8083 		 * We can convert it to "ledger tag" ownership iff it
8084 		 * has not been used at all yet (no resident pages and
8085 		 * no pager) and it's going to be assigned to a valid task.
8086 		 */
8087 		if (object->resident_page_count != 0 ||
8088 		    object->pager != NULL ||
8089 		    object->pager_created ||
8090 		    object->ref_count != 1 ||
8091 		    object->vo_owner != TASK_NULL ||
8092 		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
8093 		    new_owner == TASK_NULL) {
8094 			return KERN_FAILURE;
8095 		}
8096 	}
8097 
8098 	if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
8099 		new_no_footprint = TRUE;
8100 	} else {
8101 		new_no_footprint = FALSE;
8102 	}
8103 #if __arm64__
8104 	if (!new_no_footprint &&
8105 	    object->purgable != VM_PURGABLE_DENY &&
8106 	    new_owner != TASK_NULL &&
8107 	    new_owner != VM_OBJECT_OWNER_DISOWNED &&
8108 	    new_owner->task_legacy_footprint) {
8109 		/*
8110 		 * This task has been granted "legacy footprint" and should
8111 		 * not be charged for its IOKit purgeable memory.  Since we
8112 		 * might now change the accounting of such memory to the
8113 		 * "graphics" ledger, for example, give it the "no footprint"
8114 		 * option.
8115 		 */
8116 		new_no_footprint = TRUE;
8117 	}
8118 #endif /* __arm64__ */
8119 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
8120 	assert(object->shadow == VM_OBJECT_NULL);
8121 	assert(object->copy == VM_OBJECT_NULL);
8122 
8123 	old_ledger_tag = object->vo_ledger_tag;
8124 	old_no_footprint = object->vo_no_footprint;
8125 	old_owner = VM_OBJECT_OWNER(object);
8126 
8127 	DTRACE_VM8(object_ownership_change,
8128 	    vm_object_t, object,
8129 	    task_t, old_owner,
8130 	    int, old_ledger_tag,
8131 	    int, old_no_footprint,
8132 	    task_t, new_owner,
8133 	    int, new_ledger_tag,
8134 	    int, new_no_footprint,
8135 	    int, VM_OBJECT_ID(object));
8136 
8137 	assert(object->internal);
8138 	resident_count = object->resident_page_count - object->wired_page_count;
8139 	wired_count = object->wired_page_count;
8140 	compressed_count = vm_compressor_pager_get_count(object->pager);
8141 
8142 	/*
8143 	 * Deal with the old owner and/or ledger tag, if needed.
8144 	 */
8145 	if (old_owner != TASK_NULL &&
8146 	    ((old_owner != new_owner)           /* new owner ... */
8147 	    ||                                  /* ... or ... */
8148 	    (old_no_footprint != new_no_footprint) /* new "no_footprint" */
8149 	    ||                                  /* ... or ... */
8150 	    old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
8151 		/*
8152 		 * Take this object off of the old owner's ledgers.
8153 		 */
8154 		vm_object_ledger_tag_ledgers(object,
8155 		    &ledger_idx_volatile,
8156 		    &ledger_idx_nonvolatile,
8157 		    &ledger_idx_volatile_compressed,
8158 		    &ledger_idx_nonvolatile_compressed,
8159 		    &do_footprint);
8160 		if (object->purgable == VM_PURGABLE_VOLATILE ||
8161 		    object->purgable == VM_PURGABLE_EMPTY) {
8162 			ledger_idx = ledger_idx_volatile;
8163 			ledger_idx_compressed = ledger_idx_volatile_compressed;
8164 		} else {
8165 			ledger_idx = ledger_idx_nonvolatile;
8166 			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8167 		}
8168 		if (resident_count) {
8169 			/*
8170 			 * Adjust the appropriate old owner's ledgers by the
8171 			 * number of resident pages.
8172 			 */
8173 			ledger_debit(old_owner->ledger,
8174 			    ledger_idx,
8175 			    ptoa_64(resident_count));
8176 			/* adjust old owner's footprint */
8177 			if (do_footprint &&
8178 			    object->purgable != VM_PURGABLE_VOLATILE &&
8179 			    object->purgable != VM_PURGABLE_EMPTY) {
8180 				ledger_debit(old_owner->ledger,
8181 				    task_ledgers.phys_footprint,
8182 				    ptoa_64(resident_count));
8183 			}
8184 		}
8185 		if (wired_count) {
8186 			/* wired pages are always nonvolatile */
8187 			ledger_debit(old_owner->ledger,
8188 			    ledger_idx_nonvolatile,
8189 			    ptoa_64(wired_count));
8190 			if (do_footprint) {
8191 				ledger_debit(old_owner->ledger,
8192 				    task_ledgers.phys_footprint,
8193 				    ptoa_64(wired_count));
8194 			}
8195 		}
8196 		if (compressed_count) {
8197 			/*
8198 			 * Adjust the appropriate old owner's ledgers
8199 			 * by the number of compressed pages.
8200 			 */
8201 			ledger_debit(old_owner->ledger,
8202 			    ledger_idx_compressed,
8203 			    ptoa_64(compressed_count));
8204 			if (do_footprint &&
8205 			    object->purgable != VM_PURGABLE_VOLATILE &&
8206 			    object->purgable != VM_PURGABLE_EMPTY) {
8207 				ledger_debit(old_owner->ledger,
8208 				    task_ledgers.phys_footprint,
8209 				    ptoa_64(compressed_count));
8210 			}
8211 		}
8212 		if (old_owner != new_owner) {
8213 			/* remove object from old_owner's list of owned objects */
8214 			DTRACE_VM2(object_owner_remove,
8215 			    vm_object_t, object,
8216 			    task_t, old_owner);
8217 			if (!old_task_objq_locked) {
8218 				task_objq_lock(old_owner);
8219 			}
8220 			old_owner->task_owned_objects--;
8221 			queue_remove(&old_owner->task_objq, object,
8222 			    vm_object_t, task_objq);
8223 			switch (object->purgable) {
8224 			case VM_PURGABLE_NONVOLATILE:
8225 			case VM_PURGABLE_EMPTY:
8226 				vm_purgeable_nonvolatile_owner_update(old_owner,
8227 				    -1);
8228 				break;
8229 			case VM_PURGABLE_VOLATILE:
8230 				vm_purgeable_volatile_owner_update(old_owner,
8231 				    -1);
8232 				break;
8233 			default:
8234 				break;
8235 			}
8236 			if (!old_task_objq_locked) {
8237 				task_objq_unlock(old_owner);
8238 			}
8239 		}
8240 	}
8241 
8242 	/*
8243 	 * Switch to new ledger tag and/or owner.
8244 	 */
8245 
8246 	new_task_objq_locked = FALSE;
8247 	if (new_owner != old_owner &&
8248 	    new_owner != TASK_NULL &&
8249 	    new_owner != VM_OBJECT_OWNER_DISOWNED) {
8250 		/*
8251 		 * If the new owner is not accepting new objects ("disowning"),
8252 		 * the object becomes "disowned" and will be added to
8253 		 * the kernel's task_objq.
8254 		 *
8255 		 * Check first without locking, to avoid blocking while the
8256 		 * task is disowning its objects.
8257 		 */
8258 		if (new_owner->task_objects_disowning) {
8259 			new_owner = VM_OBJECT_OWNER_DISOWNED;
8260 		} else {
8261 			task_objq_lock(new_owner);
8262 			/* check again now that we have the lock */
8263 			if (new_owner->task_objects_disowning) {
8264 				new_owner = VM_OBJECT_OWNER_DISOWNED;
8265 				task_objq_unlock(new_owner);
8266 			} else {
8267 				new_task_objq_locked = TRUE;
8268 			}
8269 		}
8270 	}
8271 
8272 	object->vo_ledger_tag = new_ledger_tag;
8273 	object->vo_owner = new_owner;
8274 	object->vo_no_footprint = new_no_footprint;
8275 
8276 	if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
8277 		/*
8278 		 * Disowned objects are added to the kernel's task_objq but
8279 		 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
8280 		 * differentiate them from objects intentionally owned by
8281 		 * the kernel.
8282 		 */
8283 		assert(old_owner != kernel_task);
8284 		new_owner = kernel_task;
8285 		assert(!new_task_objq_locked);
8286 		task_objq_lock(new_owner);
8287 		new_task_objq_locked = TRUE;
8288 	}
8289 
8290 	/*
8291 	 * Deal with the new owner and/or ledger tag, if needed.
8292 	 */
8293 	if (new_owner != TASK_NULL &&
8294 	    ((new_owner != old_owner)           /* new owner ... */
8295 	    ||                                  /* ... or ... */
8296 	    (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
8297 	    ||                                  /* ... or ... */
8298 	    new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
8299 		/*
8300 		 * Add this object to the new owner's ledgers.
8301 		 */
8302 		vm_object_ledger_tag_ledgers(object,
8303 		    &ledger_idx_volatile,
8304 		    &ledger_idx_nonvolatile,
8305 		    &ledger_idx_volatile_compressed,
8306 		    &ledger_idx_nonvolatile_compressed,
8307 		    &do_footprint);
8308 		if (object->purgable == VM_PURGABLE_VOLATILE ||
8309 		    object->purgable == VM_PURGABLE_EMPTY) {
8310 			ledger_idx = ledger_idx_volatile;
8311 			ledger_idx_compressed = ledger_idx_volatile_compressed;
8312 		} else {
8313 			ledger_idx = ledger_idx_nonvolatile;
8314 			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8315 		}
8316 		if (resident_count) {
8317 			/*
8318 			 * Adjust the appropriate new owner's ledgers by the
8319 			 * number of resident pages.
8320 			 */
8321 			ledger_credit(new_owner->ledger,
8322 			    ledger_idx,
8323 			    ptoa_64(resident_count));
8324 			/* adjust new owner's footprint */
8325 			if (do_footprint &&
8326 			    object->purgable != VM_PURGABLE_VOLATILE &&
8327 			    object->purgable != VM_PURGABLE_EMPTY) {
8328 				ledger_credit(new_owner->ledger,
8329 				    task_ledgers.phys_footprint,
8330 				    ptoa_64(resident_count));
8331 			}
8332 		}
8333 		if (wired_count) {
8334 			/* wired pages are always nonvolatile */
8335 			ledger_credit(new_owner->ledger,
8336 			    ledger_idx_nonvolatile,
8337 			    ptoa_64(wired_count));
8338 			if (do_footprint) {
8339 				ledger_credit(new_owner->ledger,
8340 				    task_ledgers.phys_footprint,
8341 				    ptoa_64(wired_count));
8342 			}
8343 		}
8344 		if (compressed_count) {
8345 			/*
8346 			 * Adjust the new owner's ledgers by the number of
8347 			 * compressed pages.
8348 			 */
8349 			ledger_credit(new_owner->ledger,
8350 			    ledger_idx_compressed,
8351 			    ptoa_64(compressed_count));
8352 			if (do_footprint &&
8353 			    object->purgable != VM_PURGABLE_VOLATILE &&
8354 			    object->purgable != VM_PURGABLE_EMPTY) {
8355 				ledger_credit(new_owner->ledger,
8356 				    task_ledgers.phys_footprint,
8357 				    ptoa_64(compressed_count));
8358 			}
8359 		}
8360 		if (new_owner != old_owner) {
8361 			/* add object to new_owner's list of owned objects */
8362 			DTRACE_VM2(object_owner_add,
8363 			    vm_object_t, object,
8364 			    task_t, new_owner);
8365 			assert(new_task_objq_locked);
8366 			new_owner->task_owned_objects++;
8367 			queue_enter(&new_owner->task_objq, object,
8368 			    vm_object_t, task_objq);
8369 			switch (object->purgable) {
8370 			case VM_PURGABLE_NONVOLATILE:
8371 			case VM_PURGABLE_EMPTY:
8372 				vm_purgeable_nonvolatile_owner_update(new_owner,
8373 				    +1);
8374 				break;
8375 			case VM_PURGABLE_VOLATILE:
8376 				vm_purgeable_volatile_owner_update(new_owner,
8377 				    +1);
8378 				break;
8379 			default:
8380 				break;
8381 			}
8382 		}
8383 	}
8384 
8385 	if (new_task_objq_locked) {
8386 		task_objq_unlock(new_owner);
8387 	}
8388 
8389 	return KERN_SUCCESS;
8390 }
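/*
 * Illustrative usage sketch (annotation, not part of the original source).
 * vm_owned_objects_disown() below shows the in-tree calling convention:
 * the object lock is held across the ownership change.  A hypothetical
 * caller handing an object to a task "new_owner" with an example ledger
 * tag might look like:
 *
 *	vm_object_lock(object);
 *	kr = vm_object_ownership_change(
 *		object,
 *		VM_LEDGER_TAG_DEFAULT,	(example ledger tag)
 *		new_owner,		(new owning task)
 *		0,			(no new ledger flags)
 *		FALSE);			(old owner's task_objq not locked)
 *	vm_object_unlock(object);
 *
 * Passing TRUE for the last argument, as the loop below does, is only
 * valid when the caller already holds the old owner's task_objq lock.
 */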
8391 
8392 void
8393 vm_owned_objects_disown(
8394 	task_t  task)
8395 {
8396 	vm_object_t     next_object;
8397 	vm_object_t     object;
8398 	int             collisions;
8399 	kern_return_t   kr;
8400 
8401 	if (task == NULL) {
8402 		return;
8403 	}
8404 
8405 	collisions = 0;
8406 
8407 again:
8408 	if (task->task_objects_disowned) {
8409 		/* task has already disowned its owned objects */
8410 		assert(task->task_volatile_objects == 0);
8411 		assert(task->task_nonvolatile_objects == 0);
8412 		assert(task->task_owned_objects == 0);
8413 		return;
8414 	}
8415 
8416 	task_objq_lock(task);
8417 
8418 	task->task_objects_disowning = TRUE;
8419 
8420 	for (object = (vm_object_t) queue_first(&task->task_objq);
8421 	    !queue_end(&task->task_objq, (queue_entry_t) object);
8422 	    object = next_object) {
8423 		if (task->task_nonvolatile_objects == 0 &&
8424 		    task->task_volatile_objects == 0 &&
8425 		    task->task_owned_objects == 0) {
8426 			/* no more objects owned by "task" */
8427 			break;
8428 		}
8429 
8430 		next_object = (vm_object_t) queue_next(&object->task_objq);
8431 
8432 #if DEBUG
8433 		assert(object->vo_purgeable_volatilizer == NULL);
8434 #endif /* DEBUG */
8435 		assert(object->vo_owner == task);
8436 		if (!vm_object_lock_try(object)) {
8437 			task_objq_unlock(task);
8438 			mutex_pause(collisions++);
8439 			goto again;
8440 		}
8441 		/* transfer ownership to the kernel */
8442 		assert(VM_OBJECT_OWNER(object) != kernel_task);
8443 		kr = vm_object_ownership_change(
8444 			object,
8445 			object->vo_ledger_tag, /* unchanged */
8446 			VM_OBJECT_OWNER_DISOWNED, /* new owner */
8447 			0, /* new_ledger_flags */
8448 			TRUE);  /* old_owner->task_objq locked */
8449 		assert(kr == KERN_SUCCESS);
8450 		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
8451 		vm_object_unlock(object);
8452 	}
8453 
8454 	if (__improbable(task->task_owned_objects != 0)) {
8455 		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
8456 		    __FUNCTION__,
8457 		    task,
8458 		    task->task_volatile_objects,
8459 		    task->task_nonvolatile_objects,
8460 		    task->task_owned_objects,
8461 		    &task->task_objq,
8462 		    queue_first(&task->task_objq),
8463 		    queue_last(&task->task_objq));
8464 	}
8465 
8466 	/* there shouldn't be any objects owned by task now */
8467 	assert(task->task_volatile_objects == 0);
8468 	assert(task->task_nonvolatile_objects == 0);
8469 	assert(task->task_owned_objects == 0);
8470 	assert(task->task_objects_disowning);
8471 
8472 	/* and we don't need to try to disown again */
8473 	task->task_objects_disowned = TRUE;
8474 
8475 	task_objq_unlock(task);
8476 }
8477
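/*
 * Annotation (not in the original source): the scan above takes the object
 * lock with vm_object_lock_try() because task_objq_lock() is already held;
 * on contention it drops the queue lock, backs off with
 * mutex_pause(collisions++) and restarts from "again", so two locks are
 * never held across a blocking acquisition.  A hypothetical teardown-time
 * caller would simply do:
 *
 *	vm_owned_objects_disown(task);
 *	assert(task->task_objects_disowned);
 *
 * and the early "task_objects_disowned" check at the top makes repeated
 * calls harmless.
 */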