1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Virtual memory object module.
63 */
64
65 #include <debug.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/memory_object.h>
69 #include <mach/vm_param.h>
70
71 #include <mach/sdt.h>
72
73 #include <ipc/ipc_types.h>
74 #include <ipc/ipc_port.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/queue.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <kern/misc_protos.h>
85 #include <kern/policy_internal.h>
86
87 #include <sys/kdebug_triage.h>
88
89 #include <vm/memory_object.h>
90 #include <vm/vm_compressor_pager.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_protos.h>
97 #include <vm/vm_purgeable_internal.h>
98
99 #include <vm/vm_compressor.h>
100
101 #if CONFIG_PHANTOM_CACHE
102 #include <vm/vm_phantom_cache.h>
103 #endif
104
105 #if VM_OBJECT_ACCESS_TRACKING
106 uint64_t vm_object_access_tracking_reads = 0;
107 uint64_t vm_object_access_tracking_writes = 0;
108 #endif /* VM_OBJECT_ACCESS_TRACKING */
109
110 boolean_t vm_object_collapse_compressor_allowed = TRUE;
111
112 struct vm_counters vm_counters;
113
114 #if DEVELOPMENT || DEBUG
115 extern struct memory_object_pager_ops shared_region_pager_ops;
116 extern unsigned int shared_region_pagers_resident_count;
117 extern unsigned int shared_region_pagers_resident_peak;
118 #endif /* DEVELOPMENT || DEBUG */
119
120 #if VM_OBJECT_TRACKING
121 btlog_t vm_object_tracking_btlog;
122
123 void
124 vm_object_tracking_init(void)
125 {
126 int vm_object_tracking;
127
128 vm_object_tracking = 1;
129 PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
130 sizeof(vm_object_tracking));
131
132 if (vm_object_tracking) {
133 vm_object_tracking_btlog = btlog_create(BTLOG_HASH,
134 VM_OBJECT_TRACKING_NUM_RECORDS);
135 assert(vm_object_tracking_btlog);
136 }
137 }
138 #endif /* VM_OBJECT_TRACKING */
139
140 /*
141 * Virtual memory objects maintain the actual data
142 * associated with allocated virtual memory. A given
143 * page of memory exists within exactly one object.
144 *
145 * An object is only deallocated when all "references"
146 * are given up.
147 *
148 * Associated with each object is a list of all resident
149 * memory pages belonging to that object; this list is
150 * maintained by the "vm_page" module, but locked by the object's
151 * lock.
152 *
153 * Each object also records the memory object reference
154 * that is used by the kernel to request and write
155 * back data (the memory object, field "pager"), etc...
156 *
157 * Virtual memory objects are allocated to provide
158 * zero-filled memory (vm_allocate) or map a user-defined
159 * memory object into a virtual address space (vm_map).
160 *
161 * Virtual memory objects that refer to a user-defined
162 * memory object are called "permanent", because all changes
163 * made in virtual memory are reflected back to the
164 * memory manager, which may then store them permanently.
165 * Other virtual memory objects are called "temporary",
166 * meaning that changes need be written back only when
167 * necessary to reclaim pages, and that storage associated
168 * with the object can be discarded once it is no longer
169 * mapped.
170 *
171 * A permanent memory object may be mapped into more
172 * than one virtual address space. Moreover, two threads
173 * may attempt to make the first mapping of a memory
174 * object concurrently. Only one thread is allowed to
175 * complete this mapping; all others wait until the
176 * "pager_initialized" field is asserted, indicating
177 * that the first thread has initialized all of the
178 * necessary fields in the virtual memory object structure.
179 *
180 * The kernel relies on a *default memory manager* to
181 * provide backing storage for the zero-filled virtual
182 * memory objects. The pager memory objects associated
183 * with these temporary virtual memory objects are only
184 * requested from the default memory manager when it
185 * becomes necessary. Virtual memory objects
186 * that depend on the default memory manager are called
187 * "internal". The "pager_created" field is provided to
188 * indicate whether these ports have ever been allocated.
189 *
190 * The kernel may also create virtual memory objects to
191 * hold changed pages after a copy-on-write operation.
192 * In this case, the virtual memory object (and its
193 * backing storage -- its memory object) only contain
194 * those pages that have been changed. The "shadow"
195 * field refers to the virtual memory object that contains
196 * the remainder of the contents. The "shadow_offset"
197 * field indicates where in the "shadow" these contents begin.
198 * The "copy" field refers to a virtual memory object
199 * to which changed pages must be copied before changing
200 * this object, in order to implement another form
201 * of copy-on-write optimization.
202 *
203 * The virtual memory object structure also records
204 * the attributes associated with its memory object.
205 * The "pager_ready", "can_persist" and "copy_strategy"
206 * fields represent those attributes. The "cached_list"
207 * field is used in the implementation of the persistence
208 * attribute.
209 *
210 * ZZZ Continue this comment.
211 */
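/*
 * Illustrative sketch (not part of the original source): how the "shadow"
 * and "vo_shadow_offset" fields described above chain a copy-on-write
 * object to the object that still holds the unmodified pages. The helper
 * below is hypothetical and only shows the direction of the walk; the real
 * lookup is done page by page by the fault path.
 *
 *	static vm_object_t
 *	example_walk_shadow_chain(vm_object_t object, vm_object_offset_t *offset)
 *	{
 *		while (object != VM_OBJECT_NULL && object->shadow != VM_OBJECT_NULL) {
 *			*offset += object->vo_shadow_offset;
 *			object = object->shadow;
 *		}
 *		return object;
 *	}
 *
 * A lookup starts in the top object; if the page was never modified there,
 * it falls through to the shadow at "offset + vo_shadow_offset", and so on
 * down the chain.
 */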
212
213 /* Forward declarations for internal functions. */
214 static kern_return_t vm_object_terminate(
215 vm_object_t object);
216
217 static void vm_object_do_collapse(
218 vm_object_t object,
219 vm_object_t backing_object);
220
221 static void vm_object_do_bypass(
222 vm_object_t object,
223 vm_object_t backing_object);
224
225 static void vm_object_release_pager(
226 memory_object_t pager);
227
228 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
229
230 /*
231 * All wired-down kernel memory belongs to this memory object
232 * (kernel_object) by default to avoid wasting data structures.
233 */
234 static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED;
235 const vm_object_t kernel_object_default = &kernel_object_store;
236
237 static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED;
238 const vm_object_t compressor_object = &compressor_object_store;
239
240 /*
241 * This object holds all pages that have been retired due to errors like ECC.
242 * The system should never use the page or look at its contents. The offset
243 * in this object is the same as the page's physical address.
244 */
245 static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
246 const vm_object_t retired_pages_object = &retired_pages_object_store;
247
248
249 /*
250 * Virtual memory objects are initialized from
251 * a template (see vm_object_allocate).
252 *
253 * When adding a new field to the virtual memory
254 * object structure, be sure to add initialization
255 * (see _vm_object_allocate()).
256 */
257 static const struct vm_object vm_object_template = {
258 .memq.prev = 0,
259 .memq.next = 0,
260 /*
261 * The lock will be initialized for each allocated object in
262 * _vm_object_allocate(), so we don't need to initialize it in
263 * the vm_object_template.
264 */
265 .vo_size = 0,
266 .memq_hint = VM_PAGE_NULL,
267 .ref_count = 1,
268 .resident_page_count = 0,
269 .wired_page_count = 0,
270 .reusable_page_count = 0,
271 .vo_copy = VM_OBJECT_NULL,
272 .vo_copy_version = 0,
273 .shadow = VM_OBJECT_NULL,
274 .vo_shadow_offset = (vm_object_offset_t) 0,
275 .pager = MEMORY_OBJECT_NULL,
276 .paging_offset = 0,
277 .pager_control = MEMORY_OBJECT_CONTROL_NULL,
278 .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
279 .paging_in_progress = 0,
280 .vo_size_delta = 0,
281 .activity_in_progress = 0,
282
283 /* Begin bitfields */
284 .all_wanted = 0, /* all bits FALSE */
285 .pager_created = FALSE,
286 .pager_initialized = FALSE,
287 .pager_ready = FALSE,
288 .pager_trusted = FALSE,
289 .can_persist = FALSE,
290 .internal = TRUE,
291 .private = FALSE,
292 .pageout = FALSE,
293 .alive = TRUE,
294 .purgable = VM_PURGABLE_DENY,
295 .purgeable_when_ripe = FALSE,
296 .purgeable_only_by_kernel = FALSE,
297 .shadowed = FALSE,
298 .true_share = FALSE,
299 .terminating = FALSE,
300 .named = FALSE,
301 .shadow_severed = FALSE,
302 .phys_contiguous = FALSE,
303 .nophyscache = FALSE,
304 /* End bitfields */
305
306 .cached_list.prev = NULL,
307 .cached_list.next = NULL,
308
309 .last_alloc = (vm_object_offset_t) 0,
310 .sequential = (vm_object_offset_t) 0,
311 .pages_created = 0,
312 .pages_used = 0,
313 .scan_collisions = 0,
314 #if CONFIG_PHANTOM_CACHE
315 .phantom_object_id = 0,
316 #endif
317 .cow_hint = ~(vm_offset_t)0,
318
319 /* cache bitfields */
320 .wimg_bits = VM_WIMG_USE_DEFAULT,
321 .set_cache_attr = FALSE,
322 .object_is_shared_cache = FALSE,
323 .code_signed = FALSE,
324 .transposed = FALSE,
325 .mapping_in_progress = FALSE,
326 .phantom_isssd = FALSE,
327 .volatile_empty = FALSE,
328 .volatile_fault = FALSE,
329 .all_reusable = FALSE,
330 .blocked_access = FALSE,
331 .vo_ledger_tag = VM_LEDGER_TAG_NONE,
332 .vo_no_footprint = FALSE,
333 #if CONFIG_IOSCHED || UPL_DEBUG
334 .uplq.prev = NULL,
335 .uplq.next = NULL,
336 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
337 #ifdef VM_PIP_DEBUG
338 .pip_holders = {0},
339 #endif /* VM_PIP_DEBUG */
340
341 .objq.next = NULL,
342 .objq.prev = NULL,
343 .task_objq.next = NULL,
344 .task_objq.prev = NULL,
345
346 .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
347 .purgeable_queue_group = 0,
348
349 .wire_tag = VM_KERN_MEMORY_NONE,
350 #if !VM_TAG_ACTIVE_UPDATE
351 .wired_objq.next = NULL,
352 .wired_objq.prev = NULL,
353 #endif /* ! VM_TAG_ACTIVE_UPDATE */
354
355 .io_tracking = FALSE,
356
357 #if CONFIG_SECLUDED_MEMORY
358 .eligible_for_secluded = FALSE,
359 .can_grab_secluded = FALSE,
360 #else /* CONFIG_SECLUDED_MEMORY */
361 .__object3_unused_bits = 0,
362 #endif /* CONFIG_SECLUDED_MEMORY */
363
364 .for_realtime = false,
365 .no_pager_reason = VM_OBJECT_DESTROY_UNKNOWN_REASON,
366
367 #if VM_OBJECT_ACCESS_TRACKING
368 .access_tracking = FALSE,
369 .access_tracking_reads = 0,
370 .access_tracking_writes = 0,
371 #endif /* VM_OBJECT_ACCESS_TRACKING */
372
373 #if DEBUG
374 .purgeable_owner_bt = {0},
375 .vo_purgeable_volatilizer = NULL,
376 .purgeable_volatilizer_bt = {0},
377 #endif /* DEBUG */
378 };
379
380 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
381 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
382 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
383 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
384 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
385
386 unsigned int vm_page_purged_wired = 0;
387 unsigned int vm_page_purged_busy = 0;
388 unsigned int vm_page_purged_others = 0;
389
390 static queue_head_t vm_object_cached_list;
391 static uint32_t vm_object_cache_pages_freed = 0;
392 static uint32_t vm_object_cache_pages_moved = 0;
393 static uint32_t vm_object_cache_pages_skipped = 0;
394 static uint32_t vm_object_cache_adds = 0;
395 static uint32_t vm_object_cached_count = 0;
396 static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data,
397 &vm_object_cache_lck_grp, &vm_object_lck_attr);
398
399 static uint32_t vm_object_page_grab_failed = 0;
400 static uint32_t vm_object_page_grab_skipped = 0;
401 static uint32_t vm_object_page_grab_returned = 0;
402 static uint32_t vm_object_page_grab_pmapped = 0;
403 static uint32_t vm_object_page_grab_reactivations = 0;
404
405 #define vm_object_cache_lock_spin() \
406 lck_mtx_lock_spin(&vm_object_cached_lock_data)
407 #define vm_object_cache_unlock() \
408 lck_mtx_unlock(&vm_object_cached_lock_data)
409
410 static void vm_object_cache_remove_locked(vm_object_t);
411
412
413 static void vm_object_reap(vm_object_t object);
414 static void vm_object_reap_async(vm_object_t object);
415 static void vm_object_reaper_thread(void);
416
417 static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data,
418 &vm_object_lck_grp, &vm_object_lck_attr);
419
420 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
421 unsigned int vm_object_reap_count = 0;
422 unsigned int vm_object_reap_count_async = 0;
423
424 #define vm_object_reaper_lock() \
425 lck_mtx_lock(&vm_object_reaper_lock_data)
426 #define vm_object_reaper_lock_spin() \
427 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
428 #define vm_object_reaper_unlock() \
429 lck_mtx_unlock(&vm_object_reaper_lock_data)
430
431 #if CONFIG_IOSCHED
432 /* I/O Re-prioritization request list */
433 queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list);
434
435 LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock,
436 &vm_object_lck_grp, &vm_object_lck_attr);
437
438 #define IO_REPRIORITIZE_LIST_LOCK() \
439 lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
440 #define IO_REPRIORITIZE_LIST_UNLOCK() \
441 lck_spin_unlock(&io_reprioritize_list_lock)
442
443 ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
444 struct io_reprioritize_req, ZC_NONE);
445
446 /* I/O Re-prioritization thread */
447 int io_reprioritize_wakeup = 0;
448 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
449
450 #define IO_REPRIO_THREAD_WAKEUP() thread_wakeup((event_t)&io_reprioritize_wakeup)
451 #define IO_REPRIO_THREAD_CONTINUATION() \
452 { \
453 assert_wait(&io_reprioritize_wakeup, THREAD_UNINT); \
454 thread_block(io_reprioritize_thread); \
455 }
456
457 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
458 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
459 void vm_decmp_upl_reprioritize(upl_t, int);
460 #endif
461
462 #if 0
463 #undef KERNEL_DEBUG
464 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
465 #endif
466
467
468 void
469 vm_object_set_size(
470 vm_object_t object,
471 vm_object_size_t outer_size,
472 vm_object_size_t inner_size)
473 {
474 object->vo_size = vm_object_round_page(outer_size);
475 #if KASAN
476 assert(object->vo_size - inner_size <= USHRT_MAX);
477 object->vo_size_delta = (unsigned short)(object->vo_size - inner_size);
478 #else
479 (void)inner_size;
480 #endif
481 }
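/*
 * Worked example (not part of the original source): on a 4K-page
 * configuration, vm_object_set_size(object, 5000, 5000) rounds "vo_size"
 * up to 8192; with KASAN enabled, "vo_size_delta" records the 3192 bytes
 * of slop between the rounded outer size and the requested inner size.
 */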
482
483
484 /*
485 * vm_object_allocate:
486 *
487 * Returns a new object with the given size.
488 */
489
490 __private_extern__ void
491 _vm_object_allocate(
492 vm_object_size_t size,
493 vm_object_t object)
494 {
495 *object = vm_object_template;
496 vm_page_queue_init(&object->memq);
497 #if UPL_DEBUG || CONFIG_IOSCHED
498 queue_init(&object->uplq);
499 #endif
500 vm_object_lock_init(object);
501 vm_object_set_size(object, size, size);
502
503 #if VM_OBJECT_TRACKING_OP_CREATED
504 if (vm_object_tracking_btlog) {
505 btlog_record(vm_object_tracking_btlog, object,
506 VM_OBJECT_TRACKING_OP_CREATED,
507 btref_get(__builtin_frame_address(0), 0));
508 }
509 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
510 }
511
512 __private_extern__ vm_object_t
513 vm_object_allocate(
514 vm_object_size_t size)
515 {
516 vm_object_t object;
517
518 object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL);
519 _vm_object_allocate(size, object);
520
521 return object;
522 }
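/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller creating a temporary zero-fill object one page long and then
 * giving up its reference. vm_object_allocate() returns the object with
 * ref_count == 1, so a single vm_object_deallocate() lets it be reaped.
 *
 *	vm_object_t obj;
 *
 *	obj = vm_object_allocate(PAGE_SIZE);
 *	assert(obj != VM_OBJECT_NULL);
 *	... use the object, e.g. enter it in a VM map, which takes its
 *	... own reference ...
 *	vm_object_deallocate(obj);
 */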
523
524 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
525
526 /*
527 * vm_object_bootstrap:
528 *
529 * Initialize the VM objects module.
530 */
531 __startup_func
532 void
533 vm_object_bootstrap(void)
534 {
535 vm_size_t vm_object_size;
536
537 assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
538
539 vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
540 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
541
542 vm_object_zone = zone_create("vm objects", vm_object_size,
543 ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NOTBITAG);
544
545 queue_init(&vm_object_cached_list);
546
547 queue_init(&vm_object_reaper_queue);
548
549 /*
550 * Initialize the "kernel object"
551 */
552
553 /*
554 * Note that in the following size specifications, we need to add 1 because
555 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
556 */
557 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_default);
558 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
559 kernel_object_default->copy_strategy = MEMORY_OBJECT_COPY_NONE;
560 compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
561 kernel_object_default->no_tag_update = TRUE;
562
563 /*
564 * The object to hold retired VM pages.
565 */
566 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
567 retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
568 }
569
570 #if CONFIG_IOSCHED
571 void
572 vm_io_reprioritize_init(void)
573 {
574 kern_return_t result;
575 thread_t thread = THREAD_NULL;
576
577 result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
578 if (result == KERN_SUCCESS) {
579 thread_set_thread_name(thread, "VM_io_reprioritize_thread");
580 thread_deallocate(thread);
581 } else {
582 panic("Could not create io_reprioritize_thread");
583 }
584 }
585 #endif
586
587 void
588 vm_object_reaper_init(void)
589 {
590 kern_return_t kr;
591 thread_t thread;
592
593 kr = kernel_thread_start_priority(
594 (thread_continue_t) vm_object_reaper_thread,
595 NULL,
596 BASEPRI_VM,
597 &thread);
598 if (kr != KERN_SUCCESS) {
599 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
600 }
601 thread_set_thread_name(thread, "VM_object_reaper_thread");
602 thread_deallocate(thread);
603 }
604
605
606 /*
607 * vm_object_deallocate:
608 *
609 * Release a reference to the specified object,
610 * gained either through a vm_object_allocate
611 * or a vm_object_reference call. When all references
612 * are gone, storage associated with this object
613 * may be relinquished.
614 *
615 * No object may be locked.
616 */
617 unsigned long vm_object_deallocate_shared_successes = 0;
618 unsigned long vm_object_deallocate_shared_failures = 0;
619 unsigned long vm_object_deallocate_shared_swap_failures = 0;
620
621 __private_extern__ void
622 vm_object_deallocate(
623 vm_object_t object)
624 {
625 vm_object_t shadow = VM_OBJECT_NULL;
626
627 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
628 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
629
630 if (object == VM_OBJECT_NULL) {
631 return;
632 }
633
634 if (is_kernel_object(object) || object == compressor_object || object == retired_pages_object) {
635 vm_object_lock_shared(object);
636
637 OSAddAtomic(-1, &object->ref_count);
638
639 if (object->ref_count == 0) {
640 if (is_kernel_object(object)) {
641 panic("vm_object_deallocate: losing a kernel_object");
642 } else if (object == retired_pages_object) {
643 panic("vm_object_deallocate: losing retired_pages_object");
644 } else {
645 panic("vm_object_deallocate: losing compressor_object");
646 }
647 }
648 vm_object_unlock(object);
649 return;
650 }
651
652 if (object->ref_count == 2 &&
653 object->named) {
654 /*
655 * This "named" object's reference count is about to
656 * drop from 2 to 1:
657 * we'll need to call memory_object_last_unmap().
658 */
659 } else if (object->ref_count == 2 &&
660 object->internal &&
661 object->shadow != VM_OBJECT_NULL) {
662 /*
663 * This internal object's reference count is about to
664 * drop from 2 to 1 and it has a shadow object:
665 * we'll want to try and collapse this object with its
666 * shadow.
667 */
668 } else if (object->ref_count >= 2) {
669 UInt32 original_ref_count;
670 volatile UInt32 *ref_count_p;
671 Boolean atomic_swap;
672
673 /*
674 * The object currently looks like it is not being
675 * kept alive solely by the reference we're about to release.
676 * Let's try and release our reference without taking
677 * all the locks we would need if we had to terminate the
678 * object (cache lock + exclusive object lock).
679 * Lock the object "shared" to make sure we don't race with
680 * anyone holding it "exclusive".
681 */
682 vm_object_lock_shared(object);
683 ref_count_p = (volatile UInt32 *) &object->ref_count;
684 original_ref_count = object->ref_count;
685 /*
686 * Test again as "ref_count" could have changed.
687 * "named" shouldn't change.
688 */
689 if (original_ref_count == 2 &&
690 object->named) {
691 /* need to take slow path for m_o_last_unmap() */
692 atomic_swap = FALSE;
693 } else if (original_ref_count == 2 &&
694 object->internal &&
695 object->shadow != VM_OBJECT_NULL) {
696 /* need to take slow path for vm_object_collapse() */
697 atomic_swap = FALSE;
698 } else if (original_ref_count < 2) {
699 /* need to take slow path for vm_object_terminate() */
700 atomic_swap = FALSE;
701 } else {
702 /* try an atomic update with the shared lock */
703 atomic_swap = OSCompareAndSwap(
704 original_ref_count,
705 original_ref_count - 1,
706 (UInt32 *) &object->ref_count);
707 if (atomic_swap == FALSE) {
708 vm_object_deallocate_shared_swap_failures++;
709 /* fall back to the slow path... */
710 }
711 }
712
713 vm_object_unlock(object);
714
715 if (atomic_swap) {
716 /*
717 * ref_count was updated atomically !
718 */
719 vm_object_deallocate_shared_successes++;
720 return;
721 }
722
723 /*
724 * Someone else updated the ref_count at the same
725 * time and we lost the race. Fall back to the usual
726 * slow but safe path...
727 */
728 vm_object_deallocate_shared_failures++;
729 }
730
731 while (object != VM_OBJECT_NULL) {
732 vm_object_lock(object);
733
734 assert(object->ref_count > 0);
735
736 /*
737 * If the object has a named reference, and only
738 * that reference would remain, inform the pager
739 * about the last "mapping" reference going away.
740 */
741 if ((object->ref_count == 2) && (object->named)) {
742 memory_object_t pager = object->pager;
743
744 /* Notify the Pager that there are no */
745 /* more mappers for this object */
746
747 if (pager != MEMORY_OBJECT_NULL) {
748 vm_object_mapping_wait(object, THREAD_UNINT);
749 vm_object_mapping_begin(object);
750 vm_object_unlock(object);
751
752 memory_object_last_unmap(pager);
753
754 vm_object_lock(object);
755 vm_object_mapping_end(object);
756 }
757 assert(object->ref_count > 0);
758 }
759
760 /*
761 * Lose the reference. If other references
762 * remain, then we are done, unless we need
763 * to retry a cache trim.
764 * If it is the last reference, then keep it
765 * until any pending initialization is completed.
766 */
767
768 /* if the object is terminating, it cannot go into */
769 /* the cache and we obviously should not call */
770 /* terminate again. */
771
772 if ((object->ref_count > 1) || object->terminating) {
773 vm_object_lock_assert_exclusive(object);
774 object->ref_count--;
775
776 if (object->ref_count == 1 &&
777 object->shadow != VM_OBJECT_NULL) {
778 /*
779 * There's only one reference left on this
780 * VM object. We can't tell if it's a valid
781 * one (from a mapping for example) or if this
782 * object is just part of a possibly stale and
783 * useless shadow chain.
784 * We would like to try and collapse it into
785 * its parent, but we don't have any pointers
786 * back to this parent object.
787 * But we can try and collapse this object with
788 * its own shadows, in case these are useless
789 * too...
790 * We can't bypass this object though, since we
791 * don't know if this last reference on it is
792 * meaningful or not.
793 */
794 vm_object_collapse(object, 0, FALSE);
795 }
796 vm_object_unlock(object);
797 return;
798 }
799
800 /*
801 * We have to wait for initialization
802 * before destroying or caching the object.
803 */
804
805 if (object->pager_created && !object->pager_initialized) {
806 assert(!object->can_persist);
807 vm_object_assert_wait(object,
808 VM_OBJECT_EVENT_INITIALIZED,
809 THREAD_UNINT);
810 vm_object_unlock(object);
811
812 thread_block(THREAD_CONTINUE_NULL);
813 continue;
814 }
815
816 /*
817 * Terminate this object. If it had a shadow,
818 * then deallocate it; otherwise, if we need
819 * to retry a cache trim, do so now; otherwise,
820 * we are done. "pageout" objects have a shadow,
821 * but maintain a "paging reference" rather than
822 * a normal reference.
823 */
824 shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
825
826 if (vm_object_terminate(object) != KERN_SUCCESS) {
827 return;
828 }
829 if (shadow != VM_OBJECT_NULL) {
830 object = shadow;
831 continue;
832 }
833 return;
834 }
835 }
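/*
 * Illustrative sketch (not part of the original source): the lock-avoidance
 * pattern used by vm_object_deallocate() above, reduced to its simplest
 * form. While holding only the shared object lock, the reference count is
 * dropped with a compare-and-swap; any case that might need real work
 * (a drop toward 1, or a "named" object losing its last mapping reference)
 * conservatively falls back to the exclusive-lock slow path.
 *
 *	vm_object_lock_shared(object);
 *	old = object->ref_count;
 *	if (old > 2 && !object->named &&
 *	    OSCompareAndSwap(old, old - 1, (UInt32 *)&object->ref_count)) {
 *		vm_object_unlock(object);
 *		return;		// fast path: reference dropped, nothing else to do
 *	}
 *	vm_object_unlock(object);
 *	// slow path: retake the lock exclusively and re-evaluate
 */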
836
837
838
839 vm_page_t
840 vm_object_page_grab(
841 vm_object_t object)
842 {
843 vm_page_t p, next_p;
844 int p_limit = 0;
845 int p_skipped = 0;
846
847 vm_object_lock_assert_exclusive(object);
848
849 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
850 p_limit = MIN(50, object->resident_page_count);
851
852 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
853 p = next_p;
854 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
855
856 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
857 goto move_page_in_obj;
858 }
859
860 if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
861 vm_page_lockspin_queues();
862
863 if (p->vmp_pmapped) {
864 int refmod_state;
865
866 vm_object_page_grab_pmapped++;
867
868 if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
869 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
870
871 if (refmod_state & VM_MEM_REFERENCED) {
872 p->vmp_reference = TRUE;
873 }
874 if (refmod_state & VM_MEM_MODIFIED) {
875 SET_PAGE_DIRTY(p, FALSE);
876 }
877 }
878 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
879 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
880
881 if (refmod_state & VM_MEM_REFERENCED) {
882 p->vmp_reference = TRUE;
883 }
884 if (refmod_state & VM_MEM_MODIFIED) {
885 SET_PAGE_DIRTY(p, FALSE);
886 }
887
888 if (p->vmp_dirty == FALSE) {
889 goto take_page;
890 }
891 }
892 }
893 if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
894 vm_page_activate(p);
895
896 counter_inc(&vm_statistics_reactivations);
897 vm_object_page_grab_reactivations++;
898 }
899 vm_page_unlock_queues();
900 move_page_in_obj:
901 vm_page_queue_remove(&object->memq, p, vmp_listq);
902 vm_page_queue_enter(&object->memq, p, vmp_listq);
903
904 p_skipped++;
905 continue;
906 }
907 vm_page_lockspin_queues();
908 take_page:
909 vm_page_free_prepare_queues(p);
910 vm_object_page_grab_returned++;
911 vm_object_page_grab_skipped += p_skipped;
912
913 vm_page_unlock_queues();
914
915 vm_page_free_prepare_object(p, TRUE);
916
917 return p;
918 }
919 vm_object_page_grab_skipped += p_skipped;
920 vm_object_page_grab_failed++;
921
922 return NULL;
923 }
924
925
926
927 #define EVICT_PREPARE_LIMIT 64
928 #define EVICT_AGE 10
929
930 static clock_sec_t vm_object_cache_aging_ts = 0;
931
932 static void
933 vm_object_cache_remove_locked(
934 vm_object_t object)
935 {
936 assert(object->purgable == VM_PURGABLE_DENY);
937
938 queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
939 object->cached_list.next = NULL;
940 object->cached_list.prev = NULL;
941
942 vm_object_cached_count--;
943 }
944
945 void
946 vm_object_cache_remove(
947 vm_object_t object)
948 {
949 vm_object_cache_lock_spin();
950
951 if (object->cached_list.next &&
952 object->cached_list.prev) {
953 vm_object_cache_remove_locked(object);
954 }
955
956 vm_object_cache_unlock();
957 }
958
959 void
960 vm_object_cache_add(
961 vm_object_t object)
962 {
963 clock_sec_t sec;
964 clock_nsec_t nsec;
965
966 assert(object->purgable == VM_PURGABLE_DENY);
967
968 if (object->resident_page_count == 0) {
969 return;
970 }
971 clock_get_system_nanotime(&sec, &nsec);
972
973 vm_object_cache_lock_spin();
974
975 if (object->cached_list.next == NULL &&
976 object->cached_list.prev == NULL) {
977 queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
978 object->vo_cache_ts = sec + EVICT_AGE;
979 object->vo_cache_pages_to_scan = object->resident_page_count;
980
981 vm_object_cached_count++;
982 vm_object_cache_adds++;
983 }
984 vm_object_cache_unlock();
985 }
986
987 int
988 vm_object_cache_evict(
989 int num_to_evict,
990 int max_objects_to_examine)
991 {
992 vm_object_t object = VM_OBJECT_NULL;
993 vm_object_t next_obj = VM_OBJECT_NULL;
994 vm_page_t local_free_q = VM_PAGE_NULL;
995 vm_page_t p;
996 vm_page_t next_p;
997 int object_cnt = 0;
998 vm_page_t ep_array[EVICT_PREPARE_LIMIT];
999 int ep_count;
1000 int ep_limit;
1001 int ep_index;
1002 int ep_freed = 0;
1003 int ep_moved = 0;
1004 uint32_t ep_skipped = 0;
1005 clock_sec_t sec;
1006 clock_nsec_t nsec;
1007
1008 KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1009 /*
1010 * do a couple of quick checks to see if it's
1011 * worthwhile grabbing the lock
1012 */
1013 if (queue_empty(&vm_object_cached_list)) {
1014 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1015 return 0;
1016 }
1017 clock_get_system_nanotime(&sec, &nsec);
1018
1019 /*
1020 * the object on the head of the queue has not
1021 * yet sufficiently aged
1022 */
1023 if (sec < vm_object_cache_aging_ts) {
1024 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1025 return 0;
1026 }
1027 /*
1028 * don't need the queue lock to find
1029 * and lock an object on the cached list
1030 */
1031 vm_page_unlock_queues();
1032
1033 vm_object_cache_lock_spin();
1034
1035 for (;;) {
1036 next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1037
1038 while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1039 object = next_obj;
1040 next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1041
1042 assert(object->purgable == VM_PURGABLE_DENY);
1043
1044 if (sec < object->vo_cache_ts) {
1045 KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1046
1047 vm_object_cache_aging_ts = object->vo_cache_ts;
1048 object = VM_OBJECT_NULL;
1049 break;
1050 }
1051 if (!vm_object_lock_try_scan(object)) {
1052 /*
1053 * just skip over this guy for now... if we find
1054 * an object to steal pages from, we'll revisit in a bit...
1055 * hopefully, the lock will have cleared
1056 */
1057 KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1058
1059 object = VM_OBJECT_NULL;
1060 continue;
1061 }
1062 if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1063 /*
1064 * this case really shouldn't happen, but it's not fatal
1065 * so deal with it... if we don't remove the object from
1066 * the list, we'll never move past it.
1067 */
1068 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1069
1070 vm_object_cache_remove_locked(object);
1071 vm_object_unlock(object);
1072 object = VM_OBJECT_NULL;
1073 continue;
1074 }
1075 /*
1076 * we have a locked object with pages...
1077 * time to start harvesting
1078 */
1079 break;
1080 }
1081 vm_object_cache_unlock();
1082
1083 if (object == VM_OBJECT_NULL) {
1084 break;
1085 }
1086
1087 /*
1088 * object is locked at this point and
1089 * has resident pages
1090 */
1091 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1092
1093 /*
1094 * break the page scan into 2 pieces to minimize the time spent
1095 * behind the page queue lock...
1096 * the list of pages on these unused objects is likely to be cold
1097 * w/r to the cpu cache which increases the time to scan the list
1098 * tenfold... and we may have a 'run' of pages we can't utilize that
1099 * needs to be skipped over...
1100 */
1101 if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1102 ep_limit = EVICT_PREPARE_LIMIT;
1103 }
1104 ep_count = 0;
1105
1106 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1107 p = next_p;
1108 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1109
1110 object->vo_cache_pages_to_scan--;
1111
1112 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1113 vm_page_queue_remove(&object->memq, p, vmp_listq);
1114 vm_page_queue_enter(&object->memq, p, vmp_listq);
1115
1116 ep_skipped++;
1117 continue;
1118 }
1119 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1120 vm_page_queue_remove(&object->memq, p, vmp_listq);
1121 vm_page_queue_enter(&object->memq, p, vmp_listq);
1122
1123 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1124 }
1125 ep_array[ep_count++] = p;
1126 }
1127 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1128
1129 vm_page_lockspin_queues();
1130
1131 for (ep_index = 0; ep_index < ep_count; ep_index++) {
1132 p = ep_array[ep_index];
1133
1134 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1135 p->vmp_reference = FALSE;
1136 p->vmp_no_cache = FALSE;
1137
1138 /*
1139 * we've already filtered out pages that are in the laundry
1140 * so if we get here, this page can't be on the pageout queue
1141 */
1142 vm_page_queues_remove(p, FALSE);
1143 vm_page_enqueue_inactive(p, TRUE);
1144
1145 ep_moved++;
1146 } else {
1147 #if CONFIG_PHANTOM_CACHE
1148 vm_phantom_cache_add_ghost(p);
1149 #endif
1150 vm_page_free_prepare_queues(p);
1151
1152 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1153 /*
1154 * Add this page to our list of reclaimed pages,
1155 * to be freed later.
1156 */
1157 p->vmp_snext = local_free_q;
1158 local_free_q = p;
1159
1160 ep_freed++;
1161 }
1162 }
1163 vm_page_unlock_queues();
1164
1165 KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1166
1167 if (local_free_q) {
1168 vm_page_free_list(local_free_q, TRUE);
1169 local_free_q = VM_PAGE_NULL;
1170 }
1171 if (object->vo_cache_pages_to_scan == 0) {
1172 KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1173
1174 vm_object_cache_remove(object);
1175
1176 KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1177 }
1178 /*
1179 * done with this object
1180 */
1181 vm_object_unlock(object);
1182 object = VM_OBJECT_NULL;
1183
1184 /*
1185 * at this point, we are not holding any locks
1186 */
1187 if ((ep_freed + ep_moved) >= num_to_evict) {
1188 /*
1189 * we've reached our target for the
1190 * number of pages to evict
1191 */
1192 break;
1193 }
1194 vm_object_cache_lock_spin();
1195 }
1196 /*
1197 * put the page queues lock back to the caller's
1198 * idea of it
1199 */
1200 vm_page_lock_queues();
1201
1202 vm_object_cache_pages_freed += ep_freed;
1203 vm_object_cache_pages_moved += ep_moved;
1204 vm_object_cache_pages_skipped += ep_skipped;
1205
1206 KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
1207 return ep_freed;
1208 }
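/*
 * Illustrative sketch (not part of the original source): the two-phase
 * batching pattern vm_object_cache_evict() uses above. Candidate pages are
 * first gathered into a small local array while holding only the object
 * lock, and the page-queues lock is then taken once to dispose of the
 * whole batch, bounding how long that heavily contended lock is held.
 *
 *	count = 0;
 *	while (have_candidate(object) && count < EVICT_PREPARE_LIMIT) {
 *		batch[count++] = next_candidate(object);   // object lock only
 *	}
 *	vm_page_lockspin_queues();
 *	for (i = 0; i < count; i++) {
 *		dispose(batch[i]);	// free, or move back to the inactive queue
 *	}
 *	vm_page_unlock_queues();
 *
 * have_candidate(), next_candidate() and dispose() are hypothetical
 * placeholders for the filtering and freeing done in the real loops.
 */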
1209
1210 /*
1211 * Routine: vm_object_terminate
1212 * Purpose:
1213 * Free all resources associated with a vm_object.
1214 * In/out conditions:
1215 * Upon entry, the object must be locked,
1216 * and the object must have exactly one reference.
1217 *
1218 * The shadow object reference is left alone.
1219 *
1220 * The object must be unlocked if it is found that pages
1221 * must be flushed to a backing object. If someone
1222 * manages to map the object while it is being flushed
1223 * the object is returned unlocked and unchanged. Otherwise,
1224 * upon exit, the cache will be unlocked, and the
1225 * object will cease to exist.
1226 */
1227 static kern_return_t
1228 vm_object_terminate(
1229 vm_object_t object)
1230 {
1231 vm_object_t shadow_object;
1232
1233 vm_object_lock_assert_exclusive(object);
1234
1235 if (!object->pageout && (!object->internal && object->can_persist) &&
1236 (object->pager != NULL || object->shadow_severed)) {
1237 /*
1238 * Clear pager_trusted bit so that the pages get yanked
1239 * out of the object instead of cleaned in place. This
1240 * prevents a deadlock in XMM and makes more sense anyway.
1241 */
1242 object->pager_trusted = FALSE;
1243
1244 vm_object_reap_pages(object, REAP_TERMINATE);
1245 }
1246 /*
1247 * Make sure the object isn't already being terminated
1248 */
1249 if (object->terminating) {
1250 vm_object_lock_assert_exclusive(object);
1251 object->ref_count--;
1252 assert(object->ref_count > 0);
1253 vm_object_unlock(object);
1254 return KERN_FAILURE;
1255 }
1256
1257 /*
1258 * Did somebody get a reference to the object while we were
1259 * cleaning it?
1260 */
1261 if (object->ref_count != 1) {
1262 vm_object_lock_assert_exclusive(object);
1263 object->ref_count--;
1264 assert(object->ref_count > 0);
1265 vm_object_unlock(object);
1266 return KERN_FAILURE;
1267 }
1268
1269 /*
1270 * Make sure no one can look us up now.
1271 */
1272
1273 object->terminating = TRUE;
1274 object->alive = FALSE;
1275
1276 if (!object->internal &&
1277 object->cached_list.next &&
1278 object->cached_list.prev) {
1279 vm_object_cache_remove(object);
1280 }
1281
1282 /*
1283 * Detach the object from its shadow if we are the shadow's
1284 * copy. The reference we hold on the shadow must be dropped
1285 * by our caller.
1286 */
1287 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1288 !(object->pageout)) {
1289 vm_object_lock(shadow_object);
1290 if (shadow_object->vo_copy == object) {
1291 VM_OBJECT_COPY_SET(shadow_object, VM_OBJECT_NULL);
1292 }
1293 vm_object_unlock(shadow_object);
1294 }
1295
1296 if (object->paging_in_progress != 0 ||
1297 object->activity_in_progress != 0) {
1298 /*
1299 * There are still some paging_in_progress references
1300 * on this object, meaning that there are some paging
1301 * or other I/O operations in progress for this VM object.
1302 * Such operations take some paging_in_progress references
1303 * up front to ensure that the object doesn't go away, but
1304 * they may also need to acquire a reference on the VM object,
1305 * to map it in kernel space, for example. That means that
1306 * they may end up releasing the last reference on the VM
1307 * object, triggering its termination, while still holding
1308 * paging_in_progress references. Waiting for these
1309 * pending paging_in_progress references to go away here would
1310 * deadlock.
1311 *
1312 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1313 * complete the VM object termination if it still holds
1314 * paging_in_progress references at this point.
1315 *
1316 * No new paging_in_progress should appear now that the
1317 * VM object is "terminating" and not "alive".
1318 */
1319 vm_object_reap_async(object);
1320 vm_object_unlock(object);
1321 /*
1322 * Return KERN_FAILURE to let the caller know that we
1323 * haven't completed the termination and it can't drop this
1324 * object's reference on its shadow object yet.
1325 * The reaper thread will take care of that once it has
1326 * completed this object's termination.
1327 */
1328 return KERN_FAILURE;
1329 }
1330 /*
1331 * complete the VM object termination
1332 */
1333 vm_object_reap(object);
1334 object = VM_OBJECT_NULL;
1335
1336 /*
1337 * the object lock was released by vm_object_reap()
1338 *
1339 * KERN_SUCCESS means that this object has been terminated
1340 * and no longer needs its shadow object but still holds a
1341 * reference on it.
1342 * The caller is responsible for dropping that reference.
1343 * We can't call vm_object_deallocate() here because that
1344 * would create a recursion.
1345 */
1346 return KERN_SUCCESS;
1347 }
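/*
 * Illustrative sketch (not part of the original source): how a caller is
 * expected to react to vm_object_terminate()'s return value, given the
 * synchronous/asynchronous split described above. On KERN_SUCCESS the
 * caller still has to drop the reference the dead object held on its
 * shadow; on KERN_FAILURE the reaper thread (or a racing reference holder)
 * finishes the teardown, including that shadow reference.
 *
 *	shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
 *	if (vm_object_terminate(object) != KERN_SUCCESS) {
 *		return;			// somebody else completes the teardown
 *	}
 *	if (shadow != VM_OBJECT_NULL) {
 *		vm_object_deallocate(shadow);
 *	}
 */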
1348
1349
1350 /*
1351 * vm_object_reap():
1352 *
1353 * Complete the termination of a VM object after it's been marked
1354 * as "terminating" and "!alive" by vm_object_terminate().
1355 *
1356 * The VM object must be locked by caller.
1357 * The lock will be released on return and the VM object is no longer valid.
1358 */
1359
1360 void
1361 vm_object_reap(
1362 vm_object_t object)
1363 {
1364 memory_object_t pager;
1365
1366 vm_object_lock_assert_exclusive(object);
1367 assert(object->paging_in_progress == 0);
1368 assert(object->activity_in_progress == 0);
1369
1370 vm_object_reap_count++;
1371
1372 /*
1373 * Disown this purgeable object to cleanup its owner's purgeable
1374 * ledgers. We need to do this before disconnecting the object
1375 * from its pager, to properly account for compressed pages.
1376 */
1377 if (object->internal &&
1378 (object->purgable != VM_PURGABLE_DENY ||
1379 object->vo_ledger_tag)) {
1380 int ledger_flags;
1381 kern_return_t kr;
1382
1383 ledger_flags = 0;
1384 if (object->vo_no_footprint) {
1385 ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
1386 }
1387 assert(!object->alive);
1388 assert(object->terminating);
1389 kr = vm_object_ownership_change(object,
1390 object->vo_ledger_tag, /* unchanged */
1391 NULL, /* no owner */
1392 ledger_flags,
1393 FALSE); /* task_objq not locked */
1394 assert(kr == KERN_SUCCESS);
1395 assert(object->vo_owner == NULL);
1396 }
1397
1398 #if DEVELOPMENT || DEBUG
1399 if (object->object_is_shared_cache &&
1400 object->pager != NULL &&
1401 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1402 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1403 }
1404 #endif /* DEVELOPMENT || DEBUG */
1405
1406 pager = object->pager;
1407 object->pager = MEMORY_OBJECT_NULL;
1408
1409 if (pager != MEMORY_OBJECT_NULL) {
1410 memory_object_control_disable(&object->pager_control);
1411 }
1412
1413 object->ref_count--;
1414 assert(object->ref_count == 0);
1415
1416 /*
1417 * remove from purgeable queue if it's on
1418 */
1419 if (object->internal) {
1420 assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1421
1422 VM_OBJECT_UNWIRED(object);
1423
1424 if (object->purgable == VM_PURGABLE_DENY) {
1425 /* not purgeable: nothing to do */
1426 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1427 purgeable_q_t queue;
1428
1429 queue = vm_purgeable_object_remove(object);
1430 assert(queue);
1431
1432 if (object->purgeable_when_ripe) {
1433 /*
1434 * Must take page lock for this -
1435 * using it to protect token queue
1436 */
1437 vm_page_lock_queues();
1438 vm_purgeable_token_delete_first(queue);
1439
1440 assert(queue->debug_count_objects >= 0);
1441 vm_page_unlock_queues();
1442 }
1443
1444 /*
1445 * Update "vm_page_purgeable_count" in bulk and mark
1446 * object as VM_PURGABLE_EMPTY to avoid updating
1447 * "vm_page_purgeable_count" again in vm_page_remove()
1448 * when reaping the pages.
1449 */
1450 unsigned int delta;
1451 assert(object->resident_page_count >=
1452 object->wired_page_count);
1453 delta = (object->resident_page_count -
1454 object->wired_page_count);
1455 if (delta != 0) {
1456 assert(vm_page_purgeable_count >= delta);
1457 OSAddAtomic(-delta,
1458 (SInt32 *)&vm_page_purgeable_count);
1459 }
1460 if (object->wired_page_count != 0) {
1461 assert(vm_page_purgeable_wired_count >=
1462 object->wired_page_count);
1463 OSAddAtomic(-object->wired_page_count,
1464 (SInt32 *)&vm_page_purgeable_wired_count);
1465 }
1466 object->purgable = VM_PURGABLE_EMPTY;
1467 } else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1468 object->purgable == VM_PURGABLE_EMPTY) {
1469 /* remove from nonvolatile queue */
1470 vm_purgeable_nonvolatile_dequeue(object);
1471 } else {
1472 panic("object %p in unexpected purgeable state 0x%x",
1473 object, object->purgable);
1474 }
1475 if (object->transposed &&
1476 object->cached_list.next != NULL &&
1477 object->cached_list.prev == NULL) {
1478 /*
1479 * object->cached_list.next "points" to the
1480 * object that was transposed with this object.
1481 */
1482 } else {
1483 assert(object->cached_list.next == NULL);
1484 }
1485 assert(object->cached_list.prev == NULL);
1486 }
1487
1488 if (object->pageout) {
1489 /*
1490 * free all remaining pages tabled on
1491 * this object
1492 * clean up its shadow
1493 */
1494 assert(object->shadow != VM_OBJECT_NULL);
1495
1496 vm_pageout_object_terminate(object);
1497 } else if (object->resident_page_count) {
1498 /*
1499 * free all remaining pages tabled on
1500 * this object
1501 */
1502 vm_object_reap_pages(object, REAP_REAP);
1503 }
1504 assert(vm_page_queue_empty(&object->memq));
1505 assert(object->paging_in_progress == 0);
1506 assert(object->activity_in_progress == 0);
1507 assert(object->ref_count == 0);
1508
1509 /*
1510 * If the pager has not already been released by
1511 * vm_object_destroy, we need to terminate it and
1512 * release our reference to it here.
1513 */
1514 if (pager != MEMORY_OBJECT_NULL) {
1515 vm_object_unlock(object);
1516 vm_object_release_pager(pager);
1517 vm_object_lock(object);
1518 }
1519
1520 /* kick off anyone waiting on terminating */
1521 object->terminating = FALSE;
1522 vm_object_paging_begin(object);
1523 vm_object_paging_end(object);
1524 vm_object_unlock(object);
1525
1526 object->shadow = VM_OBJECT_NULL;
1527
1528 #if VM_OBJECT_TRACKING
1529 if (vm_object_tracking_btlog) {
1530 btlog_erase(vm_object_tracking_btlog, object);
1531 }
1532 #endif /* VM_OBJECT_TRACKING */
1533
1534 vm_object_lock_destroy(object);
1535 /*
1536 * Free the space for the object.
1537 */
1538 zfree(vm_object_zone, object);
1539 object = VM_OBJECT_NULL;
1540 }
1541
1542
1543 unsigned int vm_max_batch = 256;
1544
1545 #define V_O_R_MAX_BATCH 128
1546
1547 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
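/*
 * Worked example (not part of the original source): with the defaults
 * above, BATCH_LIMIT(V_O_R_MAX_BATCH) is MIN(vm_max_batch, 128), i.e. 128
 * pages per batch unless "vm_max_batch" is tuned below that at runtime.
 */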
1548
1549
1550 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect) \
1551 MACRO_BEGIN \
1552 if (_local_free_q) { \
1553 if (do_disconnect) { \
1554 vm_page_t m; \
1555 for (m = _local_free_q; \
1556 m != VM_PAGE_NULL; \
1557 m = m->vmp_snext) { \
1558 if (m->vmp_pmapped) { \
1559 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
1560 } \
1561 } \
1562 } \
1563 vm_page_free_list(_local_free_q, TRUE); \
1564 _local_free_q = VM_PAGE_NULL; \
1565 } \
1566 MACRO_END
1567
1568
1569 void
1570 vm_object_reap_pages(
1571 vm_object_t object,
1572 int reap_type)
1573 {
1574 vm_page_t p;
1575 vm_page_t next;
1576 vm_page_t local_free_q = VM_PAGE_NULL;
1577 int loop_count;
1578 boolean_t disconnect_on_release;
1579 pmap_flush_context pmap_flush_context_storage;
1580
1581 if (reap_type == REAP_DATA_FLUSH) {
1582 /*
1583 * We need to disconnect pages from all pmaps before
1584 * releasing them to the free list
1585 */
1586 disconnect_on_release = TRUE;
1587 } else {
1588 /*
1589 * Either the caller has already disconnected the pages
1590 * from all pmaps, or we disconnect them here as we add
1591 * them to our local list of pages to be released.
1592 * No need to re-disconnect them when we release the pages
1593 * to the free list.
1594 */
1595 disconnect_on_release = FALSE;
1596 }
1597
1598 restart_after_sleep:
1599 if (vm_page_queue_empty(&object->memq)) {
1600 return;
1601 }
1602 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1603
1604 if (reap_type == REAP_PURGEABLE) {
1605 pmap_flush_context_init(&pmap_flush_context_storage);
1606 }
1607
1608 vm_page_lock_queues();
1609
1610 next = (vm_page_t)vm_page_queue_first(&object->memq);
1611
1612 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1613 p = next;
1614 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1615
1616 if (--loop_count == 0) {
1617 vm_page_unlock_queues();
1618
1619 if (local_free_q) {
1620 if (reap_type == REAP_PURGEABLE) {
1621 pmap_flush(&pmap_flush_context_storage);
1622 pmap_flush_context_init(&pmap_flush_context_storage);
1623 }
1624 /*
1625 * Free the pages we reclaimed so far
1626 * and take a little break to avoid
1627 * hogging the page queue lock too long
1628 */
1629 VM_OBJ_REAP_FREELIST(local_free_q,
1630 disconnect_on_release);
1631 } else {
1632 mutex_pause(0);
1633 }
1634
1635 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1636
1637 vm_page_lock_queues();
1638 }
1639 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1640 if (p->vmp_busy || p->vmp_cleaning) {
1641 vm_page_unlock_queues();
1642 /*
1643 * free the pages reclaimed so far
1644 */
1645 VM_OBJ_REAP_FREELIST(local_free_q,
1646 disconnect_on_release);
1647
1648 PAGE_SLEEP(object, p, THREAD_UNINT);
1649
1650 goto restart_after_sleep;
1651 }
1652 if (p->vmp_laundry) {
1653 vm_pageout_steal_laundry(p, TRUE);
1654 }
1655 }
1656 switch (reap_type) {
1657 case REAP_DATA_FLUSH:
1658 if (VM_PAGE_WIRED(p)) {
1659 /*
1660 * this is an odd case... perhaps we should
1661 * zero-fill this page since we're conceptually
1662 * tossing its data at this point, but leaving
1663 * it on the object to honor the 'wire' contract
1664 */
1665 continue;
1666 }
1667 break;
1668
1669 case REAP_PURGEABLE:
1670 if (VM_PAGE_WIRED(p)) {
1671 /*
1672 * can't purge a wired page
1673 */
1674 vm_page_purged_wired++;
1675 continue;
1676 }
1677 if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1678 vm_pageout_steal_laundry(p, TRUE);
1679 }
1680
1681 if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1682 /*
1683 * page is being acted upon,
1684 * so don't mess with it
1685 */
1686 vm_page_purged_others++;
1687 continue;
1688 }
1689 if (p->vmp_busy) {
1690 /*
1691 * We can't reclaim a busy page but we can
1692 * make it more likely to be paged (it's not wired) to make
1693 * sure that it gets considered by
1694 * vm_pageout_scan() later.
1695 */
1696 if (VM_PAGE_PAGEABLE(p)) {
1697 vm_page_deactivate(p);
1698 }
1699 vm_page_purged_busy++;
1700 continue;
1701 }
1702
1703 assert(!is_kernel_object(VM_PAGE_OBJECT(p)));
1704
1705 /*
1706 * we can discard this page...
1707 */
1708 if (p->vmp_pmapped == TRUE) {
1709 /*
1710 * unmap the page
1711 */
1712 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1713 }
1714 vm_page_purged_count++;
1715
1716 break;
1717
1718 case REAP_TERMINATE:
1719 if (p->vmp_absent || p->vmp_private) {
1720 /*
1721 * For private pages, VM_PAGE_FREE just
1722 * leaves the page structure around for
1723 * its owner to clean up. For absent
1724 * pages, the structure is returned to
1725 * the appropriate pool.
1726 */
1727 break;
1728 }
1729 if (p->vmp_fictitious) {
1730 assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
1731 break;
1732 }
1733 if (!p->vmp_dirty && p->vmp_wpmapped) {
1734 p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1735 }
1736
1737 if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) {
1738 assert(!object->internal);
1739
1740 p->vmp_free_when_done = TRUE;
1741
1742 if (!p->vmp_laundry) {
1743 vm_page_queues_remove(p, TRUE);
1744 /*
1745 * flush page... page will be freed
1746 * upon completion of I/O
1747 */
1748 vm_pageout_cluster(p);
1749 }
1750 vm_page_unlock_queues();
1751 /*
1752 * free the pages reclaimed so far
1753 */
1754 VM_OBJ_REAP_FREELIST(local_free_q,
1755 disconnect_on_release);
1756
1757 vm_object_paging_wait(object, THREAD_UNINT);
1758
1759 goto restart_after_sleep;
1760 }
1761 break;
1762
1763 case REAP_REAP:
1764 break;
1765 }
1766 vm_page_free_prepare_queues(p);
1767 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1768 /*
1769 * Add this page to our list of reclaimed pages,
1770 * to be freed later.
1771 */
1772 p->vmp_snext = local_free_q;
1773 local_free_q = p;
1774 }
1775 vm_page_unlock_queues();
1776
1777 /*
1778 * Free the remaining reclaimed pages
1779 */
1780 if (reap_type == REAP_PURGEABLE) {
1781 pmap_flush(&pmap_flush_context_storage);
1782 }
1783
1784 VM_OBJ_REAP_FREELIST(local_free_q,
1785 disconnect_on_release);
1786 }
1787
1788
1789 void
1790 vm_object_reap_async(
1791 vm_object_t object)
1792 {
1793 vm_object_lock_assert_exclusive(object);
1794
1795 vm_object_reaper_lock_spin();
1796
1797 vm_object_reap_count_async++;
1798
1799 /* enqueue the VM object... */
1800 queue_enter(&vm_object_reaper_queue, object,
1801 vm_object_t, cached_list);
1802
1803 vm_object_reaper_unlock();
1804
1805 /* ... and wake up the reaper thread */
1806 thread_wakeup((event_t) &vm_object_reaper_queue);
1807 }
1808
1809
1810 void
1811 vm_object_reaper_thread(void)
1812 {
1813 vm_object_t object, shadow_object;
1814
1815 vm_object_reaper_lock_spin();
1816
1817 while (!queue_empty(&vm_object_reaper_queue)) {
1818 queue_remove_first(&vm_object_reaper_queue,
1819 object,
1820 vm_object_t,
1821 cached_list);
1822
1823 vm_object_reaper_unlock();
1824 vm_object_lock(object);
1825
1826 assert(object->terminating);
1827 assert(!object->alive);
1828
1829 /*
1830 * The pageout daemon might be playing with our pages.
1831 * Now that the object is dead, it won't touch any more
1832 * pages, but some pages might already be on their way out.
1833 * Hence, we wait until the active paging activities have
1834 * ceased before we break the association with the pager
1835 * itself.
1836 */
1837 while (object->paging_in_progress != 0 ||
1838 object->activity_in_progress != 0) {
1839 vm_object_wait(object,
1840 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1841 THREAD_UNINT);
1842 vm_object_lock(object);
1843 }
1844
1845 shadow_object =
1846 object->pageout ? VM_OBJECT_NULL : object->shadow;
1847
1848 vm_object_reap(object);
1849 /* cache is unlocked and object is no longer valid */
1850 object = VM_OBJECT_NULL;
1851
1852 if (shadow_object != VM_OBJECT_NULL) {
1853 /*
1854 * Drop the reference "object" was holding on
1855 * its shadow object.
1856 */
1857 vm_object_deallocate(shadow_object);
1858 shadow_object = VM_OBJECT_NULL;
1859 }
1860 vm_object_reaper_lock_spin();
1861 }
1862
1863 /* wait for more work... */
1864 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1865
1866 vm_object_reaper_unlock();
1867
1868 thread_block((thread_continue_t) vm_object_reaper_thread);
1869 /*NOTREACHED*/
1870 }
1871
1872 /*
1873 * Routine: vm_object_release_pager
1874 * Purpose: Terminate the pager and, upon completion,
1875 * release our last reference to it.
1876 */
1877 static void
1878 vm_object_release_pager(
1879 memory_object_t pager)
1880 {
1881 /*
1882 * Terminate the pager.
1883 */
1884
1885 (void) memory_object_terminate(pager);
1886
1887 /*
1888 * Release reference to pager.
1889 */
1890 memory_object_deallocate(pager);
1891 }
1892
1893 /*
1894 * Routine: vm_object_destroy
1895 * Purpose:
1896 * Shut down a VM object, despite the
1897 * presence of address map (or other) references
1898 * to the vm_object.
1899 */
1900 #if MACH_ASSERT
1901 extern uint32_t system_inshutdown;
1902 int fbdp_no_panic = 1;
1903 #endif /* MACH_ASSERT */
1904 kern_return_t
1905 vm_object_destroy(
1906 vm_object_t object,
1907 vm_object_destroy_reason_t reason)
1908 {
1909 memory_object_t old_pager;
1910
1911 if (object == VM_OBJECT_NULL) {
1912 return KERN_SUCCESS;
1913 }
1914
1915 /*
1916 * Remove the pager association immediately.
1917 *
1918 * This will prevent the memory manager from further
1919 * meddling. [If it wanted to flush data or make
1920 * other changes, it should have done so before performing
1921 * the destroy call.]
1922 */
1923
1924 vm_object_lock(object);
1925
1926 #if FBDP_DEBUG_OBJECT_NO_PAGER
1927 static bool fbdp_no_panic_retrieved = false;
1928 if (!fbdp_no_panic_retrieved) {
1929 PE_parse_boot_argn("fbdp_no_panic4", &fbdp_no_panic, sizeof(fbdp_no_panic));
1930 fbdp_no_panic_retrieved = true;
1931 }
1932
1933 bool forced_unmount = false;
1934 if (object->named &&
1935 object->ref_count > 2 &&
1936 object->pager != NULL &&
1937 vnode_pager_get_forced_unmount(object->pager, &forced_unmount) == KERN_SUCCESS &&
1938 forced_unmount == false) {
1939 if (!fbdp_no_panic) {
1940 panic("FBDP rdar://99829401 object %p refs %d pager %p (no forced unmount)\n", object, object->ref_count, object->pager);
1941 }
1942 DTRACE_VM3(vm_object_destroy_no_forced_unmount,
1943 vm_object_t, object,
1944 int, object->ref_count,
1945 memory_object_t, object->pager);
1946 }
1947
1948 if (object->fbdp_tracked) {
1949 if (object->ref_count > 2 && !system_inshutdown) {
1950 if (!fbdp_no_panic) {
1951 panic("FBDP/4 rdar://99829401 object %p refs %d pager %p (tracked)\n", object, object->ref_count, object->pager);
1952 }
1953 }
1954 object->fbdp_tracked = false;
1955 }
1956 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
1957
1958 if (reason != VM_OBJECT_DESTROY_UNKNOWN_REASON) {
1959 object->no_pager_reason = reason;
1960 }
1961
1962 object->can_persist = FALSE;
1963 object->named = FALSE;
1964 #if 00
1965 object->alive = FALSE;
1966 #endif /* 00 */
1967
1968 #if DEVELOPMENT || DEBUG
1969 if (object->object_is_shared_cache &&
1970 object->pager != NULL &&
1971 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1972 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1973 }
1974 #endif /* DEVELOPMENT || DEBUG */
1975
1976 old_pager = object->pager;
1977 object->pager = MEMORY_OBJECT_NULL;
1978 if (old_pager != MEMORY_OBJECT_NULL) {
1979 memory_object_control_disable(&object->pager_control);
1980 }
1981
1982 /*
1983 * Wait for the existing paging activity (that got
1984 * through before we nulled out the pager) to subside.
1985 */
1986
1987 vm_object_paging_wait(object, THREAD_UNINT);
1988 vm_object_unlock(object);
1989
1990 /*
1991 * Terminate the object now.
1992 */
1993 if (old_pager != MEMORY_OBJECT_NULL) {
1994 vm_object_release_pager(old_pager);
1995
1996 /*
1997 * JMM - Release the caller's reference. This assumes the
1998 * caller had a reference to release, which is a big (but
1999 * currently valid) assumption if this is driven from the
2000 * vnode pager (it is holding a named reference when making
2001 * this call).
2002 */
2003 vm_object_deallocate(object);
2004 }
2005 return KERN_SUCCESS;
2006 }
2007
2008 /*
2009 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2010 * exist because of the need to handle shadow chains. When deactivating pages, we only
2011 * want to deactivate the ones at the topmost level in the object chain. In order to do
2012 * this efficiently, the specified address range is divided up into "chunks" and we use
2013 * a bit map to keep track of which pages have already been processed as we descend down
2014 * the shadow chain. These chunk macros hide the details of the bit map implementation
2015 * as much as we can.
2016 *
2017 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2018 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2019 * order bit represents page 0 in the current range and highest order bit represents
2020 * page 63.
2021 *
2022 * For further convenience, we also use negative logic for the page state in the bit map.
2023 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2024 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2025 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2026 * out with all the bits set. The macros below hide all these details from the caller.
2027 */
2028
2029 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2030 /* be the same as the number of bits in */
2031 /* the chunk_state_t type. We use 64 */
2032 /* just for convenience. */
2033
2034 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2035
2036 typedef uint64_t chunk_state_t;
2037
2038 /*
2039 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2040 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2041 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2042 * looking at pages in that range. This can save us from unnecessarily chasing down the
2043 * shadow chain.
2044 */
2045
2046 #define CHUNK_INIT(c, len) \
2047 MACRO_BEGIN \
2048 uint64_t p; \
2049 \
2050 (c) = 0xffffffffffffffffLL; \
2051 \
2052 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2053 MARK_PAGE_HANDLED(c, p); \
2054 MACRO_END
2055
2056
2057 /*
2058 * Return true if any page in the chunk has not yet been processed.
2059 */
2060
2061 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2062
2063 /*
2064 * Return true if the page at offset 'p' in the bit map has already been handled
2065 * while processing a higher level object in the shadow chain.
2066 */
2067
2068 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1ULL << (p))) == 0)
2069
2070 /*
2071 * Mark the page at offset 'p' in the bit map as having been processed.
2072 */
2073
2074 #define MARK_PAGE_HANDLED(c, p) \
2075 MACRO_BEGIN \
2076 (c) = (c) & ~(1ULL << (p)); \
2077 MACRO_END
2078
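/*
 * Worked example (editor's illustration, not part of the original
 * comments): for a range spanning 3 pages (len == 3 * PAGE_SIZE_64),
 * CHUNK_INIT() starts with all 64 bits set and then clears bits 3..63
 * via MARK_PAGE_HANDLED(), leaving c == 0x7, i.e. only pages 0, 1 and 2
 * still need processing.  After the page at index 1 is handled,
 * MARK_PAGE_HANDLED(c, 1) leaves c == 0x5, and CHUNK_NOT_COMPLETE(c)
 * remains true until pages 0 and 2 have been handled as well, at which
 * point c == 0 and the walk down the shadow chain can stop early.
 */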
2079
2080 /*
2081 * Return true if the page at the given offset has been paged out. Object is
2082 * locked upon entry and returned locked.
2083 */
2084
2085 static boolean_t
2086 page_is_paged_out(
2087 vm_object_t object,
2088 vm_object_offset_t offset)
2089 {
2090 if (object->internal &&
2091 object->alive &&
2092 !object->terminating &&
2093 object->pager_ready) {
2094 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2095 == VM_EXTERNAL_STATE_EXISTS) {
2096 return TRUE;
2097 }
2098 }
2099 return FALSE;
2100 }
2101
2102
2103
2104 /*
2105 * madvise_free_debug
2106 *
2107 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2108 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2109 * simulate the loss of the page's contents as if the page had been
2110 * reclaimed and then re-faulted.
2111 */
2112 #if DEVELOPMENT || DEBUG
2113 int madvise_free_debug = 0;
2114 int madvise_free_debug_sometimes = 1;
2115 #else /* DEVELOPMENT || DEBUG */
2116 int madvise_free_debug = 0;
2117 int madvise_free_debug_sometimes = 0;
2118 #endif /* DEVELOPMENT || DEBUG */
2119 int madvise_free_counter = 0;
2120
2121 __options_decl(deactivate_flags_t, uint32_t, {
2122 DEACTIVATE_KILL = 0x1,
2123 DEACTIVATE_REUSABLE = 0x2,
2124 DEACTIVATE_ALL_REUSABLE = 0x4,
2125 DEACTIVATE_CLEAR_REFMOD = 0x8,
2126 DEACTIVATE_REUSABLE_NO_WRITE = 0x10
2127 });
2128
2129 /*
2130 * Deactivate the pages in the specified object and range. If the DEACTIVATE_KILL flag is set, also discard any
2131 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2132 * a size that is less than or equal to the CHUNK_SIZE.
2133 */
2134
2135 static void
2136 deactivate_pages_in_object(
2137 vm_object_t object,
2138 vm_object_offset_t offset,
2139 vm_object_size_t size,
2140 deactivate_flags_t flags,
2141 chunk_state_t *chunk_state,
2142 pmap_flush_context *pfc,
2143 struct pmap *pmap,
2144 vm_map_offset_t pmap_offset)
2145 {
2146 vm_page_t m;
2147 int p;
2148 struct vm_page_delayed_work dw_array;
2149 struct vm_page_delayed_work *dwp, *dwp_start;
2150 bool dwp_finish_ctx = TRUE;
2151 int dw_count;
2152 int dw_limit;
2153 unsigned int reusable = 0;
2154
2155 /*
2156 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2157 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2158 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2159 * all the pages in the chunk.
2160 */
2161
2162 dwp_start = dwp = NULL;
2163 dw_count = 0;
2164 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2165 dwp_start = vm_page_delayed_work_get_ctx();
2166 if (dwp_start == NULL) {
2167 dwp_start = &dw_array;
2168 dw_limit = 1;
2169 dwp_finish_ctx = FALSE;
2170 }
2171
2172 dwp = dwp_start;
2173
2174 for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2175 /*
2176 * If this offset has already been found and handled in a higher level object, then don't
2177 * do anything with it in the current shadow object.
2178 */
2179
2180 if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2181 continue;
2182 }
2183
2184 /*
2185 * See if the page at this offset is around. First check to see if the page is resident,
2186 * then if not, check the existence map or with the pager.
2187 */
2188
2189 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2190 /*
2191 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2192 * so that we won't bother looking for a page at this offset again if there are more
2193 * shadow objects. Then deactivate the page.
2194 */
2195
2196 MARK_PAGE_HANDLED(*chunk_state, p);
2197
2198 if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2199 (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2200 int clear_refmod_mask;
2201 int pmap_options;
2202 dwp->dw_mask = 0;
2203
2204 pmap_options = 0;
2205 clear_refmod_mask = VM_MEM_REFERENCED;
2206 dwp->dw_mask |= DW_clear_reference;
2207
2208 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2209 if (!(flags & DEACTIVATE_REUSABLE_NO_WRITE) &&
2210 (madvise_free_debug ||
2211 (madvise_free_debug_sometimes &&
2212 madvise_free_counter++ & 0x1))) {
2213 /*
2214 * zero-fill the page (or every
2215 * other page) now to simulate
2216 * it being reclaimed and
2217 * re-faulted.
2218 */
2219 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2220 if (!m->vmp_unmodified_ro) {
2221 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2222 if (true) {
2223 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2224 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2225 }
2226 }
2227 m->vmp_precious = FALSE;
2228 m->vmp_dirty = FALSE;
2229
2230 clear_refmod_mask |= VM_MEM_MODIFIED;
2231 if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2232 /*
2233 * This page is now clean and
2234 * reclaimable. Move it out
2235 * of the throttled queue, so
2236 * that vm_pageout_scan() can
2237 * find it.
2238 */
2239 dwp->dw_mask |= DW_move_page;
2240 }
2241
2242 #if 0
2243 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2244 /*
2245 * COMMENT BLOCK ON WHY THIS SHOULDN'T BE DONE.
2246 *
2247 * Since we are about to do a VM_COMPRESSOR_PAGER_STATE_CLR
2248 * below for this page, which drops any existing compressor
2249 * storage of this page (eg side-effect of a CoW operation or
2250 * a collapse operation), it is tempting to think that we should
2251 * treat this page as if it was just decompressed (during which
2252 * we also drop existing compressor storage) and so start its life
2253 * out with vmp_unmodified_ro set to FALSE.
2254 *
2255 * However, we can't do that here because we could swing around
2256 * and re-access this page in a read-only fault.
2257 * Clearing this bit means we'll try to zero it up above
2258 * and fail.
2259 *
2260 * Note that clearing the bit is unnecessary regardless because
2261 * dirty state has been cleared. During the next soft fault, the
2262 * right state will be restored and things will progress just fine.
2263 */
2264 if (m->vmp_unmodified_ro == true) {
2265 /* Need object and pageq locks for bit manipulation*/
2266 m->vmp_unmodified_ro = false;
2267 os_atomic_dec(&compressor_ro_uncompressed);
2268 }
2269 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2270 #endif /* 0 */
2271 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2272
2273 if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2274 assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2275 assert(!object->all_reusable);
2276 m->vmp_reusable = TRUE;
2277 object->reusable_page_count++;
2278 assert(object->resident_page_count >= object->reusable_page_count);
2279 reusable++;
2280 /*
2281 * Tell pmap this page is now
2282 * "reusable" (to update pmap
2283 * stats for all mappings).
2284 */
2285 pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2286 }
2287 }
2288 if (flags & DEACTIVATE_CLEAR_REFMOD) {
2289 /*
2290 * The caller didn't clear the refmod bits in advance.
2291 * Clear them for this page now.
2292 */
2293 pmap_options |= PMAP_OPTIONS_NOFLUSH;
2294 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2295 clear_refmod_mask,
2296 pmap_options,
2297 (void *)pfc);
2298 }
2299
2300 if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2301 !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2302 dwp->dw_mask |= DW_move_page;
2303 }
2304
2305 if (dwp->dw_mask) {
2306 VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2307 dw_count);
2308 }
2309
2310 if (dw_count >= dw_limit) {
2311 if (reusable) {
2312 OSAddAtomic(reusable,
2313 &vm_page_stats_reusable.reusable_count);
2314 vm_page_stats_reusable.reusable += reusable;
2315 reusable = 0;
2316 }
2317 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2318
2319 dwp = dwp_start;
2320 dw_count = 0;
2321 }
2322 }
2323 } else {
2324 /*
2325 * The page at this offset isn't memory resident, check to see if it's
2326 * been paged out. If so, mark it as handled so we don't bother looking
2327 * for it in the shadow chain.
2328 */
2329
2330 if (page_is_paged_out(object, offset)) {
2331 MARK_PAGE_HANDLED(*chunk_state, p);
2332
2333 /*
2334 * If we're killing a non-resident page, then clear the page in the existence
2335 * map so we don't bother paging it back in if it's touched again in the future.
2336 */
2337
2338 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2339 VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2340
2341 if (pmap != PMAP_NULL) {
2342 /*
2343 * Tell pmap that this page
2344 * is no longer mapped, to
2345 * adjust the footprint ledger
2346 * because this page is no
2347 * longer compressed.
2348 */
2349 pmap_remove_options(
2350 pmap,
2351 pmap_offset,
2352 (pmap_offset +
2353 PAGE_SIZE),
2354 PMAP_OPTIONS_REMOVE);
2355 }
2356 }
2357 }
2358 }
2359 }
2360
2361 if (reusable) {
2362 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2363 vm_page_stats_reusable.reusable += reusable;
2364 reusable = 0;
2365 }
2366
2367 if (dw_count) {
2368 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2369 dwp = dwp_start;
2370 dw_count = 0;
2371 }
2372
2373 if (dwp_start && dwp_finish_ctx) {
2374 vm_page_delayed_work_finish_ctx(dwp_start);
2375 dwp_start = dwp = NULL;
2376 }
2377 }
2378
2379
2380 /*
2381 * Deactive a "chunk" of the given range of the object starting at offset. A "chunk"
2382 * will always be less than or equal to the given size. The total range is divided up
2383 * into chunks for efficiency and performance related to the locks and handling the shadow
2384 * chain. This routine returns how much of the given "size" it actually processed. It's
2385 * up to the caler to loop and keep calling this routine until the entire range they want
2386 * to process has been done.
2387 * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range.
2388 */
2389
2390 static vm_object_size_t
2391 deactivate_a_chunk(
2392 vm_object_t orig_object,
2393 vm_object_offset_t offset,
2394 vm_object_size_t size,
2395 deactivate_flags_t flags,
2396 pmap_flush_context *pfc,
2397 struct pmap *pmap,
2398 vm_map_offset_t pmap_offset)
2399 {
2400 vm_object_t object;
2401 vm_object_t tmp_object;
2402 vm_object_size_t length;
2403 chunk_state_t chunk_state;
2404
2405
2406 /*
2407 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2408 * remaining size the caller asked for.
2409 */
2410
2411 length = MIN(size, CHUNK_SIZE);
2412
2413 /*
2414 * The chunk_state keeps track of which pages we've already processed if there's
2415 * a shadow chain on this object. At this point, we haven't done anything with this
2416 * range of pages yet, so initialize the state to indicate no pages processed yet.
2417 */
2418
2419 CHUNK_INIT(chunk_state, length);
2420 object = orig_object;
2421
2422 /*
2423 * Start at the top level object and iterate around the loop once for each object
2424 * in the shadow chain. We stop processing early if we've already found all the pages
2425 * in the range. Otherwise we stop when we run out of shadow objects.
2426 */
2427
2428 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2429 vm_object_paging_begin(object);
2430
2431 deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2432
2433 vm_object_paging_end(object);
2434
2435 /*
2436 * We've finished with this object, see if there's a shadow object. If
2437 * there is, update the offset and lock the new object. We also turn off
2438 * kill_page at this point since we only kill pages in the top most object.
2439 */
2440
2441 tmp_object = object->shadow;
2442
2443 if (tmp_object) {
2444 assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2445 flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2446 offset += object->vo_shadow_offset;
2447 vm_object_lock(tmp_object);
2448 }
2449
2450 if (object != orig_object) {
2451 vm_object_unlock(object);
2452 }
2453
2454 object = tmp_object;
2455 }
2456
2457 if (object && object != orig_object) {
2458 vm_object_unlock(object);
2459 }
2460
2461 return length;
2462 }
2463
2464
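/*
 * Illustrative sketch (editor's addition) of the calling pattern
 * described above -- keep calling deactivate_a_chunk() until the whole
 * range has been consumed.  This mirrors the real loop in
 * vm_object_deactivate_pages() below:
 *
 *	while (size) {
 *		length = deactivate_a_chunk(object, offset, size, flags,
 *		    &pmap_flush_context_storage, pmap, pmap_offset);
 *		size -= length;
 *		offset += length;
 *		pmap_offset += length;
 *	}
 */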
2465
2466 /*
2467 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2468 * we also clear the modified status of the page and "forget" any changes that have been made
2469 * to the page.
2470 */
2471
2472 __private_extern__ void
2473 vm_object_deactivate_pages(
2474 vm_object_t object,
2475 vm_object_offset_t offset,
2476 vm_object_size_t size,
2477 boolean_t kill_page,
2478 boolean_t reusable_page,
2479 boolean_t reusable_no_write,
2480 struct pmap *pmap,
2481 vm_map_offset_t pmap_offset)
2482 {
2483 vm_object_size_t length;
2484 boolean_t all_reusable;
2485 pmap_flush_context pmap_flush_context_storage;
2486 unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2487 unsigned int pmap_clear_refmod_options = 0;
2488 deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2489 bool refmod_cleared = false;
2490 if (kill_page) {
2491 flags |= DEACTIVATE_KILL;
2492 }
2493 if (reusable_page) {
2494 flags |= DEACTIVATE_REUSABLE;
2495 }
2496 if (reusable_no_write) {
2497 flags |= DEACTIVATE_REUSABLE_NO_WRITE;
2498 }
2499
2500 /*
2501 * We break the range up into chunks and do one chunk at a time. This is for
2502 * efficiency and performance while handling the shadow chains and the locks.
2503 * The deactivate_a_chunk() function returns how much of the range it processed.
2504 * We keep calling this routine until the given size is exhausted.
2505 */
2506
2507
2508 all_reusable = FALSE;
2509 #if 11
2510 /*
2511 * For the sake of accurate "reusable" pmap stats, we need
2512 * to tell pmap about each page that is no longer "reusable",
2513 * so we can't do the "all_reusable" optimization.
2514 *
2515 * If we did go with the all_reusable optimization, we couldn't
2516 * return here when size is 0, since we could have "all_reusable == TRUE".
2517 * As it is, returning early saves the overhead of doing the
2518 * pmap_flush_context work.
2519 */
2520 if (size == 0) {
2521 return;
2522 }
2523 #else
2524 if (reusable_page &&
2525 object->internal &&
2526 object->vo_size != 0 &&
2527 object->vo_size == size &&
2528 object->reusable_page_count == 0) {
2529 all_reusable = TRUE;
2530 reusable_page = FALSE;
2531 flags |= DEACTIVATE_ALL_REUSABLE;
2532 }
2533 #endif
2534
2535 if ((reusable_page || all_reusable) && object->all_reusable) {
2536 /* This means MADV_FREE_REUSABLE has been called twice, which
2537 * is probably illegal. */
2538 return;
2539 }
2540
2541
2542 pmap_flush_context_init(&pmap_flush_context_storage);
2543
2544 /*
2545 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2546 * We can't do this if we're killing pages and there's a shadow chain as
2547 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2548 * safe to kill).
2549 * And we can only do this on hardware that supports it.
2550 */
2551 if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2552 if (kill_page && object->internal) {
2553 pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2554 }
2555 if (reusable_page) {
2556 pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2557 }
2558
2559 refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2560 if (refmod_cleared) {
2561 // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2562 flags &= ~DEACTIVATE_CLEAR_REFMOD;
2563 }
2564 }
2565
2566 while (size) {
2567 length = deactivate_a_chunk(object, offset, size, flags,
2568 &pmap_flush_context_storage, pmap, pmap_offset);
2569
2570 size -= length;
2571 offset += length;
2572 pmap_offset += length;
2573 }
2574 pmap_flush(&pmap_flush_context_storage);
2575
2576 if (all_reusable) {
2577 if (!object->all_reusable) {
2578 unsigned int reusable;
2579
2580 object->all_reusable = TRUE;
2581 assert(object->reusable_page_count == 0);
2582 /* update global stats */
2583 reusable = object->resident_page_count;
2584 OSAddAtomic(reusable,
2585 &vm_page_stats_reusable.reusable_count);
2586 vm_page_stats_reusable.reusable += reusable;
2587 vm_page_stats_reusable.all_reusable_calls++;
2588 }
2589 } else if (reusable_page) {
2590 vm_page_stats_reusable.partial_reusable_calls++;
2591 }
2592 }
2593
2594 void
2595 vm_object_reuse_pages(
2596 vm_object_t object,
2597 vm_object_offset_t start_offset,
2598 vm_object_offset_t end_offset,
2599 boolean_t allow_partial_reuse)
2600 {
2601 vm_object_offset_t cur_offset;
2602 vm_page_t m;
2603 unsigned int reused, reusable;
2604
2605 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
2606 MACRO_BEGIN \
2607 if ((m) != VM_PAGE_NULL && \
2608 (m)->vmp_reusable) { \
2609 assert((object)->reusable_page_count <= \
2610 (object)->resident_page_count); \
2611 assert((object)->reusable_page_count > 0); \
2612 (object)->reusable_page_count--; \
2613 (m)->vmp_reusable = FALSE; \
2614 (reused)++; \
2615 /* \
2616 * Tell pmap that this page is no longer \
2617 * "reusable", to update the "reusable" stats \
2618 * for all the pmaps that have mapped this \
2619 * page. \
2620 */ \
2621 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2622 0, /* refmod */ \
2623 (PMAP_OPTIONS_CLEAR_REUSABLE \
2624 | PMAP_OPTIONS_NOFLUSH), \
2625 NULL); \
2626 } \
2627 MACRO_END
2628
2629 reused = 0;
2630 reusable = 0;
2631
2632 vm_object_lock_assert_exclusive(object);
2633
2634 if (object->all_reusable) {
2635 panic("object %p all_reusable: can't update pmap stats",
2636 object);
2637 assert(object->reusable_page_count == 0);
2638 object->all_reusable = FALSE;
2639 if (end_offset - start_offset == object->vo_size ||
2640 !allow_partial_reuse) {
2641 vm_page_stats_reusable.all_reuse_calls++;
2642 reused = object->resident_page_count;
2643 } else {
2644 vm_page_stats_reusable.partial_reuse_calls++;
2645 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2646 if (m->vmp_offset < start_offset ||
2647 m->vmp_offset >= end_offset) {
2648 m->vmp_reusable = TRUE;
2649 object->reusable_page_count++;
2650 assert(object->resident_page_count >= object->reusable_page_count);
2651 continue;
2652 } else {
2653 assert(!m->vmp_reusable);
2654 reused++;
2655 }
2656 }
2657 }
2658 } else if (object->resident_page_count >
2659 ((end_offset - start_offset) >> PAGE_SHIFT)) {
2660 vm_page_stats_reusable.partial_reuse_calls++;
2661 for (cur_offset = start_offset;
2662 cur_offset < end_offset;
2663 cur_offset += PAGE_SIZE_64) {
2664 if (object->reusable_page_count == 0) {
2665 break;
2666 }
2667 m = vm_page_lookup(object, cur_offset);
2668 VM_OBJECT_REUSE_PAGE(object, m, reused);
2669 }
2670 } else {
2671 vm_page_stats_reusable.partial_reuse_calls++;
2672 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2673 if (object->reusable_page_count == 0) {
2674 break;
2675 }
2676 if (m->vmp_offset < start_offset ||
2677 m->vmp_offset >= end_offset) {
2678 continue;
2679 }
2680 VM_OBJECT_REUSE_PAGE(object, m, reused);
2681 }
2682 }
2683
2684 /* update global stats */
2685 OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2686 vm_page_stats_reusable.reused += reused;
2687 vm_page_stats_reusable.reusable += reusable;
2688 }
2689
2690 /*
2691 * Routine: vm_object_pmap_protect
2692 *
2693 * Purpose:
2694 * Reduces the permission for all physical
2695 * pages in the specified object range.
2696 *
2697 * If removing write permission only, it is
2698 * sufficient to protect only the pages in
2699 * the top-level object; only those pages may
2700 * have write permission.
2701 *
2702 * If removing all access, we must follow the
2703 * shadow chain from the top-level object to
2704 * remove access to all pages in shadowed objects.
2705 *
2706 * The object must *not* be locked. The object must
2707 * be internal.
2708 *
2709 * If pmap is not NULL, this routine assumes that
2710 * the only mappings for the pages are in that
2711 * pmap.
2712 */
2713
2714 __private_extern__ void
2715 vm_object_pmap_protect(
2716 vm_object_t object,
2717 vm_object_offset_t offset,
2718 vm_object_size_t size,
2719 pmap_t pmap,
2720 vm_map_size_t pmap_page_size,
2721 vm_map_offset_t pmap_start,
2722 vm_prot_t prot)
2723 {
2724 vm_object_pmap_protect_options(object, offset, size, pmap,
2725 pmap_page_size,
2726 pmap_start, prot, 0);
2727 }
2728
2729 __private_extern__ void
2730 vm_object_pmap_protect_options(
2731 vm_object_t object,
2732 vm_object_offset_t offset,
2733 vm_object_size_t size,
2734 pmap_t pmap,
2735 vm_map_size_t pmap_page_size,
2736 vm_map_offset_t pmap_start,
2737 vm_prot_t prot,
2738 int options)
2739 {
2740 pmap_flush_context pmap_flush_context_storage;
2741 boolean_t delayed_pmap_flush = FALSE;
2742 vm_object_offset_t offset_in_object;
2743 vm_object_size_t size_in_object;
2744
2745 if (object == VM_OBJECT_NULL) {
2746 return;
2747 }
2748 if (pmap_page_size > PAGE_SIZE) {
2749 /* for 16K map on 4K device... */
2750 pmap_page_size = PAGE_SIZE;
2751 }
2752 /*
2753 * If we decide to work on the object itself, extend the range to
2754 * cover a full number of native pages.
2755 */
2756 size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
2757 offset_in_object = vm_object_trunc_page(offset);
2758 /*
2759 * If we decide to work on the pmap, use the exact range specified,
2760 * so no rounding/truncating offset and size. They should already
2761 * be aligned to pmap_page_size.
2762 */
2763 assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
2764 "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
2765 offset, size, (uint64_t)pmap_page_size);
2766
2767 vm_object_lock(object);
2768
2769 if (object->phys_contiguous) {
2770 if (pmap != NULL) {
2771 vm_object_unlock(object);
2772 pmap_protect_options(pmap,
2773 pmap_start,
2774 pmap_start + size,
2775 prot,
2776 options & ~PMAP_OPTIONS_NOFLUSH,
2777 NULL);
2778 } else {
2779 vm_object_offset_t phys_start, phys_end, phys_addr;
2780
2781 phys_start = object->vo_shadow_offset + offset_in_object;
2782 phys_end = phys_start + size_in_object;
2783 assert(phys_start <= phys_end);
2784 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
2785 vm_object_unlock(object);
2786
2787 pmap_flush_context_init(&pmap_flush_context_storage);
2788 delayed_pmap_flush = FALSE;
2789
2790 for (phys_addr = phys_start;
2791 phys_addr < phys_end;
2792 phys_addr += PAGE_SIZE_64) {
2793 pmap_page_protect_options(
2794 (ppnum_t) (phys_addr >> PAGE_SHIFT),
2795 prot,
2796 options | PMAP_OPTIONS_NOFLUSH,
2797 (void *)&pmap_flush_context_storage);
2798 delayed_pmap_flush = TRUE;
2799 }
2800 if (delayed_pmap_flush == TRUE) {
2801 pmap_flush(&pmap_flush_context_storage);
2802 }
2803 }
2804 return;
2805 }
2806
2807 assert(object->internal);
2808
2809 while (TRUE) {
2810 if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
2811 vm_object_unlock(object);
2812 if (pmap_page_size < PAGE_SIZE) {
2813 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
2814 }
2815 pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
2816 options & ~PMAP_OPTIONS_NOFLUSH, NULL);
2817 return;
2818 }
2819
2820 if (pmap_page_size < PAGE_SIZE) {
2821 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
2822 }
2823
2824 pmap_flush_context_init(&pmap_flush_context_storage);
2825 delayed_pmap_flush = FALSE;
2826
2827 /*
2828 * if we are doing large ranges with respect to the resident
2829 * page count then we should iterate over the resident pages,
2830 * otherwise an inverse page look-up will be faster
2831 */
2832 if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
2833 vm_page_t p;
2834 vm_object_offset_t end;
2835
2836 end = offset_in_object + size_in_object;
2837
2838 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
2839 if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) {
2840 vm_map_offset_t start;
2841
2842 /*
2843 * XXX FBDP 4K: intentionally using "offset" here instead
2844 * of "offset_in_object", since "start" is a pmap address.
2845 */
2846 start = pmap_start + p->vmp_offset - offset;
2847
2848 if (pmap != PMAP_NULL) {
2849 vm_map_offset_t curr;
2850 for (curr = start;
2851 curr < start + PAGE_SIZE_64;
2852 curr += pmap_page_size) {
2853 if (curr < pmap_start) {
2854 continue;
2855 }
2856 if (curr >= pmap_start + size) {
2857 break;
2858 }
2859 pmap_protect_options(
2860 pmap,
2861 curr,
2862 curr + pmap_page_size,
2863 prot,
2864 options | PMAP_OPTIONS_NOFLUSH,
2865 &pmap_flush_context_storage);
2866 }
2867 } else {
2868 pmap_page_protect_options(
2869 VM_PAGE_GET_PHYS_PAGE(p),
2870 prot,
2871 options | PMAP_OPTIONS_NOFLUSH,
2872 &pmap_flush_context_storage);
2873 }
2874 delayed_pmap_flush = TRUE;
2875 }
2876 }
2877 } else {
2878 vm_page_t p;
2879 vm_object_offset_t end;
2880 vm_object_offset_t target_off;
2881
2882 end = offset_in_object + size_in_object;
2883
2884 for (target_off = offset_in_object;
2885 target_off < end; target_off += PAGE_SIZE) {
2886 p = vm_page_lookup(object, target_off);
2887
2888 if (p != VM_PAGE_NULL) {
2889 vm_object_offset_t start;
2890
2891 /*
2892 * XXX FBDP 4K: intentionally using "offset" here instead
2893 * of "offset_in_object", since "start" is a pmap address.
2894 */
2895 start = pmap_start + (p->vmp_offset - offset);
2896
2897 if (pmap != PMAP_NULL) {
2898 vm_map_offset_t curr;
2899 for (curr = start;
2900 curr < start + PAGE_SIZE;
2901 curr += pmap_page_size) {
2902 if (curr < pmap_start) {
2903 continue;
2904 }
2905 if (curr >= pmap_start + size) {
2906 break;
2907 }
2908 pmap_protect_options(
2909 pmap,
2910 curr,
2911 curr + pmap_page_size,
2912 prot,
2913 options | PMAP_OPTIONS_NOFLUSH,
2914 &pmap_flush_context_storage);
2915 }
2916 } else {
2917 pmap_page_protect_options(
2918 VM_PAGE_GET_PHYS_PAGE(p),
2919 prot,
2920 options | PMAP_OPTIONS_NOFLUSH,
2921 &pmap_flush_context_storage);
2922 }
2923 delayed_pmap_flush = TRUE;
2924 }
2925 }
2926 }
2927 if (delayed_pmap_flush == TRUE) {
2928 pmap_flush(&pmap_flush_context_storage);
2929 }
2930
2931 if (prot == VM_PROT_NONE) {
2932 /*
2933 * Must follow shadow chain to remove access
2934 * to pages in shadowed objects.
2935 */
2936 vm_object_t next_object;
2937
2938 next_object = object->shadow;
2939 if (next_object != VM_OBJECT_NULL) {
2940 offset_in_object += object->vo_shadow_offset;
2941 offset += object->vo_shadow_offset;
2942 vm_object_lock(next_object);
2943 vm_object_unlock(object);
2944 object = next_object;
2945 } else {
2946 /*
2947 * End of chain - we are done.
2948 */
2949 break;
2950 }
2951 } else {
2952 /*
2953 * Pages in shadowed objects may never have
2954 * write permission - we may stop here.
2955 */
2956 break;
2957 }
2958 }
2959
2960 vm_object_unlock(object);
2961 }
2962
2963 uint32_t vm_page_busy_absent_skipped = 0;
2964
2965 /*
2966 * Routine: vm_object_copy_slowly
2967 *
2968 * Description:
2969 * Copy the specified range of the source
2970 * virtual memory object without using
2971 * protection-based optimizations (such
2972 * as copy-on-write). The pages in the
2973 * region are actually copied.
2974 *
2975 * In/out conditions:
2976 * The caller must hold a reference and a lock
2977 * for the source virtual memory object. The source
2978 * object will be returned *unlocked*.
2979 *
2980 * Results:
2981 * If the copy is completed successfully, KERN_SUCCESS is
2982 * returned. If the caller asserted the interruptible
2983 * argument, and an interruption occurred while waiting
2984 * for a user-generated event, MACH_SEND_INTERRUPTED is
2985 * returned. Other values may be returned to indicate
2986 * hard errors during the copy operation.
2987 *
2988 * A new virtual memory object is returned in a
2989 * parameter (_result_object). The contents of this
2990 * new object, starting at a zero offset, are a copy
2991 * of the source memory region. In the event of
2992 * an error, this parameter will contain the value
2993 * VM_OBJECT_NULL.
2994 */
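/*
 * Illustrative call sketch (editor's addition; "kr" and "new_object"
 * are locals of a hypothetical caller): this mirrors how
 * vm_object_copy_strategically() below drives this routine for the
 * MEMORY_OBJECT_COPY_NONE strategy.  The caller holds a reference and
 * the lock on "src_object"; the routine returns it unlocked:
 *
 *	vm_object_t new_object;
 *	kern_return_t kr;
 *
 *	vm_object_lock(src_object);
 *	kr = vm_object_copy_slowly(src_object, src_offset, size,
 *	    interruptible, &new_object);
 *	if (kr == KERN_SUCCESS) {
 *		... pages [src_offset, src_offset + size) have been
 *		    copied into "new_object", starting at offset 0 ...
 *	}
 */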
2995 __private_extern__ kern_return_t
2996 vm_object_copy_slowly(
2997 vm_object_t src_object,
2998 vm_object_offset_t src_offset,
2999 vm_object_size_t size,
3000 boolean_t interruptible,
3001 vm_object_t *_result_object) /* OUT */
3002 {
3003 vm_object_t new_object;
3004 vm_object_offset_t new_offset;
3005
3006 struct vm_object_fault_info fault_info = {};
3007
3008 if (size == 0) {
3009 vm_object_unlock(src_object);
3010 *_result_object = VM_OBJECT_NULL;
3011 return KERN_INVALID_ARGUMENT;
3012 }
3013
3014 /*
3015 * Prevent destruction of the source object while we copy.
3016 */
3017
3018 vm_object_reference_locked(src_object);
3019 vm_object_unlock(src_object);
3020
3021 /*
3022 * Create a new object to hold the copied pages.
3023 * A few notes:
3024 * We fill the new object starting at offset 0,
3025 * regardless of the input offset.
3026 * We don't bother to lock the new object within
3027 * this routine, since we have the only reference.
3028 */
3029
3030 size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
3031 src_offset = vm_object_trunc_page(src_offset);
3032 new_object = vm_object_allocate(size);
3033 new_offset = 0;
3034
3035 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
3036
3037 fault_info.interruptible = interruptible;
3038 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
3039 fault_info.lo_offset = src_offset;
3040 fault_info.hi_offset = src_offset + size;
3041 fault_info.stealth = TRUE;
3042
3043 for (;
3044 size != 0;
3045 src_offset += PAGE_SIZE_64,
3046 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3047 ) {
3048 vm_page_t new_page;
3049 vm_fault_return_t result;
3050
3051 vm_object_lock(new_object);
3052
3053 while ((new_page = vm_page_alloc(new_object, new_offset))
3054 == VM_PAGE_NULL) {
3055 vm_object_unlock(new_object);
3056
3057 if (!vm_page_wait(interruptible)) {
3058 vm_object_deallocate(new_object);
3059 vm_object_deallocate(src_object);
3060 *_result_object = VM_OBJECT_NULL;
3061 return MACH_SEND_INTERRUPTED;
3062 }
3063 vm_object_lock(new_object);
3064 }
3065 vm_object_unlock(new_object);
3066
3067 do {
3068 vm_prot_t prot = VM_PROT_READ;
3069 vm_page_t _result_page;
3070 vm_page_t top_page;
3071 vm_page_t result_page;
3072 kern_return_t error_code;
3073 vm_object_t result_page_object;
3074
3075
3076 vm_object_lock(src_object);
3077
3078 if (src_object->internal &&
3079 src_object->shadow == VM_OBJECT_NULL &&
3080 (src_object->pager == NULL ||
3081 (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3082 src_offset) ==
3083 VM_EXTERNAL_STATE_ABSENT))) {
3084 boolean_t can_skip_page;
3085
3086 _result_page = vm_page_lookup(src_object,
3087 src_offset);
3088 if (_result_page == VM_PAGE_NULL) {
3089 /*
3090 * This page is neither resident nor
3091 * compressed and there's no shadow
3092 * object below "src_object", so this
3093 * page is really missing.
3094 * There's no need to zero-fill it just
3095 * to copy it: let's leave it missing
3096 * in "new_object" and get zero-filled
3097 * on demand.
3098 */
3099 can_skip_page = TRUE;
3100 } else if (workaround_41447923 &&
3101 src_object->pager == NULL &&
3102 _result_page != VM_PAGE_NULL &&
3103 _result_page->vmp_busy &&
3104 _result_page->vmp_absent &&
3105 src_object->purgable == VM_PURGABLE_DENY &&
3106 !src_object->blocked_access) {
3107 /*
3108 * This page is "busy" and "absent"
3109 * but not because we're waiting for
3110 * it to be decompressed. It must
3111 * be because it's a "no zero fill"
3112 * page that is currently not
3113 * accessible until it gets overwritten
3114 * by a device driver.
3115 * Since its initial state would have
3116 * been "zero-filled", let's leave the
3117 * copy page missing and get zero-filled
3118 * on demand.
3119 */
3120 assert(src_object->internal);
3121 assert(src_object->shadow == NULL);
3122 assert(src_object->pager == NULL);
3123 can_skip_page = TRUE;
3124 vm_page_busy_absent_skipped++;
3125 } else {
3126 can_skip_page = FALSE;
3127 }
3128 if (can_skip_page) {
3129 vm_object_unlock(src_object);
3130 /* free the unused "new_page"... */
3131 vm_object_lock(new_object);
3132 VM_PAGE_FREE(new_page);
3133 new_page = VM_PAGE_NULL;
3134 vm_object_unlock(new_object);
3135 /* ...and go to next page in "src_object" */
3136 result = VM_FAULT_SUCCESS;
3137 break;
3138 }
3139 }
3140
3141 vm_object_paging_begin(src_object);
3142
3143 /* cap size at maximum UPL size */
3144 upl_size_t cluster_size;
3145 if (os_convert_overflow(size, &cluster_size)) {
3146 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3147 }
3148 fault_info.cluster_size = cluster_size;
3149
3150 _result_page = VM_PAGE_NULL;
3151 result = vm_fault_page(src_object, src_offset,
3152 VM_PROT_READ, FALSE,
3153 FALSE, /* page not looked up */
3154 &prot, &_result_page, &top_page,
3155 (int *)0,
3156 &error_code, FALSE, &fault_info);
3157
3158 switch (result) {
3159 case VM_FAULT_SUCCESS:
3160 result_page = _result_page;
3161 result_page_object = VM_PAGE_OBJECT(result_page);
3162
3163 /*
3164 * Copy the page to the new object.
3165 *
3166 * POLICY DECISION:
3167 * If result_page is clean,
3168 * we could steal it instead
3169 * of copying.
3170 */
3171
3172 vm_page_copy(result_page, new_page);
3173 vm_object_unlock(result_page_object);
3174
3175 /*
3176 * Let go of both pages (make them
3177 * not busy, perform wakeup, activate).
3178 */
3179 vm_object_lock(new_object);
3180 SET_PAGE_DIRTY(new_page, FALSE);
3181 PAGE_WAKEUP_DONE(new_page);
3182 vm_object_unlock(new_object);
3183
3184 vm_object_lock(result_page_object);
3185 PAGE_WAKEUP_DONE(result_page);
3186
3187 vm_page_lockspin_queues();
3188 if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3189 (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3190 vm_page_activate(result_page);
3191 }
3192 vm_page_activate(new_page);
3193 vm_page_unlock_queues();
3194
3195 /*
3196 * Release paging references and
3197 * top-level placeholder page, if any.
3198 */
3199
3200 vm_fault_cleanup(result_page_object,
3201 top_page);
3202
3203 break;
3204
3205 case VM_FAULT_RETRY:
3206 break;
3207
3208 case VM_FAULT_MEMORY_SHORTAGE:
3209 if (vm_page_wait(interruptible)) {
3210 break;
3211 }
3212 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3213 OS_FALLTHROUGH;
3214
3215 case VM_FAULT_INTERRUPTED:
3216 vm_object_lock(new_object);
3217 VM_PAGE_FREE(new_page);
3218 vm_object_unlock(new_object);
3219
3220 vm_object_deallocate(new_object);
3221 vm_object_deallocate(src_object);
3222 *_result_object = VM_OBJECT_NULL;
3223 return MACH_SEND_INTERRUPTED;
3224
3225 case VM_FAULT_SUCCESS_NO_VM_PAGE:
3226 /* success but no VM page: fail */
3227 vm_object_paging_end(src_object);
3228 vm_object_unlock(src_object);
3229 OS_FALLTHROUGH;
3230 case VM_FAULT_MEMORY_ERROR:
3231 /*
3232 * A policy choice:
3233 * (a) ignore pages that we can't
3234 * copy
3235 * (b) return the null object if
3236 * any page fails [chosen]
3237 */
3238
3239 vm_object_lock(new_object);
3240 VM_PAGE_FREE(new_page);
3241 vm_object_unlock(new_object);
3242
3243 vm_object_deallocate(new_object);
3244 vm_object_deallocate(src_object);
3245 *_result_object = VM_OBJECT_NULL;
3246 return error_code ? error_code:
3247 KERN_MEMORY_ERROR;
3248
3249 default:
3250 panic("vm_object_copy_slowly: unexpected error"
3251 " 0x%x from vm_fault_page()\n", result);
3252 }
3253 } while (result != VM_FAULT_SUCCESS);
3254 }
3255
3256 /*
3257 * Lose the extra reference, and return our object.
3258 */
3259 vm_object_deallocate(src_object);
3260 *_result_object = new_object;
3261 return KERN_SUCCESS;
3262 }
3263
3264 /*
3265 * Routine: vm_object_copy_quickly
3266 *
3267 * Purpose:
3268 * Copy the specified range of the source virtual
3269 * memory object, if it can be done without waiting
3270 * for user-generated events.
3271 *
3272 * Results:
3273 * If the copy is successful, the copy is returned in
3274 * the arguments; otherwise, the arguments are not
3275 * affected.
3276 *
3277 * In/out conditions:
3278 * The object should be unlocked on entry and exit.
3279 */
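/*
 * Illustrative usage sketch (editor's addition; the locals and the
 * fallback path belong to a hypothetical caller and are not lifted
 * from a specific call site):
 *
 *	if (vm_object_copy_quickly(object, offset, size,
 *	        &src_needs_copy, &dst_needs_copy)) {
 *		if (src_needs_copy) {
 *			... write-protect the source mapping, e.g. via
 *			    vm_object_pmap_protect(), and mark it
 *			    needs_copy ...
 *		}
 *		... both mappings now share "object" symmetrically ...
 *	} else {
 *		... fall back to vm_object_copy_strategically() /
 *		    vm_object_copy_slowly() ...
 *	}
 */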
3280
3281 /*ARGSUSED*/
3282 __private_extern__ boolean_t
3283 vm_object_copy_quickly(
3284 vm_object_t object, /* IN */
3285 __unused vm_object_offset_t offset, /* IN */
3286 __unused vm_object_size_t size, /* IN */
3287 boolean_t *_src_needs_copy, /* OUT */
3288 boolean_t *_dst_needs_copy) /* OUT */
3289 {
3290 memory_object_copy_strategy_t copy_strategy;
3291
3292 if (object == VM_OBJECT_NULL) {
3293 *_src_needs_copy = FALSE;
3294 *_dst_needs_copy = FALSE;
3295 return TRUE;
3296 }
3297
3298 vm_object_lock(object);
3299
3300 copy_strategy = object->copy_strategy;
3301
3302 switch (copy_strategy) {
3303 case MEMORY_OBJECT_COPY_SYMMETRIC:
3304
3305 /*
3306 * Symmetric copy strategy.
3307 * Make another reference to the object.
3308 * Leave object/offset unchanged.
3309 */
3310
3311 vm_object_reference_locked(object);
3312 object->shadowed = TRUE;
3313 vm_object_unlock(object);
3314
3315 /*
3316 * Both source and destination must make
3317 * shadows, and the source must be made
3318 * read-only if not already.
3319 */
3320
3321 *_src_needs_copy = TRUE;
3322 *_dst_needs_copy = TRUE;
3323
3324 break;
3325
3326 case MEMORY_OBJECT_COPY_DELAY:
3327 vm_object_unlock(object);
3328 return FALSE;
3329
3330 default:
3331 vm_object_unlock(object);
3332 return FALSE;
3333 }
3334 return TRUE;
3335 }
3336
3337 static uint32_t copy_delayed_lock_collisions;
3338 static uint32_t copy_delayed_max_collisions;
3339 static uint32_t copy_delayed_lock_contention;
3340 static uint32_t copy_delayed_protect_iterate;
3341
3342 /*
3343 * Routine: vm_object_copy_delayed [internal]
3344 *
3345 * Description:
3346 * Copy the specified virtual memory object, using
3347 * the asymmetric copy-on-write algorithm.
3348 *
3349 * In/out conditions:
3350 * The src_object must be locked on entry. It will be unlocked
3351 * on exit - so the caller must also hold a reference to it.
3352 *
3353 * This routine will not block waiting for user-generated
3354 * events. It is not interruptible.
3355 */
3356 __private_extern__ vm_object_t
3357 vm_object_copy_delayed(
3358 vm_object_t src_object,
3359 vm_object_offset_t src_offset,
3360 vm_object_size_t size,
3361 boolean_t src_object_shared)
3362 {
3363 vm_object_t new_copy = VM_OBJECT_NULL;
3364 vm_object_t old_copy;
3365 vm_page_t p;
3366 vm_object_size_t copy_size = src_offset + size;
3367 pmap_flush_context pmap_flush_context_storage;
3368 boolean_t delayed_pmap_flush = FALSE;
3369
3370
3371 uint32_t collisions = 0;
3372 /*
3373 * The user-level memory manager wants to see all of the changes
3374 * to this object, but it has promised not to make any changes on
3375 * its own.
3376 *
3377 * Perform an asymmetric copy-on-write, as follows:
3378 * Create a new object, called a "copy object" to hold
3379 * pages modified by the new mapping (i.e., the copy,
3380 * not the original mapping).
3381 * Record the original object as the backing object for
3382 * the copy object. If the original mapping does not
3383 * change a page, it may be used read-only by the copy.
3384 * Record the copy object in the original object.
3385 * When the original mapping causes a page to be modified,
3386 * it must be copied to a new page that is "pushed" to
3387 * the copy object.
3388 * Mark the new mapping (the copy object) copy-on-write.
3389 * This makes the copy object itself read-only, allowing
3390 * it to be reused if the original mapping makes no
3391 * changes, and simplifying the synchronization required
3392 * in the "push" operation described above.
3393 *
3394 * The copy-on-write is said to be asymmetric because the original
3395 * object is *not* marked copy-on-write. A copied page is pushed
3396 * to the copy object, regardless of which party attempted to modify
3397 * the page.
3398 *
3399 * Repeated asymmetric copy operations may be done. If the
3400 * original object has not been changed since the last copy, its
3401 * copy object can be reused. Otherwise, a new copy object can be
3402 * inserted between the original object and its previous copy
3403 * object. Since any copy object is read-only, this cannot affect
3404 * the contents of the previous copy object.
3405 *
3406 * Note that a copy object is higher in the object tree than the
3407 * original object; therefore, use of the copy object recorded in
3408 * the original object must be done carefully, to avoid deadlock.
3409 */
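/*
 * Editor's sketch of the resulting linkage (added for clarity, derived
 * from the code below): after this routine hands back "new_copy",
 *
 *	new_copy->shadow      == src_object   (the copy reads through to
 *	                                       the original)
 *	src_object->vo_copy   == new_copy     (pages are pushed here
 *	                                       before src_object's pages
 *	                                       are modified)
 *
 * and only the new mapping (new_copy) is marked as needing copy by the
 * caller; the original object itself is never marked copy-on-write.
 */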
3410
3411 copy_size = vm_object_round_page(copy_size);
3412 Retry:
3413
3414 /*
3415 * Wait for paging in progress.
3416 */
3417 if (!src_object->true_share &&
3418 (src_object->paging_in_progress != 0 ||
3419 src_object->activity_in_progress != 0)) {
3420 if (src_object_shared == TRUE) {
3421 vm_object_unlock(src_object);
3422 vm_object_lock(src_object);
3423 src_object_shared = FALSE;
3424 goto Retry;
3425 }
3426 vm_object_paging_wait(src_object, THREAD_UNINT);
3427 }
3428 /*
3429 * See whether we can reuse the result of a previous
3430 * copy operation.
3431 */
3432
3433 old_copy = src_object->vo_copy;
3434 if (old_copy != VM_OBJECT_NULL) {
3435 int lock_granted;
3436
3437 /*
3438 * Try to get the locks (out of order)
3439 */
3440 if (src_object_shared == TRUE) {
3441 lock_granted = vm_object_lock_try_shared(old_copy);
3442 } else {
3443 lock_granted = vm_object_lock_try(old_copy);
3444 }
3445
3446 if (!lock_granted) {
3447 vm_object_unlock(src_object);
3448
3449 if (collisions++ == 0) {
3450 copy_delayed_lock_contention++;
3451 }
3452 mutex_pause(collisions);
3453
3454 /* Heisenberg Rules */
3455 copy_delayed_lock_collisions++;
3456
3457 if (collisions > copy_delayed_max_collisions) {
3458 copy_delayed_max_collisions = collisions;
3459 }
3460
3461 if (src_object_shared == TRUE) {
3462 vm_object_lock_shared(src_object);
3463 } else {
3464 vm_object_lock(src_object);
3465 }
3466
3467 goto Retry;
3468 }
3469
3470 /*
3471 * Determine whether the old copy object has
3472 * been modified.
3473 */
3474
3475 if (old_copy->resident_page_count == 0 &&
3476 !old_copy->pager_created) {
3477 /*
3478 * It has not been modified.
3479 *
3480 * Return another reference to
3481 * the existing copy-object if
3482 * we can safely grow it (if
3483 * needed).
3484 */
3485
3486 if (old_copy->vo_size < copy_size) {
3487 if (src_object_shared == TRUE) {
3488 vm_object_unlock(old_copy);
3489 vm_object_unlock(src_object);
3490
3491 vm_object_lock(src_object);
3492 src_object_shared = FALSE;
3493 goto Retry;
3494 }
3495 /*
3496 * We can't perform a delayed copy if any of the
3497 * pages in the extended range are wired (because
3498 * we can't safely take write permission away from
3499 * wired pages). If the pages aren't wired, then
3500 * go ahead and protect them.
3501 */
3502 copy_delayed_protect_iterate++;
3503
3504 pmap_flush_context_init(&pmap_flush_context_storage);
3505 delayed_pmap_flush = FALSE;
3506
3507 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3508 if (!p->vmp_fictitious &&
3509 p->vmp_offset >= old_copy->vo_size &&
3510 p->vmp_offset < copy_size) {
3511 if (VM_PAGE_WIRED(p)) {
3512 vm_object_unlock(old_copy);
3513 vm_object_unlock(src_object);
3514
3515 if (new_copy != VM_OBJECT_NULL) {
3516 vm_object_unlock(new_copy);
3517 vm_object_deallocate(new_copy);
3518 }
3519 if (delayed_pmap_flush == TRUE) {
3520 pmap_flush(&pmap_flush_context_storage);
3521 }
3522
3523 return VM_OBJECT_NULL;
3524 } else {
3525 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3526 (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3527 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3528 delayed_pmap_flush = TRUE;
3529 }
3530 }
3531 }
3532 if (delayed_pmap_flush == TRUE) {
3533 pmap_flush(&pmap_flush_context_storage);
3534 }
3535
3536 assertf(page_aligned(copy_size),
3537 "object %p size 0x%llx",
3538 old_copy, (uint64_t)copy_size);
3539 old_copy->vo_size = copy_size;
3540 }
3541 if (src_object_shared == TRUE) {
3542 vm_object_reference_shared(old_copy);
3543 } else {
3544 vm_object_reference_locked(old_copy);
3545 }
3546 vm_object_unlock(old_copy);
3547 vm_object_unlock(src_object);
3548
3549 if (new_copy != VM_OBJECT_NULL) {
3550 vm_object_unlock(new_copy);
3551 vm_object_deallocate(new_copy);
3552 }
3553 return old_copy;
3554 }
3555
3556
3557
3558 /*
3559 * Adjust the size argument so that the newly-created
3560 * copy object will be large enough to back either the
3561 * old copy object or the new mapping.
3562 */
3563 if (old_copy->vo_size > copy_size) {
3564 copy_size = old_copy->vo_size;
3565 }
3566
3567 if (new_copy == VM_OBJECT_NULL) {
3568 vm_object_unlock(old_copy);
3569 vm_object_unlock(src_object);
3570 new_copy = vm_object_allocate(copy_size);
3571 vm_object_lock(src_object);
3572 vm_object_lock(new_copy);
3573
3574 src_object_shared = FALSE;
3575 goto Retry;
3576 }
3577 assertf(page_aligned(copy_size),
3578 "object %p size 0x%llx",
3579 new_copy, (uint64_t)copy_size);
3580 new_copy->vo_size = copy_size;
3581
3582 /*
3583 * The copy-object is always made large enough to
3584 * completely shadow the original object, since
3585 * it may have several users who want to shadow
3586 * the original object at different points.
3587 */
3588
3589 assert((old_copy->shadow == src_object) &&
3590 (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
3591 } else if (new_copy == VM_OBJECT_NULL) {
3592 vm_object_unlock(src_object);
3593 new_copy = vm_object_allocate(copy_size);
3594 vm_object_lock(src_object);
3595 vm_object_lock(new_copy);
3596
3597 src_object_shared = FALSE;
3598 goto Retry;
3599 }
3600
3601 /*
3602 * We now have the src object locked, and the new copy object
3603 * allocated and locked (and potentially the old copy locked).
3604 * Before we go any further, make sure we can still perform
3605 * a delayed copy, as the situation may have changed.
3606 *
3607 * Specifically, we can't perform a delayed copy if any of the
3608 * pages in the range are wired (because we can't safely take
3609 * write permission away from wired pages). If the pages aren't
3610 * wired, then go ahead and protect them.
3611 */
3612 copy_delayed_protect_iterate++;
3613
3614 pmap_flush_context_init(&pmap_flush_context_storage);
3615 delayed_pmap_flush = FALSE;
3616
3617 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3618 if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
3619 if (VM_PAGE_WIRED(p)) {
3620 if (old_copy) {
3621 vm_object_unlock(old_copy);
3622 }
3623 vm_object_unlock(src_object);
3624 vm_object_unlock(new_copy);
3625 vm_object_deallocate(new_copy);
3626
3627 if (delayed_pmap_flush == TRUE) {
3628 pmap_flush(&pmap_flush_context_storage);
3629 }
3630
3631 return VM_OBJECT_NULL;
3632 } else {
3633 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3634 (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3635 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3636 delayed_pmap_flush = TRUE;
3637 }
3638 }
3639 }
3640 if (delayed_pmap_flush == TRUE) {
3641 pmap_flush(&pmap_flush_context_storage);
3642 }
3643
3644 if (old_copy != VM_OBJECT_NULL) {
3645 /*
3646 * Make the old copy-object shadow the new one.
3647 * It will receive no more pages from the original
3648 * object.
3649 */
3650
3651 /* remove ref. from old_copy */
3652 vm_object_lock_assert_exclusive(src_object);
3653 src_object->ref_count--;
3654 assert(src_object->ref_count > 0);
3655 vm_object_lock_assert_exclusive(old_copy);
3656 old_copy->shadow = new_copy;
3657 vm_object_lock_assert_exclusive(new_copy);
3658 assert(new_copy->ref_count > 0);
3659 new_copy->ref_count++; /* for old_copy->shadow ref. */
3660
3661 vm_object_unlock(old_copy); /* done with old_copy */
3662 }
3663
3664 /*
3665 * Point the new copy at the existing object.
3666 */
3667 vm_object_lock_assert_exclusive(new_copy);
3668 new_copy->shadow = src_object;
3669 new_copy->vo_shadow_offset = 0;
3670 new_copy->shadowed = TRUE; /* caller must set needs_copy */
3671
3672 vm_object_lock_assert_exclusive(src_object);
3673 vm_object_reference_locked(src_object);
3674 VM_OBJECT_COPY_SET(src_object, new_copy);
3675 vm_object_unlock(src_object);
3676 vm_object_unlock(new_copy);
3677
3678 return new_copy;
3679 }
3680
3681 /*
3682 * Routine: vm_object_copy_strategically
3683 *
3684 * Purpose:
3685 * Perform a copy according to the source object's
3686 * declared strategy. This operation may block,
3687 * and may be interrupted.
3688 */
3689 __private_extern__ kern_return_t
3690 vm_object_copy_strategically(
3691 vm_object_t src_object,
3692 vm_object_offset_t src_offset,
3693 vm_object_size_t size,
3694 bool forking,
3695 vm_object_t *dst_object, /* OUT */
3696 vm_object_offset_t *dst_offset, /* OUT */
3697 boolean_t *dst_needs_copy) /* OUT */
3698 {
3699 boolean_t result;
3700 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
3701 boolean_t object_lock_shared = FALSE;
3702 memory_object_copy_strategy_t copy_strategy;
3703
3704 assert(src_object != VM_OBJECT_NULL);
3705
3706 copy_strategy = src_object->copy_strategy;
3707
3708 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
3709 vm_object_lock_shared(src_object);
3710 object_lock_shared = TRUE;
3711 } else {
3712 vm_object_lock(src_object);
3713 }
3714
3715 /*
3716 * The copy strategy is only valid if the memory manager
3717 * is "ready". Internal objects are always ready.
3718 */
3719
3720 while (!src_object->internal && !src_object->pager_ready) {
3721 wait_result_t wait_result;
3722
3723 if (object_lock_shared == TRUE) {
3724 vm_object_unlock(src_object);
3725 vm_object_lock(src_object);
3726 object_lock_shared = FALSE;
3727 continue;
3728 }
3729 wait_result = vm_object_sleep( src_object,
3730 VM_OBJECT_EVENT_PAGER_READY,
3731 interruptible);
3732 if (wait_result != THREAD_AWAKENED) {
3733 vm_object_unlock(src_object);
3734 *dst_object = VM_OBJECT_NULL;
3735 *dst_offset = 0;
3736 *dst_needs_copy = FALSE;
3737 return MACH_SEND_INTERRUPTED;
3738 }
3739 }
3740
3741 /*
3742 * Use the appropriate copy strategy.
3743 */
3744
3745 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) {
3746 if (forking) {
3747 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3748 } else {
3749 copy_strategy = MEMORY_OBJECT_COPY_NONE;
3750 if (object_lock_shared) {
3751 vm_object_unlock(src_object);
3752 vm_object_lock(src_object);
3753 object_lock_shared = FALSE;
3754 }
3755 }
3756 }
3757
3758 switch (copy_strategy) {
3759 case MEMORY_OBJECT_COPY_DELAY:
3760 *dst_object = vm_object_copy_delayed(src_object,
3761 src_offset, size, object_lock_shared);
3762 if (*dst_object != VM_OBJECT_NULL) {
3763 *dst_offset = src_offset;
3764 *dst_needs_copy = TRUE;
3765 result = KERN_SUCCESS;
3766 break;
3767 }
3768 vm_object_lock(src_object);
3769 OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
3770
3771 case MEMORY_OBJECT_COPY_NONE:
3772 result = vm_object_copy_slowly(src_object, src_offset, size,
3773 interruptible, dst_object);
3774 if (result == KERN_SUCCESS) {
3775 *dst_offset = src_offset - vm_object_trunc_page(src_offset);
3776 *dst_needs_copy = FALSE;
3777 }
3778 break;
3779
3780 case MEMORY_OBJECT_COPY_SYMMETRIC:
3781 vm_object_unlock(src_object);
3782 result = KERN_MEMORY_RESTART_COPY;
3783 break;
3784
3785 default:
3786 panic("copy_strategically: bad strategy %d for object %p",
3787 copy_strategy, src_object);
3788 result = KERN_INVALID_ARGUMENT;
3789 }
3790 return result;
3791 }
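/*
 * Illustrative caller-side sketch (not a code path in this file; the
 * local names below are hypothetical) of how the OUT parameters of
 * vm_object_copy_strategically() are typically consumed:
 *
 *	vm_object_t		dst_object;
 *	vm_object_offset_t	dst_offset;
 *	boolean_t		dst_needs_copy;
 *	kern_return_t		kr;
 *
 *	kr = vm_object_copy_strategically(src_object, src_offset, size,
 *	    false, &dst_object, &dst_offset, &dst_needs_copy);
 *	if (kr == KERN_MEMORY_RESTART_COPY) {
 *		... source uses the symmetric strategy: the caller must
 *		    retry with its own copy-on-write setup ...
 *	} else if (kr == KERN_SUCCESS && dst_needs_copy) {
 *		... the caller must mark its new mapping "needs_copy" ...
 *	}
 */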
3792
3793 /*
3794 * vm_object_shadow:
3795 *
3796 * Create a new object which is backed by the
3797 * specified existing object range. The source
3798 * object reference is deallocated.
3799 *
3800 * The new object and offset into that object
3801 * are returned through the same (IN/OUT) parameters.
3802 */
3803 boolean_t vm_object_shadow_check = TRUE;
3804 uint64_t vm_object_shadow_forced = 0;
3805 uint64_t vm_object_shadow_skipped = 0;
3806
3807 __private_extern__ boolean_t
3808 vm_object_shadow(
3809 vm_object_t *object, /* IN/OUT */
3810 vm_object_offset_t *offset, /* IN/OUT */
3811 vm_object_size_t length,
3812 boolean_t always_shadow)
3813 {
3814 vm_object_t source;
3815 vm_object_t result;
3816
3817 source = *object;
3818 assert(source != VM_OBJECT_NULL);
3819 if (source == VM_OBJECT_NULL) {
3820 return FALSE;
3821 }
3822
3823 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
3824
3825 /*
3826 * Determine if we really need a shadow.
3827 *
3828 * If the source object is larger than what we are trying
3829 * to create, then force the shadow creation even if the
3830 * ref count is 1. This will allow us to [potentially]
3831 * collapse the underlying object away in the future
3832 * (freeing up the extra data it might contain and that
3833 * we don't need).
3834 */
3835
3836 assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
3837
3838 /*
3839 * The following optimization does not work in the context of submaps
3840 * (the shared region, in particular).
3841 * This object might have only 1 reference (in the submap) but that
3842 * submap can itself be mapped multiple times, so the object is
3843 * actually indirectly referenced more than once...
3844 * The caller can pass "always_shadow" to bypass this optimization.
3845 */
3846 if (vm_object_shadow_check &&
3847 source->vo_size == length &&
3848 source->ref_count == 1) {
3849 if (always_shadow) {
3850 vm_object_shadow_forced++;
3851 } else {
3852 /*
3853 * Lock the object and check again.
3854 * We also check to see if there's
3855 * a shadow or copy object involved.
3856 * We can't do that earlier because
3857 * without the object locked, there
3858 * could be a collapse and the chain
3859 * gets modified leaving us with an
3860 * invalid pointer.
3861 */
3862 vm_object_lock(source);
3863 if (source->vo_size == length &&
3864 source->ref_count == 1 &&
3865 (source->shadow == VM_OBJECT_NULL ||
3866 source->shadow->vo_copy == VM_OBJECT_NULL)) {
3867 source->shadowed = FALSE;
3868 vm_object_unlock(source);
3869 vm_object_shadow_skipped++;
3870 return FALSE;
3871 }
3872 /* things changed while we were locking "source"... */
3873 vm_object_unlock(source);
3874 }
3875 }
3876
3877 /*
3878 * *offset is the map entry's offset into the VM object and
3879 * is aligned to the map's page size.
3880 * VM objects need to be aligned to the system's page size.
3881 * Record the necessary adjustment and re-align the offset so
3882 * that result->vo_shadow_offset is properly page-aligned.
3883 */
3884 vm_object_offset_t offset_adjustment;
3885 offset_adjustment = *offset - vm_object_trunc_page(*offset);
3886 length = vm_object_round_page(length + offset_adjustment);
3887 *offset = vm_object_trunc_page(*offset);
3888
3889 /*
3890 * Allocate a new object with the given length
3891 */
3892
3893 if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
3894 panic("vm_object_shadow: no object for shadowing");
3895 }
3896
3897 /*
3898 * The new object shadows the source object, adding
3899 * a reference to it. Our caller changes his reference
3900 * to point to the new object, removing a reference to
3901 * the source object. Net result: no change of reference
3902 * count.
3903 */
3904 result->shadow = source;
3905
3906 /*
3907 * Store the offset into the source object,
3908 * and fix up the offset into the new object.
3909 */
3910
3911 result->vo_shadow_offset = *offset;
3912 assertf(page_aligned(result->vo_shadow_offset),
3913 "result %p shadow offset 0x%llx",
3914 result, result->vo_shadow_offset);
3915
3916 /*
3917 * Return the new object and the offset into it.
3918 */
3919
3920 *offset = 0;
3921 if (offset_adjustment) {
3922 /*
3923 * Make the map entry point to the equivalent offset
3924 * in the new object.
3925 */
3926 DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
3927 *offset += offset_adjustment;
3928 }
3929 *object = result;
3930 return TRUE;
3931 }
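/*
 * Worked example of the offset re-alignment above (assuming a 16KB
 * system/object page size and a 4KB map page size, as in the DEBUG4K
 * configurations): with *offset == 0x7000, vm_object_trunc_page(*offset)
 * is 0x4000, so offset_adjustment is 0x3000 and *offset becomes 0x4000.
 * The shadow object gets result->vo_shadow_offset == 0x4000 (page
 * aligned), and the caller's *offset is returned as 0x3000, which names
 * the same byte relative to the new object.
 */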
3932
3933 /*
3934 * The relationship between vm_object structures and
3935 * the memory_object requires careful synchronization.
3936 *
3937 * All associations are created by memory_object_create_named
3938 * for external pagers and vm_object_compressor_pager_create for internal
3939 * objects as follows:
3940 *
3941 * pager: the memory_object itself, supplied by
3942 * the user requesting a mapping (or the kernel,
3943 * when initializing internal objects); the
3944 * kernel simulates holding send rights by keeping
3945 * a port reference;
3946 *
3947 * pager_request:
3948 * the memory object control port,
3949 * created by the kernel; the kernel holds
3950 * receive (and ownership) rights to this
3951 * port, but no other references.
3952 *
3953 * When initialization is complete, the "initialized" field
3954 * is asserted. Other mappings using a particular memory object,
3955 * and any references to the vm_object gained through the
3956 * port association must wait for this initialization to occur.
3957 *
3958 * In order to allow the memory manager to set attributes before
3959 * requests (notably virtual copy operations, but also data or
3960 * unlock requests) are made, a "ready" attribute is made available.
3961 * Only the memory manager may affect the value of this attribute.
3962 * Its value does not affect critical kernel functions, such as
3963 * internal object initialization or destruction. [Furthermore,
3964 * memory objects created by the kernel are assumed to be ready
3965 * immediately; the default memory manager need not explicitly
3966 * set the "ready" attribute.]
3967 *
3968 * [Both the "initialized" and "ready" attribute wait conditions
3969 * use the "pager" field as the wait event.]
3970 *
3971 * The port associations can be broken down by any of the
3972 * following routines:
3973 * vm_object_terminate:
3974 * No references to the vm_object remain, and
3975 * the object cannot (or will not) be cached.
3976 * This is the normal case, and is done even
3977 * though one of the other cases has already been
3978 * done.
3979 * memory_object_destroy:
3980 * The memory manager has requested that the
3981 * kernel relinquish references to the memory
3982 * object. [The memory manager may not want to
3983 * destroy the memory object, but may wish to
3984 * refuse or tear down existing memory mappings.]
3985 *
3986 * Each routine that breaks an association must break all of
3987 * them at once. At some later time, that routine must clear
3988 * the pager field and release the memory object references.
3989 * [Furthermore, each routine must cope with the simultaneous
3990 * or previous operations of the others.]
3991 *
3992 * Because the pager field may be cleared spontaneously, it
3993 * cannot be used to determine whether a memory object has
3994 * ever been associated with a particular vm_object. [This
3995 * knowledge is important to the shadow object mechanism.]
3996 * For this reason, an additional "created" attribute is
3997 * provided.
3998 *
3999 * During various paging operations, the pager reference found in the
4000 * vm_object must remain valid. To prevent that reference from being
4001 * released (other than by being removed, i.e., made null), routines may use
4002 * the vm_object_paging_begin/end routines [actually, macros].
4003 * The implementation uses the "paging_in_progress" and "wanted" fields.
4004 * [Operations that alter the validity of the pager values include the
4005 * termination routines and vm_object_collapse.]
4006 */
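/*
 * Minimal sketch of the paging-reference pattern described above, as it
 * is used elsewhere in this file (vm_object_lock_request() and
 * vm_object_compressor_pager_create() are real instances):
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	... use object->pager and object->pager_control safely ...
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 *
 * The paging reference keeps the pager association from being torn down
 * (by termination or collapse) while it is in use.
 */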
4007
4008
4009 /*
4010 * Routine: vm_object_memory_object_associate
4011 * Purpose:
4012 * Associate a VM object to the given pager.
4013 * If a VM object is not provided, create one.
4014 * Initialize the pager.
4015 */
4016 vm_object_t
4017 vm_object_memory_object_associate(
4018 memory_object_t pager,
4019 vm_object_t object,
4020 vm_object_size_t size,
4021 boolean_t named)
4022 {
4023 memory_object_control_t control;
4024
4025 assert(pager != MEMORY_OBJECT_NULL);
4026
4027 if (object != VM_OBJECT_NULL) {
4028 assert(object->internal);
4029 assert(object->pager_created);
4030 assert(!object->pager_initialized);
4031 assert(!object->pager_ready);
4032 assert(object->pager_trusted);
4033 } else {
4034 object = vm_object_allocate(size);
4035 assert(object != VM_OBJECT_NULL);
4036 object->internal = FALSE;
4037 object->pager_trusted = FALSE;
4038 /* copy strategy invalid until set by memory manager */
4039 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4040 }
4041
4042 /*
4043 * Allocate the pager request port (memory object control).
4044 */
4045
4046 control = memory_object_control_allocate(object);
4047 assert(control != MEMORY_OBJECT_CONTROL_NULL);
4048
4049 vm_object_lock(object);
4050
4051 assert(!object->pager_ready);
4052 assert(!object->pager_initialized);
4053 assert(object->pager == NULL);
4054 assert(object->pager_control == NULL);
4055
4056 /*
4057 * Copy the reference we were given.
4058 */
4059
4060 memory_object_reference(pager);
4061 object->pager_created = TRUE;
4062 object->pager = pager;
4063 object->pager_control = control;
4064 object->pager_ready = FALSE;
4065
4066 vm_object_unlock(object);
4067
4068 /*
4069 * Let the pager know we're using it.
4070 */
4071
4072 (void) memory_object_init(pager,
4073 object->pager_control,
4074 PAGE_SIZE);
4075
4076 vm_object_lock(object);
4077 if (named) {
4078 object->named = TRUE;
4079 }
4080 if (object->internal) {
4081 object->pager_ready = TRUE;
4082 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4083 }
4084
4085 object->pager_initialized = TRUE;
4086 vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4087
4088 vm_object_unlock(object);
4089
4090 return object;
4091 }
4092
4093 /*
4094 * Routine: vm_object_compressor_pager_create
4095 * Purpose:
4096 * Create a memory object for an internal object.
4097 * In/out conditions:
4098 * The object is locked on entry and exit;
4099 * it may be unlocked within this call.
4100 * Limitations:
4101 * Only one thread may be performing a
4102 * vm_object_compressor_pager_create on an object at
4103 * a time. Presumably, only the pageout
4104 * daemon will be using this routine.
4105 */
4106
4107 void
4108 vm_object_compressor_pager_create(
4109 vm_object_t object)
4110 {
4111 memory_object_t pager;
4112 vm_object_t pager_object = VM_OBJECT_NULL;
4113
4114 assert(!is_kernel_object(object));
4115
4116 /*
4117 * Prevent collapse or termination by holding a paging reference
4118 */
4119
4120 vm_object_paging_begin(object);
4121 if (object->pager_created) {
4122 /*
4123 * Someone else got to it first...
4124 * wait for them to finish initializing the ports
4125 */
4126 while (!object->pager_initialized) {
4127 vm_object_sleep(object,
4128 VM_OBJECT_EVENT_INITIALIZED,
4129 THREAD_UNINT);
4130 }
4131 vm_object_paging_end(object);
4132 return;
4133 }
4134
4135 if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4136 (object->vo_size / PAGE_SIZE)) {
4137 #if DEVELOPMENT || DEBUG
4138 printf("vm_object_compressor_pager_create(%p): "
4139 "object size 0x%llx >= 0x%llx\n",
4140 object,
4141 (uint64_t) object->vo_size,
4142 0x0FFFFFFFFULL * PAGE_SIZE);
4143 #endif /* DEVELOPMENT || DEBUG */
4144 vm_object_paging_end(object);
4145 return;
4146 }
4147
4148 /*
4149 * Indicate that a memory object has been assigned
4150 * before dropping the lock, to prevent a race.
4151 */
4152
4153 object->pager_created = TRUE;
4154 object->pager_trusted = TRUE;
4155 object->paging_offset = 0;
4156
4157 vm_object_unlock(object);
4158
4159 /*
4160 * Create the [internal] pager, and associate it with this object.
4161 *
4162 * We make the association here so that vm_object_enter()
4163 * can look up the object to complete initializing it. No
4164 * user will ever map this object.
4165 */
4166 {
4167 /* create our new memory object */
4168 assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4169 (object->vo_size / PAGE_SIZE));
4170 (void) compressor_memory_object_create(
4171 (memory_object_size_t) object->vo_size,
4172 &pager);
4173 if (pager == NULL) {
4174 panic("vm_object_compressor_pager_create(): "
4175 "no pager for object %p size 0x%llx\n",
4176 object, (uint64_t) object->vo_size);
4177 }
4178 }
4179
4180 /*
4181 * A reference was returned by
4182 * compressor_memory_object_create(), and it is
4183 * copied by vm_object_memory_object_associate().
4184 */
4185
4186 pager_object = vm_object_memory_object_associate(pager,
4187 object,
4188 object->vo_size,
4189 FALSE);
4190 if (pager_object != object) {
4191 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4192 }
4193
4194 /*
4195 * Drop the reference we were passed.
4196 */
4197 memory_object_deallocate(pager);
4198
4199 vm_object_lock(object);
4200
4201 /*
4202 * Release the paging reference
4203 */
4204 vm_object_paging_end(object);
4205 }
4206
4207 /*
4208 * Global variables for vm_object_collapse():
4209 *
4210 * Counts for normal collapses and bypasses.
4211 * Debugging variables, to watch or disable collapse.
4212 */
4213 static long object_collapses = 0;
4214 static long object_bypasses = 0;
4215
4216 static boolean_t vm_object_collapse_allowed = TRUE;
4217 static boolean_t vm_object_bypass_allowed = TRUE;
4218
4219 void vm_object_do_collapse_compressor(vm_object_t object,
4220 vm_object_t backing_object);
4221 void
4222 vm_object_do_collapse_compressor(
4223 vm_object_t object,
4224 vm_object_t backing_object)
4225 {
4226 vm_object_offset_t new_offset, backing_offset;
4227 vm_object_size_t size;
4228
4229 vm_counters.do_collapse_compressor++;
4230
4231 vm_object_lock_assert_exclusive(object);
4232 vm_object_lock_assert_exclusive(backing_object);
4233
4234 size = object->vo_size;
4235
4236 /*
4237 * Move all compressed pages from backing_object
4238 * to the parent.
4239 */
4240
4241 for (backing_offset = object->vo_shadow_offset;
4242 backing_offset < object->vo_shadow_offset + object->vo_size;
4243 backing_offset += PAGE_SIZE) {
4244 memory_object_offset_t backing_pager_offset;
4245
4246 /* find the next compressed page at or after this offset */
4247 backing_pager_offset = (backing_offset +
4248 backing_object->paging_offset);
4249 backing_pager_offset = vm_compressor_pager_next_compressed(
4250 backing_object->pager,
4251 backing_pager_offset);
4252 if (backing_pager_offset == (memory_object_offset_t) -1) {
4253 /* no more compressed pages */
4254 break;
4255 }
4256 backing_offset = (backing_pager_offset -
4257 backing_object->paging_offset);
4258
4259 new_offset = backing_offset - object->vo_shadow_offset;
4260
4261 if (new_offset >= object->vo_size) {
4262 /* we're out of the scope of "object": done */
4263 break;
4264 }
4265
4266 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4267 (vm_compressor_pager_state_get(object->pager,
4268 (new_offset +
4269 object->paging_offset)) ==
4270 VM_EXTERNAL_STATE_EXISTS)) {
4271 /*
4272 * This page already exists in object, resident or
4273 * compressed.
4274 * We don't need this compressed page in backing_object
4275 * and it will be reclaimed when we release
4276 * backing_object.
4277 */
4278 continue;
4279 }
4280
4281 /*
4282 * backing_object has this page in the VM compressor and
4283 * we need to transfer it to object.
4284 */
4285 vm_counters.do_collapse_compressor_pages++;
4286 vm_compressor_pager_transfer(
4287 /* destination: */
4288 object->pager,
4289 (new_offset + object->paging_offset),
4290 /* source: */
4291 backing_object->pager,
4292 (backing_offset + backing_object->paging_offset));
4293 }
4294 }
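/*
 * Worked example of the offset arithmetic above (illustrative values):
 * with object->vo_shadow_offset == 0x8000, a compressed page found at
 * backing_offset 0xA000 corresponds to new_offset 0x2000 in "object".
 * It is transferred from slot (0xA000 + backing_object->paging_offset)
 * in backing_object's pager to slot (0x2000 + object->paging_offset)
 * in object's pager.
 */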
4295
4296 /*
4297 * Routine: vm_object_do_collapse
4298 * Purpose:
4299 * Collapse an object with the object backing it.
4300 * Pages in the backing object are moved into the
4301 * parent, and the backing object is deallocated.
4302 * Conditions:
4303 * Both objects and the cache are locked; the page
4304 * queues are unlocked.
4305 *
4306 */
4307 static void
4308 vm_object_do_collapse(
4309 vm_object_t object,
4310 vm_object_t backing_object)
4311 {
4312 vm_page_t p, pp;
4313 vm_object_offset_t new_offset, backing_offset;
4314 vm_object_size_t size;
4315
4316 vm_object_lock_assert_exclusive(object);
4317 vm_object_lock_assert_exclusive(backing_object);
4318
4319 assert(object->purgable == VM_PURGABLE_DENY);
4320 assert(backing_object->purgable == VM_PURGABLE_DENY);
4321
4322 backing_offset = object->vo_shadow_offset;
4323 size = object->vo_size;
4324
4325 /*
4326 * Move all in-memory pages from backing_object
4327 * to the parent. Pages that have been paged out
4328 * will be overwritten by any of the parent's
4329 * pages that shadow them.
4330 */
4331
4332 while (!vm_page_queue_empty(&backing_object->memq)) {
4333 p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4334
4335 new_offset = (p->vmp_offset - backing_offset);
4336
4337 assert(!p->vmp_busy || p->vmp_absent);
4338
4339 /*
4340 * If the parent has a page here, or if
4341 * this page falls outside the parent,
4342 * dispose of it.
4343 *
4344 * Otherwise, move it as planned.
4345 */
4346
4347 if (p->vmp_offset < backing_offset || new_offset >= size) {
4348 VM_PAGE_FREE(p);
4349 } else {
4350 pp = vm_page_lookup(object, new_offset);
4351 if (pp == VM_PAGE_NULL) {
4352 if (VM_COMPRESSOR_PAGER_STATE_GET(object,
4353 new_offset)
4354 == VM_EXTERNAL_STATE_EXISTS) {
4355 /*
4356 * Parent object has this page
4357 * in the VM compressor.
4358 * Throw away the backing
4359 * object's page.
4360 */
4361 VM_PAGE_FREE(p);
4362 } else {
4363 /*
4364 * Parent now has no page.
4365 * Move the backing object's page
4366 * up.
4367 */
4368 vm_page_rename(p, object, new_offset);
4369 }
4370 } else {
4371 assert(!pp->vmp_absent);
4372
4373 /*
4374 * Parent object has a real page.
4375 * Throw away the backing object's
4376 * page.
4377 */
4378 VM_PAGE_FREE(p);
4379 }
4380 }
4381 }
4382
4383 if (vm_object_collapse_compressor_allowed &&
4384 object->pager != MEMORY_OBJECT_NULL &&
4385 backing_object->pager != MEMORY_OBJECT_NULL) {
4386 /* move compressed pages from backing_object to object */
4387 vm_object_do_collapse_compressor(object, backing_object);
4388 } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4389 assert((!object->pager_created &&
4390 (object->pager == MEMORY_OBJECT_NULL)) ||
4391 (!backing_object->pager_created &&
4392 (backing_object->pager == MEMORY_OBJECT_NULL)));
4393 /*
4394 * Move the pager from backing_object to object.
4395 *
4396 * XXX We're only using part of the paging space
4397 * for keeps now... we ought to discard the
4398 * unused portion.
4399 */
4400
4401 assert(!object->paging_in_progress);
4402 assert(!object->activity_in_progress);
4403 assert(!object->pager_created);
4404 assert(object->pager == NULL);
4405 object->pager = backing_object->pager;
4406
4407 object->pager_created = backing_object->pager_created;
4408 object->pager_control = backing_object->pager_control;
4409 object->pager_ready = backing_object->pager_ready;
4410 object->pager_initialized = backing_object->pager_initialized;
4411 object->paging_offset =
4412 backing_object->paging_offset + backing_offset;
4413 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4414 memory_object_control_collapse(&object->pager_control,
4415 object);
4416 }
4417 /* the backing_object has lost its pager: reset all fields */
4418 backing_object->pager_created = FALSE;
4419 backing_object->pager_control = NULL;
4420 backing_object->pager_ready = FALSE;
4421 backing_object->paging_offset = 0;
4422 backing_object->pager = NULL;
4423 }
4424 /*
4425 * Object now shadows whatever backing_object did.
4426 * Note that the reference to backing_object->shadow
4427 * moves from within backing_object to within object.
4428 */
4429
4430 assert(!object->phys_contiguous);
4431 assert(!backing_object->phys_contiguous);
4432 object->shadow = backing_object->shadow;
4433 if (object->shadow) {
4434 assertf(page_aligned(object->vo_shadow_offset),
4435 "object %p shadow_offset 0x%llx",
4436 object, object->vo_shadow_offset);
4437 assertf(page_aligned(backing_object->vo_shadow_offset),
4438 "backing_object %p shadow_offset 0x%llx",
4439 backing_object, backing_object->vo_shadow_offset);
4440 object->vo_shadow_offset += backing_object->vo_shadow_offset;
4441 /* "backing_object" gave its shadow to "object" */
4442 backing_object->shadow = VM_OBJECT_NULL;
4443 backing_object->vo_shadow_offset = 0;
4444 } else {
4445 /* no shadow, therefore no shadow offset... */
4446 object->vo_shadow_offset = 0;
4447 }
4448 assert((object->shadow == VM_OBJECT_NULL) ||
4449 (object->shadow->vo_copy != backing_object));
4450
4451 /*
4452 * Discard backing_object.
4453 *
4454 * Since the backing object has no pages, no
4455 * pager left, and no object references within it,
4456 * all that is necessary is to dispose of it.
4457 */
4458 object_collapses++;
4459
4460 assert(backing_object->ref_count == 1);
4461 assert(backing_object->resident_page_count == 0);
4462 assert(backing_object->paging_in_progress == 0);
4463 assert(backing_object->activity_in_progress == 0);
4464 assert(backing_object->shadow == VM_OBJECT_NULL);
4465 assert(backing_object->vo_shadow_offset == 0);
4466
4467 if (backing_object->pager != MEMORY_OBJECT_NULL) {
4468 /* ... unless it has a pager; need to terminate pager too */
4469 vm_counters.do_collapse_terminate++;
4470 if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4471 vm_counters.do_collapse_terminate_failure++;
4472 }
4473 return;
4474 }
4475
4476 assert(backing_object->pager == NULL);
4477
4478 backing_object->alive = FALSE;
4479 vm_object_unlock(backing_object);
4480
4481 #if VM_OBJECT_TRACKING
4482 if (vm_object_tracking_btlog) {
4483 btlog_erase(vm_object_tracking_btlog, backing_object);
4484 }
4485 #endif /* VM_OBJECT_TRACKING */
4486
4487 vm_object_lock_destroy(backing_object);
4488
4489 zfree(vm_object_zone, backing_object);
4490 }
4491
4492 static void
4493 vm_object_do_bypass(
4494 vm_object_t object,
4495 vm_object_t backing_object)
4496 {
4497 /*
4498 * Make the parent shadow the next object
4499 * in the chain.
4500 */
4501
4502 vm_object_lock_assert_exclusive(object);
4503 vm_object_lock_assert_exclusive(backing_object);
4504
4505 vm_object_reference(backing_object->shadow);
4506
4507 assert(!object->phys_contiguous);
4508 assert(!backing_object->phys_contiguous);
4509 object->shadow = backing_object->shadow;
4510 if (object->shadow) {
4511 assertf(page_aligned(object->vo_shadow_offset),
4512 "object %p shadow_offset 0x%llx",
4513 object, object->vo_shadow_offset);
4514 assertf(page_aligned(backing_object->vo_shadow_offset),
4515 "backing_object %p shadow_offset 0x%llx",
4516 backing_object, backing_object->vo_shadow_offset);
4517 object->vo_shadow_offset += backing_object->vo_shadow_offset;
4518 } else {
4519 /* no shadow, therefore no shadow offset... */
4520 object->vo_shadow_offset = 0;
4521 }
4522
4523 /*
4524 * Backing object might have had a copy pointer
4525 * to us. If it did, clear it.
4526 */
4527 if (backing_object->vo_copy == object) {
4528 VM_OBJECT_COPY_SET(backing_object, VM_OBJECT_NULL);
4529 }
4530
4531 /*
4532 * Drop the reference count on backing_object.
4533 #if TASK_SWAPPER
4534 * Since its ref_count was at least 2, it
4535 * will not vanish; so we don't need to call
4536 * vm_object_deallocate.
4537 * [with a caveat for "named" objects]
4538 *
4539 * The res_count on the backing object is
4540 * conditionally decremented. It's possible
4541 * (via vm_pageout_scan) to get here with
4542 * a "swapped" object, which has a 0 res_count,
4543 * in which case, the backing object res_count
4544 * is already down by one.
4545 #else
4546 * Don't call vm_object_deallocate unless
4547 * ref_count drops to zero.
4548 *
4549 * The ref_count can drop to zero here if the
4550 * backing object could be bypassed but not
4551 * collapsed, such as when the backing object
4552 * is temporary and cachable.
4553 #endif
4554 */
4555 if (backing_object->ref_count > 2 ||
4556 (!backing_object->named && backing_object->ref_count > 1)) {
4557 vm_object_lock_assert_exclusive(backing_object);
4558 backing_object->ref_count--;
4559 vm_object_unlock(backing_object);
4560 } else {
4561 /*
4562 * Drop locks so that we can deallocate
4563 * the backing object.
4564 */
4565
4566 /*
4567 * vm_object_collapse (the caller of this function) is
4568 * now called from contexts that may not guarantee that a
4569 * valid reference is held on the object... w/o a valid
4570 * reference, it is unsafe and unwise (you will definitely
4571 * regret it) to unlock the object and then retake the lock
4572 * since the object may be terminated and recycled in between.
4573 * The "activity_in_progress" reference will keep the object
4574 * 'stable'.
4575 */
4576 vm_object_activity_begin(object);
4577 vm_object_unlock(object);
4578
4579 vm_object_unlock(backing_object);
4580 vm_object_deallocate(backing_object);
4581
4582 /*
4583 * Relock object. We don't have to reverify
4584 * its state since vm_object_collapse will
4585 * do that for us as it starts at the
4586 * top of its loop.
4587 */
4588
4589 vm_object_lock(object);
4590 vm_object_activity_end(object);
4591 }
4592
4593 object_bypasses++;
4594 }
4595
4596
4597 /*
4598 * vm_object_collapse:
4599 *
4600 * Perform an object collapse or an object bypass if appropriate.
4601 * The real work of collapsing and bypassing is performed in
4602 * the routines vm_object_do_collapse and vm_object_do_bypass.
4603 *
4604 * Requires that the object be locked and the page queues be unlocked.
4605 *
4606 */
4607 static unsigned long vm_object_collapse_calls = 0;
4608 static unsigned long vm_object_collapse_objects = 0;
4609 static unsigned long vm_object_collapse_do_collapse = 0;
4610 static unsigned long vm_object_collapse_do_bypass = 0;
4611
4612 __private_extern__ void
4613 vm_object_collapse(
4614 vm_object_t object,
4615 vm_object_offset_t hint_offset,
4616 boolean_t can_bypass)
4617 {
4618 vm_object_t backing_object;
4619 vm_object_size_t object_vcount, object_rcount;
4620 vm_object_t original_object;
4621 int object_lock_type;
4622 int backing_object_lock_type;
4623
4624 vm_object_collapse_calls++;
4625
4626 assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
4627
4628 if (!vm_object_collapse_allowed &&
4629 !(can_bypass && vm_object_bypass_allowed)) {
4630 return;
4631 }
4632
4633 if (object == VM_OBJECT_NULL) {
4634 return;
4635 }
4636
4637 original_object = object;
4638
4639 /*
4640 * The top object was locked "exclusive" by the caller.
4641 * In the first pass, to determine if we can collapse the shadow chain,
4642 * take a "shared" lock on the shadow objects. If we can collapse,
4643 * we'll have to go down the chain again with exclusive locks.
4644 */
4645 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4646 backing_object_lock_type = OBJECT_LOCK_SHARED;
4647
4648 retry:
4649 object = original_object;
4650 vm_object_lock_assert_exclusive(object);
4651
4652 while (TRUE) {
4653 vm_object_collapse_objects++;
4654 /*
4655 * Verify that the conditions are right for either
4656 * collapse or bypass:
4657 */
4658
4659 /*
4660 * There is a backing object, and
4661 */
4662
4663 backing_object = object->shadow;
4664 if (backing_object == VM_OBJECT_NULL) {
4665 if (object != original_object) {
4666 vm_object_unlock(object);
4667 }
4668 return;
4669 }
4670 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
4671 vm_object_lock_shared(backing_object);
4672 } else {
4673 vm_object_lock(backing_object);
4674 }
4675
4676 /*
4677 * No pages in the object are currently
4678 * being paged out, and
4679 */
4680 if (object->paging_in_progress != 0 ||
4681 object->activity_in_progress != 0) {
4682 /* try and collapse the rest of the shadow chain */
4683 if (object != original_object) {
4684 vm_object_unlock(object);
4685 }
4686 object = backing_object;
4687 object_lock_type = backing_object_lock_type;
4688 continue;
4689 }
4690
4691 /*
4692 * ...
4693 * The backing object is not read_only,
4694 * and no pages in the backing object are
4695 * currently being paged out.
4696 * The backing object is internal.
4697 *
4698 */
4699
4700 if (!backing_object->internal ||
4701 backing_object->paging_in_progress != 0 ||
4702 backing_object->activity_in_progress != 0) {
4703 /* try and collapse the rest of the shadow chain */
4704 if (object != original_object) {
4705 vm_object_unlock(object);
4706 }
4707 object = backing_object;
4708 object_lock_type = backing_object_lock_type;
4709 continue;
4710 }
4711
4712 /*
4713 * Purgeable objects are not supposed to engage in
4714 * copy-on-write activities, so should not have
4715 * any shadow objects or be a shadow object to another
4716 * object.
4717 * Collapsing a purgeable object would require some
4718 * updates to the purgeable compressed ledgers.
4719 */
4720 if (object->purgable != VM_PURGABLE_DENY ||
4721 backing_object->purgable != VM_PURGABLE_DENY) {
4722 panic("vm_object_collapse() attempting to collapse "
4723 "purgeable object: %p(%d) %p(%d)\n",
4724 object, object->purgable,
4725 backing_object, backing_object->purgable);
4726 /* try and collapse the rest of the shadow chain */
4727 if (object != original_object) {
4728 vm_object_unlock(object);
4729 }
4730 object = backing_object;
4731 object_lock_type = backing_object_lock_type;
4732 continue;
4733 }
4734
4735 /*
4736 * The backing object can't be a copy-object:
4737 * the shadow_offset for the copy-object must stay
4738 * as 0. Furthermore (for the 'we have all the
4739 * pages' case), if we bypass backing_object and
4740 * just shadow the next object in the chain, old
4741 * pages from that object would then have to be copied
4742 * BOTH into the (former) backing_object and into the
4743 * parent object.
4744 */
4745 if (backing_object->shadow != VM_OBJECT_NULL &&
4746 backing_object->shadow->vo_copy == backing_object) {
4747 /* try and collapse the rest of the shadow chain */
4748 if (object != original_object) {
4749 vm_object_unlock(object);
4750 }
4751 object = backing_object;
4752 object_lock_type = backing_object_lock_type;
4753 continue;
4754 }
4755
4756 /*
4757 * We can now try to either collapse the backing
4758 * object (if the parent is the only reference to
4759 * it) or (perhaps) remove the parent's reference
4760 * to it.
4761 *
4762 * If there is exactly one reference to the backing
4763 * object, we may be able to collapse it into the
4764 * parent.
4765 *
4766 * As long as one of the objects is still not known
4767 * to the pager, we can collapse them.
4768 */
4769 if (backing_object->ref_count == 1 &&
4770 (vm_object_collapse_compressor_allowed ||
4771 !object->pager_created ||
4772 !backing_object->pager_created) &&
4773 vm_object_collapse_allowed) {
4774 /*
4775 * We need the exclusive lock on the VM objects.
4776 */
4777 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4778 /*
4779 * We have an object and its shadow locked
4780 * "shared". We can't just upgrade the locks
4781 * to "exclusive", as some other thread might
4782 * also have these objects locked "shared" and
4783 * attempt to upgrade one or the other to
4784 * "exclusive". The upgrades would block
4785 * forever waiting for the other "shared" locks
4786 * to get released.
4787 * So we have to release the locks and go
4788 * down the shadow chain again (since it could
4789 * have changed) with "exclusive" locking.
4790 */
4791 vm_object_unlock(backing_object);
4792 if (object != original_object) {
4793 vm_object_unlock(object);
4794 }
4795 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4796 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4797 goto retry;
4798 }
4799
4800 /*
4801 * Collapse the object with its backing
4802 * object, and try again with the object's
4803 * new backing object.
4804 */
4805
4806 vm_object_do_collapse(object, backing_object);
4807 vm_object_collapse_do_collapse++;
4808 continue;
4809 }
4810
4811 /*
4812 * Collapsing the backing object was not possible
4813 * or permitted, so let's try bypassing it.
4814 */
4815
4816 if (!(can_bypass && vm_object_bypass_allowed)) {
4817 /* try and collapse the rest of the shadow chain */
4818 if (object != original_object) {
4819 vm_object_unlock(object);
4820 }
4821 object = backing_object;
4822 object_lock_type = backing_object_lock_type;
4823 continue;
4824 }
4825
4826
4827 /*
4828 * If the object doesn't have all its pages present,
4829 * we have to make sure no pages in the backing object
4830 * "show through" before bypassing it.
4831 */
4832 object_vcount = object->vo_size >> PAGE_SHIFT;
4833 object_rcount = (vm_object_size_t)object->resident_page_count;
4834
4835 if (object_rcount != object_vcount) {
4836 vm_object_offset_t offset;
4837 vm_object_offset_t backing_offset;
4838 vm_object_size_t backing_rcount, backing_vcount;
4839
4840 /*
4841 * If the backing object has a pager but no pagemap,
4842 * then we cannot bypass it, because we don't know
4843 * what pages it has.
4844 */
4845 if (backing_object->pager_created) {
4846 /* try and collapse the rest of the shadow chain */
4847 if (object != original_object) {
4848 vm_object_unlock(object);
4849 }
4850 object = backing_object;
4851 object_lock_type = backing_object_lock_type;
4852 continue;
4853 }
4854
4855 /*
4856 * If the object has a pager but no pagemap,
4857 * then we cannot bypass it, because we don't know
4858 * what pages it has.
4859 */
4860 if (object->pager_created) {
4861 /* try and collapse the rest of the shadow chain */
4862 if (object != original_object) {
4863 vm_object_unlock(object);
4864 }
4865 object = backing_object;
4866 object_lock_type = backing_object_lock_type;
4867 continue;
4868 }
4869
4870 backing_offset = object->vo_shadow_offset;
4871 backing_vcount = backing_object->vo_size >> PAGE_SHIFT;
4872 backing_rcount = (vm_object_size_t)backing_object->resident_page_count;
4873 assert(backing_vcount >= object_vcount);
4874
4875 if (backing_rcount > (backing_vcount - object_vcount) &&
4876 backing_rcount - (backing_vcount - object_vcount) > object_rcount) {
4877 /*
4878 * we have enough pages in the backing object to guarantee that
4879 * at least 1 of them must be 'uncovered' by a resident page
4880 * in the object we're evaluating, so move on and
4881 * try to collapse the rest of the shadow chain
4882 */
4883 if (object != original_object) {
4884 vm_object_unlock(object);
4885 }
4886 object = backing_object;
4887 object_lock_type = backing_object_lock_type;
4888 continue;
4889 }
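/*
 * Worked example of the pigeonhole check above (illustrative numbers):
 * with object_vcount == backing_vcount == 100 and object_rcount == 10,
 * a backing_rcount of 50 means at most 100 - 100 = 0 backing pages can
 * fall outside the object's window and at most 10 can be covered by the
 * object's resident pages, so at least 40 backing pages must show
 * through; bypassing this backing object is ruled out without scanning
 * and the walk moves down the shadow chain.
 */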
4890
4891 /*
4892 * If all of the pages in the backing object are
4893 * shadowed by the parent object, the parent
4894 * object no longer has to shadow the backing
4895 * object; it can shadow the next one in the
4896 * chain.
4897 *
4898 * If the backing object has existence info,
4899 * we must also examine its existence info
4900 * as well.
4901 *
4902 */
4903
4904 #define EXISTS_IN_OBJECT(obj, off, rc) \
4905 ((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off)) \
4906 == VM_EXTERNAL_STATE_EXISTS) || \
4907 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
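/*
 * A page "exists" in (obj, off) if the compressor holds a copy of it,
 * or if (while the running count "rc" of unaccounted resident pages is
 * still non-zero) vm_page_lookup() finds a resident page.  Each resident
 * hit decrements "rc", so the relatively expensive lookups stop once
 * every resident page has been accounted for.
 */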
4908
4909 /*
4910 * Check the hint location first
4911 * (since it is often the quickest way out of here).
4912 */
4913 if (object->cow_hint != ~(vm_offset_t)0) {
4914 hint_offset = (vm_object_offset_t)object->cow_hint;
4915 } else {
4916 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
4917 (hint_offset - 8 * PAGE_SIZE_64) : 0;
4918 }
4919
4920 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
4921 backing_offset, backing_rcount) &&
4922 !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) {
4923 /* dependency right at the hint */
4924 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
4925 /* try and collapse the rest of the shadow chain */
4926 if (object != original_object) {
4927 vm_object_unlock(object);
4928 }
4929 object = backing_object;
4930 object_lock_type = backing_object_lock_type;
4931 continue;
4932 }
4933
4934 /*
4935 * If the object's window onto the backing_object
4936 * is large compared to the number of resident
4937 * pages in the backing object, it makes sense to
4938 * walk the backing_object's resident pages first.
4939 *
4940 * NOTE: Pages may be in the existence map and/or be
4941 * resident, so if we don't find a dependency while
4942 * walking the backing object's resident page list
4943 * directly, and there is an existence map, we'll have
4944 * to run the offset based 2nd pass. Because we may
4945 * have to run both passes, we need to be careful
4946 * not to decrement 'rcount' in the 1st pass
4947 */
4948 if (backing_rcount && backing_rcount < (object_vcount / 8)) {
4949 vm_object_size_t rc = object_rcount;
4950 vm_page_t p;
4951
4952 backing_rcount = backing_object->resident_page_count;
4953 p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
4954 do {
4955 offset = (p->vmp_offset - backing_offset);
4956
4957 if (offset < object->vo_size &&
4958 offset != hint_offset &&
4959 !EXISTS_IN_OBJECT(object, offset, rc)) {
4960 /* found a dependency */
4961 object->cow_hint = (vm_offset_t) offset; /* atomic */
4962
4963 break;
4964 }
4965 p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
4966 } while (--backing_rcount);
4967 if (backing_rcount != 0) {
4968 /* try and collapse the rest of the shadow chain */
4969 if (object != original_object) {
4970 vm_object_unlock(object);
4971 }
4972 object = backing_object;
4973 object_lock_type = backing_object_lock_type;
4974 continue;
4975 }
4976 }
4977
4978 /*
4979 * Walk through the offsets looking for pages in the
4980 * backing object that show through to the object.
4981 */
4982 if (backing_rcount) {
4983 offset = hint_offset;
4984
4985 while ((offset =
4986 (offset + PAGE_SIZE_64 < object->vo_size) ?
4987 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
4988 if (EXISTS_IN_OBJECT(backing_object, offset +
4989 backing_offset, backing_rcount) &&
4990 !EXISTS_IN_OBJECT(object, offset, object_rcount)) {
4991 /* found a dependency */
4992 object->cow_hint = (vm_offset_t) offset; /* atomic */
4993 break;
4994 }
4995 }
4996 if (offset != hint_offset) {
4997 /* try and collapse the rest of the shadow chain */
4998 if (object != original_object) {
4999 vm_object_unlock(object);
5000 }
5001 object = backing_object;
5002 object_lock_type = backing_object_lock_type;
5003 continue;
5004 }
5005 }
5006 }
5007
5008 /*
5009 * We need "exclusive" locks on the 2 VM objects.
5010 */
5011 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5012 vm_object_unlock(backing_object);
5013 if (object != original_object) {
5014 vm_object_unlock(object);
5015 }
5016 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5017 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5018 goto retry;
5019 }
5020
5021 /* reset the offset hint for any objects deeper in the chain */
5022 object->cow_hint = (vm_offset_t)0;
5023
5024 /*
5025 * All interesting pages in the backing object
5026 * already live in the parent or its pager.
5027 * Thus we can bypass the backing object.
5028 */
5029
5030 vm_object_do_bypass(object, backing_object);
5031 vm_object_collapse_do_bypass++;
5032
5033 /*
5034 * Try again with this object's new backing object.
5035 */
5036
5037 continue;
5038 }
5039
5040 /* NOT REACHED */
5041 /*
5042 * if (object != original_object) {
5043 * vm_object_unlock(object);
5044 * }
5045 */
5046 }
5047
5048 /*
5049 * Routine: vm_object_page_remove: [internal]
5050 * Purpose:
5051 * Removes all physical pages in the specified
5052 * object range from the object's list of pages.
5053 *
5054 * In/out conditions:
5055 * The object must be locked.
5056 * The object must not have paging_in_progress, usually
5057 * guaranteed by not having a pager.
5058 */
5059 unsigned int vm_object_page_remove_lookup = 0;
5060 unsigned int vm_object_page_remove_iterate = 0;
5061
5062 __private_extern__ void
5063 vm_object_page_remove(
5064 vm_object_t object,
5065 vm_object_offset_t start,
5066 vm_object_offset_t end)
5067 {
5068 vm_page_t p, next;
5069
5070 /*
5071 * One and two page removals are most popular.
5072 * The factor of 16 here is somewhat arbitrary.
5073 * It balances vm_object_lookup vs iteration.
5074 */
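/*
 * For example (illustrative numbers): removing a 4-page range from an
 * object with 1000 resident pages takes the per-page lookup path
 * (4 < 1000/16 == 62), while removing the same range from an object
 * with only 32 resident pages walks the whole memq instead
 * (4 >= 32/16 == 2).
 */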
5075
5076 if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5077 vm_object_page_remove_lookup++;
5078
5079 for (; start < end; start += PAGE_SIZE_64) {
5080 p = vm_page_lookup(object, start);
5081 if (p != VM_PAGE_NULL) {
5082 assert(!p->vmp_cleaning && !p->vmp_laundry);
5083 if (!p->vmp_fictitious && p->vmp_pmapped) {
5084 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5085 }
5086 VM_PAGE_FREE(p);
5087 }
5088 }
5089 } else {
5090 vm_object_page_remove_iterate++;
5091
5092 p = (vm_page_t) vm_page_queue_first(&object->memq);
5093 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5094 next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5095 if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5096 assert(!p->vmp_cleaning && !p->vmp_laundry);
5097 if (!p->vmp_fictitious && p->vmp_pmapped) {
5098 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5099 }
5100 VM_PAGE_FREE(p);
5101 }
5102 p = next;
5103 }
5104 }
5105 }
5106
5107
5108 /*
5109 * Routine: vm_object_coalesce
5110 * Function: Coalesces two objects backing up adjoining
5111 * regions of memory into a single object.
5112 *
5113 * returns TRUE if objects were combined.
5114 *
5115 * NOTE: Only works at the moment if the second object is NULL -
5116 * if it's not, which object do we lock first?
5117 *
5118 * Parameters:
5119 * prev_object First object to coalesce
5120 * prev_offset Offset into prev_object
5121 * next_object Second object to coalesce
5122 * next_offset Offset into next_object
5123 *
5124 * prev_size Size of reference to prev_object
5125 * next_size Size of reference to next_object
5126 *
5127 * Conditions:
5128 * The object(s) must *not* be locked. The map must be locked
5129 * to preserve the reference to the object(s).
5130 */
5131 static int vm_object_coalesce_count = 0;
5132
5133 __private_extern__ boolean_t
5134 vm_object_coalesce(
5135 vm_object_t prev_object,
5136 vm_object_t next_object,
5137 vm_object_offset_t prev_offset,
5138 __unused vm_object_offset_t next_offset,
5139 vm_object_size_t prev_size,
5140 vm_object_size_t next_size)
5141 {
5142 vm_object_size_t newsize;
5143
5144 #ifdef lint
5145 next_offset++;
5146 #endif /* lint */
5147
5148 if (next_object != VM_OBJECT_NULL) {
5149 return FALSE;
5150 }
5151
5152 if (prev_object == VM_OBJECT_NULL) {
5153 return TRUE;
5154 }
5155
5156 vm_object_lock(prev_object);
5157
5158 /*
5159 * Try to collapse the object first
5160 */
5161 vm_object_collapse(prev_object, prev_offset, TRUE);
5162
5163 /*
5164 * Can't coalesce if pages not mapped to
5165 * prev_entry may be in use in any way:
5166 * . more than one reference
5167 * . paged out
5168 * . shadows another object
5169 * . has a copy elsewhere
5170 * . is purgeable
5171 * . paging references (pages might be in page-list)
5172 */
5173
5174 if ((prev_object->ref_count > 1) ||
5175 prev_object->pager_created ||
5176 (prev_object->shadow != VM_OBJECT_NULL) ||
5177 (prev_object->vo_copy != VM_OBJECT_NULL) ||
5178 (prev_object->true_share != FALSE) ||
5179 (prev_object->purgable != VM_PURGABLE_DENY) ||
5180 (prev_object->paging_in_progress != 0) ||
5181 (prev_object->activity_in_progress != 0)) {
5182 vm_object_unlock(prev_object);
5183 return FALSE;
5184 }
5185
5186 vm_object_coalesce_count++;
5187
5188 /*
5189 * Remove any pages that may still be in the object from
5190 * a previous deallocation.
5191 */
5192 vm_object_page_remove(prev_object,
5193 prev_offset + prev_size,
5194 prev_offset + prev_size + next_size);
5195
5196 /*
5197 * Extend the object if necessary.
5198 */
5199 newsize = prev_offset + prev_size + next_size;
5200 if (newsize > prev_object->vo_size) {
5201 assertf(page_aligned(newsize),
5202 "object %p size 0x%llx",
5203 prev_object, (uint64_t)newsize);
5204 prev_object->vo_size = newsize;
5205 }
5206
5207 vm_object_unlock(prev_object);
5208 return TRUE;
5209 }
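/*
 * Worked example (illustrative values): if prev_object currently covers
 * [0, 0x20000) and the caller passes prev_offset 0x10000, prev_size
 * 0x10000 and next_size 0x4000, any stale pages in [0x20000, 0x24000)
 * are removed first and newsize is 0x24000, so prev_object->vo_size
 * grows from 0x20000 to 0x24000 to cover the coalesced region.
 */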
5210
5211 kern_return_t
5212 vm_object_populate_with_private(
5213 vm_object_t object,
5214 vm_object_offset_t offset,
5215 ppnum_t phys_page,
5216 vm_size_t size)
5217 {
5218 ppnum_t base_page;
5219 vm_object_offset_t base_offset;
5220
5221
5222 if (!object->private) {
5223 return KERN_FAILURE;
5224 }
5225
5226 base_page = phys_page;
5227
5228 vm_object_lock(object);
5229
5230 if (!object->phys_contiguous) {
5231 vm_page_t m;
5232
5233 if ((base_offset = trunc_page_64(offset)) != offset) {
5234 vm_object_unlock(object);
5235 return KERN_FAILURE;
5236 }
5237 base_offset += object->paging_offset;
5238
5239 while (size) {
5240 m = vm_page_lookup(object, base_offset);
5241
5242 if (m != VM_PAGE_NULL) {
5243 if (m->vmp_fictitious) {
5244 if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
5245 vm_page_lockspin_queues();
5246 m->vmp_private = TRUE;
5247 vm_page_unlock_queues();
5248
5249 m->vmp_fictitious = FALSE;
5250 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5251 }
5252 } else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
5253 if (!m->vmp_private) {
5254 /*
5255 * we'd leak a real page... that can't be right
5256 */
5257 panic("vm_object_populate_with_private - %p not private", m);
5258 }
5259 if (m->vmp_pmapped) {
5260 /*
5261 * pmap call to clear old mapping
5262 */
5263 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5264 }
5265 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5266 }
5267 } else {
5268 m = vm_page_grab_fictitious(TRUE);
5269
5270 /*
5271 * private normally requires lock_queues but since we
5272 * are initializing the page, it's not necessary here
5273 */
5274 m->vmp_private = TRUE;
5275 m->vmp_fictitious = FALSE;
5276 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5277 m->vmp_unusual = TRUE;
5278 m->vmp_busy = FALSE;
5279
5280 vm_page_insert(m, object, base_offset);
5281 }
5282 base_page++; /* Go to the next physical page */
5283 base_offset += PAGE_SIZE;
5284 size -= PAGE_SIZE;
5285 }
5286 } else {
5287 /* NOTE: we should check the original settings here */
5288 /* if we have a size > zero a pmap call should be made */
5289 /* to disable the range */
5290
5291 /* pmap_? */
5292
5293 /* shadows on contiguous memory are not allowed */
5294 /* we therefore can use the offset field */
5295 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5296 assertf(page_aligned(size),
5297 "object %p size 0x%llx",
5298 object, (uint64_t)size);
5299 object->vo_size = size;
5300 }
5301 vm_object_unlock(object);
5302
5303 return KERN_SUCCESS;
5304 }
5305
5306
5307 kern_return_t
5308 memory_object_create_named(
5309 memory_object_t pager,
5310 memory_object_offset_t size,
5311 memory_object_control_t *control)
5312 {
5313 vm_object_t object;
5314
5315 *control = MEMORY_OBJECT_CONTROL_NULL;
5316 if (pager == MEMORY_OBJECT_NULL) {
5317 return KERN_INVALID_ARGUMENT;
5318 }
5319
5320 object = vm_object_memory_object_associate(pager,
5321 VM_OBJECT_NULL,
5322 size,
5323 TRUE);
5324 if (object == VM_OBJECT_NULL) {
5325 return KERN_INVALID_OBJECT;
5326 }
5327
5328 /* wait for object (if any) to be ready */
5329 if (object != VM_OBJECT_NULL) {
5330 vm_object_lock(object);
5331 object->named = TRUE;
5332 while (!object->pager_ready) {
5333 vm_object_sleep(object,
5334 VM_OBJECT_EVENT_PAGER_READY,
5335 THREAD_UNINT);
5336 }
5337 *control = object->pager_control;
5338 vm_object_unlock(object);
5339 }
5340 return KERN_SUCCESS;
5341 }
5342
5343
5344 __private_extern__ kern_return_t
5345 vm_object_lock_request(
5346 vm_object_t object,
5347 vm_object_offset_t offset,
5348 vm_object_size_t size,
5349 memory_object_return_t should_return,
5350 int flags,
5351 vm_prot_t prot)
5352 {
5353 __unused boolean_t should_flush;
5354
5355 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5356
5357 /*
5358 * Check for bogus arguments.
5359 */
5360 if (object == VM_OBJECT_NULL) {
5361 return KERN_INVALID_ARGUMENT;
5362 }
5363
5364 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5365 return KERN_INVALID_ARGUMENT;
5366 }
5367
5368 /*
5369 * XXX TODO4K
5370 * extend range for conservative operations (copy-on-write, sync, ...)
5371 * truncate range for destructive operations (purge, ...)
5372 */
5373 size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5374 offset = vm_object_trunc_page(offset);
5375
5376 /*
5377 * Lock the object, and acquire a paging reference to
5378 * prevent the memory_object reference from being released.
5379 */
5380 vm_object_lock(object);
5381 vm_object_paging_begin(object);
5382
5383 (void)vm_object_update(object,
5384 offset, size, NULL, NULL, should_return, flags, prot);
5385
5386 vm_object_paging_end(object);
5387 vm_object_unlock(object);
5388
5389 return KERN_SUCCESS;
5390 }
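/*
 * Worked example of the conservative range extension above (assuming a
 * 4KB page size): a request for offset 0x1800 and size 0x1000 is widened
 * to offset 0x1000 and size 0x2000, so vm_object_update() operates on
 * the whole pages [0x1000, 0x3000) containing the requested bytes.
 */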
5391
5392 /*
5393 * Empty a purgeable object by grabbing the physical pages assigned to it and
5394 * putting them on the free queue without writing them to backing store, etc.
5395 * When the pages are next touched they will be demand zero-fill pages. We
5396 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
5397 * skip referenced/dirty pages, pages on the active queue, etc. We're more
5398 * than happy to grab these since this is a purgeable object. We mark the
5399 * object as "empty" after reaping its pages.
5400 *
5401 * On entry the object must be locked and it must be
5402 * purgeable with no delayed copies pending.
5403 */
5404 uint64_t
5405 vm_object_purge(vm_object_t object, int flags)
5406 {
5407 unsigned int object_page_count = 0, pgcount = 0;
5408 uint64_t total_purged_pgcount = 0;
5409 boolean_t skipped_object = FALSE;
5410
5411 vm_object_lock_assert_exclusive(object);
5412
5413 if (object->purgable == VM_PURGABLE_DENY) {
5414 return 0;
5415 }
5416
5417 assert(object->vo_copy == VM_OBJECT_NULL);
5418 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5419
5420 /*
5421 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5422 * reaping its pages. We update vm_page_purgeable_count in bulk
5423 * and we don't want vm_page_remove() to update it again for each
5424 * page we reap later.
5425 *
5426 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5427 * are all accounted for in the "volatile" ledgers, so this does not
5428 * make any difference.
5429 * If we transitioned directly from NONVOLATILE to EMPTY,
5430 * vm_page_purgeable_count must have been updated when the object
5431 * was dequeued from its volatile queue and the purgeable ledgers
5432 * must have also been updated accordingly at that time (in
5433 * vm_object_purgable_control()).
5434 */
5435 if (object->purgable == VM_PURGABLE_VOLATILE) {
5436 unsigned int delta;
5437 assert(object->resident_page_count >=
5438 object->wired_page_count);
5439 delta = (object->resident_page_count -
5440 object->wired_page_count);
5441 if (delta != 0) {
5442 assert(vm_page_purgeable_count >=
5443 delta);
5444 OSAddAtomic(-delta,
5445 (SInt32 *)&vm_page_purgeable_count);
5446 }
5447 if (object->wired_page_count != 0) {
5448 assert(vm_page_purgeable_wired_count >=
5449 object->wired_page_count);
5450 OSAddAtomic(-object->wired_page_count,
5451 (SInt32 *)&vm_page_purgeable_wired_count);
5452 }
5453 object->purgable = VM_PURGABLE_EMPTY;
5454 }
5455 assert(object->purgable == VM_PURGABLE_EMPTY);
5456
5457 object_page_count = object->resident_page_count;
5458
5459 vm_object_reap_pages(object, REAP_PURGEABLE);
5460
5461 if (object->resident_page_count >= object_page_count) {
5462 total_purged_pgcount = 0;
5463 } else {
5464 total_purged_pgcount = object_page_count - object->resident_page_count;
5465 }
5466
5467 if (object->pager != NULL) {
5468 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5469
5470 if (object->activity_in_progress == 0 &&
5471 object->paging_in_progress == 0) {
5472 /*
5473 * Also reap any memory coming from this object
5474 * in the VM compressor.
5475 *
5476 * There are no operations in progress on the VM object
5477 * and no operation can start while we're holding the
5478 * VM object lock, so it's safe to reap the compressed
5479 * pages and update the page counts.
5480 */
5481 pgcount = vm_compressor_pager_get_count(object->pager);
5482 if (pgcount) {
5483 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
5484 vm_compressor_pager_count(object->pager,
5485 -pgcount,
5486 FALSE, /* shared */
5487 object);
5488 vm_object_owner_compressed_update(object,
5489 -pgcount);
5490 }
5491 if (!(flags & C_DONT_BLOCK)) {
5492 assert(vm_compressor_pager_get_count(object->pager)
5493 == 0);
5494 }
5495 } else {
5496 /*
5497 * There's some kind of paging activity in progress
5498 * for this object, which could result in a page
5499 * being compressed or decompressed, possibly while
5500 * the VM object is not locked, so it could race
5501 * with us.
5502 *
5503 * We can't really synchronize this without possibly
5504 * causing a deadlock when the compressor needs to
5505 * allocate or free memory while compressing or
5506 * decompressing a page from a purgeable object
5507 * mapped in the kernel_map...
5508 *
5509 * So let's not attempt to purge the compressor
5510 * pager if there's any kind of operation in
5511 * progress on the VM object.
5512 */
5513 skipped_object = TRUE;
5514 }
5515 }
5516
5517 vm_object_lock_assert_exclusive(object);
5518
5519 total_purged_pgcount += pgcount;
5520
5521 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
5522 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
5523 object_page_count,
5524 total_purged_pgcount,
5525 skipped_object,
5526 0);
5527
5528 return total_purged_pgcount;
5529 }
5530
5531
5532 /*
5533 * vm_object_purgeable_control() allows the caller to control and investigate the
5534 * state of a purgeable object. A purgeable object is created via a call to
5535 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
5536 * never be coalesced with any other object -- even other purgeable objects --
5537 * and will thus always remain a distinct object. A purgeable object has
5538 * special semantics when its reference count is exactly 1. If its reference
5539 * count is greater than 1, then a purgeable object will behave like a normal
5540 * object and attempts to use this interface will result in an error return
5541 * of KERN_INVALID_ARGUMENT.
5542 *
5543 * A purgeable object may be put into a "volatile" state which will make the
5544 * object's pages eligible for being reclaimed without paging to backing
5545 * store if the system runs low on memory. If the pages in a volatile
5546 * purgeable object are reclaimed, the purgeable object is said to have been
5547 * "emptied." When a purgeable object is emptied the system will reclaim as
5548 * many pages from the object as it can in a convenient manner (pages already
5549 * en route to backing store or busy for other reasons are left as is). When
5550 * a purgeable object is made volatile, its pages will generally be reclaimed
5551 * before other pages in the application's working set. This semantic is
5552 * generally used by applications which can recreate the data in the object
5553 * faster than it can be paged in. One such example might be media assets
5554 * which can be reread from a much faster RAID volume.
5555 *
5556 * A purgeable object may be designated as "non-volatile" which means it will
5557 * behave like all other objects in the system with pages being written to and
5558 * read from backing store as needed to satisfy system memory needs. If the
5559 * object was emptied before the object was made non-volatile, that fact will
5560 * be returned as the old state of the purgeable object (see
5561 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
5562 * were reclaimed as part of emptying the object will be refaulted in as
5563 * zero-fill on demand. It is up to the application to note that an object
5564 * was emptied and recreate the object's contents if necessary. When a
5565 * purgeable object is made non-volatile, its pages will generally not be paged
5566 * out to backing store in the immediate future. A purgeable object may also
5567 * be manually emptied.
5568 *
5569 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
5570 * purgeable object may be queried at any time. This information may
5571 * be used as a control input to let the application know when the system is
5572 * experiencing memory pressure and is reclaiming memory.
5573 *
5574 * The specified address may be any address within the purgeable object. If
5575 * the specified address does not represent any object in the target task's
5576 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
5577 * object containing the specified address is not a purgeable object, then
5578 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
5579 * returned.
5580 *
5581 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
5582 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
5583 * state is used to set the new state of the purgeable object and return its
5584 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
5585 * object is returned in the parameter state.
5586 *
5587 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
5588 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
5589 * the non-volatile, volatile and volatile/empty states described above.
5590 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
5591 * immediately reclaim as many pages in the object as can be conveniently
5592 * collected (some may have already been written to backing store or be
5593 * otherwise busy).
5594 *
5595 * The process of making a purgeable object non-volatile and determining its
5596 * previous state is atomic. Thus, if a purgeable object is made
5597 * VM_PURGABLE_NONVOLATILE and the old state is returned as
5598 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
5599 * completely intact and will remain so until the object is made volatile
5600 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
5601 * was reclaimed while it was in a volatile state and its previous contents
5602 * have been lost.
5603 */
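/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * vm_object_purgable_control() below requires the object to be locked
 * exclusive, so a caller making its purgeable object non-volatile might do:
 *
 *      int state;
 *      kern_return_t kr;
 *
 *      vm_object_lock(object);
 *      state = VM_PURGABLE_NONVOLATILE;
 *      kr = vm_object_purgable_control(object, VM_PURGABLE_SET_STATE, &state);
 *      vm_object_unlock(object);
 *      if (kr == KERN_SUCCESS && state == VM_PURGABLE_EMPTY) {
 *              // old state was "empty": the contents were reclaimed while the
 *              // object was volatile and must be recreated by the owner
 *      }
 */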
5604 /*
5605 * The object must be locked.
5606 */
5607 kern_return_t
5608 vm_object_purgable_control(
5609 vm_object_t object,
5610 vm_purgable_t control,
5611 int *state)
5612 {
5613 int old_state;
5614 int new_state;
5615
5616 if (object == VM_OBJECT_NULL) {
5617 /*
5618 * Object must already be present or it can't be purgeable.
5619 */
5620 return KERN_INVALID_ARGUMENT;
5621 }
5622
5623 vm_object_lock_assert_exclusive(object);
5624
5625 /*
5626 * Get current state of the purgeable object.
5627 */
5628 old_state = object->purgable;
5629 if (old_state == VM_PURGABLE_DENY) {
5630 return KERN_INVALID_ARGUMENT;
5631 }
5632
5633 /* purgeable can't have delayed copies - now or in the future */
5634 assert(object->vo_copy == VM_OBJECT_NULL);
5635 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5636
5637 /*
5638 * Execute the desired operation.
5639 */
5640 if (control == VM_PURGABLE_GET_STATE) {
5641 *state = old_state;
5642 return KERN_SUCCESS;
5643 }
5644
5645 if (control == VM_PURGABLE_SET_STATE &&
5646 object->purgeable_only_by_kernel) {
5647 return KERN_PROTECTION_FAILURE;
5648 }
5649
5650 if (control != VM_PURGABLE_SET_STATE &&
5651 control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
5652 return KERN_INVALID_ARGUMENT;
5653 }
5654
5655 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
5656 object->volatile_empty = TRUE;
5657 }
5658 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
5659 object->volatile_fault = TRUE;
5660 }
5661
5662 new_state = *state & VM_PURGABLE_STATE_MASK;
5663 if (new_state == VM_PURGABLE_VOLATILE) {
5664 if (old_state == VM_PURGABLE_EMPTY) {
5665 /* what's been emptied must stay empty */
5666 new_state = VM_PURGABLE_EMPTY;
5667 }
5668 if (object->volatile_empty) {
5669 /* debugging mode: go straight to empty */
5670 new_state = VM_PURGABLE_EMPTY;
5671 }
5672 }
5673
5674 switch (new_state) {
5675 case VM_PURGABLE_DENY:
5676 /*
5677 * Attempting to convert purgeable memory to non-purgeable:
5678 * not allowed.
5679 */
5680 return KERN_INVALID_ARGUMENT;
5681 case VM_PURGABLE_NONVOLATILE:
5682 object->purgable = new_state;
5683
5684 if (old_state == VM_PURGABLE_VOLATILE) {
5685 unsigned int delta;
5686
5687 assert(object->resident_page_count >=
5688 object->wired_page_count);
5689 delta = (object->resident_page_count -
5690 object->wired_page_count);
5691
5692 assert(vm_page_purgeable_count >= delta);
5693
5694 if (delta != 0) {
5695 OSAddAtomic(-delta,
5696 (SInt32 *)&vm_page_purgeable_count);
5697 }
5698 if (object->wired_page_count != 0) {
5699 assert(vm_page_purgeable_wired_count >=
5700 object->wired_page_count);
5701 OSAddAtomic(-object->wired_page_count,
5702 (SInt32 *)&vm_page_purgeable_wired_count);
5703 }
5704
5705 vm_page_lock_queues();
5706
5707 /* object should be on a queue */
5708 assert(object->objq.next != NULL &&
5709 object->objq.prev != NULL);
5710 purgeable_q_t queue;
5711
5712 /*
5713 * Move object from its volatile queue to the
5714 * non-volatile queue...
5715 */
5716 queue = vm_purgeable_object_remove(object);
5717 assert(queue);
5718
5719 if (object->purgeable_when_ripe) {
5720 vm_purgeable_token_delete_last(queue);
5721 }
5722 assert(queue->debug_count_objects >= 0);
5723
5724 vm_page_unlock_queues();
5725 }
5726 if (old_state == VM_PURGABLE_VOLATILE ||
5727 old_state == VM_PURGABLE_EMPTY) {
5728 /*
5729 * Transfer the object's pages from the volatile to
5730 * non-volatile ledgers.
5731 */
5732 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
5733 }
5734
5735 break;
5736
5737 case VM_PURGABLE_VOLATILE:
5738 if (object->volatile_fault) {
5739 vm_page_t p;
5740 int refmod;
5741
5742 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5743 if (p->vmp_busy ||
5744 VM_PAGE_WIRED(p) ||
5745 p->vmp_fictitious) {
5746 continue;
5747 }
5748 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5749 if ((refmod & VM_MEM_MODIFIED) &&
5750 !p->vmp_dirty) {
5751 SET_PAGE_DIRTY(p, FALSE);
5752 }
5753 }
5754 }
5755
5756 assert(old_state != VM_PURGABLE_EMPTY);
5757
5758 purgeable_q_t queue;
5759
5760 /* find the correct queue */
5761 if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
5762 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
5763 } else {
5764 if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
5765 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
5766 } else {
5767 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
5768 }
5769 }
5770
5771 if (old_state == VM_PURGABLE_NONVOLATILE ||
5772 old_state == VM_PURGABLE_EMPTY) {
5773 unsigned int delta;
5774
5775 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5776 VM_PURGABLE_NO_AGING) {
5777 object->purgeable_when_ripe = FALSE;
5778 } else {
5779 object->purgeable_when_ripe = TRUE;
5780 }
5781
5782 if (object->purgeable_when_ripe) {
5783 kern_return_t result;
5784
5785 /* try to add token... this can fail */
5786 vm_page_lock_queues();
5787
5788 result = vm_purgeable_token_add(queue);
5789 if (result != KERN_SUCCESS) {
5790 vm_page_unlock_queues();
5791 return result;
5792 }
5793 vm_page_unlock_queues();
5794 }
5795
5796 assert(object->resident_page_count >=
5797 object->wired_page_count);
5798 delta = (object->resident_page_count -
5799 object->wired_page_count);
5800
5801 if (delta != 0) {
5802 OSAddAtomic(delta,
5803 &vm_page_purgeable_count);
5804 }
5805 if (object->wired_page_count != 0) {
5806 OSAddAtomic(object->wired_page_count,
5807 &vm_page_purgeable_wired_count);
5808 }
5809
5810 object->purgable = new_state;
5811
5812 /* object should be on "non-volatile" queue */
5813 assert(object->objq.next != NULL);
5814 assert(object->objq.prev != NULL);
5815 } else if (old_state == VM_PURGABLE_VOLATILE) {
5816 purgeable_q_t old_queue;
5817 boolean_t purgeable_when_ripe;
5818
5819 /*
5820 * If we're only reassigning priorities / purgeable groups, we don't change
5821 * the token queue, so moving priorities will not make pages stay around longer.
5822 * The reasoning is that the algorithm gives most priority to the most important
5823 * object: if a new token is added, the most important object's priority is boosted.
5824 * That already biases the system in favor of purgeable queues that move a lot,
5825 * so no additional biasing seems necessary here, where no new object is added.
5826 */
5827 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5828
5829 old_queue = vm_purgeable_object_remove(object);
5830 assert(old_queue);
5831
5832 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5833 VM_PURGABLE_NO_AGING) {
5834 purgeable_when_ripe = FALSE;
5835 } else {
5836 purgeable_when_ripe = TRUE;
5837 }
5838
5839 if (old_queue != queue ||
5840 (purgeable_when_ripe !=
5841 object->purgeable_when_ripe)) {
5842 kern_return_t result;
5843
5844 /* Changing queue. Have to move token. */
5845 vm_page_lock_queues();
5846 if (object->purgeable_when_ripe) {
5847 vm_purgeable_token_delete_last(old_queue);
5848 }
5849 object->purgeable_when_ripe = purgeable_when_ripe;
5850 if (object->purgeable_when_ripe) {
5851 result = vm_purgeable_token_add(queue);
5852 assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */
5853 }
5854 vm_page_unlock_queues();
5855 }
5856 }
5858 vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT );
5859 if (old_state == VM_PURGABLE_NONVOLATILE) {
5860 vm_purgeable_accounting(object,
5861 VM_PURGABLE_NONVOLATILE);
5862 }
5863
5864 assert(queue->debug_count_objects >= 0);
5865
5866 break;
5867
5868
5869 case VM_PURGABLE_EMPTY:
5870 if (object->volatile_fault) {
5871 vm_page_t p;
5872 int refmod;
5873
5874 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5875 if (p->vmp_busy ||
5876 VM_PAGE_WIRED(p) ||
5877 p->vmp_fictitious) {
5878 continue;
5879 }
5880 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5881 if ((refmod & VM_MEM_MODIFIED) &&
5882 !p->vmp_dirty) {
5883 SET_PAGE_DIRTY(p, FALSE);
5884 }
5885 }
5886 }
5887
5888 if (old_state == VM_PURGABLE_VOLATILE) {
5889 purgeable_q_t old_queue;
5890
5891 /* object should be on a queue */
5892 assert(object->objq.next != NULL &&
5893 object->objq.prev != NULL);
5894
5895 old_queue = vm_purgeable_object_remove(object);
5896 assert(old_queue);
5897 if (object->purgeable_when_ripe) {
5898 vm_page_lock_queues();
5899 vm_purgeable_token_delete_first(old_queue);
5900 vm_page_unlock_queues();
5901 }
5902 }
5903
5904 if (old_state == VM_PURGABLE_NONVOLATILE) {
5905 /*
5906 * This object's pages were previously accounted as
5907 * "non-volatile" and now need to be accounted as
5908 * "volatile".
5909 */
5910 vm_purgeable_accounting(object,
5911 VM_PURGABLE_NONVOLATILE);
5912 /*
5913 * Set to VM_PURGABLE_EMPTY because the pages are no
5914 * longer accounted in the "non-volatile" ledger
5915 * and are also not accounted for in
5916 * "vm_page_purgeable_count".
5917 */
5918 object->purgable = VM_PURGABLE_EMPTY;
5919 }
5920
5921 (void) vm_object_purge(object, 0);
5922 assert(object->purgable == VM_PURGABLE_EMPTY);
5923
5924 break;
5925 }
5926
5927 *state = old_state;
5928
5929 vm_object_lock_assert_exclusive(object);
5930
5931 return KERN_SUCCESS;
5932 }
5933
5934 kern_return_t
5935 vm_object_get_page_counts(
5936 vm_object_t object,
5937 vm_object_offset_t offset,
5938 vm_object_size_t size,
5939 unsigned int *resident_page_count,
5940 unsigned int *dirty_page_count)
5941 {
5942 kern_return_t kr = KERN_SUCCESS;
5943 boolean_t count_dirty_pages = FALSE;
5944 vm_page_t p = VM_PAGE_NULL;
5945 unsigned int local_resident_count = 0;
5946 unsigned int local_dirty_count = 0;
5947 vm_object_offset_t cur_offset = 0;
5948 vm_object_offset_t end_offset = 0;
5949
5950 if (object == VM_OBJECT_NULL) {
5951 return KERN_INVALID_ARGUMENT;
5952 }
5953
5954
5955 cur_offset = offset;
5956
5957 end_offset = offset + size;
5958
5959 vm_object_lock_assert_exclusive(object);
5960
5961 if (dirty_page_count != NULL) {
5962 count_dirty_pages = TRUE;
5963 }
5964
5965 if (resident_page_count != NULL && count_dirty_pages == FALSE) {
5966 /*
5967 * Fast path when:
5968 * - we only want the resident page count, and,
5969 * - the entire object is exactly covered by the request.
5970 */
5971 if (offset == 0 && (object->vo_size == size)) {
5972 *resident_page_count = object->resident_page_count;
5973 goto out;
5974 }
5975 }
5976
5977 if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
5978 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5979 if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
5980 local_resident_count++;
5981
5982 if (count_dirty_pages) {
5983 if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
5984 local_dirty_count++;
5985 }
5986 }
5987 }
5988 }
5989 } else {
5990 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
5991 p = vm_page_lookup(object, cur_offset);
5992
5993 if (p != VM_PAGE_NULL) {
5994 local_resident_count++;
5995
5996 if (count_dirty_pages) {
5997 if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
5998 local_dirty_count++;
5999 }
6000 }
6001 }
6002 }
6003 }
6004
6005 if (resident_page_count != NULL) {
6006 *resident_page_count = local_resident_count;
6007 }
6008
6009 if (dirty_page_count != NULL) {
6010 *dirty_page_count = local_dirty_count;
6011 }
6012
6013 out:
6014 return kr;
6015 }
6016
6017
6018 /*
6019 * vm_object_reference:
6020 *
6021 * Gets another reference to the given object.
6022 */
6023 #ifdef vm_object_reference
6024 #undef vm_object_reference
6025 #endif
6026 __private_extern__ void
6027 vm_object_reference(
6028 vm_object_t object)
6029 {
6030 if (object == VM_OBJECT_NULL) {
6031 return;
6032 }
6033
6034 vm_object_lock(object);
6035 assert(object->ref_count > 0);
6036 vm_object_reference_locked(object);
6037 vm_object_unlock(object);
6038 }
6039
6040 /*
6041 * vm_object_transpose
6042 *
6043 * This routine takes two VM objects of the same size and exchanges
6044 * their backing store.
6045 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6046 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6047 *
6048 * The VM objects must not be locked by caller.
6049 */
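/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * assuming both objects were first quiesced through a UPL created with
 * UPL_SET_IO_WIRE | UPL_BLOCK_ACCESS as described above, a caller might do:
 *
 *      kern_return_t kr;
 *
 *      kr = vm_object_transpose(object1, object2, size);
 *      if (kr != KERN_SUCCESS) {
 *              // sizes didn't match "size", or one object had a copy/shadow,
 *              // was purgeable, or was being terminated
 *      }
 *
 * Both objects are passed unlocked; the routine takes and drops its own locks.
 */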
6050 unsigned int vm_object_transpose_count = 0;
6051 kern_return_t
6052 vm_object_transpose(
6053 vm_object_t object1,
6054 vm_object_t object2,
6055 vm_object_size_t transpose_size)
6056 {
6057 vm_object_t tmp_object;
6058 kern_return_t retval;
6059 boolean_t object1_locked, object2_locked;
6060 vm_page_t page;
6061 vm_object_offset_t page_offset;
6062
6063 tmp_object = VM_OBJECT_NULL;
6064 object1_locked = FALSE; object2_locked = FALSE;
6065
6066 if (object1 == object2 ||
6067 object1 == VM_OBJECT_NULL ||
6068 object2 == VM_OBJECT_NULL) {
6069 /*
6070 * If the 2 VM objects are the same, or either is
6071 * VM_OBJECT_NULL, there's nothing to exchange.
6072 */
6073 retval = KERN_INVALID_VALUE;
6074 goto done;
6075 }
6076
6077 /*
6078 * Since we need to lock both objects at the same time,
6079 * make sure we always lock them in the same order to
6080 * avoid deadlocks.
6081 */
6082 if (object1 > object2) {
6083 tmp_object = object1;
6084 object1 = object2;
6085 object2 = tmp_object;
6086 }
6087
6088 /*
6089 * Allocate a temporary VM object to hold object1's contents
6090 * while we copy object2 to object1.
6091 */
6092 tmp_object = vm_object_allocate(transpose_size);
6093 vm_object_lock(tmp_object);
6094 tmp_object->can_persist = FALSE;
6095
6096
6097 /*
6098 * Grab control of the 1st VM object.
6099 */
6100 vm_object_lock(object1);
6101 object1_locked = TRUE;
6102 if (!object1->alive || object1->terminating ||
6103 object1->vo_copy || object1->shadow || object1->shadowed ||
6104 object1->purgable != VM_PURGABLE_DENY) {
6105 /*
6106 * We don't deal with copy or shadow objects (yet).
6107 */
6108 retval = KERN_INVALID_VALUE;
6109 goto done;
6110 }
6111 /*
6112 * We're about to mess with the object's backing store and
6113 * taking a "paging_in_progress" reference wouldn't be enough
6114 * to prevent any paging activity on this object, so the caller should
6115 * have "quiesced" the objects beforehand, via a UPL operation with
6116 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6117 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6118 *
6119 * Wait for any paging operation to complete (but only paging, not
6120 * other kind of activities not linked to the pager). After we're
6121 * satisfied that there's no more paging in progress, we keep the
6122 * object locked, to guarantee that no one tries to access its pager.
6123 */
6124 vm_object_paging_only_wait(object1, THREAD_UNINT);
6125
6126 /*
6127 * Same as above for the 2nd object...
6128 */
6129 vm_object_lock(object2);
6130 object2_locked = TRUE;
6131 if (!object2->alive || object2->terminating ||
6132 object2->vo_copy || object2->shadow || object2->shadowed ||
6133 object2->purgable != VM_PURGABLE_DENY) {
6134 retval = KERN_INVALID_VALUE;
6135 goto done;
6136 }
6137 vm_object_paging_only_wait(object2, THREAD_UNINT);
6138
6139
6140 if (object1->vo_size != object2->vo_size ||
6141 object1->vo_size != transpose_size) {
6142 /*
6143 * If the 2 objects don't have the same size, we can't
6144 * exchange their backing stores or one would overflow.
6145 * If their size doesn't match the caller's
6146 * "transpose_size", we can't do it either because the
6147 * transpose operation will affect the entire span of
6148 * the objects.
6149 */
6150 retval = KERN_INVALID_VALUE;
6151 goto done;
6152 }
6153
6154
6155 /*
6156 * Transpose the lists of resident pages.
6157 * This also updates the resident_page_count and the memq_hint.
6158 */
6159 if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6160 /*
6161 * No pages in object1, just transfer pages
6162 * from object2 to object1. No need to go through
6163 * an intermediate object.
6164 */
6165 while (!vm_page_queue_empty(&object2->memq)) {
6166 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6167 vm_page_rename(page, object1, page->vmp_offset);
6168 }
6169 assert(vm_page_queue_empty(&object2->memq));
6170 } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6171 /*
6172 * No pages in object2, just transfer pages
6173 * from object1 to object2. No need to go through
6174 * an intermediate object.
6175 */
6176 while (!vm_page_queue_empty(&object1->memq)) {
6177 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6178 vm_page_rename(page, object2, page->vmp_offset);
6179 }
6180 assert(vm_page_queue_empty(&object1->memq));
6181 } else {
6182 /* transfer object1's pages to tmp_object */
6183 while (!vm_page_queue_empty(&object1->memq)) {
6184 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6185 page_offset = page->vmp_offset;
6186 vm_page_remove(page, TRUE);
6187 page->vmp_offset = page_offset;
6188 vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6189 }
6190 assert(vm_page_queue_empty(&object1->memq));
6191 /* transfer object2's pages to object1 */
6192 while (!vm_page_queue_empty(&object2->memq)) {
6193 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6194 vm_page_rename(page, object1, page->vmp_offset);
6195 }
6196 assert(vm_page_queue_empty(&object2->memq));
6197 /* transfer tmp_object's pages to object2 */
6198 while (!vm_page_queue_empty(&tmp_object->memq)) {
6199 page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6200 vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6201 vm_page_insert(page, object2, page->vmp_offset);
6202 }
6203 assert(vm_page_queue_empty(&tmp_object->memq));
6204 }
6205
6206 #define __TRANSPOSE_FIELD(field) \
6207 MACRO_BEGIN \
6208 tmp_object->field = object1->field; \
6209 object1->field = object2->field; \
6210 object2->field = tmp_object->field; \
6211 MACRO_END
6212
6213 /* "Lock" refers to the object not its contents */
6214 /* "size" should be identical */
6215 assert(object1->vo_size == object2->vo_size);
6216 /* "memq_hint" was updated above when transposing pages */
6217 /* "ref_count" refers to the object not its contents */
6218 assert(object1->ref_count >= 1);
6219 assert(object2->ref_count >= 1);
6220 /* "resident_page_count" was updated above when transposing pages */
6221 /* "wired_page_count" was updated above when transposing pages */
6222 #if !VM_TAG_ACTIVE_UPDATE
6223 /* "wired_objq" was dealt with along with "wired_page_count" */
6224 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6225 /* "reusable_page_count" was updated above when transposing pages */
6226 /* there should be no "copy" */
6227 assert(!object1->vo_copy);
6228 assert(!object2->vo_copy);
6229 /* there should be no "shadow" */
6230 assert(!object1->shadow);
6231 assert(!object2->shadow);
6232 __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6233 __TRANSPOSE_FIELD(pager);
6234 __TRANSPOSE_FIELD(paging_offset);
6235 __TRANSPOSE_FIELD(pager_control);
6236 /* update the memory_objects' pointers back to the VM objects */
6237 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6238 memory_object_control_collapse(&object1->pager_control,
6239 object1);
6240 }
6241 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6242 memory_object_control_collapse(&object2->pager_control,
6243 object2);
6244 }
6245 __TRANSPOSE_FIELD(copy_strategy);
6246 /* "paging_in_progress" refers to the object not its contents */
6247 assert(!object1->paging_in_progress);
6248 assert(!object2->paging_in_progress);
6249 assert(object1->activity_in_progress);
6250 assert(object2->activity_in_progress);
6251 /* "all_wanted" refers to the object not its contents */
6252 __TRANSPOSE_FIELD(pager_created);
6253 __TRANSPOSE_FIELD(pager_initialized);
6254 __TRANSPOSE_FIELD(pager_ready);
6255 __TRANSPOSE_FIELD(pager_trusted);
6256 __TRANSPOSE_FIELD(can_persist);
6257 __TRANSPOSE_FIELD(internal);
6258 __TRANSPOSE_FIELD(private);
6259 __TRANSPOSE_FIELD(pageout);
6260 /* "alive" should be set */
6261 assert(object1->alive);
6262 assert(object2->alive);
6263 /* "purgable" should be VM_PURGABLE_DENY (non-purgeable) */
6264 assert(object1->purgable == VM_PURGABLE_DENY);
6265 assert(object2->purgable == VM_PURGABLE_DENY);
6266 /* "shadowed" refers to the object not its contents */
6267 __TRANSPOSE_FIELD(purgeable_when_ripe);
6268 __TRANSPOSE_FIELD(true_share);
6269 /* "terminating" should not be set */
6270 assert(!object1->terminating);
6271 assert(!object2->terminating);
6272 /* transfer "named" reference if needed */
6273 if (object1->named && !object2->named) {
6274 assert(object1->ref_count >= 2);
6275 assert(object2->ref_count >= 1);
6276 object1->ref_count--;
6277 object2->ref_count++;
6278 } else if (!object1->named && object2->named) {
6279 assert(object1->ref_count >= 1);
6280 assert(object2->ref_count >= 2);
6281 object1->ref_count++;
6282 object2->ref_count--;
6283 }
6284 __TRANSPOSE_FIELD(named);
6285 /* "shadow_severed" refers to the object not its contents */
6286 __TRANSPOSE_FIELD(phys_contiguous);
6287 __TRANSPOSE_FIELD(nophyscache);
6288 __TRANSPOSE_FIELD(no_pager_reason);
6289 /* "cached_list.next" points to transposed object */
6290 object1->cached_list.next = (queue_entry_t) object2;
6291 object2->cached_list.next = (queue_entry_t) object1;
6292 /* "cached_list.prev" should be NULL */
6293 assert(object1->cached_list.prev == NULL);
6294 assert(object2->cached_list.prev == NULL);
6295 __TRANSPOSE_FIELD(last_alloc);
6296 __TRANSPOSE_FIELD(sequential);
6297 __TRANSPOSE_FIELD(pages_created);
6298 __TRANSPOSE_FIELD(pages_used);
6299 __TRANSPOSE_FIELD(scan_collisions);
6300 __TRANSPOSE_FIELD(cow_hint);
6301 __TRANSPOSE_FIELD(wimg_bits);
6302 __TRANSPOSE_FIELD(set_cache_attr);
6303 __TRANSPOSE_FIELD(code_signed);
6304 object1->transposed = TRUE;
6305 object2->transposed = TRUE;
6306 __TRANSPOSE_FIELD(mapping_in_progress);
6307 __TRANSPOSE_FIELD(volatile_empty);
6308 __TRANSPOSE_FIELD(volatile_fault);
6309 __TRANSPOSE_FIELD(all_reusable);
6310 assert(object1->blocked_access);
6311 assert(object2->blocked_access);
6312 __TRANSPOSE_FIELD(set_cache_attr);
6313 assert(!object1->object_is_shared_cache);
6314 assert(!object2->object_is_shared_cache);
6315 /* ignore purgeable_queue_type and purgeable_queue_group */
6316 assert(!object1->io_tracking);
6317 assert(!object2->io_tracking);
6318 #if VM_OBJECT_ACCESS_TRACKING
6319 assert(!object1->access_tracking);
6320 assert(!object2->access_tracking);
6321 #endif /* VM_OBJECT_ACCESS_TRACKING */
6322 __TRANSPOSE_FIELD(no_tag_update);
6323 #if CONFIG_SECLUDED_MEMORY
6324 assert(!object1->eligible_for_secluded);
6325 assert(!object2->eligible_for_secluded);
6326 assert(!object1->can_grab_secluded);
6327 assert(!object2->can_grab_secluded);
6328 #else /* CONFIG_SECLUDED_MEMORY */
6329 assert(object1->__object3_unused_bits == 0);
6330 assert(object2->__object3_unused_bits == 0);
6331 #endif /* CONFIG_SECLUDED_MEMORY */
6332 #if UPL_DEBUG
6333 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6334 #endif
6335 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6336 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6337 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6338 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6339
6340 #undef __TRANSPOSE_FIELD
6341
6342 retval = KERN_SUCCESS;
6343
6344 done:
6345 /*
6346 * Cleanup.
6347 */
6348 if (tmp_object != VM_OBJECT_NULL) {
6349 vm_object_unlock(tmp_object);
6350 /*
6351 * Re-initialize the temporary object to avoid
6352 * deallocating a real pager.
6353 */
6354 _vm_object_allocate(transpose_size, tmp_object);
6355 vm_object_deallocate(tmp_object);
6356 tmp_object = VM_OBJECT_NULL;
6357 }
6358
6359 if (object1_locked) {
6360 vm_object_unlock(object1);
6361 object1_locked = FALSE;
6362 }
6363 if (object2_locked) {
6364 vm_object_unlock(object2);
6365 object2_locked = FALSE;
6366 }
6367
6368 vm_object_transpose_count++;
6369
6370 return retval;
6371 }
6372
6373
6374 /*
6375 * vm_object_cluster_size
6376 *
6377 * Determine how big a cluster we should issue an I/O for...
6378 *
6379 * Inputs: *start == offset of page needed
6380 * *length == maximum cluster pager can handle
6381 * Outputs: *start == beginning offset of cluster
6382 * *length == length of cluster to try
6383 *
6384 * The original *start will be encompassed by the cluster
6385 *
6386 */
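/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * a fault path would seed *start with the page-aligned faulting offset and
 * *length with the largest transfer its pager can handle, then ask the pager
 * for the adjusted range:
 *
 *      vm_object_offset_t start = fault_offset & ~PAGE_MASK_64;
 *      vm_size_t length = MAX_UPL_TRANSFER_BYTES;
 *      uint32_t io_streaming = 0;
 *
 *      vm_object_cluster_size(object, &start, &length, fault_info, &io_streaming);
 *      // [start, start + length) now covers the original faulting page and is
 *      // the cluster to request from the pager
 */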
6387 extern int speculative_reads_disabled;
6388
6389 /*
6390 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
6391 * to derive min_ph_size and max_ph_size (IMP: those are byte counts, not page counts) and expect those values to
6392 * always be page-aligned. The derivation could involve operations (e.g. division)
6393 * that could give us non-page-size aligned values if we start out with values that
6394 * are odd multiples of PAGE_SIZE.
6395 */
6396 #if !XNU_TARGET_OS_OSX
6397 unsigned int preheat_max_bytes = (1024 * 512);
6398 #else /* !XNU_TARGET_OS_OSX */
6399 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6400 #endif /* !XNU_TARGET_OS_OSX */
6401 unsigned int preheat_min_bytes = (1024 * 32);
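/*
 * Worked example (hypothetical numbers): with preheat_max_bytes = 512KB and
 * 4KB pages, the SSD scaling below (max_ph_size / 8) yields 64KB, which is
 * still page-aligned.  A starting value that is an odd multiple of PAGE_SIZE,
 * e.g. 20KB, would give 20KB / 8 = 2.5KB, which is why the derived sizes are
 * trunc_page()'d and then clamped to at least PAGE_SIZE.
 */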
6402
6403
6404 __private_extern__ void
6405 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6406 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6407 {
6408 vm_size_t pre_heat_size;
6409 vm_size_t tail_size;
6410 vm_size_t head_size;
6411 vm_size_t max_length;
6412 vm_size_t cluster_size;
6413 vm_object_offset_t object_size;
6414 vm_object_offset_t orig_start;
6415 vm_object_offset_t target_start;
6416 vm_object_offset_t offset;
6417 vm_behavior_t behavior;
6418 boolean_t look_behind = TRUE;
6419 boolean_t look_ahead = TRUE;
6420 boolean_t isSSD = FALSE;
6421 uint32_t throttle_limit;
6422 int sequential_run;
6423 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6424 vm_size_t max_ph_size;
6425 vm_size_t min_ph_size;
6426
6427 assert( !(*length & PAGE_MASK));
6428 assert( !(*start & PAGE_MASK_64));
6429
6430 /*
6431 * remember maximum length of run requested
6432 */
6433 max_length = *length;
6434 /*
6435 * we'll always return a cluster size of at least
6436 * 1 page, since the original fault must always
6437 * be processed
6438 */
6439 *length = PAGE_SIZE;
6440 *io_streaming = 0;
6441
6442 if (speculative_reads_disabled || fault_info == NULL) {
6443 /*
6444 * no cluster... just fault the page in
6445 */
6446 return;
6447 }
6448 orig_start = *start;
6449 target_start = orig_start;
6450 cluster_size = round_page(fault_info->cluster_size);
6451 behavior = fault_info->behavior;
6452
6453 vm_object_lock(object);
6454
6455 if (object->pager == MEMORY_OBJECT_NULL) {
6456 goto out; /* pager is gone for this object, nothing more to do */
6457 }
6458 vnode_pager_get_isSSD(object->pager, &isSSD);
6459
6460 min_ph_size = round_page(preheat_min_bytes);
6461 max_ph_size = round_page(preheat_max_bytes);
6462
6463 #if XNU_TARGET_OS_OSX
6464 if (isSSD) {
6465 min_ph_size /= 2;
6466 max_ph_size /= 8;
6467
6468 if (min_ph_size & PAGE_MASK_64) {
6469 min_ph_size = trunc_page(min_ph_size);
6470 }
6471
6472 if (max_ph_size & PAGE_MASK_64) {
6473 max_ph_size = trunc_page(max_ph_size);
6474 }
6475 }
6476 #endif /* XNU_TARGET_OS_OSX */
6477
6478 if (min_ph_size < PAGE_SIZE) {
6479 min_ph_size = PAGE_SIZE;
6480 }
6481
6482 if (max_ph_size < PAGE_SIZE) {
6483 max_ph_size = PAGE_SIZE;
6484 } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
6485 max_ph_size = MAX_UPL_TRANSFER_BYTES;
6486 }
6487
6488 if (max_length > max_ph_size) {
6489 max_length = max_ph_size;
6490 }
6491
6492 if (max_length <= PAGE_SIZE) {
6493 goto out;
6494 }
6495
6496 if (object->internal) {
6497 object_size = object->vo_size;
6498 } else {
6499 vnode_pager_get_object_size(object->pager, &object_size);
6500 }
6501
6502 object_size = round_page_64(object_size);
6503
6504 if (orig_start >= object_size) {
6505 /*
6506 * fault occurred beyond the EOF...
6507 * we need to punt w/o changing the
6508 * starting offset
6509 */
6510 goto out;
6511 }
6512 if (object->pages_used > object->pages_created) {
6513 /*
6514 * must have wrapped our 32 bit counters
6515 * so reset
6516 */
6517 object->pages_used = object->pages_created = 0;
6518 }
6519 if ((sequential_run = object->sequential)) {
6520 if (sequential_run < 0) {
6521 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
6522 sequential_run = 0 - sequential_run;
6523 } else {
6524 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6525 }
6526 }
6527 switch (behavior) {
6528 default:
6529 behavior = VM_BEHAVIOR_DEFAULT;
6530 OS_FALLTHROUGH;
6531
6532 case VM_BEHAVIOR_DEFAULT:
6533 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
6534 goto out;
6535 }
6536
6537 if (sequential_run >= (3 * PAGE_SIZE)) {
6538 pre_heat_size = sequential_run + PAGE_SIZE;
6539
6540 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
6541 look_behind = FALSE;
6542 } else {
6543 look_ahead = FALSE;
6544 }
6545
6546 *io_streaming = 1;
6547 } else {
6548 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
6549 /*
6550 * prime the pump
6551 */
6552 pre_heat_size = min_ph_size;
6553 } else {
6554 /*
6555 * Linear growth in PH size: The maximum size is max_length...
6556 * this calculation will result in a size that is neither a
6557 * power of 2 nor a multiple of PAGE_SIZE... so round
6558 * it up to the nearest PAGE_SIZE boundary
6559 */
6560 pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
6561
6562 if (pre_heat_size < min_ph_size) {
6563 pre_heat_size = min_ph_size;
6564 } else {
6565 pre_heat_size = round_page(pre_heat_size);
6566 }
6567 }
6568 }
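/*
 * Worked example (hypothetical numbers): with max_length = 512KB,
 * pages_used = 25 and pages_created = 100, the linear-growth formula
 * above gives 512KB * 25 / 100 = 128KB, already page-aligned; a result
 * that fell between page boundaries would be rounded up by round_page(),
 * and anything below min_ph_size is bumped up to min_ph_size.
 */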
6569 break;
6570
6571 case VM_BEHAVIOR_RANDOM:
6572 if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
6573 goto out;
6574 }
6575 break;
6576
6577 case VM_BEHAVIOR_SEQUENTIAL:
6578 if ((pre_heat_size = cluster_size) == 0) {
6579 pre_heat_size = sequential_run + PAGE_SIZE;
6580 }
6581 look_behind = FALSE;
6582 *io_streaming = 1;
6583
6584 break;
6585
6586 case VM_BEHAVIOR_RSEQNTL:
6587 if ((pre_heat_size = cluster_size) == 0) {
6588 pre_heat_size = sequential_run + PAGE_SIZE;
6589 }
6590 look_ahead = FALSE;
6591 *io_streaming = 1;
6592
6593 break;
6594 }
6595 throttle_limit = (uint32_t) max_length;
6596 assert(throttle_limit == max_length);
6597
6598 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
6599 if (max_length > throttle_limit) {
6600 max_length = throttle_limit;
6601 }
6602 }
6603 if (pre_heat_size > max_length) {
6604 pre_heat_size = max_length;
6605 }
6606
6607 if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
6608 unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
6609
6610 if (consider_free < vm_page_throttle_limit) {
6611 pre_heat_size = trunc_page(pre_heat_size / 16);
6612 } else if (consider_free < vm_page_free_target) {
6613 pre_heat_size = trunc_page(pre_heat_size / 4);
6614 }
6615
6616 if (pre_heat_size < min_ph_size) {
6617 pre_heat_size = min_ph_size;
6618 }
6619 }
6620 if (look_ahead == TRUE) {
6621 if (look_behind == TRUE) {
6622 /*
6623 * if we get here it's due to a random access...
6624 * so we want to center the original fault address
6625 * within the cluster we will issue... make sure
6626 * to calculate 'head_size' as a multiple of PAGE_SIZE...
6627 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
6628 * necessarily an even number of pages so we need to truncate
6629 * the result to a PAGE_SIZE boundary
6630 */
6631 head_size = trunc_page(pre_heat_size / 2);
6632
6633 if (target_start > head_size) {
6634 target_start -= head_size;
6635 } else {
6636 target_start = 0;
6637 }
6638
6639 /*
6640 * 'target_start' at this point represents the beginning offset
6641 * of the cluster we are considering... 'orig_start' will be in
6642 * the center of this cluster if we didn't have to clip the start
6643 * due to running into the start of the file
6644 */
6645 }
6646 if ((target_start + pre_heat_size) > object_size) {
6647 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
6648 }
6649 /*
6650 * at this point calculate the number of pages beyond the original fault
6651 * address that we want to consider... this is guaranteed not to extend beyond
6652 * the current EOF...
6653 */
6654 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
6655 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
6656 } else {
6657 if (pre_heat_size > target_start) {
6658 /*
6659 * since pre_heat_size is always smaller than 2^32,
6660 * if it is larger than target_start (a 64 bit value)
6661 * it is safe to clip target_start to 32 bits
6662 */
6663 pre_heat_size = (vm_size_t) target_start;
6664 }
6665 tail_size = 0;
6666 }
6667 assert( !(target_start & PAGE_MASK_64));
6668 assert( !(pre_heat_size & PAGE_MASK_64));
6669
6670 if (pre_heat_size <= PAGE_SIZE) {
6671 goto out;
6672 }
6673
6674 if (look_behind == TRUE) {
6675 /*
6676 * take a look at the pages before the original
6677 * faulting offset... recalculate this in case
6678 * we had to clip 'pre_heat_size' above to keep
6679 * from running past the EOF.
6680 */
6681 head_size = pre_heat_size - tail_size - PAGE_SIZE;
6682
6683 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
6684 /*
6685 * don't poke below the lowest offset
6686 */
6687 if (offset < fault_info->lo_offset) {
6688 break;
6689 }
6690 /*
6691 * for external objects or internal objects w/o a pager,
6692 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6693 */
6694 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6695 break;
6696 }
6697 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6698 /*
6699 * don't bridge resident pages
6700 */
6701 break;
6702 }
6703 *start = offset;
6704 *length += PAGE_SIZE;
6705 }
6706 }
6707 if (look_ahead == TRUE) {
6708 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
6709 /*
6710 * don't poke above the highest offset
6711 */
6712 if (offset >= fault_info->hi_offset) {
6713 break;
6714 }
6715 assert(offset < object_size);
6716
6717 /*
6718 * for external objects or internal objects w/o a pager,
6719 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6720 */
6721 if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6722 break;
6723 }
6724 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6725 /*
6726 * don't bridge resident pages
6727 */
6728 break;
6729 }
6730 *length += PAGE_SIZE;
6731 }
6732 }
6733 out:
6734 if (*length > max_length) {
6735 *length = max_length;
6736 }
6737
6738 vm_object_unlock(object);
6739
6740 DTRACE_VM1(clustersize, vm_size_t, *length);
6741 }
6742
6743
6744 /*
6745 * Allow manipulation of individual page state. This is actually part of
6746 * the UPL regimen but takes place on the VM object rather than on a UPL.
6747 */
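/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * to obtain the physical page number of a resident page and keep it valid,
 * a caller would set the busy bit while it uses the number, then clear it:
 *
 *      kern_return_t kr;
 *      ppnum_t phys;
 *      int pflags;
 *
 *      kr = vm_object_page_op(object, offset,
 *          UPL_POP_SET | UPL_POP_BUSY, &phys, &pflags);
 *      // ... use "phys" while the page remains busy ...
 *      kr = vm_object_page_op(object, offset,
 *          UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 */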
6748
6749 kern_return_t
6750 vm_object_page_op(
6751 vm_object_t object,
6752 vm_object_offset_t offset,
6753 int ops,
6754 ppnum_t *phys_entry,
6755 int *flags)
6756 {
6757 vm_page_t dst_page;
6758
6759 vm_object_lock(object);
6760
6761 if (ops & UPL_POP_PHYSICAL) {
6762 if (object->phys_contiguous) {
6763 if (phys_entry) {
6764 *phys_entry = (ppnum_t)
6765 (object->vo_shadow_offset >> PAGE_SHIFT);
6766 }
6767 vm_object_unlock(object);
6768 return KERN_SUCCESS;
6769 } else {
6770 vm_object_unlock(object);
6771 return KERN_INVALID_OBJECT;
6772 }
6773 }
6774 if (object->phys_contiguous) {
6775 vm_object_unlock(object);
6776 return KERN_INVALID_OBJECT;
6777 }
6778
6779 while (TRUE) {
6780 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
6781 vm_object_unlock(object);
6782 return KERN_FAILURE;
6783 }
6784
6785 /* Sync up on getting the busy bit */
6786 if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
6787 (((ops & UPL_POP_SET) &&
6788 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
6789 /* someone else is playing with the page, we will */
6790 /* have to wait */
6791 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6792 continue;
6793 }
6794
6795 if (ops & UPL_POP_DUMP) {
6796 if (dst_page->vmp_pmapped == TRUE) {
6797 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6798 }
6799
6800 VM_PAGE_FREE(dst_page);
6801 break;
6802 }
6803
6804 if (flags) {
6805 *flags = 0;
6806
6807 /* Get the condition of flags before requested ops */
6808 /* are undertaken */
6809
6810 if (dst_page->vmp_dirty) {
6811 *flags |= UPL_POP_DIRTY;
6812 }
6813 if (dst_page->vmp_free_when_done) {
6814 *flags |= UPL_POP_PAGEOUT;
6815 }
6816 if (dst_page->vmp_precious) {
6817 *flags |= UPL_POP_PRECIOUS;
6818 }
6819 if (dst_page->vmp_absent) {
6820 *flags |= UPL_POP_ABSENT;
6821 }
6822 if (dst_page->vmp_busy) {
6823 *flags |= UPL_POP_BUSY;
6824 }
6825 }
6826
6827 /* The caller should have made a call, either as part of this */
6828 /* request or prior to it, to set UPL_POP_BUSY */
6829 if (ops & UPL_POP_SET) {
6830 /* The protection granted with this assert will */
6831 /* not be complete. If the caller violates the */
6832 /* convention and attempts to change page state */
6833 /* without first setting busy we may not see it */
6834 /* because the page may already be busy. However */
6835 /* if such violations occur we will assert sooner */
6836 /* or later. */
6837 assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
6838 if (ops & UPL_POP_DIRTY) {
6839 SET_PAGE_DIRTY(dst_page, FALSE);
6840 }
6841 if (ops & UPL_POP_PAGEOUT) {
6842 dst_page->vmp_free_when_done = TRUE;
6843 }
6844 if (ops & UPL_POP_PRECIOUS) {
6845 dst_page->vmp_precious = TRUE;
6846 }
6847 if (ops & UPL_POP_ABSENT) {
6848 dst_page->vmp_absent = TRUE;
6849 }
6850 if (ops & UPL_POP_BUSY) {
6851 dst_page->vmp_busy = TRUE;
6852 }
6853 }
6854
6855 if (ops & UPL_POP_CLR) {
6856 assert(dst_page->vmp_busy);
6857 if (ops & UPL_POP_DIRTY) {
6858 dst_page->vmp_dirty = FALSE;
6859 }
6860 if (ops & UPL_POP_PAGEOUT) {
6861 dst_page->vmp_free_when_done = FALSE;
6862 }
6863 if (ops & UPL_POP_PRECIOUS) {
6864 dst_page->vmp_precious = FALSE;
6865 }
6866 if (ops & UPL_POP_ABSENT) {
6867 dst_page->vmp_absent = FALSE;
6868 }
6869 if (ops & UPL_POP_BUSY) {
6870 dst_page->vmp_busy = FALSE;
6871 PAGE_WAKEUP(dst_page);
6872 }
6873 }
6874 if (phys_entry) {
6875 /*
6876 * The physical page number will remain valid
6877 * only if the page is kept busy.
6878 */
6879 assert(dst_page->vmp_busy);
6880 *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
6881 }
6882
6883 break;
6884 }
6885
6886 vm_object_unlock(object);
6887 return KERN_SUCCESS;
6888 }
6889
6890 /*
6891 * vm_object_range_op offers performance enhancement over
6892 * vm_object_page_op for page_op functions which do not require page
6893 * level state to be returned from the call. Page_op was created to provide
6894 * a low-cost alternative to page manipulation via UPLs when only a single
6895 * page was involved. The range_op call establishes the ability in the _op
6896 * family of functions to work on multiple pages where the lack of page level
6897 * state handling allows the caller to avoid the overhead of the upl structures.
6898 */
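/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * UPL_ROP_PRESENT can be used to measure how many bytes at the start of a
 * range are backed by resident pages:
 *
 *      kern_return_t kr;
 *      uint32_t resident_bytes = 0;
 *
 *      kr = vm_object_range_op(object, offset, offset + size,
 *          UPL_ROP_PRESENT, &resident_bytes);
 *      // on KERN_SUCCESS, resident_bytes is the length of the contiguous run
 *      // of resident pages starting at "offset" (0 if the first page is absent)
 */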
6899
6900 kern_return_t
6901 vm_object_range_op(
6902 vm_object_t object,
6903 vm_object_offset_t offset_beg,
6904 vm_object_offset_t offset_end,
6905 int ops,
6906 uint32_t *range)
6907 {
6908 vm_object_offset_t offset;
6909 vm_page_t dst_page;
6910
6911 if (offset_end - offset_beg > (uint32_t) -1) {
6912 /* range is too big and would overflow "*range" */
6913 return KERN_INVALID_ARGUMENT;
6914 }
6915 if (object->resident_page_count == 0) {
6916 if (range) {
6917 if (ops & UPL_ROP_PRESENT) {
6918 *range = 0;
6919 } else {
6920 *range = (uint32_t) (offset_end - offset_beg);
6921 assert(*range == (offset_end - offset_beg));
6922 }
6923 }
6924 return KERN_SUCCESS;
6925 }
6926 vm_object_lock(object);
6927
6928 if (object->phys_contiguous) {
6929 vm_object_unlock(object);
6930 return KERN_INVALID_OBJECT;
6931 }
6932
6933 offset = offset_beg & ~PAGE_MASK_64;
6934
6935 while (offset < offset_end) {
6936 dst_page = vm_page_lookup(object, offset);
6937 if (dst_page != VM_PAGE_NULL) {
6938 if (ops & UPL_ROP_DUMP) {
6939 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6940 /*
6941 * someone else is playing with the
6942 * page, we will have to wait
6943 */
6944 PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6945 /*
6946 * need to look the page up again since its
6947 * state may have changed while we slept;
6948 * it might even belong to a different object
6949 * at this point
6950 */
6951 continue;
6952 }
6953 if (dst_page->vmp_laundry) {
6954 vm_pageout_steal_laundry(dst_page, FALSE);
6955 }
6956
6957 if (dst_page->vmp_pmapped == TRUE) {
6958 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6959 }
6960
6961 VM_PAGE_FREE(dst_page);
6962 } else if ((ops & UPL_ROP_ABSENT)
6963 && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
6964 break;
6965 }
6966 } else if (ops & UPL_ROP_PRESENT) {
6967 break;
6968 }
6969
6970 offset += PAGE_SIZE;
6971 }
6972 vm_object_unlock(object);
6973
6974 if (range) {
6975 if (offset > offset_end) {
6976 offset = offset_end;
6977 }
6978 if (offset > offset_beg) {
6979 *range = (uint32_t) (offset - offset_beg);
6980 assert(*range == (offset - offset_beg));
6981 } else {
6982 *range = 0;
6983 }
6984 }
6985 return KERN_SUCCESS;
6986 }
6987
6988 /*
6989 * Used to point a pager directly to a range of memory (when the pager may be associated
6990 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
6991 * expect that the virtual address will denote the start of a range that is physically contiguous.
6992 */
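/*
 * Illustrative usage sketch (hypothetical caller, not an actual call site):
 * a driver-style caller with a physically contiguous kernel buffer could
 * publish it through a pager's control port like this:
 *
 *      kern_return_t kr;
 *
 *      kr = pager_map_to_phys_contiguous(control, 0, (addr64_t)buffer_vaddr,
 *          buffer_size);
 *      // KERN_FAILURE means buffer_vaddr has no physical page behind it or
 *      // the control is not backed by a VM object
 */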
6993 kern_return_t
6994 pager_map_to_phys_contiguous(
6995 memory_object_control_t object,
6996 memory_object_offset_t offset,
6997 addr64_t base_vaddr,
6998 vm_size_t size)
6999 {
7000 ppnum_t page_num;
7001 boolean_t clobbered_private;
7002 kern_return_t retval;
7003 vm_object_t pager_object;
7004
7005 page_num = pmap_find_phys(kernel_pmap, base_vaddr);
7006
7007 if (!page_num) {
7008 retval = KERN_FAILURE;
7009 goto out;
7010 }
7011
7012 pager_object = memory_object_control_to_vm_object(object);
7013
7014 if (!pager_object) {
7015 retval = KERN_FAILURE;
7016 goto out;
7017 }
7018
7019 clobbered_private = pager_object->private;
7020 if (pager_object->private != TRUE) {
7021 vm_object_lock(pager_object);
7022 pager_object->private = TRUE;
7023 vm_object_unlock(pager_object);
7024 }
7025 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7026
7027 if (retval != KERN_SUCCESS) {
7028 if (pager_object->private != clobbered_private) {
7029 vm_object_lock(pager_object);
7030 pager_object->private = clobbered_private;
7031 vm_object_unlock(pager_object);
7032 }
7033 }
7034
7035 out:
7036 return retval;
7037 }
7038
7039 uint32_t scan_object_collision = 0;
7040
7041 void
7042 vm_object_lock(vm_object_t object)
7043 {
7044 if (object == vm_pageout_scan_wants_object) {
7045 scan_object_collision++;
7046 mutex_pause(2);
7047 }
7048 DTRACE_VM(vm_object_lock_w);
7049 lck_rw_lock_exclusive(&object->Lock);
7050 }
7051
7052 boolean_t
7053 vm_object_lock_avoid(vm_object_t object)
7054 {
7055 if (object == vm_pageout_scan_wants_object) {
7056 scan_object_collision++;
7057 return TRUE;
7058 }
7059 return FALSE;
7060 }
7061
7062 boolean_t
7063 _vm_object_lock_try(vm_object_t object)
7064 {
7065 boolean_t retval;
7066
7067 retval = lck_rw_try_lock_exclusive(&object->Lock);
7068 #if DEVELOPMENT || DEBUG
7069 if (retval == TRUE) {
7070 DTRACE_VM(vm_object_lock_w);
7071 }
7072 #endif
7073 return retval;
7074 }
7075
7076 boolean_t
7077 vm_object_lock_try(vm_object_t object)
7078 {
7079 /*
7080 * Called from hibernate path so check before blocking.
7081 */
7082 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7083 mutex_pause(2);
7084 }
7085 return _vm_object_lock_try(object);
7086 }
7087
7088 /*
7089 * Lock the object exclusive.
7090 *
7091 * Returns true iff the thread had to spin or block before
7092 * acquiring the lock.
7093 */
7094 bool
7095 vm_object_lock_check_contended(vm_object_t object)
7096 {
7097 if (object == vm_pageout_scan_wants_object) {
7098 scan_object_collision++;
7099 mutex_pause(2);
7100 }
7101 DTRACE_VM(vm_object_lock_w);
7102 return lck_rw_lock_exclusive_check_contended(&object->Lock);
7103 }
7104
7105 void
7106 vm_object_lock_shared(vm_object_t object)
7107 {
7108 if (vm_object_lock_avoid(object)) {
7109 mutex_pause(2);
7110 }
7111 DTRACE_VM(vm_object_lock_r);
7112 lck_rw_lock_shared(&object->Lock);
7113 }
7114
7115 boolean_t
7116 vm_object_lock_yield_shared(vm_object_t object)
7117 {
7118 boolean_t retval = FALSE, force_yield = FALSE;
7119
7120 vm_object_lock_assert_shared(object);
7121
7122 force_yield = vm_object_lock_avoid(object);
7123
7124 retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7125 if (retval) {
7126 DTRACE_VM(vm_object_lock_yield);
7127 }
7128
7129 return retval;
7130 }
7131
7132 boolean_t
7133 vm_object_lock_try_shared(vm_object_t object)
7134 {
7135 boolean_t retval;
7136
7137 if (vm_object_lock_avoid(object)) {
7138 mutex_pause(2);
7139 }
7140 retval = lck_rw_try_lock_shared(&object->Lock);
7141 if (retval) {
7142 DTRACE_VM(vm_object_lock_r);
7143 }
7144 return retval;
7145 }
7146
7147 boolean_t
7148 vm_object_lock_upgrade(vm_object_t object)
7149 {
7150 boolean_t retval;
7151
7152 retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7153 #if DEVELOPMENT || DEBUG
7154 if (retval == TRUE) {
7155 DTRACE_VM(vm_object_lock_w);
7156 }
7157 #endif
7158 return retval;
7159 }
7160
7161 void
7162 vm_object_unlock(vm_object_t object)
7163 {
7164 #if DEVELOPMENT || DEBUG
7165 DTRACE_VM(vm_object_unlock);
7166 #endif
7167 lck_rw_done(&object->Lock);
7168 }
7169
7170
7171 unsigned int vm_object_change_wimg_mode_count = 0;
7172
7173 /*
7174 * The object must be locked
7175 */
7176 void
7177 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7178 {
7179 vm_page_t p;
7180
7181 vm_object_lock_assert_exclusive(object);
7182
7183 vm_object_paging_only_wait(object, THREAD_UNINT);
7184
7185 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
7186 if (!p->vmp_fictitious) {
7187 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
7188 }
7189 }
7190 if (wimg_mode == VM_WIMG_USE_DEFAULT) {
7191 object->set_cache_attr = FALSE;
7192 } else {
7193 object->set_cache_attr = TRUE;
7194 }
7195
7196 object->wimg_bits = wimg_mode;
7197
7198 vm_object_change_wimg_mode_count++;
7199 }
7200
7201 #if CONFIG_FREEZE
7202
7203 extern struct freezer_context freezer_context_global;
7204
7205 /*
7206 * This routine "relocates" this object's previously
7207 * compressed pages, which may currently be scattered
7208 * across a number of compressed segments, into a set
7209 * of compressed segments dedicated to holding only
7210 * this object's compressed pages.
7211 */
7212
7213 extern AbsoluteTime c_freezer_last_yield_ts;
7214
7215 #define MAX_FREE_BATCH 32
7216 #define FREEZER_DUTY_CYCLE_ON_MS 5
7217 #define FREEZER_DUTY_CYCLE_OFF_MS 5
7218
7219 static int c_freezer_should_yield(void);
7220
7221
7222 static int
7223 c_freezer_should_yield()
7224 {
7225 AbsoluteTime cur_time;
7226 uint64_t nsecs;
7227
7228 assert(c_freezer_last_yield_ts);
7229 clock_get_uptime(&cur_time);
7230
7231 SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7232 absolutetime_to_nanoseconds(cur_time, &nsecs);
7233
7234 if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7235 return 1;
7236 }
7237 return 0;
7238 }
7239
7240
7241 void
7242 vm_object_compressed_freezer_done()
7243 {
7244 vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7245 }
7246
7247
7248 uint32_t
7249 vm_object_compressed_freezer_pageout(
7250 vm_object_t object, uint32_t dirty_budget)
7251 {
7252 vm_page_t p;
7253 vm_page_t local_freeq = NULL;
7254 int local_freed = 0;
7255 kern_return_t retval = KERN_SUCCESS;
7256 int obj_resident_page_count_snapshot = 0;
7257 uint32_t paged_out_count = 0;
7258
7259 assert(object != VM_OBJECT_NULL);
7260 assert(object->internal);
7261
7262 vm_object_lock(object);
7263
7264 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7265 if (!object->pager_initialized) {
7266 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7267
7268 if (!object->pager_initialized) {
7269 vm_object_compressor_pager_create(object);
7270 }
7271 }
7272
7273 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7274 vm_object_unlock(object);
7275 return paged_out_count;
7276 }
7277 }
7278
7279 /*
7280 * We could be freezing a shared internal object that might
7281 * be part of some other thread's current VM operations.
7282 * We skip it if there's a paging-in-progress or activity-in-progress
7283 * because we could be here a long time with the map lock held.
7284 *
7285 * Note: We are holding the map locked while we wait.
7286 * This is fine in the freezer path because the task
7287 * is suspended and so this latency is acceptable.
7288 */
7289 if (object->paging_in_progress || object->activity_in_progress) {
7290 vm_object_unlock(object);
7291 return paged_out_count;
7292 }
7293
7294 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7295 vm_object_offset_t curr_offset = 0;
7296
7297 /*
7298 * Go through the object and make sure that any
7299 * previously compressed pages are relocated into
7300 * a compressed segment associated with our freezer context's "freezer_ctx_chead".
7301 */
7302 while (curr_offset < object->vo_size) {
7303 curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7304
7305 if (curr_offset == (vm_object_offset_t) -1) {
7306 break;
7307 }
7308
7309 retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7310
7311 if (retval != KERN_SUCCESS) {
7312 break;
7313 }
7314
7315 curr_offset += PAGE_SIZE_64;
7316 }
7317 }
7318
7319 /*
7320 * We can't hold the object lock while heading down into the compressed pager
7321 * layer because we might need the kernel map lock down there to allocate new
7322 * compressor data structures. And if this same object is mapped in the kernel
7323 * and there's a fault on it, then that thread will want the object lock while
7324 * holding the kernel map lock.
7325 *
7326 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7327 * we won't be stuck in an infinite loop if the same page(s) keep getting
7328 * decompressed. So we grab a snapshot of the number of pages in the object and
7329 * we won't process any more than that number of pages.
7330 */
7331
7332 obj_resident_page_count_snapshot = object->resident_page_count;
7333
7334 vm_object_activity_begin(object);
7335
7336 while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7337 p = (vm_page_t)vm_page_queue_first(&object->memq);
7338
7339 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
7340
7341 vm_page_lockspin_queues();
7342
7343 if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) {
7344 vm_page_unlock_queues();
7345
7346 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
7347
7348 vm_page_queue_remove(&object->memq, p, vmp_listq);
7349 vm_page_queue_enter(&object->memq, p, vmp_listq);
7350
7351 continue;
7352 }
7353
7354 if (p->vmp_pmapped == TRUE) {
7355 int refmod_state, pmap_flags;
7356
7357 if (p->vmp_dirty || p->vmp_precious) {
7358 pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7359 } else {
7360 pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7361 }
7362
7363 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7364 if (refmod_state & VM_MEM_MODIFIED) {
7365 SET_PAGE_DIRTY(p, FALSE);
7366 }
7367 }
7368
7369 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7370 /*
7371 * Clean and non-precious page.
7372 */
7373 vm_page_unlock_queues();
7374 VM_PAGE_FREE(p);
7375
7376 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
7377 continue;
7378 }
7379
7380 if (p->vmp_laundry) {
7381 vm_pageout_steal_laundry(p, TRUE);
7382 }
7383
7384 vm_page_queues_remove(p, TRUE);
7385
7386 vm_page_unlock_queues();
7387
7388
7389 /*
7390 * In case the compressor fails to compress this page, we need it at
7391 * the back of the object memq so that we don't keep trying to process it.
7392 * Make the move here while we have the object lock held.
7393 */
7394
7395 vm_page_queue_remove(&object->memq, p, vmp_listq);
7396 vm_page_queue_enter(&object->memq, p, vmp_listq);
7397
7398 /*
7399 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7400 *
7401 * Mark the page busy so no one messes with it while we have the object lock dropped.
7402 */
7403 p->vmp_busy = TRUE;
7404
7405 vm_object_activity_begin(object);
7406
7407 vm_object_unlock(object);
7408
7409 if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7410 (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7411 p) == KERN_SUCCESS) {
7412 /*
7413 * page has already been un-tabled from the object via 'vm_page_remove'
7414 */
7415 p->vmp_snext = local_freeq;
7416 local_freeq = p;
7417 local_freed++;
7418 paged_out_count++;
7419
7420 if (local_freed >= MAX_FREE_BATCH) {
7421 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7422
7423 vm_page_free_list(local_freeq, TRUE);
7424
7425 local_freeq = NULL;
7426 local_freed = 0;
7427 }
7428 freezer_context_global.freezer_ctx_uncompressed_pages++;
7429 }
7430 KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
7431
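/*
 * Only consider yielding while no locally batched pages are waiting to be
 * freed; honor the freezer duty cycle by yielding for
 * FREEZER_DUTY_CYCLE_OFF_MS.
 */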
7432 if (local_freed == 0 && c_freezer_should_yield()) {
7433 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7434 clock_get_uptime(&c_freezer_last_yield_ts);
7435 }
7436
7437 vm_object_lock(object);
7438 }
7439
7440 if (local_freeq) {
7441 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7442
7443 vm_page_free_list(local_freeq, TRUE);
7444
7445 local_freeq = NULL;
7446 local_freed = 0;
7447 }
7448
7449 vm_object_activity_end(object);
7450
7451 vm_object_unlock(object);
7452
7453 if (c_freezer_should_yield()) {
7454 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7455 clock_get_uptime(&c_freezer_last_yield_ts);
7456 }
7457 return paged_out_count;
7458 }
7459
7460 #endif /* CONFIG_FREEZE */
7461
7462
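/*
 * vm_object_pageout:
 *
 * Push an internal object's resident pages toward the compressor: clean,
 * non-precious pages are freed outright, while dirty or precious pages are
 * sent to the internal pageout queue via vm_pageout_cluster().
 */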
7463 void
7464 vm_object_pageout(
7465 vm_object_t object)
7466 {
7467 vm_page_t p, next;
7468 struct vm_pageout_queue *iq;
7469
7470 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
7471 return;
7472 }
7473
7474 iq = &vm_pageout_queue_internal;
7475
7476 assert(object != VM_OBJECT_NULL);
7477
7478 vm_object_lock(object);
7479
7480 if (!object->internal ||
7481 object->terminating ||
7482 !object->alive) {
7483 vm_object_unlock(object);
7484 return;
7485 }
7486
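/*
 * Make sure the object has a compressor pager to page into: try to
 * collapse the object first and create a pager only if it still
 * doesn't have one.
 */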
7487 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7488 if (!object->pager_initialized) {
7489 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7490
7491 if (!object->pager_initialized) {
7492 vm_object_compressor_pager_create(object);
7493 }
7494 }
7495
7496 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7497 vm_object_unlock(object);
7498 return;
7499 }
7500 }
7501
7502 ReScan:
7503 next = (vm_page_t)vm_page_queue_first(&object->memq);
7504
7505 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
7506 p = next;
7507 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
7508
7509 assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
7510
7511 if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
7512 p->vmp_cleaning ||
7513 p->vmp_laundry ||
7514 p->vmp_busy ||
7515 p->vmp_absent ||
7516 VMP_ERROR_GET(p) ||
7517 p->vmp_fictitious ||
7518 VM_PAGE_WIRED(p)) {
7519 /*
7520 * Page is already being cleaned or can't be cleaned.
7521 */
7522 continue;
7523 }
7524 if (vm_compressor_low_on_space()) {
7525 break;
7526 }
7527
7528 /* Throw to the pageout queue */
7529
7530 vm_page_lockspin_queues();
7531
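/*
 * If the internal pageout queue is throttled, wait for it to drain and
 * then rescan the object from the top, since the object lock was dropped
 * while we blocked.
 */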
7532 if (VM_PAGE_Q_THROTTLED(iq)) {
7533 iq->pgo_draining = TRUE;
7534
7535 assert_wait((event_t) (&iq->pgo_laundry + 1),
7536 THREAD_INTERRUPTIBLE);
7537 vm_page_unlock_queues();
7538 vm_object_unlock(object);
7539
7540 thread_block(THREAD_CONTINUE_NULL);
7541
7542 vm_object_lock(object);
7543 goto ReScan;
7544 }
7545
7546 assert(!p->vmp_fictitious);
7547 assert(!p->vmp_busy);
7548 assert(!p->vmp_absent);
7549 assert(!p->vmp_unusual);
7550 assert(!VMP_ERROR_GET(p)); /* XXX there's a window here where we could have an ECC error! */
7551 assert(!VM_PAGE_WIRED(p));
7552 assert(!p->vmp_cleaning);
7553
7554 if (p->vmp_pmapped == TRUE) {
7555 int refmod_state;
7556 int pmap_options;
7557
7558 /*
7559 * Tell pmap the page should be accounted
7560 * for as "compressed" if it's been modified.
7561 */
7562 pmap_options =
7563 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7564 if (p->vmp_dirty || p->vmp_precious) {
7565 /*
7566 * We already know it's been modified,
7567 * so tell pmap to account for it
7568 * as "compressed".
7569 */
7570 pmap_options = PMAP_OPTIONS_COMPRESSOR;
7571 }
7572 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
7573 pmap_options,
7574 NULL);
7575 if (refmod_state & VM_MEM_MODIFIED) {
7576 SET_PAGE_DIRTY(p, FALSE);
7577 }
7578 }
7579
7580 if (!p->vmp_dirty && !p->vmp_precious) {
7581 vm_page_unlock_queues();
7582 VM_PAGE_FREE(p);
7583 continue;
7584 }
7585 vm_page_queues_remove(p, TRUE);
7586
7587 vm_pageout_cluster(p);
7588
7589 vm_page_unlock_queues();
7590 }
7591 vm_object_unlock(object);
7592 }
7593
7594
7595 #if CONFIG_IOSCHED
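/*
 * vm_page_request_reprioritize:
 *
 * Queue a request to reprioritize the I/O already issued for the given
 * block range on the object's backing device, then wake the
 * io_reprioritize thread to process it.
 */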
7596 void
7597 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
7598 {
7599 io_reprioritize_req_t req;
7600 struct vnode *devvp = NULL;
7601
7602 if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7603 return;
7604 }
7605
7606 /*
7607 * Create the request for I/O reprioritization.
7608 * We use the noblock variant of zalloc because we're holding the object
7609 * lock here and we could cause a deadlock in low memory conditions.
7610 */
7611 req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
7612 if (req == NULL) {
7613 return;
7614 }
7615 req->blkno = blkno;
7616 req->len = len;
7617 req->priority = prio;
7618 req->devvp = devvp;
7619
7620 /* Insert request into the reprioritization list */
7621 IO_REPRIORITIZE_LIST_LOCK();
7622 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7623 IO_REPRIORITIZE_LIST_UNLOCK();
7624
7625 /* Wakeup reprioritize thread */
7626 IO_REPRIO_THREAD_WAKEUP();
7627
7628 return;
7629 }
7630
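/*
 * vm_decmp_upl_reprioritize:
 *
 * For a decompression UPL, walk the block ranges recorded in the real I/O
 * UPL's upl_reprio_info and queue a reprioritization request for each
 * range that has been issued.
 */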
7631 void
7632 vm_decmp_upl_reprioritize(upl_t upl, int prio)
7633 {
7634 int offset;
7635 vm_object_t object;
7636 io_reprioritize_req_t req;
7637 struct vnode *devvp = NULL;
7638 uint64_t blkno;
7639 uint32_t len;
7640 upl_t io_upl;
7641 uint64_t *io_upl_reprio_info;
7642 int io_upl_size;
7643
7644 if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
7645 return;
7646 }
7647
7648 /*
7649 * We don't want to perform any allocations with the upl lock held, since that
7650 * might result in a deadlock: if the system is low on memory, the pageout thread
7651 * will try to page out and may wait on this lock, while we in turn would be
7652 * waiting for the pageout thread to free up memory.
7653 */
7654
7655
7656 /* First, just get the size of the upl so we know how large the reprio info buffer needs to be */
7657 if (!upl_try_lock(upl)) {
7658 return;
7659 }
7660
7661 if (upl->decmp_io_upl == NULL) {
7662 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7663 upl_unlock(upl);
7664 return;
7665 }
7666
7667 io_upl = upl->decmp_io_upl;
7668 assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
7669 assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
7670 "upl %p offset 0x%llx size 0x%x\n",
7671 io_upl, io_upl->u_offset, io_upl->u_size);
7672 io_upl_size = io_upl->u_size;
7673 upl_unlock(upl);
7674
7675 /* Now perform the allocation */
7676 io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
7677 if (io_upl_reprio_info == NULL) {
7678 return;
7679 }
7680
7681 /* Take the lock again, recheck the state, and copy out the required info */
7682 if (!upl_try_lock(upl)) {
7683 goto out;
7684 }
7685
7686 if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
7687 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7688 upl_unlock(upl);
7689 goto out;
7690 }
7691 memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
7692 sizeof(uint64_t) * atop(io_upl_size));
7693
7694 /* Get the VM object for this UPL */
7695 if (io_upl->flags & UPL_SHADOWED) {
7696 object = io_upl->map_object->shadow;
7697 } else {
7698 object = io_upl->map_object;
7699 }
7700
7701 /* Get the dev vnode ptr for this object */
7702 if (!object || !object->pager ||
7703 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7704 upl_unlock(upl);
7705 goto out;
7706 }
7707
7708 upl_unlock(upl);
7709
7710 /* Now we have all the information needed to do the expedite */
7711
7712 offset = 0;
7713 while (offset < io_upl_size) {
7714 blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
7715 len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
7716
7717 /*
7718 * This implementation may cause some spurious expedites because we
7719 * don't clear the blkno & len in the upl_reprio_info even after the
7720 * I/O is complete.
7721 */
7722
7723 if (blkno != 0 && len != 0) {
7724 /* Create the request for I/O reprioritization */
7725 req = zalloc_flags(io_reprioritize_req_zone,
7726 Z_WAITOK | Z_NOFAIL);
7727 req->blkno = blkno;
7728 req->len = len;
7729 req->priority = prio;
7730 req->devvp = devvp;
7731
7732 /* Insert request into the reprioritization list */
7733 IO_REPRIORITIZE_LIST_LOCK();
7734 queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7735 IO_REPRIORITIZE_LIST_UNLOCK();
7736
7737 offset += len;
7738 } else {
7739 offset += PAGE_SIZE;
7740 }
7741 }
7742
7743 /* Wakeup reprioritize thread */
7744 IO_REPRIO_THREAD_WAKEUP();
7745
7746 out:
7747 kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size));
7748 }
7749
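/*
 * vm_page_handle_prio_inversion:
 *
 * The current thread is about to wait on page 'm'. If the page is part of
 * a UPL whose I/O was issued at a lower priority than this thread's I/O
 * tier, ask for that in-flight I/O to be reprioritized to our tier.
 */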
7750 void
7751 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
7752 {
7753 upl_t upl;
7754 upl_page_info_t *pl;
7755 unsigned int i, num_pages;
7756 int cur_tier;
7757
7758 cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
7759
7760 /*
7761 * Scan through all UPLs associated with the object to find the
7762 * UPL containing the contended page.
7763 */
7764 queue_iterate(&o->uplq, upl, upl_t, uplq) {
7765 if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
7766 continue;
7767 }
7768 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
7769 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
7770 "upl %p offset 0x%llx size 0x%x\n",
7771 upl, upl->u_offset, upl->u_size);
7772 num_pages = (upl->u_size / PAGE_SIZE);
7773
7774 /*
7775 * For each page in the UPL page list, see if it matches the contended
7776 * page and was issued as a low-priority I/O.
7777 */
7778 for (i = 0; i < num_pages; i++) {
7779 if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
7780 if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
7781 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7782 VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
7783 vm_decmp_upl_reprioritize(upl, cur_tier);
7784 break;
7785 }
7786 KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7787 upl->upl_reprio_info[i], upl->upl_priority, 0);
7788 if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
7789 vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
7790 }
7791 break;
7792 }
7793 }
7794 /* Check if we found any hits */
7795 if (i != num_pages) {
7796 break;
7797 }
7798 }
7799
7800 return;
7801 }
7802
7803 wait_result_t
7804 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
7805 {
7806 wait_result_t ret;
7807
7808 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
7809
7810 if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
7811 /*
7812 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
7813 */
7814 vm_page_handle_prio_inversion(o, m);
7815 }
7816 m->vmp_wanted = TRUE;
7817 ret = thread_sleep_vm_object(o, m, interruptible);
7818 KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
7819 return ret;
7820 }
7821
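/*
 * io_reprioritize_thread:
 *
 * Drain the io_reprioritize_list, issuing a reprioritization request to
 * the vnode pager for each entry, then call IO_REPRIO_THREAD_CONTINUATION()
 * to wait for more work.
 */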
7822 static void
7823 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
7824 {
7825 io_reprioritize_req_t req = NULL;
7826
7827 while (1) {
7828 IO_REPRIORITIZE_LIST_LOCK();
7829 if (queue_empty(&io_reprioritize_list)) {
7830 IO_REPRIORITIZE_LIST_UNLOCK();
7831 break;
7832 }
7833
7834 queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7835 IO_REPRIORITIZE_LIST_UNLOCK();
7836
7837 vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
7838 zfree(io_reprioritize_req_zone, req);
7839 }
7840
7841 IO_REPRIO_THREAD_CONTINUATION();
7842 }
7843 #endif /* CONFIG_IOSCHED */
7844
7845 #if VM_OBJECT_ACCESS_TRACKING
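/*
 * vm_object_access_tracking:
 *
 * Under the object lock, swap in a new access-tracking setting and return
 * the previous setting along with the accumulated read/write counts (which
 * are reset). When tracking is (re)enabled, remove all pmap mappings of
 * the object so that subsequent accesses fault and can be counted.
 */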
7846 void
7847 vm_object_access_tracking(
7848 vm_object_t object,
7849 int *access_tracking_p,
7850 uint32_t *access_tracking_reads_p,
7851 uint32_t *access_tracking_writes_p)
7852 {
7853 int access_tracking;
7854
7855 access_tracking = !!*access_tracking_p;
7856
7857 vm_object_lock(object);
7858 *access_tracking_p = object->access_tracking;
7859 if (access_tracking_reads_p) {
7860 *access_tracking_reads_p = object->access_tracking_reads;
7861 }
7862 if (access_tracking_writes_p) {
7863 *access_tracking_writes_p = object->access_tracking_writes;
7864 }
7865 object->access_tracking = access_tracking;
7866 object->access_tracking_reads = 0;
7867 object->access_tracking_writes = 0;
7868 vm_object_unlock(object);
7869
7870 if (access_tracking) {
7871 vm_object_pmap_protect_options(object,
7872 0,
7873 object->vo_size,
7874 PMAP_NULL,
7875 PAGE_SIZE,
7876 0,
7877 VM_PROT_NONE,
7878 0);
7879 }
7880 }
7881 #endif /* VM_OBJECT_ACCESS_TRACKING */
7882
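/*
 * vm_object_ledger_tag_ledgers:
 *
 * Translate the object's ledger tag and "no footprint" attribute into the
 * task ledger indices to charge for its volatile and nonvolatile resident
 * and compressed pages, and report whether the object should count toward
 * the task's physical footprint. See the calls in
 * vm_object_ownership_change() below for typical usage.
 */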
7883 void
7884 vm_object_ledger_tag_ledgers(
7885 vm_object_t object,
7886 int *ledger_idx_volatile,
7887 int *ledger_idx_nonvolatile,
7888 int *ledger_idx_volatile_compressed,
7889 int *ledger_idx_nonvolatile_compressed,
7890 boolean_t *do_footprint)
7891 {
7892 assert(object->shadow == VM_OBJECT_NULL);
7893
7894 *do_footprint = !object->vo_no_footprint;
7895
7896 switch (object->vo_ledger_tag) {
7897 case VM_LEDGER_TAG_NONE:
7898 /*
7899 * Regular purgeable memory:
7900 * counts in footprint only when nonvolatile.
7901 */
7902 *do_footprint = TRUE;
7903 assert(object->purgable != VM_PURGABLE_DENY);
7904 *ledger_idx_volatile = task_ledgers.purgeable_volatile;
7905 *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
7906 *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
7907 *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
7908 break;
7909 case VM_LEDGER_TAG_DEFAULT:
7910 /*
7911 * "default" tagged memory:
7912 * counts in footprint only when nonvolatile and not marked
7913 * as "no_footprint".
7914 */
7915 *ledger_idx_volatile = task_ledgers.tagged_nofootprint;
7916 *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7917 if (*do_footprint) {
7918 *ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
7919 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
7920 } else {
7921 *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
7922 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7923 }
7924 break;
7925 case VM_LEDGER_TAG_NETWORK:
7926 /*
7927 * "network" tagged memory:
7928 * never counts in footprint.
7929 */
7930 *do_footprint = FALSE;
7931 *ledger_idx_volatile = task_ledgers.network_volatile;
7932 *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
7933 *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
7934 *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
7935 break;
7936 case VM_LEDGER_TAG_MEDIA:
7937 /*
7938 * "media" tagged memory:
7939 * counts in footprint only when nonvolatile and not marked
7940 * as "no footprint".
7941 */
7942 *ledger_idx_volatile = task_ledgers.media_nofootprint;
7943 *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
7944 if (*do_footprint) {
7945 *ledger_idx_nonvolatile = task_ledgers.media_footprint;
7946 *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
7947 } else {
7948 *ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
7949 *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
7950 }
7951 break;
7952 case VM_LEDGER_TAG_GRAPHICS:
7953 /*
7954 * "graphics" tagged memory:
7955 * counts in footprint only when nonvolatile and not marked
7956 * as "no footprint".
7957 */
7958 *ledger_idx_volatile = task_ledgers.graphics_nofootprint;
7959 *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
7960 if (*do_footprint) {
7961 *ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
7962 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
7963 } else {
7964 *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
7965 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
7966 }
7967 break;
7968 case VM_LEDGER_TAG_NEURAL:
7969 /*
7970 * "neural" tagged memory:
7971 * counts in footprint only when nonvolatile and not marked
7972 * as "no footprint".
7973 */
7974 *ledger_idx_volatile = task_ledgers.neural_nofootprint;
7975 *ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
7976 if (*do_footprint) {
7977 *ledger_idx_nonvolatile = task_ledgers.neural_footprint;
7978 *ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
7979 } else {
7980 *ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
7981 *ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
7982 }
7983 break;
7984 default:
7985 panic("%s: object %p has unsupported ledger_tag %d",
7986 __FUNCTION__, object, object->vo_ledger_tag);
7987 }
7988 }
7989
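/*
 * vm_object_ownership_change:
 *
 * Change the object's ledger tag, owning task and/or "no footprint"
 * attribute. The object's resident, wired and compressed pages are debited
 * from the old owner's ledgers and credited to the new owner's, and the
 * object is moved between the tasks' task_objq lists as needed.
 */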
7990 kern_return_t
7991 vm_object_ownership_change(
7992 vm_object_t object,
7993 int new_ledger_tag,
7994 task_t new_owner,
7995 int new_ledger_flags,
7996 boolean_t old_task_objq_locked)
7997 {
7998 int old_ledger_tag;
7999 task_t old_owner;
8000 int resident_count, wired_count;
8001 unsigned int compressed_count;
8002 int ledger_idx_volatile;
8003 int ledger_idx_nonvolatile;
8004 int ledger_idx_volatile_compressed;
8005 int ledger_idx_nonvolatile_compressed;
8006 int ledger_idx;
8007 int ledger_idx_compressed;
8008 boolean_t do_footprint, old_no_footprint, new_no_footprint;
8009 boolean_t new_task_objq_locked;
8010
8011 vm_object_lock_assert_exclusive(object);
8012
8013 if (!object->internal) {
8014 return KERN_INVALID_ARGUMENT;
8015 }
8016 if (new_owner == VM_OBJECT_OWNER_UNCHANGED) {
8017 /* leave owner unchanged */
8018 new_owner = VM_OBJECT_OWNER(object);
8019 }
8020 if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
8021 /* leave ledger_tag unchanged */
8022 new_ledger_tag = object->vo_ledger_tag;
8023 }
8024 if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
8025 object->purgable == VM_PURGABLE_DENY) {
8026 /* non-purgeable memory must have a valid non-zero ledger tag */
8027 return KERN_INVALID_ARGUMENT;
8028 }
8029 if (new_ledger_tag < 0 ||
8030 new_ledger_tag > VM_LEDGER_TAG_MAX) {
8031 return KERN_INVALID_ARGUMENT;
8032 }
8033 if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
8034 return KERN_INVALID_ARGUMENT;
8035 }
8036 if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
8037 object->purgable == VM_PURGABLE_DENY) {
8038 /*
8039 * This VM object is neither ledger-tagged nor purgeable.
8040 * We can convert it to "ledger tag" ownership iff it
8041 * has not been used at all yet (no resident pages and
8042 * no pager) and it's going to be assigned to a valid task.
8043 */
8044 if (object->resident_page_count != 0 ||
8045 object->pager != NULL ||
8046 object->pager_created ||
8047 object->ref_count != 1 ||
8048 object->vo_owner != TASK_NULL ||
8049 object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
8050 new_owner == TASK_NULL) {
8051 return KERN_FAILURE;
8052 }
8053 }
8054
8055 if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
8056 new_no_footprint = TRUE;
8057 } else {
8058 new_no_footprint = FALSE;
8059 }
8060 #if __arm64__
8061 if (!new_no_footprint &&
8062 object->purgable != VM_PURGABLE_DENY &&
8063 new_owner != TASK_NULL &&
8064 new_owner != VM_OBJECT_OWNER_DISOWNED &&
8065 new_owner->task_legacy_footprint) {
8066 /*
8067 * This task has been granted "legacy footprint" and should
8068 * not be charged for its IOKit purgeable memory. Since we
8069 * might now change the accounting of such memory to the
8070 * "graphics" ledger, for example, give it the "no footprint"
8071 * option.
8072 */
8073 new_no_footprint = TRUE;
8074 }
8075 #endif /* __arm64__ */
8076 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
8077 assert(object->shadow == VM_OBJECT_NULL);
8078 assert(object->vo_copy == VM_OBJECT_NULL);
8079
8080 old_ledger_tag = object->vo_ledger_tag;
8081 old_no_footprint = object->vo_no_footprint;
8082 old_owner = VM_OBJECT_OWNER(object);
8083
8084 DTRACE_VM8(object_ownership_change,
8085 vm_object_t, object,
8086 task_t, old_owner,
8087 int, old_ledger_tag,
8088 int, old_no_footprint,
8089 task_t, new_owner,
8090 int, new_ledger_tag,
8091 int, new_no_footprint,
8092 int, VM_OBJECT_ID(object));
8093
8094 assert(object->internal);
8095 resident_count = object->resident_page_count - object->wired_page_count;
8096 wired_count = object->wired_page_count;
8097 compressed_count = vm_compressor_pager_get_count(object->pager);
8098
8099 /*
8100 * Deal with the old owner and/or ledger tag, if needed.
8101 */
8102 if (old_owner != TASK_NULL &&
8103 ((old_owner != new_owner) /* new owner ... */
8104 || /* ... or ... */
8105 (old_no_footprint != new_no_footprint) /* new "no_footprint" */
8106 || /* ... or ... */
8107 old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
8108 /*
8109 * Take this object off of the old owner's ledgers.
8110 */
8111 vm_object_ledger_tag_ledgers(object,
8112 &ledger_idx_volatile,
8113 &ledger_idx_nonvolatile,
8114 &ledger_idx_volatile_compressed,
8115 &ledger_idx_nonvolatile_compressed,
8116 &do_footprint);
8117 if (object->purgable == VM_PURGABLE_VOLATILE ||
8118 object->purgable == VM_PURGABLE_EMPTY) {
8119 ledger_idx = ledger_idx_volatile;
8120 ledger_idx_compressed = ledger_idx_volatile_compressed;
8121 } else {
8122 ledger_idx = ledger_idx_nonvolatile;
8123 ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8124 }
8125 if (resident_count) {
8126 /*
8127 * Adjust the appropriate old owner's ledgers by the
8128 * number of resident pages.
8129 */
8130 ledger_debit(old_owner->ledger,
8131 ledger_idx,
8132 ptoa_64(resident_count));
8133 /* adjust old owner's footprint */
8134 if (do_footprint &&
8135 object->purgable != VM_PURGABLE_VOLATILE &&
8136 object->purgable != VM_PURGABLE_EMPTY) {
8137 ledger_debit(old_owner->ledger,
8138 task_ledgers.phys_footprint,
8139 ptoa_64(resident_count));
8140 }
8141 }
8142 if (wired_count) {
8143 /* wired pages are always nonvolatile */
8144 ledger_debit(old_owner->ledger,
8145 ledger_idx_nonvolatile,
8146 ptoa_64(wired_count));
8147 if (do_footprint) {
8148 ledger_debit(old_owner->ledger,
8149 task_ledgers.phys_footprint,
8150 ptoa_64(wired_count));
8151 }
8152 }
8153 if (compressed_count) {
8154 /*
8155 * Adjust the appropriate old owner's ledgers
8156 * by the number of compressed pages.
8157 */
8158 ledger_debit(old_owner->ledger,
8159 ledger_idx_compressed,
8160 ptoa_64(compressed_count));
8161 if (do_footprint &&
8162 object->purgable != VM_PURGABLE_VOLATILE &&
8163 object->purgable != VM_PURGABLE_EMPTY) {
8164 ledger_debit(old_owner->ledger,
8165 task_ledgers.phys_footprint,
8166 ptoa_64(compressed_count));
8167 }
8168 }
8169 if (old_owner != new_owner) {
8170 /* remove object from old_owner's list of owned objects */
8171 DTRACE_VM2(object_owner_remove,
8172 vm_object_t, object,
8173 task_t, old_owner);
8174 if (!old_task_objq_locked) {
8175 task_objq_lock(old_owner);
8176 }
8177 old_owner->task_owned_objects--;
8178 queue_remove(&old_owner->task_objq, object,
8179 vm_object_t, task_objq);
8180 switch (object->purgable) {
8181 case VM_PURGABLE_NONVOLATILE:
8182 case VM_PURGABLE_EMPTY:
8183 vm_purgeable_nonvolatile_owner_update(old_owner,
8184 -1);
8185 break;
8186 case VM_PURGABLE_VOLATILE:
8187 vm_purgeable_volatile_owner_update(old_owner,
8188 -1);
8189 break;
8190 default:
8191 break;
8192 }
8193 if (!old_task_objq_locked) {
8194 task_objq_unlock(old_owner);
8195 }
8196 }
8197 }
8198
8199 /*
8200 * Switch to new ledger tag and/or owner.
8201 */
8202
8203 new_task_objq_locked = FALSE;
8204 if (new_owner != old_owner &&
8205 new_owner != TASK_NULL &&
8206 new_owner != VM_OBJECT_OWNER_DISOWNED) {
8207 /*
8208 * If the new owner is not accepting new objects ("disowning"),
8209 * the object becomes "disowned" and will be added to
8210 * the kernel's task_objq.
8211 *
8212 * Check first without locking, to avoid blocking while the
8213 * task is disowning its objects.
8214 */
8215 if (new_owner->task_objects_disowning) {
8216 new_owner = VM_OBJECT_OWNER_DISOWNED;
8217 } else {
8218 task_objq_lock(new_owner);
8219 /* check again now that we have the lock */
8220 if (new_owner->task_objects_disowning) {
8221 new_owner = VM_OBJECT_OWNER_DISOWNED;
8222 task_objq_unlock(new_owner);
8223 } else {
8224 new_task_objq_locked = TRUE;
8225 }
8226 }
8227 }
8228
8229 object->vo_ledger_tag = new_ledger_tag;
8230 object->vo_owner = new_owner;
8231 object->vo_no_footprint = new_no_footprint;
8232
8233 if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
8234 /*
8235 * Disowned objects are added to the kernel's task_objq but
8236 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
8237 * differentiate them from objects intentionally owned by
8238 * the kernel.
8239 */
8240 assert(old_owner != kernel_task);
8241 new_owner = kernel_task;
8242 assert(!new_task_objq_locked);
8243 task_objq_lock(new_owner);
8244 new_task_objq_locked = TRUE;
8245 }
8246
8247 /*
8248 * Deal with the new owner and/or ledger tag, if needed.
8249 */
8250 if (new_owner != TASK_NULL &&
8251 ((new_owner != old_owner) /* new owner ... */
8252 || /* ... or ... */
8253 (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
8254 || /* ... or ... */
8255 new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
8256 /*
8257 * Add this object to the new owner's ledgers.
8258 */
8259 vm_object_ledger_tag_ledgers(object,
8260 &ledger_idx_volatile,
8261 &ledger_idx_nonvolatile,
8262 &ledger_idx_volatile_compressed,
8263 &ledger_idx_nonvolatile_compressed,
8264 &do_footprint);
8265 if (object->purgable == VM_PURGABLE_VOLATILE ||
8266 object->purgable == VM_PURGABLE_EMPTY) {
8267 ledger_idx = ledger_idx_volatile;
8268 ledger_idx_compressed = ledger_idx_volatile_compressed;
8269 } else {
8270 ledger_idx = ledger_idx_nonvolatile;
8271 ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8272 }
8273 if (resident_count) {
8274 /*
8275 * Adjust the appropriate new owner's ledgers by the
8276 * number of resident pages.
8277 */
8278 ledger_credit(new_owner->ledger,
8279 ledger_idx,
8280 ptoa_64(resident_count));
8281 /* adjust new owner's footprint */
8282 if (do_footprint &&
8283 object->purgable != VM_PURGABLE_VOLATILE &&
8284 object->purgable != VM_PURGABLE_EMPTY) {
8285 ledger_credit(new_owner->ledger,
8286 task_ledgers.phys_footprint,
8287 ptoa_64(resident_count));
8288 }
8289 }
8290 if (wired_count) {
8291 /* wired pages are always nonvolatile */
8292 ledger_credit(new_owner->ledger,
8293 ledger_idx_nonvolatile,
8294 ptoa_64(wired_count));
8295 if (do_footprint) {
8296 ledger_credit(new_owner->ledger,
8297 task_ledgers.phys_footprint,
8298 ptoa_64(wired_count));
8299 }
8300 }
8301 if (compressed_count) {
8302 /*
8303 * Adjust the new owner's ledgers by the number of
8304 * compressed pages.
8305 */
8306 ledger_credit(new_owner->ledger,
8307 ledger_idx_compressed,
8308 ptoa_64(compressed_count));
8309 if (do_footprint &&
8310 object->purgable != VM_PURGABLE_VOLATILE &&
8311 object->purgable != VM_PURGABLE_EMPTY) {
8312 ledger_credit(new_owner->ledger,
8313 task_ledgers.phys_footprint,
8314 ptoa_64(compressed_count));
8315 }
8316 }
8317 if (new_owner != old_owner) {
8318 /* add object to new_owner's list of owned objects */
8319 DTRACE_VM2(object_owner_add,
8320 vm_object_t, object,
8321 task_t, new_owner);
8322 assert(new_task_objq_locked);
8323 new_owner->task_owned_objects++;
8324 queue_enter(&new_owner->task_objq, object,
8325 vm_object_t, task_objq);
8326 switch (object->purgable) {
8327 case VM_PURGABLE_NONVOLATILE:
8328 case VM_PURGABLE_EMPTY:
8329 vm_purgeable_nonvolatile_owner_update(new_owner,
8330 +1);
8331 break;
8332 case VM_PURGABLE_VOLATILE:
8333 vm_purgeable_volatile_owner_update(new_owner,
8334 +1);
8335 break;
8336 default:
8337 break;
8338 }
8339 }
8340 }
8341
8342 if (new_task_objq_locked) {
8343 task_objq_unlock(new_owner);
8344 }
8345
8346 return KERN_SUCCESS;
8347 }
8348
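/*
 * vm_owned_objects_disown:
 *
 * Transfer every object owned by "task" to the "disowned" owner (carried
 * on the kernel's task_objq), leaving the task owning no objects. Once the
 * task has been marked disowned, subsequent calls are no-ops.
 */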
8349 void
8350 vm_owned_objects_disown(
8351 task_t task)
8352 {
8353 vm_object_t next_object;
8354 vm_object_t object;
8355 int collisions;
8356 kern_return_t kr;
8357
8358 if (task == NULL) {
8359 return;
8360 }
8361
8362 collisions = 0;
8363
8364 again:
8365 if (task->task_objects_disowned) {
8366 /* task has already disowned its owned objects */
8367 assert(task->task_volatile_objects == 0);
8368 assert(task->task_nonvolatile_objects == 0);
8369 assert(task->task_owned_objects == 0);
8370 return;
8371 }
8372
8373 task_objq_lock(task);
8374
8375 task->task_objects_disowning = TRUE;
8376
8377 for (object = (vm_object_t) queue_first(&task->task_objq);
8378 !queue_end(&task->task_objq, (queue_entry_t) object);
8379 object = next_object) {
8380 if (task->task_nonvolatile_objects == 0 &&
8381 task->task_volatile_objects == 0 &&
8382 task->task_owned_objects == 0) {
8383 /* no more objects owned by "task" */
8384 break;
8385 }
8386
8387 next_object = (vm_object_t) queue_next(&object->task_objq);
8388
8389 #if DEBUG
8390 assert(object->vo_purgeable_volatilizer == NULL);
8391 #endif /* DEBUG */
8392 assert(object->vo_owner == task);
8393 if (!vm_object_lock_try(object)) {
8394 task_objq_unlock(task);
8395 mutex_pause(collisions++);
8396 goto again;
8397 }
8398 /* transfer ownership to the kernel */
8399 assert(VM_OBJECT_OWNER(object) != kernel_task);
8400 kr = vm_object_ownership_change(
8401 object,
8402 object->vo_ledger_tag, /* unchanged */
8403 VM_OBJECT_OWNER_DISOWNED, /* new owner */
8404 0, /* new_ledger_flags */
8405 TRUE); /* old_owner->task_objq locked */
8406 assert(kr == KERN_SUCCESS);
8407 assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
8408 vm_object_unlock(object);
8409 }
8410
8411 if (__improbable(task->task_owned_objects != 0)) {
8412 panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
8413 __FUNCTION__,
8414 task,
8415 task->task_volatile_objects,
8416 task->task_nonvolatile_objects,
8417 task->task_owned_objects,
8418 &task->task_objq,
8419 queue_first(&task->task_objq),
8420 queue_last(&task->task_objq));
8421 }
8422
8423 /* there shouldn't be any objects owned by task now */
8424 assert(task->task_volatile_objects == 0);
8425 assert(task->task_nonvolatile_objects == 0);
8426 assert(task->task_owned_objects == 0);
8427 assert(task->task_objects_disowning);
8428
8429 /* and we don't need to try and disown again */
8430 task->task_objects_disowned = TRUE;
8431
8432 task_objq_unlock(task);
8433 }
8434