1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_object.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Virtual memory object module.
63 */
64
65 #include <debug.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/memory_object.h>
69 #include <mach/vm_param.h>
70
71 #include <mach/sdt.h>
72
73 #include <ipc/ipc_types.h>
74 #include <ipc/ipc_port.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/queue.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <kern/misc_protos.h>
85 #include <kern/policy_internal.h>
86 #include <kern/coalition.h>
87
88 #include <sys/kdebug.h>
89 #include <sys/kdebug_triage.h>
90
91 #include <vm/memory_object_internal.h>
92 #include <vm/vm_compressor_pager_internal.h>
93 #include <vm/vm_fault_internal.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object_internal.h>
96 #include <vm/vm_page_internal.h>
97 #include <vm/vm_pageout_internal.h>
98 #include <vm/vm_protos_internal.h>
99 #include <vm/vm_purgeable_internal.h>
100 #include <vm/vm_ubc.h>
101 #include <vm/vm_compressor_xnu.h>
102 #include <os/hash.h>
103
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache_internal.h>
106 #endif
107
108 #if VM_OBJECT_ACCESS_TRACKING
109 uint64_t vm_object_access_tracking_reads = 0;
110 uint64_t vm_object_access_tracking_writes = 0;
111 #endif /* VM_OBJECT_ACCESS_TRACKING */
112
113 boolean_t vm_object_collapse_compressor_allowed = TRUE;
114
115 struct vm_counters vm_counters;
116
117 os_refgrp_decl(, vm_object_refgrp, "vm_object", NULL);
118
119 #if DEVELOPMENT || DEBUG
120 extern struct memory_object_pager_ops shared_region_pager_ops;
121 extern unsigned int shared_region_pagers_resident_count;
122 extern unsigned int shared_region_pagers_resident_peak;
123 #endif /* DEVELOPMENT || DEBUG */
124
125 #if VM_OBJECT_TRACKING
126 btlog_t vm_object_tracking_btlog;
127
128 void
129 vm_object_tracking_init(void)
130 {
131 int vm_object_tracking;
132
133 vm_object_tracking = 1;
134 PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
135 sizeof(vm_object_tracking));
136
137 if (vm_object_tracking) {
138 vm_object_tracking_btlog = btlog_create(BTLOG_HASH,
139 VM_OBJECT_TRACKING_NUM_RECORDS);
140 assert(vm_object_tracking_btlog);
141 }
142 }
143 #endif /* VM_OBJECT_TRACKING */
144
145 /*
146 * Virtual memory objects maintain the actual data
147 * associated with allocated virtual memory. A given
148 * page of memory exists within exactly one object.
149 *
150 * An object is only deallocated when all "references"
151 * are given up.
152 *
153 * Associated with each object is a list of all resident
154 * memory pages belonging to that object; this list is
155 * maintained by the "vm_page" module, but locked by the object's
156 * lock.
157 *
158 * Each object also records the memory object reference
159 * that is used by the kernel to request and write
160 * back data (the memory object, field "pager"), etc...
161 *
162 * Virtual memory objects are allocated to provide
163 * zero-filled memory (vm_allocate) or map a user-defined
164 * memory object into a virtual address space (vm_map).
165 *
166 * Virtual memory objects that refer to a user-defined
167 * memory object are called "permanent", because all changes
168 * made in virtual memory are reflected back to the
169 * memory manager, which may then store them permanently.
170 * Other virtual memory objects are called "temporary",
171 * meaning that changes need be written back only when
172 * necessary to reclaim pages, and that storage associated
173 * with the object can be discarded once it is no longer
174 * mapped.
175 *
176 * A permanent memory object may be mapped into more
177 * than one virtual address space. Moreover, two threads
178 * may attempt to make the first mapping of a memory
179 * object concurrently. Only one thread is allowed to
180 * complete this mapping; all others wait for the
181 * "pager_initialized" field is asserted, indicating
182 * that the first thread has initialized all of the
183 * necessary fields in the virtual memory object structure.
184 *
185 * The kernel relies on a *default memory manager* to
186 * provide backing storage for the zero-filled virtual
187 * memory objects. The pager memory objects associated
188 * with these temporary virtual memory objects are only
189 * requested from the default memory manager when it
190 * becomes necessary. Virtual memory objects
191 * that depend on the default memory manager are called
192 * "internal". The "pager_created" field is provided to
193 * indicate whether these ports have ever been allocated.
194 *
195 * The kernel may also create virtual memory objects to
196 * hold changed pages after a copy-on-write operation.
197 * In this case, the virtual memory object (and its
198 * backing storage -- its memory object) only contain
199 * those pages that have been changed. The "shadow"
200 * field refers to the virtual memory object that contains
201 * the remainder of the contents. The "shadow_offset"
202 * field indicates where in the "shadow" these contents begin.
203 * The "copy" field refers to a virtual memory object
204 * to which changed pages must be copied before changing
205 * this object, in order to implement another form
206 * of copy-on-write optimization.
207 *
208 * The virtual memory object structure also records
209 * the attributes associated with its memory object.
210 * The "pager_ready", "can_persist" and "copy_strategy"
211 * fields represent those attributes. The "cached_list"
212 * field is used in the implementation of the persistence
213 * attribute.
214 *
215 * ZZZ Continue this comment.
216 */
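/*
 * Illustrative sketch (added commentary, not part of the original text):
 * after a copy-on-write fault on a mapping of a pager-backed object O,
 * the chain described above might look like:
 *
 *	new temporary object (internal, holds only the changed pages)
 *	    shadow            ----> O (permanent, backed by its pager)
 *	    vo_shadow_offset  ----> where in O this object's contents begin
 *
 * Unchanged pages are found by following "shadow" into O; pages changed
 * through the new mapping stay in the temporary object.
 */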
217
218 /* Forward declarations for internal functions. */
219 static kern_return_t vm_object_terminate(
220 vm_object_t object);
221
222 static void vm_object_do_collapse(
223 vm_object_t object,
224 vm_object_t backing_object);
225
226 static void vm_object_do_bypass(
227 vm_object_t object,
228 vm_object_t backing_object);
229
230 static void vm_object_release_pager(
231 memory_object_t pager);
232
233 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
234
235 /*
236 * Wired-down kernel memory belongs to this memory object (kernel_object)
237 * by default to avoid wasting data structures.
238 */
239 static struct vm_object kernel_object_store VM_PAGE_PACKED_ALIGNED;
240 const vm_object_t kernel_object_default = &kernel_object_store;
241
242 static struct vm_object compressor_object_store VM_PAGE_PACKED_ALIGNED;
243 const vm_object_t compressor_object = &compressor_object_store;
244
245 /*
246 * This object holds all pages that have been retired due to errors like ECC.
247 * The system should never use the page or look at its contents. The offset
248 * in this object is the same as the page's physical address.
249 */
250 static struct vm_object retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
251 const vm_object_t retired_pages_object = &retired_pages_object_store;
252
253 #if HAS_MTE
254 /*
255 * This object holds all pages that are currently being used to hold MTE tags.
256 * The pages are wired and may have no pmap mappings of any kind.
257 * The object offset will be the same as physical address.
258 */
259 static struct vm_object mte_tags_object_store VM_PAGE_PACKED_ALIGNED;
260 const vm_object_t mte_tags_object = &mte_tags_object_store;
261
262 /*
263 * This object is for pages that would have been on kernel_object_default, except
264 * that they are using MTE tags.
265 */
266 static struct vm_object kernel_object_tagged_store VM_PAGE_PACKED_ALIGNED;
267 const vm_object_t kernel_object_tagged = &kernel_object_tagged_store;
268 #endif /* HAS_MTE */
269
270 static struct vm_object exclaves_object_store VM_PAGE_PACKED_ALIGNED;
271 const vm_object_t exclaves_object = &exclaves_object_store;
272 #if HAS_MTE
273 static struct vm_object exclaves_object_tagged_store VM_PAGE_PACKED_ALIGNED;
274 const vm_object_t exclaves_object_tagged = &exclaves_object_tagged_store;
275 #endif /* HAS_MTE */
276
277
278 /*
279 * Virtual memory objects are initialized from
280 * a template (see vm_object_allocate).
281 *
282 * When adding a new field to the virtual memory
283 * object structure, be sure to add initialization
284 * (see _vm_object_allocate()).
285 */
286 static const struct vm_object vm_object_template = {
287 .memq.prev = 0,
288 .memq.next = 0,
289 /*
290 * The lock will be initialized for each allocated object in
291 * _vm_object_allocate(), so we don't need to initialize it in
292 * the vm_object_template.
293 */
294 .vo_size = 0,
295 .memq_hint = VM_PAGE_NULL,
296 /*
297 * The ref count will be initialized for each allocated object in
298 * _vm_object_allocate(), so we don't need to initialize it in the
299 * vm_object_template.
300 */
301 .resident_page_count = 0,
302 .wired_page_count = 0,
303 .reusable_page_count = 0,
304 .vo_copy = VM_OBJECT_NULL,
305 .vo_copy_version = 0,
306 .vo_inherit_copy_none = false,
307 .shadow = VM_OBJECT_NULL,
308 .vo_shadow_offset = (vm_object_offset_t) 0,
309 .pager = MEMORY_OBJECT_NULL,
310 .paging_offset = 0,
311 .pager_control = MEMORY_OBJECT_CONTROL_NULL,
312 .copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
313 .paging_in_progress = 0,
314 .vo_size_delta = 0,
315 .activity_in_progress = 0,
316
317 /* Begin bitfields */
318 .all_wanted = 0, /* all bits FALSE */
319 .pager_created = FALSE,
320 .pager_initialized = FALSE,
321 .pager_ready = FALSE,
322 .pager_trusted = FALSE,
323 .can_persist = FALSE,
324 .internal = TRUE,
325 .private = FALSE,
326 .pageout = FALSE,
327 .alive = TRUE,
328 .purgable = VM_PURGABLE_DENY,
329 .purgeable_when_ripe = FALSE,
330 .purgeable_only_by_kernel = FALSE,
331 .shadowed = FALSE,
332 .true_share = FALSE,
333 .terminating = FALSE,
334 .named = FALSE,
335 .shadow_severed = FALSE,
336 .phys_contiguous = FALSE,
337 .nophyscache = FALSE,
338 /* End bitfields */
339
340 .cached_list.prev = NULL,
341 .cached_list.next = NULL,
342
343 .last_alloc = (vm_object_offset_t) 0,
344 .sequential = (vm_object_offset_t) 0,
345 .pages_created = 0,
346 .pages_used = 0,
347 .scan_collisions = 0,
348 #if COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1
349 .vo_chead_hint = 0,
350 #endif /* COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1 */
351 #if CONFIG_PHANTOM_CACHE
352 .phantom_object_id = 0,
353 #endif
354 .cow_hint = ~(vm_offset_t)0,
355
356 /* cache bitfields */
357 .wimg_bits = VM_WIMG_USE_DEFAULT,
358 .set_cache_attr = FALSE,
359 .object_is_shared_cache = FALSE,
360 .code_signed = FALSE,
361 .transposed = FALSE,
362 .mapping_in_progress = FALSE,
363 .phantom_isssd = FALSE,
364 .volatile_empty = FALSE,
365 .volatile_fault = FALSE,
366 .all_reusable = FALSE,
367 .blocked_access = FALSE,
368 .vo_ledger_tag = VM_LEDGER_TAG_NONE,
369 .vo_no_footprint = FALSE,
370 #if CONFIG_IOSCHED || UPL_DEBUG
371 .uplq.prev = NULL,
372 .uplq.next = NULL,
373 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
374 #ifdef VM_PIP_DEBUG
375 .pip_holders = {0},
376 #endif /* VM_PIP_DEBUG */
377
378 .objq.next = NULL,
379 .objq.prev = NULL,
380 .task_objq.next = NULL,
381 .task_objq.prev = NULL,
382
383 .purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
384 .purgeable_queue_group = 0,
385
386 .wire_tag = VM_KERN_MEMORY_NONE,
387 #if !VM_TAG_ACTIVE_UPDATE
388 .wired_objq.next = NULL,
389 .wired_objq.prev = NULL,
390 #endif /* ! VM_TAG_ACTIVE_UPDATE */
391
392 .io_tracking = FALSE,
393
394 #if CONFIG_SECLUDED_MEMORY
395 .eligible_for_secluded = FALSE,
396 .can_grab_secluded = FALSE,
397 #else /* CONFIG_SECLUDED_MEMORY */
398 .__object3_unused_bits = 0,
399 #endif /* CONFIG_SECLUDED_MEMORY */
400
401 .for_realtime = false,
402 .no_pager_reason = VM_OBJECT_DESTROY_UNKNOWN_REASON,
403
404 #if VM_OBJECT_ACCESS_TRACKING
405 .access_tracking = FALSE,
406 .access_tracking_reads = 0,
407 .access_tracking_writes = 0,
408 #endif /* VM_OBJECT_ACCESS_TRACKING */
409
410 #if DEBUG
411 .purgeable_owner_bt = {0},
412 .vo_purgeable_volatilizer = NULL,
413 .purgeable_volatilizer_bt = {0},
414 #endif /* DEBUG */
415 .vmo_provenance = VM_MAP_SERIAL_NONE,
416 .vmo_pl_req_in_progress = 0,
417 };
418
419 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
420 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
421 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
422 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
423 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
424
425 unsigned int vm_page_purged_wired = 0;
426 unsigned int vm_page_purged_busy = 0;
427 unsigned int vm_page_purged_others = 0;
428
429 static queue_head_t vm_object_cached_list;
430 static uint32_t vm_object_cache_pages_freed = 0;
431 static uint32_t vm_object_cache_pages_moved = 0;
432 static uint32_t vm_object_cache_pages_skipped = 0;
433 static uint32_t vm_object_cache_adds = 0;
434 static uint32_t vm_object_cached_count = 0;
435 static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data,
436 &vm_object_cache_lck_grp, &vm_object_lck_attr);
437
438 static uint32_t vm_object_page_grab_failed = 0;
439 static uint32_t vm_object_page_grab_skipped = 0;
440 static uint32_t vm_object_page_grab_returned = 0;
441 static uint32_t vm_object_page_grab_pmapped = 0;
442 static uint32_t vm_object_page_grab_reactivations = 0;
443
444 #define vm_object_cache_lock_spin() \
445 lck_mtx_lock_spin(&vm_object_cached_lock_data)
446 #define vm_object_cache_unlock() \
447 lck_mtx_unlock(&vm_object_cached_lock_data)
448
449 static void vm_object_cache_remove_locked(vm_object_t);
450
451
452 static void vm_object_reap(vm_object_t object);
453 static void vm_object_reap_async(vm_object_t object);
454 static void vm_object_reaper_thread(void);
455
456 static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data,
457 &vm_object_lck_grp, &vm_object_lck_attr);
458
459 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
460 unsigned int vm_object_reap_count = 0;
461 unsigned int vm_object_reap_count_async = 0;
462
463 #if HAS_MTE
464 unsigned int vm_object_no_compressor_pager_for_mte_count = 0;
465 TUNABLE(bool, vm_object_allow_compressor_pager_for_mte, "compress_mte", true);
466 #endif
467
468 #define vm_object_reaper_lock() \
469 lck_mtx_lock(&vm_object_reaper_lock_data)
470 #define vm_object_reaper_lock_spin() \
471 lck_mtx_lock_spin(&vm_object_reaper_lock_data)
472 #define vm_object_reaper_unlock() \
473 lck_mtx_unlock(&vm_object_reaper_lock_data)
474
475 #if CONFIG_IOSCHED
476 /* I/O Re-prioritization request list */
477 struct mpsc_daemon_queue io_reprioritize_q;
478
479 ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
480 struct io_reprioritize_req, ZC_NONE);
481
482 /* I/O re-prioritization MPSC callback */
483 static void io_reprioritize(mpsc_queue_chain_t elm, mpsc_daemon_queue_t dq);
484
485 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
486 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
487 void vm_decmp_upl_reprioritize(upl_t, int);
488 #endif
489
490 void
491 vm_object_set_size(
492 vm_object_t object,
493 vm_object_size_t outer_size,
494 vm_object_size_t inner_size)
495 {
496 object->vo_size = vm_object_round_page(outer_size);
497 #if KASAN
498 assert(object->vo_size - inner_size <= USHRT_MAX);
499 object->vo_size_delta = (unsigned short)(object->vo_size - inner_size);
500 #else
501 (void)inner_size;
502 #endif
503 }
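/*
 * Worked example (illustrative, assuming 16 KB pages; the page size is not
 * asserted here): vm_object_set_size(object, 0x4001, 0x4001) rounds vo_size
 * up to 0x8000 and, on KASAN kernels, records the rounding slop
 * (vo_size - inner_size = 0x3fff) in vo_size_delta.
 */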
504
505
506 /*
507 * vm_object_allocate:
508 *
509 * Returns a new object with the given size.
510 */
511
512 __private_extern__ void
513 _vm_object_allocate(
514 vm_object_size_t size,
515 vm_object_t object,
516 vm_map_serial_t provenance)
517 {
518 *object = vm_object_template;
519 object->vmo_provenance = provenance;
520
521 vm_page_queue_init(&object->memq);
522 #if UPL_DEBUG || CONFIG_IOSCHED
523 queue_init(&object->uplq);
524 #endif
525 vm_object_lock_init(object);
526 vm_object_set_size(object, size, size);
527
528 os_ref_init_raw(&object->ref_count, &vm_object_refgrp);
529
530 #if VM_OBJECT_TRACKING_OP_CREATED
531 if (vm_object_tracking_btlog) {
532 btlog_record(vm_object_tracking_btlog, object,
533 VM_OBJECT_TRACKING_OP_CREATED,
534 btref_get(__builtin_frame_address(0), 0));
535 }
536 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
537 }
538
539 __private_extern__ vm_object_t
540 vm_object_allocate(
541 vm_object_size_t size, vm_map_serial_t provenance)
542 {
543 vm_object_t object;
544
545 object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL);
546 _vm_object_allocate(size, object, provenance);
547
548 return object;
549 }
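/*
 * Usage sketch (illustrative, not a call site in this file): a caller that
 * needs a fresh anonymous object of one page might do
 *
 *	vm_object_t object;
 *
 *	object = vm_object_allocate(PAGE_SIZE, VM_MAP_SERIAL_NONE);
 *	... use or map the object ...
 *	vm_object_deallocate(object);
 *
 * The allocation returns the object holding a single reference;
 * vm_object_deallocate() (below) releases it and, once all references are
 * gone, the object's storage may be reclaimed.
 */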
550
551 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
552
553 /*
554 * vm_object_bootstrap:
555 *
556 * Initialize the VM objects module.
557 */
558 __startup_func
559 void
560 vm_object_bootstrap(void)
561 {
562 vm_size_t vm_object_size;
563
564 assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
565
566 vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
567 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
568
569 vm_object_zone = zone_create("vm objects", vm_object_size,
570 ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM);
571
572 queue_init(&vm_object_cached_list);
573
574 queue_init(&vm_object_reaper_queue);
575
576 /*
577 * Initialize the "kernel object"
578 */
579
580 /*
581 * Note that in the following size specifications, we need to add 1 because
582 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
583 */
584 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_default, VM_MAP_SERIAL_SPECIAL);
585 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object, VM_MAP_SERIAL_SPECIAL);
586 kernel_object_default->copy_strategy = MEMORY_OBJECT_COPY_NONE;
587 compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
588 kernel_object_default->no_tag_update = TRUE;
589
590 /*
591 * The object to hold retired VM pages.
592 */
593 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object, VM_MAP_SERIAL_SPECIAL);
594 retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
595
596 #if HAS_MTE
597 /*
598 * The object to hold MTE tag pages.
599 */
600 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, mte_tags_object, VM_MAP_SERIAL_SPECIAL);
601 mte_tags_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
602
603 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_tagged, VM_MAP_SERIAL_SPECIAL);
604 kernel_object_tagged->copy_strategy = MEMORY_OBJECT_COPY_NONE;
605 kernel_object_tagged->no_tag_update = TRUE;
606 kernel_object_tagged->wimg_bits = VM_WIMG_MTE;
607 #endif /* HAS_MTE */
608
609 /**
610 * The object to hold pages owned by exclaves.
611 */
612 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, exclaves_object, VM_MAP_SERIAL_SPECIAL);
613 exclaves_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
614 #if HAS_MTE
615 /**
616 * The object to hold MTE tag pages owned by exclaves.
617 */
618 _vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, exclaves_object_tagged, VM_MAP_SERIAL_SPECIAL);
619 exclaves_object_tagged->copy_strategy = MEMORY_OBJECT_COPY_NONE;
620 exclaves_object_tagged->no_tag_update = TRUE;
621 exclaves_object_tagged->wimg_bits = VM_WIMG_MTE;
622 #endif /* HAS_MTE */
623 }
624
625 #if CONFIG_IOSCHED
626 void
627 vm_io_reprioritize_init(void)
628 {
629 kern_return_t result;
630
631 result = mpsc_daemon_queue_init_with_thread(&io_reprioritize_q, io_reprioritize, BASEPRI_KERNEL,
632 "VM_io_reprioritize_thread", MPSC_DAEMON_INIT_NONE);
633 if (result != KERN_SUCCESS) {
634 panic("Unable to start I/O reprioritization thread (%d)", result);
635 }
636 }
637 #endif
638
639 void
640 vm_object_reaper_init(void)
641 {
642 kern_return_t kr;
643 thread_t thread;
644
645 kr = kernel_thread_start_priority(
646 (thread_continue_t) vm_object_reaper_thread,
647 NULL,
648 BASEPRI_VM,
649 &thread);
650 if (kr != KERN_SUCCESS) {
651 panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
652 }
653 thread_set_thread_name(thread, "VM_object_reaper_thread");
654 thread_deallocate(thread);
655 }
656
657
658 /*
659 * vm_object_deallocate:
660 *
661 * Release a reference to the specified object,
662 * gained either through a vm_object_allocate
663 * or a vm_object_reference call. When all references
664 * are gone, storage associated with this object
665 * may be relinquished.
666 *
667 * No object may be locked.
668 */
669 unsigned long vm_object_deallocate_shared_successes = 0;
670 unsigned long vm_object_deallocate_shared_failures = 0;
671 unsigned long vm_object_deallocate_shared_swap_failures = 0;
672
673 __private_extern__ void
674 vm_object_deallocate(
675 vm_object_t object)
676 {
677 vm_object_t shadow = VM_OBJECT_NULL;
678
679 // if(object)dbgLog(object, object->ref_count, object->can_persist, 3); /* (TEST/DEBUG) */
680 // else dbgLog(object, 0, 0, 3); /* (TEST/DEBUG) */
681
682 if (object == VM_OBJECT_NULL) {
683 return;
684 }
685
686 if (is_kernel_object(object) || object == compressor_object || object == retired_pages_object) {
687 vm_object_lock_shared(object);
688
689 if (os_ref_get_count_raw(&object->ref_count) == 1) {
690 if (is_kernel_object(object)) {
691 panic("vm_object_deallocate: losing a kernel_object");
692 } else if (object == retired_pages_object) {
693 panic("vm_object_deallocate: losing retired_pages_object");
694 } else {
695 panic("vm_object_deallocate: losing compressor_object");
696 }
697 }
698
699 os_ref_release_live_raw(&object->ref_count, &vm_object_refgrp);
700
701 vm_object_unlock(object);
702 return;
703 }
704
705 if (os_ref_get_count_raw(&object->ref_count) == 2 &&
706 object->named) {
707 /*
708 * This "named" object's reference count is about to
709 * drop from 2 to 1:
710 * we'll need to call memory_object_last_unmap().
711 */
712 } else if (os_ref_get_count_raw(&object->ref_count) == 2 &&
713 object->internal &&
714 object->shadow != VM_OBJECT_NULL) {
715 /*
716 * This internal object's reference count is about to
717 * drop from 2 to 1 and it has a shadow object:
718 * we'll want to try and collapse this object with its
719 * shadow.
720 */
721 } else if (os_ref_get_count_raw(&object->ref_count) >= 2) {
722 UInt32 original_ref_count;
723 volatile UInt32 *ref_count_p;
724 Boolean atomic_swap;
725
726 /*
727 * The object currently looks like it is not being
728 * kept alive solely by the reference we're about to release.
729 * Let's try and release our reference without taking
730 * all the locks we would need if we had to terminate the
731 * object (cache lock + exclusive object lock).
732 * Lock the object "shared" to make sure we don't race with
733 * anyone holding it "exclusive".
734 */
735 vm_object_lock_shared(object);
736 ref_count_p = (volatile UInt32 *) &object->ref_count;
737 original_ref_count = os_ref_get_count_raw(&object->ref_count);
738 /*
739 * Test again as "ref_count" could have changed.
740 * "named" shouldn't change.
741 */
742 if (original_ref_count == 2 &&
743 object->named) {
744 /* need to take slow path for m_o_last_unmap() */
745 atomic_swap = FALSE;
746 } else if (original_ref_count == 2 &&
747 object->internal &&
748 object->shadow != VM_OBJECT_NULL) {
749 /* need to take slow path for vm_object_collapse() */
750 atomic_swap = FALSE;
751 } else if (original_ref_count < 2) {
752 /* need to take slow path for vm_object_terminate() */
753 atomic_swap = FALSE;
754 } else {
755 /* try an atomic update with the shared lock */
756 atomic_swap = OSCompareAndSwap(
757 original_ref_count,
758 original_ref_count - 1,
759 (UInt32 *) &object->ref_count);
760 if (atomic_swap == FALSE) {
761 vm_object_deallocate_shared_swap_failures++;
762 /* fall back to the slow path... */
763 }
764 }
765
766 vm_object_unlock(object);
767
768 if (atomic_swap) {
769 /*
770 * ref_count was updated atomically !
771 */
772 vm_object_deallocate_shared_successes++;
773 return;
774 }
775
776 /*
777 * Someone else updated the ref_count at the same
778 * time and we lost the race. Fall back to the usual
779 * slow but safe path...
780 */
781 vm_object_deallocate_shared_failures++;
782 }
783
784 while (object != VM_OBJECT_NULL) {
785 vm_object_lock(object);
786
787 assert(os_ref_get_count_raw(&object->ref_count) > 0);
788
789 /*
790 * If the object has a named reference, and only
791 * that reference would remain, inform the pager
792 * about the last "mapping" reference going away.
793 */
794 if ((os_ref_get_count_raw(&object->ref_count) == 2) && (object->named)) {
795 memory_object_t pager = object->pager;
796
797 /* Notify the Pager that there are no */
798 /* more mappers for this object */
799
800 if (pager != MEMORY_OBJECT_NULL) {
801 vm_object_mapping_wait(object, THREAD_UNINT);
802 /* object might have lost its pager while waiting */
803 pager = object->pager;
804 if (object->ref_count == 2 &&
805 object->named &&
806 pager != MEMORY_OBJECT_NULL) {
807 vm_object_mapping_begin(object);
808 assert(pager->mo_last_unmap_ctid == 0);
809 /*
810 * Signal that we're the thread that triggered
811 * the memory_object_last_unmap(), so that we
812 * don't deadlock in vm_object_destroy() if this
813 * was the last reference and we're releasing
814 * the pager there.
815 */
816 pager->mo_last_unmap_ctid = thread_get_ctid(current_thread());
817 vm_object_unlock(object);
818
819 memory_object_last_unmap(pager);
820 /* pager might no longer be valid now */
821 pager = MEMORY_OBJECT_NULL;
822
823 vm_object_lock(object);
824
825 vm_object_mapping_end(object);
826 pager = object->pager;
827 if (pager != MEMORY_OBJECT_NULL) {
828 /*
829 * The pager is still there, so reset its
830 * "mo_last_unmap_ctid" now that we're done.
831 */
832 assert3u(pager->mo_last_unmap_ctid, ==, thread_get_ctid(current_thread()));
833 pager->mo_last_unmap_ctid = 0;
834 }
835 }
836 }
837 assert(os_ref_get_count_raw(&object->ref_count) > 0);
838 }
839
840 /*
841 * Lose the reference. If other references
842 * remain, then we are done, unless we need
843 * to retry a cache trim.
844 * If it is the last reference, then keep it
845 * until any pending initialization is completed.
846 */
847
848 /* if the object is terminating, it cannot go into */
849 /* the cache and we obviously should not call */
850 /* terminate again. */
851
852 if ((os_ref_get_count_raw(&object->ref_count) > 1) ||
853 object->terminating) {
854 vm_object_lock_assert_exclusive(object);
855 os_ref_release_live_locked_raw(&object->ref_count,
856 &vm_object_refgrp);
857
858 if (os_ref_get_count_raw(&object->ref_count) == 1 &&
859 object->shadow != VM_OBJECT_NULL) {
860 /*
861 * There's only one reference left on this
862 * VM object. We can't tell if it's a valid
863 * one (from a mapping for example) or if this
864 * object is just part of a possibly stale and
865 * useless shadow chain.
866 * We would like to try and collapse it into
867 * its parent, but we don't have any pointers
868 * back to this parent object.
869 * But we can try and collapse this object with
870 * its own shadows, in case these are useless
871 * too...
872 * We can't bypass this object though, since we
873 * don't know if this last reference on it is
874 * meaningful or not.
875 */
876 vm_object_collapse(object, 0, FALSE);
877 }
878 vm_object_unlock(object);
879 return;
880 }
881
882 /*
883 * We have to wait for initialization
884 * before destroying or caching the object.
885 */
886
887 if (object->pager_created && !object->pager_ready) {
888 assert(!object->can_persist);
889 vm_object_sleep(object,
890 VM_OBJECT_EVENT_PAGER_READY,
891 THREAD_UNINT,
892 LCK_SLEEP_UNLOCK);
893 continue;
894 }
895
896 /*
897 * Terminate this object. If it had a shadow,
898 * then deallocate it; otherwise, if we need
899 * to retry a cache trim, do so now; otherwise,
900 * we are done. "pageout" objects have a shadow,
901 * but maintain a "paging reference" rather than
902 * a normal reference.
903 */
904 shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
905
906 if (vm_object_terminate(object) != KERN_SUCCESS) {
907 return;
908 }
909 if (shadow != VM_OBJECT_NULL) {
910 object = shadow;
911 continue;
912 }
913 return;
914 }
915 }
916
917
918
919 vm_page_t
920 vm_object_page_grab(
921 vm_object_t object)
922 {
923 vm_page_t p, next_p;
924 int p_limit = 0;
925 int p_skipped = 0;
926
927 vm_object_lock_assert_exclusive(object);
928
929 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
930 p_limit = MIN(50, object->resident_page_count);
931
932 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
933 p = next_p;
934 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
935
936 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning ||
937 p->vmp_laundry || vm_page_is_fictitious(p)) {
938 goto move_page_in_obj;
939 }
940
941 if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
942 vm_page_lockspin_queues();
943
944 if (p->vmp_pmapped) {
945 int refmod_state;
946
947 vm_object_page_grab_pmapped++;
948
949 if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
950 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
951
952 if (refmod_state & VM_MEM_REFERENCED) {
953 p->vmp_reference = TRUE;
954 }
955 if (refmod_state & VM_MEM_MODIFIED) {
956 SET_PAGE_DIRTY(p, FALSE);
957 }
958 }
959 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
960 vm_page_lockconvert_queues();
961 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
962
963 if (refmod_state & VM_MEM_REFERENCED) {
964 p->vmp_reference = TRUE;
965 }
966 if (refmod_state & VM_MEM_MODIFIED) {
967 SET_PAGE_DIRTY(p, FALSE);
968 }
969
970 if (p->vmp_dirty == FALSE) {
971 goto take_page;
972 }
973 }
974 }
975 if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
976 vm_page_activate(p);
977
978 counter_inc(&vm_statistics_reactivations);
979 vm_object_page_grab_reactivations++;
980 }
981 vm_page_unlock_queues();
982 move_page_in_obj:
983 vm_page_queue_remove(&object->memq, p, vmp_listq);
984 vm_page_queue_enter(&object->memq, p, vmp_listq);
985
986 p_skipped++;
987 continue;
988 }
989 vm_page_lockspin_queues();
990 take_page:
991 vm_page_free_prepare_queues(p);
992 vm_object_page_grab_returned++;
993 vm_object_page_grab_skipped += p_skipped;
994
995 vm_page_unlock_queues();
996
997 vm_page_free_prepare_object(p, TRUE);
998
999 return p;
1000 }
1001 vm_object_page_grab_skipped += p_skipped;
1002 vm_object_page_grab_failed++;
1003
1004 return NULL;
1005 }
1006
1007 #if COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1
1008
1009 /* This is the actual number of filling cheads that is going to be used;
1010 * it must satisfy 1 <= vm_cheads <= COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT. */
1011 TUNABLE_WRITEABLE(uint32_t, vm_cheads, "vm_cheads", 8);
1012 /* This determines which criterion is used for selecting the chead:
1013 * either the PID of the grabber task or its coalition. */
1014 TUNABLE_WRITEABLE(vm_chead_select_t, vm_chead_select, "vm_chead_select", CSEL_BY_PID);
1015 /* This determines whether the grabber-id is set on every page-fault insert or just the first insert. */
1016 TUNABLE_WRITEABLE(boolean_t, vm_chead_rehint, "vm_chead_rehint", false);
1017
1018 /*
1019 * This function is called from vm_page_insert_internal(). When it's called from the context
1020 * of a vm_fault where a task has just requested a new page or paged in an existing page,
1021 * this function records some bits of information about the task. These bits are then
1022 * going to be used when the page is sent to the compressor to select the compressor-head
1023 * that will be used.
1024 * The goal of this is to make pages that come from the same task/coalition be compressed to the
1025 * same compressor segment, which helps the locality of swap-in and decompression.
1026 * This optimization relies on the heuristic assumption that the vm_object is only ever mapped
1027 * in a single task/coalition. vm_objects that violate this would not benefit from this optimization.
1028 * See also vm_pageout_select_filling_chead()
1029 */
1030 void
1031 vm_object_set_chead_hint(
1032 vm_object_t object)
1033 {
1034 if (!object->internal) {
1035 /* not relevant for pages that are not going to get to the compressor */
1036 return;
1037 }
1038
1039 if (object->vo_chead_hint != 0 && !vm_chead_rehint) {
1040 /* there's already a value there and we don't want to set it again */
1041 return;
1042 }
1043 task_t cur_task = current_task_early();
1044 if (cur_task == TASK_NULL || cur_task == kernel_task || vm_cheads <= 1) {
1045 /* avoid doing extra work for the kernel map case */
1046 object->vo_chead_hint = 0;
1047 return;
1048 }
1049 int value = 0;
1050 if (vm_chead_select == CSEL_BY_PID) {
1051 value = task_pid(cur_task);
1052 } else if (vm_chead_select == CSEL_BY_COALITION) {
1053 /* The choice of coalition type is not very significant here since both
1054 * types seem to have a similar task division. */
1055 coalition_t coalition = task_get_coalition(cur_task, COALITION_TYPE_JETSAM);
1056 if (coalition != COALITION_NULL) {
1057 value = coalition_id(coalition);
1058 }
1059 }
1060 uint32_t mod_by = MIN(vm_cheads, COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT);
1061 object->vo_chead_hint = (uint8_t)value % mod_by;
1062 }
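/*
 * Worked example (illustrative, assuming COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT
 * is at least 8): with the defaults above (vm_cheads = 8, vm_chead_select =
 * CSEL_BY_PID), a fault taken by a task with PID 137 stores 137 % 8 = 1 in
 * vo_chead_hint, so that task's pages are later directed to compressor
 * head 1 (see vm_pageout_select_filling_chead()).
 */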
1063
1064 #endif /* COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1 */
1065
1066 #define EVICT_PREPARE_LIMIT 64
1067 #define EVICT_AGE 10
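/*
 * Illustrative note (added): vm_object_cache_add() stamps an object with
 * vo_cache_ts = now + EVICT_AGE, so an object cached when the system clock
 * reads 100 seconds becomes eligible for eviction once the clock reaches 110;
 * vm_object_cache_evict() stops its scan when it reaches an entry that has
 * not yet aged that long.
 */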
1068
1069 static clock_sec_t vm_object_cache_aging_ts = 0;
1070
1071 static void
1072 vm_object_cache_remove_locked(
1073 vm_object_t object)
1074 {
1075 assert(object->purgable == VM_PURGABLE_DENY);
1076
1077 queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
1078 object->cached_list.next = NULL;
1079 object->cached_list.prev = NULL;
1080
1081 vm_object_cached_count--;
1082 }
1083
1084 void
1085 vm_object_cache_remove(
1086 vm_object_t object)
1087 {
1088 vm_object_cache_lock_spin();
1089
1090 if (object->cached_list.next &&
1091 object->cached_list.prev) {
1092 vm_object_cache_remove_locked(object);
1093 }
1094
1095 vm_object_cache_unlock();
1096 }
1097
1098 void
1099 vm_object_cache_add(
1100 vm_object_t object)
1101 {
1102 clock_sec_t sec;
1103 clock_nsec_t nsec;
1104
1105 assert(object->purgable == VM_PURGABLE_DENY);
1106
1107 if (object->resident_page_count == 0) {
1108 return;
1109 }
1110 if (object->vo_ledger_tag) {
1111 /*
1112 * We can't add an "owned" object to the cache because
1113 * the "vo_owner" and "vo_cache_ts" fields are part of the
1114 * same "union" and can't be used at the same time.
1115 */
1116 return;
1117 }
1118 clock_get_system_nanotime(&sec, &nsec);
1119
1120 vm_object_cache_lock_spin();
1121
1122 if (object->cached_list.next == NULL &&
1123 object->cached_list.prev == NULL) {
1124 queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
1125 object->vo_cache_ts = sec + EVICT_AGE;
1126 object->vo_cache_pages_to_scan = object->resident_page_count;
1127
1128 vm_object_cached_count++;
1129 vm_object_cache_adds++;
1130 }
1131 vm_object_cache_unlock();
1132 }
1133
1134 int
1135 vm_object_cache_evict(
1136 int num_to_evict,
1137 int max_objects_to_examine)
1138 {
1139 vm_object_t object = VM_OBJECT_NULL;
1140 vm_object_t next_obj = VM_OBJECT_NULL;
1141 vm_page_t local_free_q = VM_PAGE_NULL;
1142 vm_page_t p;
1143 vm_page_t next_p;
1144 int object_cnt = 0;
1145 vm_page_t ep_array[EVICT_PREPARE_LIMIT];
1146 int ep_count;
1147 int ep_limit;
1148 int ep_index;
1149 int ep_freed = 0;
1150 int ep_moved = 0;
1151 uint32_t ep_skipped = 0;
1152 clock_sec_t sec;
1153 clock_nsec_t nsec;
1154
1155 KDBG_DEBUG(0x13001ec | DBG_FUNC_START);
1156 /*
1157 * do a couple of quick checks to see if it's
1158 * worthwhile grabbing the lock
1159 */
1160 if (queue_empty(&vm_object_cached_list)) {
1161 KDBG_DEBUG(0x13001ec | DBG_FUNC_END);
1162 return 0;
1163 }
1164 clock_get_system_nanotime(&sec, &nsec);
1165 if (max_objects_to_examine == INT_MAX) {
1166 /* evict all pages from all cached objects now */
1167 sec = (clock_sec_t)-1;
1168 }
1169
1170 /*
1171 * the object on the head of the queue has not
1172 * yet sufficiently aged
1173 */
1174 if (sec < vm_object_cache_aging_ts) {
1175 KDBG_DEBUG(0x13001ec | DBG_FUNC_END);
1176 return 0;
1177 }
1178 /*
1179 * don't need the queue lock to find
1180 * and lock an object on the cached list
1181 */
1182 vm_page_unlock_queues();
1183
1184 vm_object_cache_lock_spin();
1185
1186 for (;;) { /* loop for as long as we have objects to process */
1187 next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1188
1189 /* loop to find the next target in the cache_list */
1190 while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1191 object = next_obj;
1192 next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1193
1194 assert(object->purgable == VM_PURGABLE_DENY);
1195
1196 if (sec < object->vo_cache_ts) { // reached the point in the queue beyond the time we started
1197 KDBG_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec);
1198
1199 vm_object_cache_aging_ts = object->vo_cache_ts;
1200 object = VM_OBJECT_NULL; /* this will cause to break away from the outer loop */
1201 break;
1202 }
1203 if (!vm_object_lock_try_scan(object)) {
1204 /*
1205 * just skip over this guy for now... if we find
1206 * an object to steal pages from, we'll revisit in a bit...
1207 * hopefully, the lock will have cleared
1208 */
1209 KDBG_DEBUG(0x13001f8, object, object->resident_page_count);
1210
1211 object = VM_OBJECT_NULL;
1212 continue;
1213 }
1214 if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1215 /*
1216 * this case really shouldn't happen, but it's not fatal
1217 * so deal with it... if we don't remove the object from
1218 * the list, we'll never move past it.
1219 */
1220 KDBG_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved);
1221
1222 vm_object_cache_remove_locked(object);
1223 vm_object_unlock(object);
1224 object = VM_OBJECT_NULL;
1225 continue;
1226 }
1227 /*
1228 * we have a locked object with pages...
1229 * time to start harvesting
1230 */
1231 break;
1232 }
1233 vm_object_cache_unlock();
1234
1235 if (object == VM_OBJECT_NULL) {
1236 break;
1237 }
1238
1239 /*
1240 * object is locked at this point and
1241 * has resident pages
1242 */
1243 next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1244
1245 /*
1246 * break the page scan into 2 pieces to minimize the time spent
1247 * behind the page queue lock...
1248 * the list of pages on these unused objects is likely to be cold
1249 * w/r to the cpu cache which increases the time to scan the list
1250 * tenfold... and we may have a 'run' of pages we can't utilize that
1251 * needs to be skipped over...
1252 */
1253 if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1254 ep_limit = EVICT_PREPARE_LIMIT;
1255 }
1256 ep_count = 0;
1257
1258 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1259 p = next_p;
1260 next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1261
1262 object->vo_cache_pages_to_scan--;
1263
1264 if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1265 vm_page_queue_remove(&object->memq, p, vmp_listq);
1266 vm_page_queue_enter(&object->memq, p, vmp_listq);
1267
1268 ep_skipped++;
1269 continue;
1270 }
1271 if (!object->internal &&
1272 object->pager_created &&
1273 object->pager == NULL) {
1274 /*
1275 * This object has lost its pager, most likely
1276 * due to a force-unmount or ungraft. The pager
1277 * will never come back, so there's no point in
1278 * keeping these pages, even if modified.
1279 * The object could still be mapped, so we need
1280 * to clear any PTE that might still be pointing
1281 * at this physical page before we can reclaim
1282 * it.
1283 */
1284 if (p->vmp_pmapped) {
1285 int refmod;
1286 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
1287 if (refmod & VM_MEM_MODIFIED) {
1288 assert(p->vmp_wpmapped);
1289 p->vmp_dirty = TRUE;
1290 }
1291 }
1292 // printf("FBDP %s:%d object %p reason %d page %p offset 0x%llx pmapped %d wpmapped %d xpmapped %d dirty %d precious %d\n", __FUNCTION__, __LINE__, object, object->no_pager_reason, p, p->vmp_offset, p->vmp_pmapped, p->vmp_wpmapped, p->vmp_xpmapped, p->vmp_dirty, p->vmp_precious);
1293 /* clear any reason to skip this page below */
1294 p->vmp_dirty = FALSE;
1295 p->vmp_precious = FALSE;
1296 p->vmp_wpmapped = FALSE;
1297 }
1298 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1299 vm_page_queue_remove(&object->memq, p, vmp_listq);
1300 vm_page_queue_enter(&object->memq, p, vmp_listq);
1301
1302 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1303 }
1304 ep_array[ep_count++] = p;
1305 }
1306 KDBG_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved);
1307
1308 vm_page_lockspin_queues();
1309
1310 for (ep_index = 0; ep_index < ep_count; ep_index++) {
1311 p = ep_array[ep_index];
1312
1313 if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1314 p->vmp_reference = FALSE;
1315 p->vmp_no_cache = FALSE;
1316
1317 /*
1318 * we've already filtered out pages that are in the laundry
1319 * so if we get here, this page can't be on the pageout queue
1320 */
1321 vm_page_queues_remove(p, FALSE);
1322 vm_page_enqueue_inactive(p, TRUE);
1323
1324 ep_moved++;
1325 } else {
1326 #if CONFIG_PHANTOM_CACHE
1327 vm_phantom_cache_add_ghost(p);
1328 #endif
1329 vm_page_free_prepare_queues(p);
1330
1331 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1332 /*
1333 * Add this page to our list of reclaimed pages,
1334 * to be freed later.
1335 */
1336 p->vmp_snext = local_free_q;
1337 local_free_q = p;
1338
1339 ep_freed++;
1340 }
1341 }
1342 vm_page_unlock_queues();
1343
1344 KDBG_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved);
1345
1346 if (local_free_q) {
1347 vm_page_free_list(local_free_q, TRUE);
1348 local_free_q = VM_PAGE_NULL;
1349 }
1350 if (object->vo_cache_pages_to_scan == 0) {
1351 KDBG_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved);
1352
1353 vm_object_cache_remove(object);
1354
1355 KDBG_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved);
1356 }
1357 /*
1358 * done with this object
1359 */
1360 vm_object_unlock(object);
1361 object = VM_OBJECT_NULL;
1362
1363 /*
1364 * at this point, we are not holding any locks
1365 */
1366 if ((ep_freed + ep_moved) >= num_to_evict) {
1367 /*
1368 * we've reached our target for the
1369 * number of pages to evict
1370 */
1371 break;
1372 }
1373 vm_object_cache_lock_spin();
1374 }
1375 /*
1376 * put the page queues lock back to the caller's
1377 * idea of it
1378 */
1379 vm_page_lock_queues();
1380
1381 vm_object_cache_pages_freed += ep_freed;
1382 vm_object_cache_pages_moved += ep_moved;
1383 vm_object_cache_pages_skipped += ep_skipped;
1384
1385 KDBG_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed);
1386 // printf("FBDP %s(0x%x,0x%x) freed %d moved %d skipped %u\n", __func__, num_to_evict, max_objects_to_examine, ep_freed, ep_moved, ep_skipped);
1387 return ep_freed;
1388 }
1389
1390 int vm_object_cache_evict_all(void);
1391 int
1392 vm_object_cache_evict_all(void)
1393 {
1394 int freed;
1395
1396 vm_page_lock_queues();
1397 freed = vm_object_cache_evict(INT_MAX, INT_MAX);
1398 vm_page_unlock_queues();
1399 printf("%s: freed %d\n", __func__, freed);
1400 return freed;
1401 }
1402
1403 /*
1404 * Routine: vm_object_terminate
1405 * Purpose:
1406 * Free all resources associated with a vm_object.
1407 * In/out conditions:
1408 * Upon entry, the object must be locked,
1409 * and the object must have exactly one reference.
1410 *
1411 * The shadow object reference is left alone.
1412 *
1413 * The object must be unlocked if it's found that pages
1414 * must be flushed to a backing object. If someone
1415 * manages to map the object while it is being flushed
1416 * the object is returned unlocked and unchanged. Otherwise,
1417 * upon exit, the cache will be unlocked, and the
1418 * object will cease to exist.
1419 */
1420 static kern_return_t
1421 vm_object_terminate(
1422 vm_object_t object)
1423 {
1424 vm_object_t shadow_object;
1425
1426 vm_object_lock_assert_exclusive(object);
1427
1428 if (!object->pageout && (!object->internal && object->can_persist) &&
1429 (object->pager != NULL || object->shadow_severed)) {
1430 /*
1431 * Clear pager_trusted bit so that the pages get yanked
1432 * out of the object instead of cleaned in place. This
1433 * prevents a deadlock in XMM and makes more sense anyway.
1434 */
1435 VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE);
1436
1437 vm_object_reap_pages(object, REAP_TERMINATE);
1438 }
1439 /*
1440 * Make sure the object isn't already being terminated
1441 */
1442 if (object->terminating) {
1443 vm_object_lock_assert_exclusive(object);
1444 os_ref_release_live_locked_raw(&object->ref_count, &vm_object_refgrp);
1445 vm_object_unlock(object);
1446 return KERN_FAILURE;
1447 }
1448
1449 /*
1450 * Did somebody get a reference to the object while we were
1451 * cleaning it?
1452 */
1453 if (os_ref_get_count_raw(&object->ref_count) != 1) {
1454 vm_object_lock_assert_exclusive(object);
1455 os_ref_release_live_locked_raw(&object->ref_count, &vm_object_refgrp);
1456 vm_object_unlock(object);
1457 return KERN_FAILURE;
1458 }
1459
1460 /*
1461 * Make sure no one can look us up now.
1462 */
1463
1464 VM_OBJECT_SET_TERMINATING(object, TRUE);
1465 VM_OBJECT_SET_ALIVE(object, FALSE);
1466
1467 if (!object->internal &&
1468 object->cached_list.next &&
1469 object->cached_list.prev) {
1470 vm_object_cache_remove(object);
1471 }
1472
1473 /*
1474 * Detach the object from its shadow if we are the shadow's
1475 * copy. The reference we hold on the shadow must be dropped
1476 * by our caller.
1477 */
1478 if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1479 !(object->pageout)) {
1480 vm_object_lock(shadow_object);
1481 if (shadow_object->vo_copy == object) {
1482 VM_OBJECT_COPY_SET(shadow_object, VM_OBJECT_NULL);
1483 }
1484 vm_object_unlock(shadow_object);
1485 }
1486
1487 if (object->paging_in_progress != 0 ||
1488 object->activity_in_progress != 0) {
1489 /*
1490 * There are still some paging_in_progress references
1491 * on this object, meaning that there are some paging
1492 * or other I/O operations in progress for this VM object.
1493 * Such operations take some paging_in_progress references
1494 * up front to ensure that the object doesn't go away, but
1495 * they may also need to acquire a reference on the VM object,
1496 * to map it in kernel space, for example. That means that
1497 * they may end up releasing the last reference on the VM
1498 * object, triggering its termination, while still holding
1499 * paging_in_progress references. Waiting for these
1500 * pending paging_in_progress references to go away here would
1501 * deadlock.
1502 *
1503 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1504 * complete the VM object termination if it still holds
1505 * paging_in_progress references at this point.
1506 *
1507 * No new paging_in_progress should appear now that the
1508 * VM object is "terminating" and not "alive".
1509 */
1510 vm_object_reap_async(object);
1511 vm_object_unlock(object);
1512 /*
1513 * Return KERN_FAILURE to let the caller know that we
1514 * haven't completed the termination and it can't drop this
1515 * object's reference on its shadow object yet.
1516 * The reaper thread will take care of that once it has
1517 * completed this object's termination.
1518 */
1519 return KERN_FAILURE;
1520 }
1521 /*
1522 * complete the VM object termination
1523 */
1524 vm_object_reap(object);
1525 object = VM_OBJECT_NULL;
1526
1527 /*
1528 * the object lock was released by vm_object_reap()
1529 *
1530 * KERN_SUCCESS means that this object has been terminated
1531 * and no longer needs its shadow object but still holds a
1532 * reference on it.
1533 * The caller is responsible for dropping that reference.
1534 * We can't call vm_object_deallocate() here because that
1535 * would create a recursion.
1536 */
1537 return KERN_SUCCESS;
1538 }
1539
1540
1541 /*
1542 * vm_object_reap():
1543 *
1544 * Complete the termination of a VM object after it's been marked
1545 * as "terminating" and "!alive" by vm_object_terminate().
1546 *
1547 * The VM object must be locked by caller.
1548 * The lock will be released on return and the VM object is no longer valid.
1549 */
1550
1551 void
1552 vm_object_reap(
1553 vm_object_t object)
1554 {
1555 memory_object_t pager;
1556 os_ref_count_t ref_count;
1557
1558 vm_object_lock_assert_exclusive(object);
1559 assert(object->paging_in_progress == 0);
1560 assert(object->activity_in_progress == 0);
1561
1562 vm_object_reap_count++;
1563
1564 /*
1565 * Disown this purgeable object to cleanup its owner's purgeable
1566 * ledgers. We need to do this before disconnecting the object
1567 * from its pager, to properly account for compressed pages.
1568 */
1569 if (/* object->internal && */
1570 (object->purgable != VM_PURGABLE_DENY ||
1571 object->vo_ledger_tag)) {
1572 int ledger_flags;
1573 kern_return_t kr;
1574
1575 ledger_flags = 0;
1576 assert(!object->alive);
1577 assert(object->terminating);
1578 kr = vm_object_ownership_change(object,
1579 VM_LEDGER_TAG_NONE,
1580 NULL, /* no owner */
1581 ledger_flags,
1582 FALSE); /* task_objq not locked */
1583 assert(kr == KERN_SUCCESS);
1584 assert(object->vo_owner == NULL);
1585 }
1586
1587 #if DEVELOPMENT || DEBUG
1588 if (object->object_is_shared_cache &&
1589 object->pager != NULL &&
1590 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1591 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1592 }
1593 #endif /* DEVELOPMENT || DEBUG */
1594
1595 pager = object->pager;
1596 object->pager = MEMORY_OBJECT_NULL;
1597
1598 if (pager != MEMORY_OBJECT_NULL) {
1599 memory_object_control_disable(&object->pager_control);
1600 }
1601
1602 ref_count = os_ref_release_locked_raw(&object->ref_count,
1603 &vm_object_refgrp);
1604 if (__improbable(ref_count != 0)) {
1605 panic("Attempting to deallocate vm_object with outstanding refs: %u",
1606 ref_count);
1607 }
1608
1609 /*
1610 * remove from purgeable queue if it's on
1611 */
1612 if (object->internal) {
1613 assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1614
1615 VM_OBJECT_UNWIRED(object);
1616
1617 if (object->purgable == VM_PURGABLE_DENY) {
1618 /* not purgeable: nothing to do */
1619 } else if (object->purgable == VM_PURGABLE_VOLATILE) {
1620 purgeable_q_t queue;
1621
1622 queue = vm_purgeable_object_remove(object);
1623 assert(queue);
1624
1625 if (object->purgeable_when_ripe) {
1626 /*
1627 * Must take page lock for this -
1628 * using it to protect token queue
1629 */
1630 vm_page_lock_queues();
1631 vm_purgeable_token_delete_first(queue);
1632
1633 assert(queue->debug_count_objects >= 0);
1634 vm_page_unlock_queues();
1635 }
1636
1637 /*
1638 * Update "vm_page_purgeable_count" in bulk and mark
1639 * object as VM_PURGABLE_EMPTY to avoid updating
1640 * "vm_page_purgeable_count" again in vm_page_remove()
1641 * when reaping the pages.
1642 */
1643 unsigned int delta;
1644 assert(object->resident_page_count >=
1645 object->wired_page_count);
1646 delta = (object->resident_page_count -
1647 object->wired_page_count);
1648 if (delta != 0) {
1649 assert(vm_page_purgeable_count >= delta);
1650 OSAddAtomic(-delta,
1651 (SInt32 *)&vm_page_purgeable_count);
1652 }
1653 if (object->wired_page_count != 0) {
1654 assert(vm_page_purgeable_wired_count >=
1655 object->wired_page_count);
1656 OSAddAtomic(-object->wired_page_count,
1657 (SInt32 *)&vm_page_purgeable_wired_count);
1658 }
1659 VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
1660 } else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1661 object->purgable == VM_PURGABLE_EMPTY) {
1662 /* remove from nonvolatile queue */
1663 vm_purgeable_nonvolatile_dequeue(object);
1664 } else {
1665 panic("object %p in unexpected purgeable state 0x%x",
1666 object, object->purgable);
1667 }
1668 if (object->transposed &&
1669 object->cached_list.next != NULL &&
1670 object->cached_list.prev == NULL) {
1671 /*
1672 * object->cached_list.next "points" to the
1673 * object that was transposed with this object.
1674 */
1675 } else {
1676 assert(object->cached_list.next == NULL);
1677 }
1678 assert(object->cached_list.prev == NULL);
1679 }
1680
1681 if (object->pageout) {
1682 /*
1683 * free all remaining pages tabled on
1684 * this object
1685 * and clean up its shadow
1686 */
1687 assert(object->shadow != VM_OBJECT_NULL);
1688
1689 vm_pageout_object_terminate(object);
1690 } else if (object->resident_page_count) {
1691 /*
1692 * free all remaining pages tabled on
1693 * this object
1694 */
1695 vm_object_reap_pages(object, REAP_REAP);
1696 }
1697 assert(vm_page_queue_empty(&object->memq));
1698 assert(object->paging_in_progress == 0);
1699 assert(object->activity_in_progress == 0);
1700 assert(os_ref_get_count_raw(&object->ref_count) == 0);
1701
1702 /*
1703 * If the pager has not already been released by
1704 * vm_object_destroy, we need to terminate it and
1705 * release our reference to it here.
1706 */
1707 if (pager != MEMORY_OBJECT_NULL) {
1708 vm_object_unlock(object);
1709 vm_object_release_pager(pager);
1710 vm_object_lock(object);
1711 }
1712
1713 /* kick off anyone waiting on terminating */
1714 VM_OBJECT_SET_TERMINATING(object, FALSE);
1715 vm_object_paging_begin(object);
1716 vm_object_paging_end(object);
1717 vm_object_unlock(object);
1718
1719 object->shadow = VM_OBJECT_NULL;
1720
1721 #if VM_OBJECT_TRACKING
1722 if (vm_object_tracking_btlog) {
1723 btlog_erase(vm_object_tracking_btlog, object);
1724 }
1725 #endif /* VM_OBJECT_TRACKING */
1726
1727 vm_object_lock_destroy(object);
1728 /*
1729 * Free the space for the object.
1730 */
1731 zfree(vm_object_zone, object);
1732 object = VM_OBJECT_NULL;
1733 }
1734
1735
1736 unsigned int vm_max_batch = 256;
1737
1738 #define V_O_R_MAX_BATCH 128
1739
1740 #define BATCH_LIMIT(max) (vm_max_batch >= max ? max : vm_max_batch)
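/*
 * For example, with the defaults above (vm_max_batch == 256),
 * BATCH_LIMIT(V_O_R_MAX_BATCH) evaluates to 128; if "vm_max_batch" were
 * tuned down to, say, 64, the batch size would be clamped to 64 instead.
 */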
1741
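/*
 * Routine: vm_object_reap_freelist
 * Purpose: Release a locally collected list of reclaimed pages.
 * If requested, disconnect each pmapped page from its pmap(s)
 * first, and reset the cache attributes for the whole batch
 * before handing the list to vm_page_free_list(). Always
 * returns VM_PAGE_NULL so the caller can reset its local
 * queue in a single expression.
 */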
1742 static inline vm_page_t
1743 vm_object_reap_freelist(vm_page_t local_free_q, bool do_disconnect, bool set_cache_attr)
1744 {
1745 vm_page_t page;
1746 if (local_free_q) {
1747 if (do_disconnect) {
1748 _vm_page_list_foreach(page, local_free_q) {
1749 if (page->vmp_pmapped) {
1750 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
1751 }
1752 }
1753 }
1754
1755 if (set_cache_attr) {
1756 #if HAS_MTE
1757 assert(!local_free_q->vmp_using_mte);
1758 #endif /* HAS_MTE */
1759 const unified_page_list_t pmap_batch_list = {
1760 .page_slist = local_free_q,
1761 .type = UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST,
1762 };
1763 pmap_batch_set_cache_attributes(&pmap_batch_list, 0);
1764 }
1765 vm_page_free_list(local_free_q, TRUE);
1766 }
1767 return VM_PAGE_NULL;
1768 }
1769
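/*
 * Routine: vm_object_reap_pages
 * Purpose: Walk the object's resident pages and reclaim them
 * according to "reap_type":
 * REAP_DATA_FLUSH - skip wired pages, free everything else after
 * disconnecting it from all pmaps;
 * REAP_PURGEABLE - discard the pages of a purged object, skipping
 * pages that are wired, busy, or being laundered/cleaned;
 * REAP_TERMINATE - dirty or precious pages of a live external
 * object are pushed to the pager instead of being freed;
 * REAP_REAP - free all resident pages.
 * Pages are collected on a local list and freed in batches to limit
 * how long the page queues lock is held.
 */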
1770 void
1771 vm_object_reap_pages(
1772 vm_object_t object,
1773 int reap_type)
1774 {
1775 vm_page_t p;
1776 vm_page_t next;
1777 vm_page_t local_free_q = VM_PAGE_NULL;
1778 int loop_count;
1779 bool disconnect_on_release;
1780 bool set_cache_attr_needed;
1781 pmap_flush_context pmap_flush_context_storage;
1782
1783 if (reap_type == REAP_DATA_FLUSH) {
1784 /*
1785 * We need to disconnect pages from all pmaps before
1786 * releasing them to the free list
1787 */
1788 disconnect_on_release = true;
1789 } else {
1790 /*
1791 * Either the caller has already disconnected the pages
1792 * from all pmaps, or we disconnect them here as we add
1793 * them to our local list of pages to be released.
1794 * No need to re-disconnect them when we release the pages
1795 * to the free list.
1796 */
1797 disconnect_on_release = false;
1798 }
1799
1800 restart_after_sleep:
1801 set_cache_attr_needed = false;
1802 if (object->set_cache_attr) {
1803 /**
1804 * If the cache attributes need to be reset for the pages to
1805 * be freed, we clear object->set_cache_attr here so that
1806 * our call to vm_page_free_list (which will ultimately call
1807 * vm_page_remove() on each page) won't try to reset the
1808 * cache attributes on each page individually. Depending on
1809 * the architecture, it may be much faster for us to call
1810 * pmap_batch_set_cache_attributes() instead. Note that
1811 * this function must restore object->set_cache_attr in any
1812 * case where it is required to drop the object lock, e.g.
1813 * to wait for a busy page.
1814 */
1815 object->set_cache_attr = FALSE;
1816 set_cache_attr_needed = true;
1817 }
1818
1819 if (vm_page_queue_empty(&object->memq)) {
1820 return;
1821 }
1822 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1823
1824 if (reap_type == REAP_PURGEABLE) {
1825 pmap_flush_context_init(&pmap_flush_context_storage);
1826 }
1827
1828 vm_page_lock_queues();
1829
1830 next = (vm_page_t)vm_page_queue_first(&object->memq);
1831
1832 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1833 p = next;
1834 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1835
1836 if (--loop_count == 0) {
1837 vm_page_unlock_queues();
1838
1839 if (local_free_q) {
1840 if (reap_type == REAP_PURGEABLE) {
1841 pmap_flush(&pmap_flush_context_storage);
1842 pmap_flush_context_init(&pmap_flush_context_storage);
1843 }
1844 /*
1845 * Free the pages we reclaimed so far
1846 * and take a little break to avoid
1847 * hogging the page queue lock too long
1848 */
1849 local_free_q = vm_object_reap_freelist(local_free_q,
1850 disconnect_on_release, set_cache_attr_needed);
1851 } else {
1852 mutex_pause(0);
1853 }
1854
1855 loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1856
1857 vm_page_lock_queues();
1858 }
1859 if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1860 if (p->vmp_busy || p->vmp_cleaning) {
1861 vm_page_unlock_queues();
1862 /*
1863 * free the pages reclaimed so far
1864 */
1865 local_free_q = vm_object_reap_freelist(local_free_q,
1866 disconnect_on_release, set_cache_attr_needed);
1867
1868 if (set_cache_attr_needed) {
1869 object->set_cache_attr = TRUE;
1870 }
1871 vm_page_sleep(object, p, THREAD_UNINT, LCK_SLEEP_DEFAULT);
1872
1873 goto restart_after_sleep;
1874 }
1875 if (p->vmp_laundry) {
1876 vm_pageout_steal_laundry(p, TRUE);
1877 }
1878 }
1879 switch (reap_type) {
1880 case REAP_DATA_FLUSH:
1881 if (VM_PAGE_WIRED(p)) {
1882 /*
1883 * this is an odd case... perhaps we should
1884 * zero-fill this page since we're conceptually
1885 * tossing its data at this point, but leaving
1886 * it on the object to honor the 'wire' contract
1887 */
1888 continue;
1889 }
1890 break;
1891
1892 case REAP_PURGEABLE:
1893 if (VM_PAGE_WIRED(p)) {
1894 /*
1895 * can't purge a wired page
1896 */
1897 vm_page_purged_wired++;
1898 continue;
1899 }
1900 if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1901 vm_pageout_steal_laundry(p, TRUE);
1902 }
1903
1904 if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1905 /*
1906 * page is being acted upon,
1907 * so don't mess with it
1908 */
1909 vm_page_purged_others++;
1910 continue;
1911 }
1912 if (p->vmp_busy) {
1913 /*
1914 * We can't reclaim a busy page, but since it's
1915 * not wired we can deactivate it to make sure
1916 * it gets considered by
1917 * vm_pageout_scan() later.
1918 */
1919 if (VM_PAGE_PAGEABLE(p)) {
1920 vm_page_deactivate(p);
1921 }
1922 vm_page_purged_busy++;
1923 continue;
1924 }
1925
1926 assert(!is_kernel_object(VM_PAGE_OBJECT(p)));
1927
1928 /*
1929 * we can discard this page...
1930 */
1931 if (p->vmp_pmapped == TRUE) {
1932 /*
1933 * unmap the page
1934 */
1935 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1936 }
1937 vm_page_purged_count++;
1938
1939 break;
1940
1941 case REAP_TERMINATE:
1942 if (p->vmp_absent || vm_page_is_private(p)) {
1943 /*
1944 * For private pages, VM_PAGE_FREE just
1945 * leaves the page structure around for
1946 * its owner to clean up. For absent
1947 * pages, the structure is returned to
1948 * the appropriate pool.
1949 */
1950 break;
1951 }
1952 if (vm_page_is_fictitious(p)) {
1953 assert(vm_page_is_guard(p));
1954 break;
1955 }
1956 if (!p->vmp_dirty && p->vmp_wpmapped) {
1957 p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1958 }
1959
1960 if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) {
1961 assert(!object->internal);
1962
1963 p->vmp_free_when_done = TRUE;
1964
1965 if (!p->vmp_laundry) {
1966 vm_page_queues_remove(p, TRUE);
1967 /*
1968 * flush page... page will be freed
1969 * upon completion of I/O
1970 */
1971 vm_pageout_cluster(p);
1972 }
1973 vm_page_unlock_queues();
1974 /*
1975 * free the pages reclaimed so far
1976 */
1977 local_free_q = vm_object_reap_freelist(local_free_q,
1978 disconnect_on_release, set_cache_attr_needed);
1979
1980 if (set_cache_attr_needed) {
1981 object->set_cache_attr = TRUE;
1982 }
1983 vm_object_paging_wait(object, THREAD_UNINT);
1984
1985 goto restart_after_sleep;
1986 }
1987 break;
1988
1989 case REAP_REAP:
1990 break;
1991 }
1992 vm_page_free_prepare_queues(p);
1993 assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1994 /*
1995 * Add this page to our list of reclaimed pages,
1996 * to be freed later.
1997 */
1998 p->vmp_snext = local_free_q;
1999 local_free_q = p;
2000 }
2001 vm_page_unlock_queues();
2002
2003 /*
2004 * Free the remaining reclaimed pages
2005 */
2006 if (reap_type == REAP_PURGEABLE) {
2007 pmap_flush(&pmap_flush_context_storage);
2008 }
2009
2010 vm_object_reap_freelist(local_free_q,
2011 disconnect_on_release, set_cache_attr_needed);
2012 if (set_cache_attr_needed) {
2013 object->set_cache_attr = TRUE;
2014 }
2015 }
2016
2017
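/*
 * Routine: vm_object_reap_async
 * Purpose: Hand the object off to the reaper thread instead of
 * reaping it inline: enqueue it on vm_object_reaper_queue
 * and wake the reaper. The caller must hold the object
 * lock exclusively.
 */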
2018 void
2019 vm_object_reap_async(
2020 vm_object_t object)
2021 {
2022 vm_object_lock_assert_exclusive(object);
2023
2024 vm_object_reaper_lock_spin();
2025
2026 vm_object_reap_count_async++;
2027
2028 /* enqueue the VM object... */
2029 queue_enter(&vm_object_reaper_queue, object,
2030 vm_object_t, cached_list);
2031
2032 vm_object_reaper_unlock();
2033
2034 /* ... and wake up the reaper thread */
2035 thread_wakeup((event_t) &vm_object_reaper_queue);
2036 }
2037
2038
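/*
 * Routine: vm_object_reaper_thread
 * Purpose: Drain vm_object_reaper_queue: for each dead object, wait
 * for any in-flight paging activity to finish, reap it, and
 * drop the reference it held on its shadow object (if any).
 * Then block until more work is queued by
 * vm_object_reap_async().
 */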
2039 void
2040 vm_object_reaper_thread(void)
2041 {
2042 vm_object_t object, shadow_object;
2043
2044 vm_object_reaper_lock_spin();
2045
2046 while (!queue_empty(&vm_object_reaper_queue)) {
2047 queue_remove_first(&vm_object_reaper_queue,
2048 object,
2049 vm_object_t,
2050 cached_list);
2051
2052 vm_object_reaper_unlock();
2053 vm_object_lock(object);
2054
2055 assert(object->terminating);
2056 assert(!object->alive);
2057
2058 /*
2059 * The pageout daemon might be playing with our pages.
2060 * Now that the object is dead, it won't touch any more
2061 * pages, but some pages might already be on their way out.
2062 * Hence, we wait until the active paging activities have
2063 * ceased before we break the association with the pager
2064 * itself.
2065 */
2066 vm_object_paging_wait(object, THREAD_UNINT);
2067
2068 shadow_object =
2069 object->pageout ? VM_OBJECT_NULL : object->shadow;
2070
2071 vm_object_reap(object);
2072 /* cache is unlocked and object is no longer valid */
2073 object = VM_OBJECT_NULL;
2074
2075 if (shadow_object != VM_OBJECT_NULL) {
2076 /*
2077 * Drop the reference "object" was holding on
2078 * its shadow object.
2079 */
2080 vm_object_deallocate(shadow_object);
2081 shadow_object = VM_OBJECT_NULL;
2082 }
2083 vm_object_reaper_lock_spin();
2084 }
2085
2086 /* wait for more work... */
2087 assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
2088
2089 vm_object_reaper_unlock();
2090
2091 thread_block((thread_continue_t) vm_object_reaper_thread);
2092 /*NOTREACHED*/
2093 }
2094
2095 /*
2096 * Routine: vm_object_release_pager
2097 * Purpose: Terminate the pager and, upon completion,
2098 * release our last reference to it.
2099 */
2100 static void
2101 vm_object_release_pager(
2102 memory_object_t pager)
2103 {
2104 /*
2105 * Terminate the pager.
2106 */
2107
2108 (void) memory_object_terminate(pager);
2109
2110 /*
2111 * Release reference to pager.
2112 */
2113 memory_object_deallocate(pager);
2114 }
2115
2116 /*
2117 * Routine: vm_object_destroy
2118 * Purpose:
2119 * Shut down a VM object, despite the
2120 * presence of address map (or other) references
2121 * to the vm_object.
2122 */
2123 #if FBDP_DEBUG_OBJECT_NO_PAGER
2124 extern uint32_t system_inshutdown;
2125 int fbdp_no_panic = 1;
2126 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
2127 kern_return_t
2128 vm_object_destroy(
2129 vm_object_t object,
2130 vm_object_destroy_reason_t reason)
2131 {
2132 memory_object_t old_pager;
2133
2134 if (object == VM_OBJECT_NULL) {
2135 return KERN_SUCCESS;
2136 }
2137
2138 /*
2139 * Remove the pager association immediately.
2140 *
2141 * This will prevent the memory manager from further
2142 * meddling. [If it wanted to flush data or make
2143 * other changes, it should have done so before performing
2144 * the destroy call.]
2145 */
2146
2147 vm_object_lock(object);
2148
2149 #if FBDP_DEBUG_OBJECT_NO_PAGER
2150 static bool fbdp_no_panic_retrieved = false;
2151 if (!fbdp_no_panic_retrieved) {
2152 PE_parse_boot_argn("fbdp_no_panic4", &fbdp_no_panic, sizeof(fbdp_no_panic));
2153 fbdp_no_panic_retrieved = true;
2154 }
2155
2156 bool forced_unmount = false;
2157 if (object->named &&
2158 os_ref_get_count_raw(&object->ref_count) > 2 &&
2159 object->pager != NULL &&
2160 vnode_pager_get_forced_unmount(object->pager, &forced_unmount) == KERN_SUCCESS &&
2161 forced_unmount == false) {
2162 if (!fbdp_no_panic) {
2163 panic("FBDP rdar://99829401 object %p refs %d pager %p (no forced unmount)\n", object, os_ref_get_count_raw(&object->ref_count), object->pager);
2164 }
2165 DTRACE_VM3(vm_object_destroy_no_forced_unmount,
2166 vm_object_t, object,
2167 int, os_ref_get_count_raw(&object->ref_count),
2168 memory_object_t, object->pager);
2169 }
2170
2171 if (object->fbdp_tracked) {
2172 if (os_ref_get_count_raw(&object->ref_count) > 2 && !system_inshutdown) {
2173 if (!fbdp_no_panic) {
2174 panic("FBDP/4 rdar://99829401 object %p refs %d pager %p (tracked)\n", object, os_ref_get_count_raw(&object->ref_count), object->pager);
2175 }
2176 }
2177 VM_OBJECT_SET_FBDP_TRACKED(object, false);
2178 }
2179 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
2180
2181 VM_OBJECT_SET_NO_PAGER_REASON(object, reason);
2182
2183 VM_OBJECT_SET_CAN_PERSIST(object, FALSE);
2184 VM_OBJECT_SET_NAMED(object, FALSE);
2185 #if 00
2186 VM_OBJECT_SET_ALIVE(object, FALSE);
2187 #endif /* 00 */
2188
2189 #if DEVELOPMENT || DEBUG
2190 if (object->object_is_shared_cache &&
2191 object->pager != NULL &&
2192 object->pager->mo_pager_ops == &shared_region_pager_ops) {
2193 OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
2194 }
2195 #endif /* DEVELOPMENT || DEBUG */
2196
2197 old_pager = object->pager;
2198 object->pager = MEMORY_OBJECT_NULL;
2199 if (old_pager != MEMORY_OBJECT_NULL) {
2200 memory_object_control_disable(&object->pager_control);
2201 }
2202
2203 /*
2204 * Wait for the existing paging activity (that got
2205 * through before we nulled out the pager) to subside.
2206 */
2207 vm_object_paging_wait(object, THREAD_UNINT);
2208 vm_object_pl_req_wait(object, THREAD_UNINT);
2209
2210 /*
2211 * Memory objects usually stay alive while their
2212 * VM object is still mapped but vnodes can get
2213 * reclaimed by forced unmounts while still mapped,
2214 * for example, so we could be racing with a
2215 * memory_object_map() or memory_object_last_unmap()
2216 * here.
2217 * We should wait for any memory_object_map/last_unmap()
2218 * to complete, except if we're the thread calling
2219 * memory_object_last_unmap() on this memory object.
2220 */
2221 if (old_pager != MEMORY_OBJECT_NULL &&
2222 old_pager->mo_last_unmap_ctid == thread_get_ctid(current_thread())) {
2223 old_pager->mo_last_unmap_ctid = 0;
2224 } else {
2225 vm_object_mapping_wait(object, THREAD_UNINT);
2226 }
2227
2228 vm_object_unlock(object);
2229
2230 /*
2231 * Terminate the object now.
2232 */
2233 if (old_pager != MEMORY_OBJECT_NULL) {
2234 vm_object_release_pager(old_pager);
2235
2236 /*
2237 * JMM - Release the caller's reference. This assumes the
2238 * caller had a reference to release, which is a big (but
2239 * currently valid) assumption if this is driven from the
2240 * vnode pager (it is holding a named reference when making
2241 * this call).
2242 */
2243 vm_object_deallocate(object);
2244 }
2245 return KERN_SUCCESS;
2246 }
2247
2248 /*
2249 * The "chunk" macros are used by routines below when looking for pages to deactivate. These
2250 * exist because of the need to handle shadow chains. When deactivating pages, we only
2251 * want to deactivate the ones at the topmost level in the object chain. In order to do
2252 * this efficiently, the specified address range is divided up into "chunks" and we use
2253 * a bit map to keep track of which pages have already been processed as we descend down
2254 * the shadow chain. These chunk macros hide the details of the bit map implementation
2255 * as much as we can.
2256 *
2257 * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2258 * set to 64 pages. The bit map is indexed from the low-order end, so that the lowest
2259 * order bit represents page 0 in the current range and highest order bit represents
2260 * page 63.
2261 *
2262 * For further convenience, we also use negative logic for the page state in the bit map.
2263 * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2264 * been processed. This way we can simply test the 64-bit long word to see if it's zero
2265 * to easily tell if the whole range has been processed. Therefore, the bit map starts
2266 * out with all the bits set. The macros below hide all these details from the caller.
2267 */
2268
2269 #define PAGES_IN_A_CHUNK 64 /* The number of pages in the chunk must */
2270 /* be the same as the number of bits in */
2271 /* the chunk_state_t type. We use 64 */
2272 /* just for convenience. */
2273
2274 #define CHUNK_SIZE (PAGES_IN_A_CHUNK * PAGE_SIZE_64) /* Size of a chunk in bytes */
2275
2276 typedef uint64_t chunk_state_t;
2277
2278 /*
2279 * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2280 * that no pages have been processed yet. Also, if len is less than the full CHUNK_SIZE,
2281 * then we mark pages beyond the len as having been "processed" so that we don't waste time
2282 * looking at pages in that range. This can save us from unnecessarily chasing down the
2283 * shadow chain.
2284 */
2285
2286 #define CHUNK_INIT(c, len) \
2287 MACRO_BEGIN \
2288 uint64_t p; \
2289 \
2290 (c) = 0xffffffffffffffffLL; \
2291 \
2292 for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++) \
2293 MARK_PAGE_HANDLED(c, p); \
2294 MACRO_END
2295
2296
2297 /*
2298 * Return true if all pages in the chunk have not yet been processed.
2299 */
2300
2301 #define CHUNK_NOT_COMPLETE(c) ((c) != 0)
2302
2303 /*
2304 * Return true if the page at offset 'p' in the bit map has already been handled
2305 * while processing a higher level object in the shadow chain.
2306 */
2307
2308 #define PAGE_ALREADY_HANDLED(c, p) (((c) & (1ULL << (p))) == 0)
2309
2310 /*
2311 * Mark the page at offset 'p' in the bit map as having been processed.
2312 */
2313
2314 #define MARK_PAGE_HANDLED(c, p) \
2315 MACRO_BEGIN \
2316 (c) = (c) & ~(1ULL << (p)); \
2317 MACRO_END
2318
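/*
 * Illustrative sketch (not compiled): how the chunk bit map is meant to
 * be used. For a range covering only 3 pages, CHUNK_INIT leaves bits
 * 0..2 set and clears the rest, i.e. "c" ends up as 0x7; the traversal
 * below mirrors what deactivate_a_chunk()/deactivate_pages_in_object()
 * do across a shadow chain.
 */
#if 0
	chunk_state_t c;
	uint64_t p;

	CHUNK_INIT(c, 3 * PAGE_SIZE_64);     /* c == 0x7: pages 0..2 not yet processed */
	for (p = 0; p < PAGES_IN_A_CHUNK && CHUNK_NOT_COMPLETE(c); p++) {
		if (PAGE_ALREADY_HANDLED(c, p)) {
			continue;            /* already handled at a higher shadow level */
		}
		/* ... process page "p" in the current object ... */
		MARK_PAGE_HANDLED(c, p);     /* clear bit "p"; c reaches 0 when all done */
	}
#endif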
2319
2320 /*
2321 * Return true if the page at the given offset has been paged out. Object is
2322 * locked upon entry and returned locked.
2323 *
2324 * NB: It is the caller's responsibility to ensure that the offset in question
2325 * is not in the process of being paged in/out (i.e. not busy or no backing
2326 * page)
2327 */
2328 static bool
2329 page_is_paged_out(
2330 vm_object_t object,
2331 vm_object_offset_t offset)
2332 {
2333 if (object->internal &&
2334 object->alive &&
2335 !object->terminating &&
2336 object->pager_ready) {
2337 if (vm_object_compressor_pager_state_get(object, offset)
2338 == VM_EXTERNAL_STATE_EXISTS) {
2339 return true;
2340 }
2341 }
2342 return false;
2343 }
2344
2345
2346
2347 /*
2348 * madvise_free_debug
2349 *
2350 * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2351 * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2352 * simulate the loss of the page's contents as if the page had been
2353 * reclaimed and then re-faulted.
2354 */
2355 #if DEVELOPMENT || DEBUG
2356 int madvise_free_debug = 0;
2357 int madvise_free_debug_sometimes = 1;
2358 #else /* DEVELOPMENT || DEBUG */
2359 int madvise_free_debug = 0;
2360 int madvise_free_debug_sometimes = 0;
2361 #endif /* DEVELOPMENT || DEBUG */
2362 int madvise_free_counter = 0;
2363
2364 __options_decl(deactivate_flags_t, uint32_t, {
2365 DEACTIVATE_KILL = 0x1,
2366 DEACTIVATE_REUSABLE = 0x2,
2367 DEACTIVATE_ALL_REUSABLE = 0x4,
2368 DEACTIVATE_CLEAR_REFMOD = 0x8,
2369 DEACTIVATE_KILL_NO_WRITE = 0x10
2370 });
2371
2372 /*
2373 * Deactivate the pages in the specified object and range. If kill_page is set, also discard any
2374 * page modified state from the pmap. Update the chunk_state as we go along. The caller must specify
2375 * a size that is less than or equal to the CHUNK_SIZE.
2376 */
2377
2378 static void
2379 deactivate_pages_in_object(
2380 vm_object_t object,
2381 vm_object_offset_t offset,
2382 vm_object_size_t size,
2383 deactivate_flags_t flags,
2384 chunk_state_t *chunk_state,
2385 pmap_flush_context *pfc,
2386 struct pmap *pmap,
2387 vm_map_offset_t pmap_offset)
2388 {
2389 vm_page_t m;
2390 int p;
2391 struct vm_page_delayed_work dw_array;
2392 struct vm_page_delayed_work *dwp, *dwp_start;
2393 bool dwp_finish_ctx = TRUE;
2394 int dw_count;
2395 int dw_limit;
2396 unsigned int reusable = 0;
2397
2398 /*
2399 * Examine each page in the chunk. The variable 'p' is the page number relative to the start of the
2400 * chunk. Since this routine is called once for each level in the shadow chain, the chunk_state may
2401 * have pages marked as having been processed already. We stop the loop early if we find we've handled
2402 * all the pages in the chunk.
2403 */
2404
2405 dwp_start = dwp = NULL;
2406 dw_count = 0;
2407 dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2408 dwp_start = vm_page_delayed_work_get_ctx();
2409 if (dwp_start == NULL) {
2410 dwp_start = &dw_array;
2411 dw_limit = 1;
2412 dwp_finish_ctx = FALSE;
2413 }
2414
2415 dwp = dwp_start;
2416
2417 for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2418 /*
2419 * If this offset has already been found and handled in a higher level object, then don't
2420 * do anything with it in the current shadow object.
2421 */
2422
2423 if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2424 continue;
2425 }
2426
2427 /*
2428 * See if the page at this offset is around. First check to see if the page is resident,
2429 * then if not, check the existence map or with the pager.
2430 */
2431
2432 if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2433 /*
2434 * We found a page we were looking for. Mark it as "handled" now in the chunk_state
2435 * so that we won't bother looking for a page at this offset again if there are more
2436 * shadow objects. Then deactivate the page.
2437 */
2438
2439 MARK_PAGE_HANDLED(*chunk_state, p);
2440
2441 if ((!VM_PAGE_WIRED(m)) && (!vm_page_is_private(m)) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2442 (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2443 int clear_refmod_mask;
2444 int pmap_options;
2445 dwp->dw_mask = 0;
2446
2447 pmap_options = 0;
2448 clear_refmod_mask = VM_MEM_REFERENCED;
2449 dwp->dw_mask |= DW_clear_reference;
2450
2451 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2452 if (!(flags & DEACTIVATE_KILL_NO_WRITE) &&
2453 (madvise_free_debug ||
2454 (madvise_free_debug_sometimes &&
2455 madvise_free_counter++ & 0x1))) {
2456 /*
2457 * zero-fill the page (or every
2458 * other page) now to simulate
2459 * it being reclaimed and
2460 * re-faulted.
2461 */
2462 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2463 if (!m->vmp_unmodified_ro) {
2464 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2465 if (true) {
2466 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2467 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2468 }
2469 }
2470 m->vmp_precious = FALSE;
2471 m->vmp_dirty = FALSE;
2472
2473 clear_refmod_mask |= VM_MEM_MODIFIED;
2474 if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2475 /*
2476 * This page is now clean and
2477 * reclaimable. Move it out
2478 * of the throttled queue, so
2479 * that vm_pageout_scan() can
2480 * find it.
2481 */
2482 dwp->dw_mask |= DW_move_page;
2483 }
2484
2485 #if 0
2486 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2487 /*
2488 * COMMENT BLOCK ON WHY THIS SHOULDN'T BE DONE.
2489 *
2490 * Since we are about to do a vm_object_compressor_pager_state_clr
2491 * below for this page, which drops any existing compressor
2492 * storage of this page (eg side-effect of a CoW operation or
2493 * a collapse operation), it is tempting to think that we should
2494 * treat this page as if it was just decompressed (during which
2495 * we also drop existing compressor storage) and so start its life
2496 * out with vmp_unmodified_ro set to FALSE.
2497 *
2498 * However, we can't do that here because we could swing around
2499 * and re-access this page in a read-only fault.
2500 * Clearing this bit means we'll try to zero it up above
2501 * and fail.
2502 *
2503 * Note that clearing the bit is unnecessary regardless because
2504 * dirty state has been cleared. During the next soft fault, the
2505 * right state will be restored and things will progress just fine.
2506 */
2507 if (m->vmp_unmodified_ro == true) {
2508 /* Need object and pageq locks for bit manipulation*/
2509 m->vmp_unmodified_ro = false;
2510 os_atomic_dec(&compressor_ro_uncompressed);
2511 }
2512 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2513 #endif /* 0 */
2514 vm_object_compressor_pager_state_clr(object, offset);
2515
2516 if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2517 assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2518 assert(!object->all_reusable);
2519 m->vmp_reusable = TRUE;
2520 object->reusable_page_count++;
2521 assert(object->resident_page_count >= object->reusable_page_count);
2522 reusable++;
2523 /*
2524 * Tell pmap this page is now
2525 * "reusable" (to update pmap
2526 * stats for all mappings).
2527 */
2528 pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2529 }
2530 }
2531 if (flags & DEACTIVATE_CLEAR_REFMOD) {
2532 /*
2533 * The caller didn't clear the refmod bits in advance.
2534 * Clear them for this page now.
2535 */
2536 pmap_options |= PMAP_OPTIONS_NOFLUSH;
2537 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2538 clear_refmod_mask,
2539 pmap_options,
2540 (void *)pfc);
2541 }
2542
2543 if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2544 !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2545 dwp->dw_mask |= DW_move_page;
2546 }
2547
2548 if (dwp->dw_mask) {
2549 VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2550 dw_count);
2551 }
2552
2553 if (dw_count >= dw_limit) {
2554 if (reusable) {
2555 OSAddAtomic(reusable,
2556 &vm_page_stats_reusable.reusable_count);
2557 vm_page_stats_reusable.reusable += reusable;
2558 reusable = 0;
2559 }
2560 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2561
2562 dwp = dwp_start;
2563 dw_count = 0;
2564 }
2565 }
2566 } else {
2567 /*
2568 * The page at this offset isn't memory resident, check to see if it's
2569 * been paged out. If so, mark it as handled so we don't bother looking
2570 * for it in the shadow chain.
2571 */
2572
2573 if (page_is_paged_out(object, offset)) {
2574 MARK_PAGE_HANDLED(*chunk_state, p);
2575
2576 /*
2577 * If we're killing a non-resident page, then clear the page in the existence
2578 * map so we don't bother paging it back in if it's touched again in the future.
2579 */
2580
2581 if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2582 vm_object_compressor_pager_state_clr(object, offset);
2583
2584 if (pmap != PMAP_NULL) {
2585 /*
2586 * Tell pmap that this page
2587 * is no longer mapped, to
2588 * adjust the footprint ledger
2589 * because this page is no
2590 * longer compressed.
2591 */
2592 pmap_remove_options(
2593 pmap,
2594 pmap_offset,
2595 (pmap_offset +
2596 PAGE_SIZE),
2597 PMAP_OPTIONS_REMOVE);
2598 }
2599 }
2600 }
2601 }
2602 }
2603
2604 if (reusable) {
2605 OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2606 vm_page_stats_reusable.reusable += reusable;
2607 reusable = 0;
2608 }
2609
2610 if (dw_count) {
2611 vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2612 dwp = dwp_start;
2613 dw_count = 0;
2614 }
2615
2616 if (dwp_start && dwp_finish_ctx) {
2617 vm_page_delayed_work_finish_ctx(dwp_start);
2618 dwp_start = dwp = NULL;
2619 }
2620 }
2621
2622
2623 /*
2624 * Deactivate a "chunk" of the given range of the object starting at offset. A "chunk"
2625 * will always be less than or equal to the given size. The total range is divided up
2626 * into chunks for efficiency and performance related to the locks and handling the shadow
2627 * chain. This routine returns how much of the given "size" it actually processed. It's
2628 * up to the caller to loop and keep calling this routine until the entire range they want
2629 * to process has been done.
2630 * If the DEACTIVATE_CLEAR_REFMOD flag is set, pmap_clear_refmod_options is called for each physical page in this range.
2631 */
2632
2633 static vm_object_size_t
2634 deactivate_a_chunk(
2635 vm_object_t orig_object,
2636 vm_object_offset_t offset,
2637 vm_object_size_t size,
2638 deactivate_flags_t flags,
2639 pmap_flush_context *pfc,
2640 struct pmap *pmap,
2641 vm_map_offset_t pmap_offset)
2642 {
2643 vm_object_t object;
2644 vm_object_t tmp_object;
2645 vm_object_size_t length;
2646 chunk_state_t chunk_state;
2647
2648
2649 /*
2650 * Get set to do a chunk. We'll do up to CHUNK_SIZE, but no more than the
2651 * remaining size the caller asked for.
2652 */
2653
2654 length = MIN(size, CHUNK_SIZE);
2655
2656 /*
2657 * The chunk_state keeps track of which pages we've already processed if there's
2658 * a shadow chain on this object. At this point, we haven't done anything with this
2659 * range of pages yet, so initialize the state to indicate no pages processed yet.
2660 */
2661
2662 CHUNK_INIT(chunk_state, length);
2663 object = orig_object;
2664
2665 /*
2666 * Start at the top level object and iterate around the loop once for each object
2667 * in the shadow chain. We stop processing early if we've already found all the pages
2668 * in the range. Otherwise we stop when we run out of shadow objects.
2669 */
2670
2671 while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2672 vm_object_paging_begin(object);
2673
2674 deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2675
2676 vm_object_paging_end(object);
2677
2678 /*
2679 * We've finished with this object, see if there's a shadow object. If
2680 * there is, update the offset and lock the new object. We also clear the
2681 * kill/reusable flags at this point since we only kill pages in the topmost object.
2682 */
2683
2684 tmp_object = object->shadow;
2685
2686 if (tmp_object) {
2687 assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2688 flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2689 offset += object->vo_shadow_offset;
2690 vm_object_lock(tmp_object);
2691 }
2692
2693 if (object != orig_object) {
2694 vm_object_unlock(object);
2695 }
2696
2697 object = tmp_object;
2698 }
2699
2700 if (object && object != orig_object) {
2701 vm_object_unlock(object);
2702 }
2703
2704 return length;
2705 }
2706
2707
2708
2709 /*
2710 * Move any resident pages in the specified range to the inactive queue. If kill_page is set,
2711 * we also clear the modified status of the page and "forget" any changes that have been made
2712 * to the page.
2713 */
2714
2715 __private_extern__ void
2716 vm_object_deactivate_pages(
2717 vm_object_t object,
2718 vm_object_offset_t offset,
2719 vm_object_size_t size,
2720 boolean_t kill_page,
2721 boolean_t reusable_page,
2722 boolean_t kill_no_write,
2723 struct pmap *pmap,
2724 vm_map_offset_t pmap_offset)
2725 {
2726 vm_object_size_t length;
2727 boolean_t all_reusable;
2728 pmap_flush_context pmap_flush_context_storage;
2729 unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2730 unsigned int pmap_clear_refmod_options = 0;
2731 deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2732 bool refmod_cleared = false;
2733 if (kill_page) {
2734 flags |= DEACTIVATE_KILL;
2735 }
2736 if (reusable_page) {
2737 flags |= DEACTIVATE_REUSABLE;
2738 }
2739 if (kill_no_write) {
2740 flags |= DEACTIVATE_KILL_NO_WRITE;
2741 }
2742
2743 /*
2744 * We break the range up into chunks and do one chunk at a time. This is for
2745 * efficiency and performance while handling the shadow chains and the locks.
2746 * The deactivate_a_chunk() function returns how much of the range it processed.
2747 * We keep calling this routine until the given size is exhausted.
2748 */
2749
2750
2751 all_reusable = FALSE;
2752 #if 11
2753 /*
2754 * For the sake of accurate "reusable" pmap stats, we need
2755 * to tell pmap about each page that is no longer "reusable",
2756 * so we can't do the "all_reusable" optimization.
2757 *
2758 * If we do go with the all_reusable optimization, we can't
2759 * return if size is 0 since we could have "all_reusable == TRUE".
2760 * In this case, we save the overhead of doing the pmap_flush_context
2761 * work.
2762 */
2763 if (size == 0) {
2764 return;
2765 }
2766 #else
2767 if (reusable_page &&
2768 object->internal &&
2769 object->vo_size != 0 &&
2770 object->vo_size == size &&
2771 object->reusable_page_count == 0) {
2772 all_reusable = TRUE;
2773 reusable_page = FALSE;
2774 flags |= DEACTIVATE_ALL_REUSABLE;
2775 }
2776 #endif
2777
2778 if ((reusable_page || all_reusable) && object->all_reusable) {
2779 /* This means MADV_FREE_REUSABLE has been called twice, which
2780 * is probably illegal. */
2781 return;
2782 }
2783
2784
2785 pmap_flush_context_init(&pmap_flush_context_storage);
2786
2787 /*
2788 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2789 * We can't do this if we're killing pages and there's a shadow chain as
2790 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2791 * safe to kill).
2792 * And we can only do this on hardware that supports it.
2793 */
2794 if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2795 if (kill_page && object->internal) {
2796 pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2797 }
2798 if (reusable_page) {
2799 pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2800 }
2801
2802 refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2803 if (refmod_cleared) {
2804 // We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2805 flags &= ~DEACTIVATE_CLEAR_REFMOD;
2806 }
2807 }
2808
2809 while (size) {
2810 length = deactivate_a_chunk(object, offset, size, flags,
2811 &pmap_flush_context_storage, pmap, pmap_offset);
2812
2813 size -= length;
2814 offset += length;
2815 pmap_offset += length;
2816 }
2817 pmap_flush(&pmap_flush_context_storage);
2818
2819 if (all_reusable) {
2820 if (!object->all_reusable) {
2821 unsigned int reusable;
2822
2823 object->all_reusable = TRUE;
2824 assert(object->reusable_page_count == 0);
2825 /* update global stats */
2826 reusable = object->resident_page_count;
2827 OSAddAtomic(reusable,
2828 &vm_page_stats_reusable.reusable_count);
2829 vm_page_stats_reusable.reusable += reusable;
2830 vm_page_stats_reusable.all_reusable_calls++;
2831 }
2832 } else if (reusable_page) {
2833 vm_page_stats_reusable.partial_reusable_calls++;
2834 }
2835 }
2836
2837 void
2838 vm_object_reuse_pages(
2839 vm_object_t object,
2840 vm_object_offset_t start_offset,
2841 vm_object_offset_t end_offset,
2842 boolean_t allow_partial_reuse)
2843 {
2844 vm_object_offset_t cur_offset;
2845 vm_page_t m;
2846 unsigned int reused, reusable;
2847
2848 #define VM_OBJECT_REUSE_PAGE(object, m, reused) \
2849 MACRO_BEGIN \
2850 if ((m) != VM_PAGE_NULL && \
2851 (m)->vmp_reusable) { \
2852 assert((object)->reusable_page_count <= \
2853 (object)->resident_page_count); \
2854 assert((object)->reusable_page_count > 0); \
2855 (object)->reusable_page_count--; \
2856 (m)->vmp_reusable = FALSE; \
2857 (reused)++; \
2858 /* \
2859 * Tell pmap that this page is no longer \
2860 * "reusable", to update the "reusable" stats \
2861 * for all the pmaps that have mapped this \
2862 * page. \
2863 */ \
2864 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2865 0, /* refmod */ \
2866 (PMAP_OPTIONS_CLEAR_REUSABLE \
2867 | PMAP_OPTIONS_NOFLUSH), \
2868 NULL); \
2869 } \
2870 MACRO_END
2871
2872 reused = 0;
2873 reusable = 0;
2874
2875 vm_object_lock_assert_exclusive(object);
2876
2877 if (object->all_reusable) {
2878 panic("object %p all_reusable: can't update pmap stats",
2879 object);
2880 assert(object->reusable_page_count == 0);
2881 object->all_reusable = FALSE;
2882 if (end_offset - start_offset == object->vo_size ||
2883 !allow_partial_reuse) {
2884 vm_page_stats_reusable.all_reuse_calls++;
2885 reused = object->resident_page_count;
2886 } else {
2887 vm_page_stats_reusable.partial_reuse_calls++;
2888 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2889 if (m->vmp_offset < start_offset ||
2890 m->vmp_offset >= end_offset) {
2891 m->vmp_reusable = TRUE;
2892 object->reusable_page_count++;
2893 assert(object->resident_page_count >= object->reusable_page_count);
2894 continue;
2895 } else {
2896 assert(!m->vmp_reusable);
2897 reused++;
2898 }
2899 }
2900 }
2901 } else if (object->resident_page_count >
2902 ((end_offset - start_offset) >> PAGE_SHIFT)) {
2903 vm_page_stats_reusable.partial_reuse_calls++;
2904 for (cur_offset = start_offset;
2905 cur_offset < end_offset;
2906 cur_offset += PAGE_SIZE_64) {
2907 if (object->reusable_page_count == 0) {
2908 break;
2909 }
2910 m = vm_page_lookup(object, cur_offset);
2911 VM_OBJECT_REUSE_PAGE(object, m, reused);
2912 }
2913 } else {
2914 vm_page_stats_reusable.partial_reuse_calls++;
2915 vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2916 if (object->reusable_page_count == 0) {
2917 break;
2918 }
2919 if (m->vmp_offset < start_offset ||
2920 m->vmp_offset >= end_offset) {
2921 continue;
2922 }
2923 VM_OBJECT_REUSE_PAGE(object, m, reused);
2924 }
2925 }
2926
2927 /* update global stats */
2928 OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2929 vm_page_stats_reusable.reused += reused;
2930 vm_page_stats_reusable.reusable += reusable;
2931 }
2932
2933 /*
2934 * This function determines if the zero operation can be run on the
2935 * object. The checks on the entry have already been performed by
2936 * vm_map_zero_entry_preflight.
2937 */
2938 static kern_return_t
2939 vm_object_zero_preflight(
2940 vm_object_t object,
2941 vm_object_offset_t start,
2942 vm_object_offset_t end)
2943 {
2944 /*
2945 * Zeroing is further restricted to anonymous memory.
2946 */
2947 if (!object->internal) {
2948 return KERN_PROTECTION_FAILURE;
2949 }
2950
2951 /*
2952 * Zeroing for copy on write isn't yet supported
2953 */
2954 if (object->shadow != NULL ||
2955 object->vo_copy != NULL) {
2956 return KERN_NO_ACCESS;
2957 }
2958
2959 /*
2960 * Ensure that the bounds make sense with respect to the object
2961 */
2962 if (end - start > object->vo_size) {
2963 return KERN_INVALID_ADDRESS;
2964 }
2965
2966 if (object->terminating || !object->alive) {
2967 return KERN_ABORTED;
2968 }
2969
2970 return KERN_SUCCESS;
2971 }
2972
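/*
 * Routine: vm_object_zero_page
 * Purpose: Zero the physical page backing "m". Fictitious guard
 * pages have no real backing page, so they are skipped.
 */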
2973 static void
2974 vm_object_zero_page(vm_page_t m)
2975 {
2976 if (m != VM_PAGE_NULL) {
2977 ppnum_t phy_page_num = VM_PAGE_GET_PHYS_PAGE(m);
2978
2979 /*
2980 * Skip fictitious guard pages
2981 */
2982 if (vm_page_is_fictitious(m)) {
2983 assert(vm_page_is_guard(m));
2984 return;
2985 }
2986 pmap_zero_page(phy_page_num);
2987 }
2988 }
2989
2990 /*
2991 * This function iterates the range of pages specified in the object and
2992 * discards the ones that are compressed and zeroes the ones that are wired.
2993 * This function may drop the object lock while waiting for a page that is
2994 * busy and will restart the operation for the specific offset.
2995 */
2996 kern_return_t
2997 vm_object_zero(
2998 vm_object_t object,
2999 vm_object_offset_t *cur_offset_p,
3000 vm_object_offset_t end_offset)
3001 {
3002 kern_return_t ret;
3003
3004 vm_object_lock_assert_exclusive(object);
3005 ret = vm_object_zero_preflight(object, *cur_offset_p, end_offset);
3006 if (ret != KERN_SUCCESS) {
3007 return ret;
3008 }
3009
3010 while (*cur_offset_p < end_offset) {
3011 vm_page_t m = vm_page_lookup(object, *cur_offset_p);
3012
3013 if (m != VM_PAGE_NULL && m->vmp_busy) {
3014 vm_page_sleep(object, m, THREAD_UNINT, LCK_SLEEP_DEFAULT);
3015 /* Object lock was dropped -- reverify validity */
3016 ret = vm_object_zero_preflight(object, *cur_offset_p, end_offset);
3017 if (ret != KERN_SUCCESS) {
3018 return ret;
3019 }
3020 if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3021 /*
3022 * Our mapping could have been made "needs_copy" while
3023 * the map and object were unlocked.
3024 * We need to do the mapping preflight again...
3025 */
3026 return KERN_SUCCESS;
3027 }
3028 continue;
3029 }
3030
3031 /*
3032 * If the compressor has the page, just discard it instead
3033 * of faulting it in and zeroing it; otherwise zero the page if it exists. If
3034 * we dropped the object lock during the lookup, retry the lookup for the
3035 * cur_offset.
3036 */
3037 if (page_is_paged_out(object, *cur_offset_p)) {
3038 vm_object_compressor_pager_state_clr(object, *cur_offset_p);
3039 } else {
3040 vm_object_zero_page(m);
3041 }
3042 *cur_offset_p += PAGE_SIZE_64;
3043 /*
3044 * TODO: May need a vm_object_lock_yield_shared in this loop if it takes
3045 * too long, as holding the object lock for too long can stall pageout
3046 * scan (or other users of the object)
3047 */
3048 }
3049
3050 return KERN_SUCCESS;
3051 }
3052
3053 /*
3054 * Routine: vm_object_pmap_protect
3055 *
3056 * Purpose:
3057 * Reduces the permission for all physical
3058 * pages in the specified object range.
3059 *
3060 * If removing write permission only, it is
3061 * sufficient to protect only the pages in
3062 * the top-level object; only those pages may
3063 * have write permission.
3064 *
3065 * If removing all access, we must follow the
3066 * shadow chain from the top-level object to
3067 * remove access to all pages in shadowed objects.
3068 *
3069 * The object must *not* be locked. The object must
3070 * be internal.
3071 *
3072 * If pmap is not NULL, this routine assumes that
3073 * the only mappings for the pages are in that
3074 * pmap.
3075 */
3076
3077 __private_extern__ void
3078 vm_object_pmap_protect(
3079 vm_object_t object,
3080 vm_object_offset_t offset,
3081 vm_object_size_t size,
3082 pmap_t pmap,
3083 vm_map_size_t pmap_page_size,
3084 vm_map_offset_t pmap_start,
3085 vm_prot_t prot)
3086 {
3087 vm_object_pmap_protect_options(object, offset, size, pmap,
3088 pmap_page_size,
3089 pmap_start, prot, 0);
3090 }
3091
3092 __private_extern__ void
3093 vm_object_pmap_protect_options(
3094 vm_object_t object,
3095 vm_object_offset_t offset,
3096 vm_object_size_t size,
3097 pmap_t pmap,
3098 vm_map_size_t pmap_page_size,
3099 vm_map_offset_t pmap_start,
3100 vm_prot_t prot,
3101 int options)
3102 {
3103 pmap_flush_context pmap_flush_context_storage;
3104 boolean_t delayed_pmap_flush = FALSE;
3105 vm_object_offset_t offset_in_object;
3106 vm_object_size_t size_in_object;
3107
3108 if (object == VM_OBJECT_NULL) {
3109 return;
3110 }
3111 if (pmap_page_size > PAGE_SIZE) {
3112 /* for 16K map on 4K device... */
3113 pmap_page_size = PAGE_SIZE;
3114 }
3115 /*
3116 * If we decide to work on the object itself, extend the range to
3117 * cover a full number of native pages.
3118 */
3119 size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
3120 offset_in_object = vm_object_trunc_page(offset);
3121 /*
3122 * If we decide to work on the pmap, use the exact range specified,
3123 * so no rounding/truncating offset and size. They should already
3124 * be aligned to pmap_page_size.
3125 */
3126 assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
3127 "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
3128 offset, size, (uint64_t)pmap_page_size);
3129
3130 vm_object_lock(object);
3131
3132 if (object->phys_contiguous) {
3133 if (pmap != NULL) {
3134 vm_object_unlock(object);
3135 pmap_protect_options(pmap,
3136 pmap_start,
3137 pmap_start + size,
3138 prot,
3139 options & ~PMAP_OPTIONS_NOFLUSH,
3140 NULL);
3141 } else {
3142 vm_object_offset_t phys_start, phys_end, phys_addr;
3143
3144 phys_start = object->vo_shadow_offset + offset_in_object;
3145 phys_end = phys_start + size_in_object;
3146 assert(phys_start <= phys_end);
3147 assert(phys_end <= object->vo_shadow_offset + object->vo_size);
3148 vm_object_unlock(object);
3149
3150 pmap_flush_context_init(&pmap_flush_context_storage);
3151 delayed_pmap_flush = FALSE;
3152
3153 for (phys_addr = phys_start;
3154 phys_addr < phys_end;
3155 phys_addr += PAGE_SIZE_64) {
3156 pmap_page_protect_options(
3157 (ppnum_t) (phys_addr >> PAGE_SHIFT),
3158 prot,
3159 options | PMAP_OPTIONS_NOFLUSH,
3160 (void *)&pmap_flush_context_storage);
3161 delayed_pmap_flush = TRUE;
3162 }
3163 if (delayed_pmap_flush == TRUE) {
3164 pmap_flush(&pmap_flush_context_storage);
3165 }
3166 }
3167 return;
3168 }
3169
3170 assert(object->internal);
3171
3172 while (TRUE) {
3173 if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
3174 vm_object_unlock(object);
3175 if (pmap_page_size < PAGE_SIZE) {
3176 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
3177 }
3178 pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
3179 options & ~PMAP_OPTIONS_NOFLUSH, NULL);
3180 return;
3181 }
3182
3183 if (pmap_page_size < PAGE_SIZE) {
3184 DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
3185 }
3186
3187 pmap_flush_context_init(&pmap_flush_context_storage);
3188 delayed_pmap_flush = FALSE;
3189
3190 /*
3191 * if we are doing large ranges with respect to resident
3192 * page count then we should iterate over pages, otherwise
3193 * inverse page look-up will be faster
3194 */
3195 if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
3196 vm_page_t p;
3197 vm_object_offset_t end;
3198
3199 end = offset_in_object + size_in_object;
3200
3201 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
3202 if (!vm_page_is_fictitious(p) &&
3203 (offset_in_object <= p->vmp_offset) &&
3204 (p->vmp_offset < end)) {
3205 vm_map_offset_t start;
3206
3207 /*
3208 * XXX FBDP 4K: intentionally using "offset" here instead
3209 * of "offset_in_object", since "start" is a pmap address.
3210 */
3211 start = pmap_start + p->vmp_offset - offset;
3212
3213 if (pmap != PMAP_NULL) {
3214 vm_map_offset_t curr;
3215 for (curr = start;
3216 curr < start + PAGE_SIZE_64;
3217 curr += pmap_page_size) {
3218 if (curr < pmap_start) {
3219 continue;
3220 }
3221 if (curr >= pmap_start + size) {
3222 break;
3223 }
3224 pmap_protect_options(
3225 pmap,
3226 curr,
3227 curr + pmap_page_size,
3228 prot,
3229 options | PMAP_OPTIONS_NOFLUSH,
3230 &pmap_flush_context_storage);
3231 }
3232 } else {
3233 pmap_page_protect_options(
3234 VM_PAGE_GET_PHYS_PAGE(p),
3235 prot,
3236 options | PMAP_OPTIONS_NOFLUSH,
3237 &pmap_flush_context_storage);
3238 }
3239 delayed_pmap_flush = TRUE;
3240 }
3241 }
3242 } else {
3243 vm_page_t p;
3244 vm_object_offset_t end;
3245 vm_object_offset_t target_off;
3246
3247 end = offset_in_object + size_in_object;
3248
3249 for (target_off = offset_in_object;
3250 target_off < end; target_off += PAGE_SIZE) {
3251 p = vm_page_lookup(object, target_off);
3252
3253 if (p != VM_PAGE_NULL) {
3254 vm_object_offset_t start;
3255
3256 /*
3257 * XXX FBDP 4K: intentionally using "offset" here instead
3258 * of "offset_in_object", since "start" is a pmap address.
3259 */
3260 start = pmap_start + (p->vmp_offset - offset);
3261
3262 if (pmap != PMAP_NULL) {
3263 vm_map_offset_t curr;
3264 for (curr = start;
3265 curr < start + PAGE_SIZE;
3266 curr += pmap_page_size) {
3267 if (curr < pmap_start) {
3268 continue;
3269 }
3270 if (curr >= pmap_start + size) {
3271 break;
3272 }
3273 pmap_protect_options(
3274 pmap,
3275 curr,
3276 curr + pmap_page_size,
3277 prot,
3278 options | PMAP_OPTIONS_NOFLUSH,
3279 &pmap_flush_context_storage);
3280 }
3281 } else {
3282 pmap_page_protect_options(
3283 VM_PAGE_GET_PHYS_PAGE(p),
3284 prot,
3285 options | PMAP_OPTIONS_NOFLUSH,
3286 &pmap_flush_context_storage);
3287 }
3288 delayed_pmap_flush = TRUE;
3289 }
3290 }
3291 }
3292 if (delayed_pmap_flush == TRUE) {
3293 pmap_flush(&pmap_flush_context_storage);
3294 }
3295
3296 if (prot == VM_PROT_NONE) {
3297 /*
3298 * Must follow shadow chain to remove access
3299 * to pages in shadowed objects.
3300 */
3301 vm_object_t next_object;
3302
3303 next_object = object->shadow;
3304 if (next_object != VM_OBJECT_NULL) {
3305 offset_in_object += object->vo_shadow_offset;
3306 offset += object->vo_shadow_offset;
3307 vm_object_lock(next_object);
3308 vm_object_unlock(object);
3309 object = next_object;
3310 } else {
3311 /*
3312 * End of chain - we are done.
3313 */
3314 break;
3315 }
3316 } else {
3317 /*
3318 * Pages in shadowed objects may never have
3319 * write permission - we may stop here.
3320 */
3321 break;
3322 }
3323 }
3324
3325 vm_object_unlock(object);
3326 }
3327
3328 uint32_t vm_page_busy_absent_skipped = 0;
3329
3330 /*
3331 * Routine: vm_object_copy_slowly
3332 *
3333 * Description:
3334 * Copy the specified range of the source
3335 * virtual memory object without using
3336 * protection-based optimizations (such
3337 * as copy-on-write). The pages in the
3338 * region are actually copied.
3339 *
3340 * In/out conditions:
3341 * The caller must hold a reference and a lock
3342 * for the source virtual memory object. The source
3343 * object will be returned *unlocked*.
3344 *
3345 * Results:
3346 * If the copy is completed successfully, KERN_SUCCESS is
3347 * returned. If the caller asserted the interruptible
3348 * argument, and an interruption occurred while waiting
3349 * for a user-generated event, MACH_SEND_INTERRUPTED is
3350 * returned. Other values may be returned to indicate
3351 * hard errors during the copy operation.
3352 *
3353 * A new virtual memory object is returned in a
3354 * parameter (_result_object). The contents of this
3355 * new object, starting at a zero offset, are a copy
3356 * of the source memory region. In the event of
3357 * an error, this parameter will contain the value
3358 * VM_OBJECT_NULL.
3359 */
3360 __exported_hidden kern_return_t
3361 vm_object_copy_slowly(
3362 vm_object_t src_object,
3363 vm_object_offset_t src_offset,
3364 vm_object_size_t size,
3365 boolean_t interruptible,
3366 #if HAS_MTE
3367 bool create_mte_object,
3368 #endif /* HAS_MTE */
3369 vm_object_t *_result_object) /* OUT */
3370 {
3371 vm_object_t new_object;
3372 vm_object_offset_t new_offset;
3373
3374 struct vm_object_fault_info fault_info = {};
3375
3376 if (size == 0) {
3377 vm_object_unlock(src_object);
3378 *_result_object = VM_OBJECT_NULL;
3379 return KERN_INVALID_ARGUMENT;
3380 }
3381
3382 /*
3383 * Prevent destruction of the source object while we copy.
3384 */
3385
3386 vm_object_reference_locked(src_object);
3387 vm_object_unlock(src_object);
3388
3389 /*
3390 * Create a new object to hold the copied pages.
3391 * A few notes:
3392 * We fill the new object starting at offset 0,
3393 * regardless of the input offset.
3394 * We don't bother to lock the new object within
3395 * this routine, since we have the only reference.
3396 */
3397
3398 size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
3399 src_offset = vm_object_trunc_page(src_offset);
3400
3401 #if HAS_MTE
3402 /*
3403 * Retain the original provenance despite the fact we're creating a byte-for-byte copy.
3404 * As far as I can think, this doesn't have a consequence either way:
3405 * The only path for which we copy slowly MTE-enabled objects is on the fork path,
3406 * during which the two maps will hold the same ID anyway.
3407 * For objects that'll never be MTE-mapped, the provenance has no consequence anyway.
3408 * I'm carrying over the ID here just because it seems more tidy than dropping it.
3409 */
3410 #endif /* HAS_MTE */
3411 new_object = vm_object_allocate(size, src_object->vmo_provenance);
3412 new_offset = 0;
3413 if (src_object->copy_strategy == MEMORY_OBJECT_COPY_NONE &&
3414 src_object->vo_inherit_copy_none) {
3415 new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
3416 new_object->vo_inherit_copy_none = true;
3417 }
3418
3419 #if HAS_MTE
3420 /*
3421 * The new object should hold MTE enabled pages. This is a byproduct
3422 * of our current forking strategy.
3423 */
3424 if (create_mte_object) {
3425 vm_object_mte_set(new_object);
3426
3427 assert(src_object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
3428 new_object->copy_strategy = src_object->copy_strategy;
3429 }
3430 #endif /* HAS_MTE */
3431
3432 assert(size == trunc_page_64(size)); /* Will the loop terminate? */
3433
3434 fault_info.interruptible = interruptible;
3435 fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
3436 fault_info.lo_offset = src_offset;
3437 fault_info.hi_offset = src_offset + size;
3438 fault_info.stealth = TRUE;
3439
3440 for (;
3441 size != 0;
3442 src_offset += PAGE_SIZE_64,
3443 new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3444 ) {
3445 vm_page_t new_page;
3446 vm_fault_return_t result;
3447 vm_grab_options_t options;
3448
3449 options = vm_page_grab_options_for_object(new_object);
3450
3451 while ((new_page = vm_page_grab_options(options)) == VM_PAGE_NULL) {
3452 if (!vm_page_wait(interruptible)) {
3453 vm_object_deallocate(new_object);
3454 vm_object_deallocate(src_object);
3455 *_result_object = VM_OBJECT_NULL;
3456 return MACH_SEND_INTERRUPTED;
3457 }
3458 }
3459
3460 vm_object_lock(new_object);
3461 vm_page_insert(new_page, new_object, new_offset);
3462 vm_object_unlock(new_object);
3463
3464 do {
3465 vm_prot_t prot = VM_PROT_READ;
3466 vm_page_t _result_page;
3467 vm_page_t top_page;
3468 vm_page_t result_page;
3469 kern_return_t error_code;
3470 vm_object_t result_page_object;
3471
3472
3473 vm_object_lock(src_object);
3474
3475 if (src_object->internal &&
3476 src_object->shadow == VM_OBJECT_NULL &&
3477 (src_object->pager == NULL ||
3478 (vm_object_compressor_pager_state_get(src_object,
3479 src_offset) ==
3480 VM_EXTERNAL_STATE_ABSENT))) {
3481 boolean_t can_skip_page;
3482
3483 _result_page = vm_page_lookup(src_object,
3484 src_offset);
3485 if (_result_page == VM_PAGE_NULL) {
3486 /*
3487 * This page is neither resident nor
3488 * compressed and there's no shadow
3489 * object below "src_object", so this
3490 * page is really missing.
3491 * There's no need to zero-fill it just
3492 * to copy it: let's leave it missing
3493 * in "new_object" and get zero-filled
3494 * on demand.
3495 */
3496 can_skip_page = TRUE;
3497 } else if (workaround_41447923 &&
3498 src_object->pager == NULL &&
3499 _result_page != VM_PAGE_NULL &&
3500 _result_page->vmp_busy &&
3501 _result_page->vmp_absent &&
3502 src_object->purgable == VM_PURGABLE_DENY &&
3503 !src_object->blocked_access) {
3504 /*
3505 * This page is "busy" and "absent"
3506 * but not because we're waiting for
3507 * it to be decompressed. It must
3508 * be because it's a "no zero fill"
3509 * page that is currently not
3510 * accessible until it gets overwritten
3511 * by a device driver.
3512 * Since its initial state would have
3513 * been "zero-filled", let's leave the
3514 * copy page missing and get zero-filled
3515 * on demand.
3516 */
3517 assert(src_object->internal);
3518 assert(src_object->shadow == NULL);
3519 assert(src_object->pager == NULL);
3520 can_skip_page = TRUE;
3521 vm_page_busy_absent_skipped++;
3522 } else {
3523 can_skip_page = FALSE;
3524 }
3525 if (can_skip_page) {
3526 vm_object_unlock(src_object);
3527 /* free the unused "new_page"... */
3528 vm_object_lock(new_object);
3529 VM_PAGE_FREE(new_page);
3530 new_page = VM_PAGE_NULL;
3531 vm_object_unlock(new_object);
3532 /* ...and go to next page in "src_object" */
3533 result = VM_FAULT_SUCCESS;
3534 break;
3535 }
3536 }
3537
3538 vm_object_paging_begin(src_object);
3539
3540 /* cap size at maximum UPL size */
3541 upl_size_t cluster_size;
3542 if (os_convert_overflow(size, &cluster_size)) {
3543 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3544 }
3545 fault_info.cluster_size = cluster_size;
3546
3547 _result_page = VM_PAGE_NULL;
3548 result = vm_fault_page(src_object, src_offset,
3549 VM_PROT_READ, FALSE,
3550 FALSE, /* page not looked up */
3551 &prot, &_result_page, &top_page,
3552 (int *)0,
3553 &error_code, FALSE, &fault_info);
3554
3555 switch (result) {
3556 case VM_FAULT_SUCCESS:
3557 result_page = _result_page;
3558 result_page_object = VM_PAGE_OBJECT(result_page);
3559
3560 /*
3561 * Copy the page to the new object.
3562 *
3563 * POLICY DECISION:
3564 * If result_page is clean,
3565 * we could steal it instead
3566 * of copying.
3567 */
3568 vm_page_copy(result_page, new_page);
3569
3570 vm_object_unlock(result_page_object);
3571
3572 /*
3573 * Let go of both pages (make them
3574 * not busy, perform wakeup, activate).
3575 */
3576 vm_object_lock(new_object);
3577 SET_PAGE_DIRTY(new_page, FALSE);
3578 vm_page_wakeup_done(new_object, new_page);
3579 vm_object_unlock(new_object);
3580
3581 vm_object_lock(result_page_object);
3582 vm_page_wakeup_done(result_page_object, result_page);
3583
3584 vm_page_lockspin_queues();
3585 if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3586 (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3587 vm_page_activate(result_page);
3588 }
3589 vm_page_activate(new_page);
3590 vm_page_unlock_queues();
3591
3592 /*
3593 * Release paging references and
3594 * top-level placeholder page, if any.
3595 */
3596
3597 vm_fault_cleanup(result_page_object,
3598 top_page);
3599
3600 break;
3601
3602 case VM_FAULT_RETRY:
3603 break;
3604
3605 case VM_FAULT_MEMORY_SHORTAGE:
3606 if (vm_page_wait(interruptible)) {
3607 break;
3608 }
3609 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3610 OS_FALLTHROUGH;
3611
3612 case VM_FAULT_INTERRUPTED:
3613 vm_object_lock(new_object);
3614 VM_PAGE_FREE(new_page);
3615 vm_object_unlock(new_object);
3616
3617 vm_object_deallocate(new_object);
3618 vm_object_deallocate(src_object);
3619 *_result_object = VM_OBJECT_NULL;
3620 return MACH_SEND_INTERRUPTED;
3621
3622 case VM_FAULT_SUCCESS_NO_VM_PAGE:
3623 /* success but no VM page: fail */
3624 vm_object_paging_end(src_object);
3625 vm_object_unlock(src_object);
3626 OS_FALLTHROUGH;
3627 case VM_FAULT_MEMORY_ERROR:
3628 /*
3629 * A policy choice:
3630 * (a) ignore pages that we can't
3631 * copy
3632 * (b) return the null object if
3633 * any page fails [chosen]
3634 */
3635
3636 vm_object_lock(new_object);
3637 VM_PAGE_FREE(new_page);
3638 vm_object_unlock(new_object);
3639
3640 vm_object_deallocate(new_object);
3641 vm_object_deallocate(src_object);
3642 *_result_object = VM_OBJECT_NULL;
			return error_code ? error_code :
			    KERN_MEMORY_ERROR;
3645
3646 default:
3647 panic("vm_object_copy_slowly: unexpected error"
3648 " 0x%x from vm_fault_page()\n", result);
3649 }
3650 } while (result != VM_FAULT_SUCCESS);
3651 }
3652
3653 /*
3654 * Lose the extra reference, and return our object.
3655 */
3656 vm_object_deallocate(src_object);
3657 *_result_object = new_object;
3658 return KERN_SUCCESS;
3659 }
3660
3661 /*
3662 * Routine: vm_object_copy_quickly
3663 *
3664 * Purpose:
3665 * Copy the specified range of the source virtual
3666 * memory object, if it can be done without waiting
3667 * for user-generated events.
3668 *
3669 * Results:
3670 * If the copy is successful, the copy is returned in
3671 * the arguments; otherwise, the arguments are not
3672 * affected.
3673 *
3674 * In/out conditions:
3675 * The object should be unlocked on entry and exit.
3676 */
3677
3678 /*ARGSUSED*/
3679 __private_extern__ boolean_t
3680 vm_object_copy_quickly(
3681 vm_object_t object, /* IN */
3682 __unused vm_object_offset_t offset, /* IN */
3683 __unused vm_object_size_t size, /* IN */
3684 boolean_t *_src_needs_copy, /* OUT */
3685 boolean_t *_dst_needs_copy) /* OUT */
3686 {
3687 memory_object_copy_strategy_t copy_strategy;
3688
3689 if (object == VM_OBJECT_NULL) {
3690 *_src_needs_copy = FALSE;
3691 *_dst_needs_copy = FALSE;
3692 return TRUE;
3693 }
3694
3695 vm_object_lock(object);
3696
3697 copy_strategy = object->copy_strategy;
3698
3699 switch (copy_strategy) {
3700 case MEMORY_OBJECT_COPY_SYMMETRIC:
3701
3702 /*
3703 * Symmetric copy strategy.
3704 * Make another reference to the object.
3705 * Leave object/offset unchanged.
3706 */
3707
3708 vm_object_reference_locked(object);
3709 VM_OBJECT_SET_SHADOWED(object, TRUE);
3710 vm_object_unlock(object);
3711
3712 /*
3713 * Both source and destination must make
3714 * shadows, and the source must be made
3715 * read-only if not already.
3716 */
3717
3718 *_src_needs_copy = TRUE;
3719 *_dst_needs_copy = TRUE;
3720
3721 break;
3722
3723 case MEMORY_OBJECT_COPY_DELAY:
3724 vm_object_unlock(object);
3725 return FALSE;
3726
3727 default:
3728 vm_object_unlock(object);
3729 return FALSE;
3730 }
3731 return TRUE;
3732 }
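
/*
 * Hypothetical caller sketch (the actual callers live elsewhere, e.g. in
 * the vm_map copy paths); shown only to illustrate how the OUT flags are
 * meant to be consumed:
 *
 *	boolean_t src_needs_copy, dst_needs_copy;
 *
 *	if (vm_object_copy_quickly(src_object, offset, size,
 *	                           &src_needs_copy, &dst_needs_copy)) {
 *		// symmetric copy succeeded: the caller must now mark both
 *		// the source and destination mappings "needs_copy"
 *	} else {
 *		// can't copy without blocking: fall back to
 *		// vm_object_copy_strategically() / vm_object_copy_slowly()
 *	}
 */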
3733
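/*
 * Statistics for vm_object_copy_delayed(): how often we collide on the
 * copy object's lock (and the worst case seen), and how many
 * write-protect passes we make over the source object's resident pages.
 */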
3734 static uint32_t copy_delayed_lock_collisions;
3735 static uint32_t copy_delayed_max_collisions;
3736 static uint32_t copy_delayed_lock_contention;
3737 static uint32_t copy_delayed_protect_iterate;
3738
3739 #if XNU_TARGET_OS_OSX
3740 unsigned int vm_object_copy_delayed_paging_wait_disable = 0;
3741 #else /* XNU_TARGET_OS_OSX */
3742 unsigned int vm_object_copy_delayed_paging_wait_disable = 1;
3743 #endif /* XNU_TARGET_OS_OSX */
3744
3745 /*
3746 * Routine: vm_object_copy_delayed [internal]
3747 *
3748 * Description:
3749 * Copy the specified virtual memory object, using
3750 * the asymmetric copy-on-write algorithm.
3751 *
3752 * In/out conditions:
3753 * The src_object must be locked on entry. It will be unlocked
3754 * on exit - so the caller must also hold a reference to it.
3755 *
3756 * This routine will not block waiting for user-generated
3757 * events. It is not interruptible.
3758 */
3759 __private_extern__ vm_object_t
3760 vm_object_copy_delayed(
3761 vm_object_t src_object,
3762 vm_object_offset_t src_offset,
3763 vm_object_size_t size,
3764 boolean_t src_object_shared)
3765 {
3766 vm_object_t new_copy = VM_OBJECT_NULL;
3767 vm_object_t old_copy;
3768 vm_page_t p;
3769 vm_object_size_t copy_size = src_offset + size;
3770 pmap_flush_context pmap_flush_context_storage;
3771 boolean_t delayed_pmap_flush = FALSE;
3772
3773
3774 uint32_t collisions = 0;
3775 /*
3776 * The user-level memory manager wants to see all of the changes
3777 * to this object, but it has promised not to make any changes on
3778 * its own.
3779 *
3780 * Perform an asymmetric copy-on-write, as follows:
3781 * Create a new object, called a "copy object" to hold
3782 * pages modified by the new mapping (i.e., the copy,
3783 * not the original mapping).
3784 * Record the original object as the backing object for
3785 * the copy object. If the original mapping does not
3786 * change a page, it may be used read-only by the copy.
3787 * Record the copy object in the original object.
3788 * When the original mapping causes a page to be modified,
3789 * it must be copied to a new page that is "pushed" to
3790 * the copy object.
3791 * Mark the new mapping (the copy object) copy-on-write.
3792 * This makes the copy object itself read-only, allowing
3793 * it to be reused if the original mapping makes no
3794 * changes, and simplifying the synchronization required
3795 * in the "push" operation described above.
3796 *
	 * The copy-on-write is said to be asymmetric because the original
3798 * object is *not* marked copy-on-write. A copied page is pushed
3799 * to the copy object, regardless which party attempted to modify
3800 * the page.
3801 *
3802 * Repeated asymmetric copy operations may be done. If the
3803 * original object has not been changed since the last copy, its
3804 * copy object can be reused. Otherwise, a new copy object can be
3805 * inserted between the original object and its previous copy
	 * object. Since any copy object is read-only, this cannot
	 * affect the contents of the previous copy object.
3808 *
3809 * Note that a copy object is higher in the object tree than the
3810 * original object; therefore, use of the copy object recorded in
3811 * the original object must be done carefully, to avoid deadlock.
3812 */
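
	/*
	 * Illustrative sketch (hypothetical, assuming no reusable previous
	 * copy object) of the result of one delayed copy:
	 *
	 *	new mapping (marked COW by the caller) ----> new_copy
	 *	                                                |  shadow
	 *	                                                v
	 *	original mapping --------------------------> src_object
	 *	                                 (src_object->vo_copy == new_copy)
	 *
	 * Before a page of src_object is modified (through either mapping),
	 * its current contents are pushed into new_copy, so new_copy keeps
	 * seeing the snapshot taken at copy time.
	 */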
3813
3814 copy_size = vm_object_round_page(copy_size);
3815 Retry:
3816 if (!vm_object_copy_delayed_paging_wait_disable) {
3817 /*
3818 * Wait for paging in progress.
3819 */
3820 if (!src_object->true_share &&
3821 (src_object->paging_in_progress != 0 ||
3822 src_object->activity_in_progress != 0)) {
3823 if (src_object_shared == TRUE) {
3824 vm_object_unlock(src_object);
3825 vm_object_lock(src_object);
3826 src_object_shared = FALSE;
3827 goto Retry;
3828 }
3829 vm_object_paging_wait(src_object, THREAD_UNINT);
3830 }
3831 }
3832 if (src_object->vmo_pl_req_in_progress) {
3833 if (src_object_shared) {
3834 vm_object_unlock(src_object);
3835 vm_object_lock(src_object);
3836 src_object_shared = false;
3837 goto Retry;
3838 }
3839 vm_object_pl_req_wait(src_object, THREAD_UNINT);
3840 }
3841
3842 /*
3843 * See whether we can reuse the result of a previous
3844 * copy operation.
3845 */
3846
3847 old_copy = src_object->vo_copy;
3848 if (old_copy != VM_OBJECT_NULL) {
3849 int lock_granted;
3850
3851 /*
3852 * Try to get the locks (out of order)
3853 */
3854 if (src_object_shared == TRUE) {
3855 lock_granted = vm_object_lock_try_shared(old_copy);
3856 } else {
3857 lock_granted = vm_object_lock_try(old_copy);
3858 }
3859
3860 if (!lock_granted) {
3861 vm_object_unlock(src_object);
3862
3863 if (collisions++ == 0) {
3864 copy_delayed_lock_contention++;
3865 }
3866 mutex_pause(collisions);
3867
3868 /* Heisenberg Rules */
3869 copy_delayed_lock_collisions++;
3870
3871 if (collisions > copy_delayed_max_collisions) {
3872 copy_delayed_max_collisions = collisions;
3873 }
3874
3875 if (src_object_shared == TRUE) {
3876 vm_object_lock_shared(src_object);
3877 } else {
3878 vm_object_lock(src_object);
3879 }
3880
3881 goto Retry;
3882 }
3883
3884 /*
3885 * Determine whether the old copy object has
3886 * been modified.
3887 */
3888
3889 if (old_copy->resident_page_count == 0 &&
3890 !old_copy->pager_created) {
3891 /*
3892 * It has not been modified.
3893 *
3894 * Return another reference to
3895 * the existing copy-object if
3896 * we can safely grow it (if
3897 * needed).
3898 */
3899
3900 if (old_copy->vo_size < copy_size) {
3901 if (src_object_shared == TRUE) {
3902 vm_object_unlock(old_copy);
3903 vm_object_unlock(src_object);
3904
3905 vm_object_lock(src_object);
3906 src_object_shared = FALSE;
3907 goto Retry;
3908 }
3909 /*
3910 * We can't perform a delayed copy if any of the
3911 * pages in the extended range are wired (because
3912 * we can't safely take write permission away from
3913 * wired pages). If the pages aren't wired, then
3914 * go ahead and protect them.
3915 */
3916 copy_delayed_protect_iterate++;
3917
3918 pmap_flush_context_init(&pmap_flush_context_storage);
3919 delayed_pmap_flush = FALSE;
3920
3921 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3922 if (!vm_page_is_fictitious(p) &&
3923 p->vmp_offset >= old_copy->vo_size &&
3924 p->vmp_offset < copy_size) {
3925 if (p->vmp_busy && p->vmp_absent) {
3926 /*
3927 * A busy/absent page is still
3928 * waiting for its contents.
3929 * It should not be mapped in user
3930 * space (because it has no valid
3931 * contents) so no need to
3932 * write-protect it for copy-on-write.
3933 * It could have been mapped in the
3934 * kernel by the content provider
3935 * (a network filesystem, for example)
3936 * and we do not want to write-protect
3937 * that mapping, so we skip this page.
3938 */
3939 continue;
3940 }
3941 if (VM_PAGE_WIRED(p)) {
3942 vm_object_unlock(old_copy);
3943 vm_object_unlock(src_object);
3944
3945 if (new_copy != VM_OBJECT_NULL) {
3946 vm_object_unlock(new_copy);
3947 vm_object_deallocate(new_copy);
3948 }
3949 if (delayed_pmap_flush == TRUE) {
3950 pmap_flush(&pmap_flush_context_storage);
3951 }
3952
3953 return VM_OBJECT_NULL;
3954 } else {
3955 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3956 (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3957 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3958 delayed_pmap_flush = TRUE;
3959 }
3960 }
3961 }
3962 if (delayed_pmap_flush == TRUE) {
3963 pmap_flush(&pmap_flush_context_storage);
3964 }
3965
3966 assertf(page_aligned(copy_size),
3967 "object %p size 0x%llx",
3968 old_copy, (uint64_t)copy_size);
3969 old_copy->vo_size = copy_size;
3970
3971 /*
3972 * src_object's "vo_copy" object now covers
3973 * a larger portion of src_object.
3974 * Increment src_object's "vo_copy_version"
3975 * to make any racing vm_fault() on
3976 * "src_object" re-check if it needs to honor
3977 * any new copy-on-write obligation.
3978 */
3979 src_object->vo_copy_version++;
3980 }
3981 if (src_object_shared == TRUE) {
3982 vm_object_reference_shared(old_copy);
3983 } else {
3984 vm_object_reference_locked(old_copy);
3985 }
3986 vm_object_unlock(old_copy);
3987 vm_object_unlock(src_object);
3988
3989 if (new_copy != VM_OBJECT_NULL) {
3990 vm_object_unlock(new_copy);
3991 vm_object_deallocate(new_copy);
3992 }
3993 return old_copy;
3994 }
3995
3996
3997
3998 /*
3999 * Adjust the size argument so that the newly-created
4000 * copy object will be large enough to back either the
4001 * old copy object or the new mapping.
4002 */
4003 if (old_copy->vo_size > copy_size) {
4004 copy_size = old_copy->vo_size;
4005 }
4006
4007 if (new_copy == VM_OBJECT_NULL) {
4008 vm_object_unlock(old_copy);
4009 vm_object_unlock(src_object);
4010 /* Carry over the provenance from the object that's backing us */
4011 new_copy = vm_object_allocate(copy_size, src_object->vmo_provenance);
4012 vm_object_lock(src_object);
4013 vm_object_lock(new_copy);
4014
4015 src_object_shared = FALSE;
4016 goto Retry;
4017 }
4018 assertf(page_aligned(copy_size),
4019 "object %p size 0x%llx",
4020 new_copy, (uint64_t)copy_size);
4021 new_copy->vo_size = copy_size;
4022
4023 /*
4024 * The copy-object is always made large enough to
4025 * completely shadow the original object, since
4026 * it may have several users who want to shadow
4027 * the original object at different points.
4028 */
4029
4030 assert((old_copy->shadow == src_object) &&
4031 (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
4032 } else if (new_copy == VM_OBJECT_NULL) {
4033 vm_object_unlock(src_object);
4034 /* Carry over the provenance from the object that's backing us */
4035 new_copy = vm_object_allocate(copy_size, src_object->vmo_provenance);
4036 vm_object_lock(src_object);
4037 vm_object_lock(new_copy);
4038
4039 src_object_shared = FALSE;
4040 goto Retry;
4041 }
4042
4043 /*
4044 * We now have the src object locked, and the new copy object
4045 * allocated and locked (and potentially the old copy locked).
4046 * Before we go any further, make sure we can still perform
4047 * a delayed copy, as the situation may have changed.
4048 *
4049 * Specifically, we can't perform a delayed copy if any of the
4050 * pages in the range are wired (because we can't safely take
4051 * write permission away from wired pages). If the pages aren't
4052 * wired, then go ahead and protect them.
4053 */
4054 copy_delayed_protect_iterate++;
4055
4056 pmap_flush_context_init(&pmap_flush_context_storage);
4057 delayed_pmap_flush = FALSE;
4058
4059 vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
4060 if (!vm_page_is_fictitious(p) && p->vmp_offset < copy_size) {
4061 if (VM_PAGE_WIRED(p)) {
4062 if (old_copy) {
4063 vm_object_unlock(old_copy);
4064 }
4065 vm_object_unlock(src_object);
4066 vm_object_unlock(new_copy);
4067 vm_object_deallocate(new_copy);
4068
4069 if (delayed_pmap_flush == TRUE) {
4070 pmap_flush(&pmap_flush_context_storage);
4071 }
4072
4073 return VM_OBJECT_NULL;
4074 } else {
4075 pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
4076 (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
4077 PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4078 delayed_pmap_flush = TRUE;
4079 }
4080 }
4081 }
4082 if (delayed_pmap_flush == TRUE) {
4083 pmap_flush(&pmap_flush_context_storage);
4084 }
4085
4086 if (old_copy != VM_OBJECT_NULL) {
4087 /*
4088 * Make the old copy-object shadow the new one.
4089 * It will receive no more pages from the original
4090 * object.
4091 */
4092
4093 /* remove ref. from old_copy */
4094 vm_object_lock_assert_exclusive(src_object);
4095 os_ref_release_live_locked_raw(&src_object->ref_count,
4096 &vm_object_refgrp);
4097 vm_object_lock_assert_exclusive(old_copy);
4098 old_copy->shadow = new_copy;
4099 vm_object_lock_assert_exclusive(new_copy);
4100 assert(os_ref_get_count_raw(&new_copy->ref_count) > 0);
4101 /* for old_copy->shadow ref. */
4102 os_ref_retain_locked_raw(&new_copy->ref_count, &vm_object_refgrp);
4103
4104 vm_object_unlock(old_copy); /* done with old_copy */
4105 }
4106
4107 /*
4108 * Point the new copy at the existing object.
4109 */
4110 vm_object_lock_assert_exclusive(new_copy);
4111 new_copy->shadow = src_object;
4112 new_copy->vo_shadow_offset = 0;
4113 VM_OBJECT_SET_SHADOWED(new_copy, TRUE); /* caller must set needs_copy */
4114
4115 vm_object_lock_assert_exclusive(src_object);
4116 vm_object_reference_locked(src_object);
4117 VM_OBJECT_COPY_SET(src_object, new_copy);
4118 vm_object_unlock(src_object);
4119 vm_object_unlock(new_copy);
4120
4121 return new_copy;
4122 }
4123
4124 /*
4125 * Routine: vm_object_copy_strategically
4126 *
4127 * Purpose:
4128 * Perform a copy according to the source object's
4129 * declared strategy. This operation may block,
4130 * and may be interrupted.
4131 */
4132 __private_extern__ kern_return_t
4133 vm_object_copy_strategically(
4134 vm_object_t src_object,
4135 vm_object_offset_t src_offset,
4136 vm_object_size_t size,
4137 bool forking,
4138 vm_object_t *dst_object, /* OUT */
4139 vm_object_offset_t *dst_offset, /* OUT */
4140 boolean_t *dst_needs_copy) /* OUT */
4141 {
4142 boolean_t result;
4143 boolean_t interruptible = THREAD_ABORTSAFE; /* XXX */
4144 boolean_t object_lock_shared = FALSE;
4145 memory_object_copy_strategy_t copy_strategy;
4146
4147 assert(src_object != VM_OBJECT_NULL);
4148
4149 copy_strategy = src_object->copy_strategy;
4150
4151 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
4152 vm_object_lock_shared(src_object);
4153 object_lock_shared = TRUE;
4154 } else {
4155 vm_object_lock(src_object);
4156 }
4157
4158 /*
4159 * The copy strategy is only valid if the memory manager
4160 * is "ready". Internal objects are always ready.
4161 */
4162
4163 while (!src_object->internal && !src_object->pager_ready) {
4164 wait_result_t wait_result;
4165
4166 if (object_lock_shared == TRUE) {
4167 vm_object_unlock(src_object);
4168 vm_object_lock(src_object);
4169 object_lock_shared = FALSE;
4170 continue;
4171 }
		wait_result = vm_object_sleep(src_object,
4173 VM_OBJECT_EVENT_PAGER_READY,
4174 interruptible, LCK_SLEEP_EXCLUSIVE);
4175 if (wait_result != THREAD_AWAKENED) {
4176 vm_object_unlock(src_object);
4177 *dst_object = VM_OBJECT_NULL;
4178 *dst_offset = 0;
4179 *dst_needs_copy = FALSE;
4180 return MACH_SEND_INTERRUPTED;
4181 }
4182 }
4183
4184 /*
4185 * Use the appropriate copy strategy.
4186 */
4187
4188 if (copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) {
4189 if (forking) {
4190 copy_strategy = MEMORY_OBJECT_COPY_DELAY;
4191 } else {
4192 copy_strategy = MEMORY_OBJECT_COPY_NONE;
4193 if (object_lock_shared) {
4194 vm_object_unlock(src_object);
4195 vm_object_lock(src_object);
4196 object_lock_shared = FALSE;
4197 }
4198 }
4199 }
4200
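	/*
	 * At this point the strategy maps to:
	 *	MEMORY_OBJECT_COPY_DELAY     -> asymmetric copy-on-write via
	 *	                                vm_object_copy_delayed(),
	 *	                                falling back to the slow copy
	 *	                                if that is not possible;
	 *	MEMORY_OBJECT_COPY_NONE      -> eager page-by-page copy via
	 *	                                vm_object_copy_slowly();
	 *	MEMORY_OBJECT_COPY_SYMMETRIC -> KERN_MEMORY_RESTART_COPY, so
	 *	                                the caller redoes the copy.
	 */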
4201 switch (copy_strategy) {
4202 case MEMORY_OBJECT_COPY_DELAY:
4203 *dst_object = vm_object_copy_delayed(src_object,
4204 src_offset, size, object_lock_shared);
4205 if (*dst_object != VM_OBJECT_NULL) {
4206 *dst_offset = src_offset;
4207 *dst_needs_copy = TRUE;
4208 result = KERN_SUCCESS;
4209 break;
4210 }
4211 vm_object_lock(src_object);
4212 OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
4213
4214 case MEMORY_OBJECT_COPY_NONE:
4215 result = vm_object_copy_slowly(src_object,
4216 src_offset, size,
4217 interruptible,
4218 #if HAS_MTE
4219 forking && vm_object_is_mte_mappable(src_object), /* create_mte_object */
4220 #endif /* HAS_MTE */
4221 dst_object);
4222 if (result == KERN_SUCCESS) {
4223 *dst_offset = src_offset - vm_object_trunc_page(src_offset);
4224 *dst_needs_copy = FALSE;
4225 }
4226 break;
4227
4228 case MEMORY_OBJECT_COPY_SYMMETRIC:
4229 vm_object_unlock(src_object);
4230 result = KERN_MEMORY_RESTART_COPY;
4231 break;
4232
4233 default:
4234 panic("copy_strategically: bad strategy %d for object %p",
4235 copy_strategy, src_object);
4236 result = KERN_INVALID_ARGUMENT;
4237 }
4238 return result;
4239 }
4240
4241 /*
4242 * vm_object_shadow:
4243 *
4244 * Create a new object which is backed by the
4245 * specified existing object range. The source
4246 * object reference is deallocated.
4247 *
4248 * The new object and offset into that object
4249 * are returned in the source parameters.
4250 */
4251 boolean_t vm_object_shadow_check = TRUE;
4252 uint64_t vm_object_shadow_forced = 0;
4253 uint64_t vm_object_shadow_skipped = 0;
4254
4255 __private_extern__ boolean_t
4256 vm_object_shadow(
4257 vm_object_t *object, /* IN/OUT */
4258 vm_object_offset_t *offset, /* IN/OUT */
4259 vm_object_size_t length,
4260 boolean_t always_shadow)
4261 {
4262 vm_object_t source;
4263 vm_object_t result;
4264
4265 source = *object;
4266 assert(source != VM_OBJECT_NULL);
4267 if (source == VM_OBJECT_NULL) {
4268 return FALSE;
4269 }
4270
4271 assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
4272
4273 /*
4274 * Determine if we really need a shadow.
4275 *
4276 * If the source object is larger than what we are trying
4277 * to create, then force the shadow creation even if the
4278 * ref count is 1. This will allow us to [potentially]
4279 * collapse the underlying object away in the future
4280 * (freeing up the extra data it might contain and that
4281 * we don't need).
4282 */
4283
4284 assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
4285
4286 /*
4287 * The following optimization does not work in the context of submaps
4288 * (the shared region, in particular).
4289 * This object might have only 1 reference (in the submap) but that
4290 * submap can itself be mapped multiple times, so the object is
4291 * actually indirectly referenced more than once...
	 * The caller can specify "always_shadow" to bypass the optimization.
4293 */
4294 if (vm_object_shadow_check &&
4295 source->vo_size == length &&
4296 os_ref_get_count_raw(&source->ref_count) == 1) {
4297 if (always_shadow) {
4298 vm_object_shadow_forced++;
4299 } else {
4300 /*
4301 * Lock the object and check again.
4302 * We also check to see if there's
4303 * a shadow or copy object involved.
4304 * We can't do that earlier because
4305 * without the object locked, there
4306 * could be a collapse and the chain
4307 * gets modified leaving us with an
4308 * invalid pointer.
4309 */
4310 vm_object_lock(source);
4311 if (source->vo_size == length &&
4312 os_ref_get_count_raw(&source->ref_count) == 1 &&
4313 (source->shadow == VM_OBJECT_NULL ||
4314 source->shadow->vo_copy == VM_OBJECT_NULL)) {
4315 VM_OBJECT_SET_SHADOWED(source, FALSE);
4316 vm_object_unlock(source);
4317 vm_object_shadow_skipped++;
4318 return FALSE;
4319 }
4320 /* things changed while we were locking "source"... */
4321 vm_object_unlock(source);
4322 }
4323 }
4324
4325 /*
4326 * *offset is the map entry's offset into the VM object and
4327 * is aligned to the map's page size.
4328 * VM objects need to be aligned to the system's page size.
4329 * Record the necessary adjustment and re-align the offset so
4330 * that result->vo_shadow_offset is properly page-aligned.
4331 */
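	/*
	 * Worked example (hypothetical numbers, for a 4K map page size on
	 * a 16K system page size):
	 *	*offset = 0x5000, length = 0x1000
	 *	offset_adjustment = 0x5000 - 0x4000 = 0x1000
	 *	length  -> round_page(0x1000 + 0x1000) = 0x4000
	 *	*offset -> 0x4000 (becomes result->vo_shadow_offset), and the
	 *	           map entry's offset into the new object becomes
	 *	           0 + 0x1000 = 0x1000.
	 */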
4332 vm_object_offset_t offset_adjustment;
4333 offset_adjustment = *offset - vm_object_trunc_page(*offset);
4334 length = vm_object_round_page(length + offset_adjustment);
4335 *offset = vm_object_trunc_page(*offset);
4336
4337 /*
4338 * Allocate a new object with the given length
4339 */
4340
4341 if ((result = vm_object_allocate(length, source->vmo_provenance)) == VM_OBJECT_NULL) {
4342 panic("vm_object_shadow: no object for shadowing");
4343 }
4344
4345 /*
4346 * The new object shadows the source object, adding
	 * a reference to it. Our caller changes its reference
4348 * to point to the new object, removing a reference to
4349 * the source object. Net result: no change of reference
4350 * count.
4351 */
4352 result->shadow = source;
4353
4354 /*
4355 * Store the offset into the source object,
4356 * and fix up the offset into the new object.
4357 */
4358
4359 result->vo_shadow_offset = *offset;
4360 assertf(page_aligned(result->vo_shadow_offset),
4361 "result %p shadow offset 0x%llx",
4362 result, result->vo_shadow_offset);
4363
4364 /*
4365 * Return the new things
4366 */
4367
4368 *offset = 0;
4369 if (offset_adjustment) {
4370 /*
4371 * Make the map entry point to the equivalent offset
4372 * in the new object.
4373 */
4374 DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
4375 *offset += offset_adjustment;
4376 }
4377 *object = result;
4378 return TRUE;
4379 }
4380
4381 /*
4382 * The relationship between vm_object structures and
4383 * the memory_object requires careful synchronization.
4384 *
4385 * All associations are created by memory_object_create_named
4386 * for external pagers and vm_object_compressor_pager_create for internal
4387 * objects as follows:
4388 *
4389 * pager: the memory_object itself, supplied by
4390 * the user requesting a mapping (or the kernel,
4391 * when initializing internal objects); the
4392 * kernel simulates holding send rights by keeping
4393 * a port reference;
4394 *
4395 * pager_request:
4396 * the memory object control port,
4397 * created by the kernel; the kernel holds
4398 * receive (and ownership) rights to this
4399 * port, but no other references.
4400 *
4401 * When initialization is complete, the "initialized" field
4402 * is asserted. Other mappings using a particular memory object,
4403 * and any references to the vm_object gained through the
4404 * port association must wait for this initialization to occur.
4405 *
4406 * In order to allow the memory manager to set attributes before
4407 * requests (notably virtual copy operations, but also data or
4408 * unlock requests) are made, a "ready" attribute is made available.
4409 * Only the memory manager may affect the value of this attribute.
4410 * Its value does not affect critical kernel functions, such as
4411 * internal object initialization or destruction. [Furthermore,
4412 * memory objects created by the kernel are assumed to be ready
4413 * immediately; the default memory manager need not explicitly
4414 * set the "ready" attribute.]
4415 *
4416 * [Both the "initialized" and "ready" attribute wait conditions
4417 * use the "pager" field as the wait event.]
4418 *
4419 * The port associations can be broken down by any of the
4420 * following routines:
4421 * vm_object_terminate:
4422 * No references to the vm_object remain, and
4423 * the object cannot (or will not) be cached.
4424 * This is the normal case, and is done even
4425 * though one of the other cases has already been
4426 * done.
4427 * memory_object_destroy:
4428 * The memory manager has requested that the
4429 * kernel relinquish references to the memory
4430 * object. [The memory manager may not want to
4431 * destroy the memory object, but may wish to
4432 * refuse or tear down existing memory mappings.]
4433 *
4434 * Each routine that breaks an association must break all of
4435 * them at once. At some later time, that routine must clear
4436 * the pager field and release the memory object references.
4437 * [Furthermore, each routine must cope with the simultaneous
4438 * or previous operations of the others.]
4439 *
4440 * Because the pager field may be cleared spontaneously, it
4441 * cannot be used to determine whether a memory object has
4442 * ever been associated with a particular vm_object. [This
4443 * knowledge is important to the shadow object mechanism.]
4444 * For this reason, an additional "created" attribute is
4445 * provided.
4446 *
4447 * During various paging operations, the pager reference found in the
 * vm_object must be valid. To prevent it from being released
4449 * (other than being removed, i.e., made null), routines may use
4450 * the vm_object_paging_begin/end routines [actually, macros].
4451 * The implementation uses the "paging_in_progress" and "wanted" fields.
4452 * [Operations that alter the validity of the pager values include the
4453 * termination routines and vm_object_collapse.]
4454 */
4455
4456
4457 /*
4458 * Routine: vm_object_memory_object_associate
4459 * Purpose:
4460 * Associate a VM object to the given pager.
4461 * If a VM object is not provided, create one.
4462 * Initialize the pager.
4463 */
4464 vm_object_t
4465 vm_object_memory_object_associate(
4466 memory_object_t pager,
4467 vm_object_t object,
4468 vm_object_size_t size,
4469 boolean_t named)
4470 {
4471 memory_object_control_t control;
4472
4473 assert(pager != MEMORY_OBJECT_NULL);
4474
4475 if (object != VM_OBJECT_NULL) {
4476 vm_object_lock(object);
4477 assert(object->internal);
4478 assert(object->pager_created);
4479 assert(!object->pager_initialized);
4480 assert(!object->pager_ready);
4481 assert(object->pager_trusted);
4482 } else {
4483 /* No provenance yet */
4484 object = vm_object_allocate(size, VM_MAP_SERIAL_NONE);
4485 assert(object != VM_OBJECT_NULL);
4486 vm_object_lock(object);
4487 VM_OBJECT_SET_INTERNAL(object, FALSE);
4488 VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE);
4489 /* copy strategy invalid until set by memory manager */
4490 object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4491 }
4492
4493 /*
4494 * Allocate request port.
4495 */
4496
4497 control = memory_object_control_allocate(object);
4498 assert(control != MEMORY_OBJECT_CONTROL_NULL);
4499
4500 assert(!object->pager_ready);
4501 assert(!object->pager_initialized);
4502 assert(object->pager == NULL);
4503 assert(object->pager_control == NULL);
4504
4505 /*
4506 * Copy the reference we were given.
4507 */
4508
4509 memory_object_reference(pager);
4510 VM_OBJECT_SET_PAGER_CREATED(object, TRUE);
4511 object->pager = pager;
4512 object->pager_control = control;
4513 VM_OBJECT_SET_PAGER_READY(object, FALSE);
4514
4515 vm_object_unlock(object);
4516
4517 /*
4518 * Let the pager know we're using it.
4519 */
4520
4521 (void) memory_object_init(pager,
4522 object->pager_control,
4523 PAGE_SIZE);
4524
4525 vm_object_lock(object);
4526 if (named) {
4527 VM_OBJECT_SET_NAMED(object, TRUE);
4528 }
4529 if (object->internal) {
4530 VM_OBJECT_SET_PAGER_READY(object, TRUE);
4531 vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4532 }
4533
4534 VM_OBJECT_SET_PAGER_INITIALIZED(object, TRUE);
4535 // vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_INIT);
4536
4537 vm_object_unlock(object);
4538
4539 return object;
4540 }
4541
4542 /*
4543 * Routine: vm_object_compressor_pager_create
4544 * Purpose:
4545 * Create a memory object for an internal object.
4546 * In/out conditions:
4547 * The object is locked on entry and exit;
4548 * it may be unlocked within this call.
4549 * Limitations:
4550 * Only one thread may be performing a
4551 * vm_object_compressor_pager_create on an object at
4552 * a time. Presumably, only the pageout
4553 * daemon will be using this routine.
4554 */
4555
4556 void
4557 vm_object_compressor_pager_create(
4558 vm_object_t object)
4559 {
4560 memory_object_t pager;
4561 vm_object_t pager_object = VM_OBJECT_NULL;
4562
4563 assert(!is_kernel_object(object));
4564
4565 /*
4566 * Prevent collapse or termination by holding a paging reference
4567 */
4568
4569 vm_object_paging_begin(object);
4570 if (object->pager_created) {
4571 /*
4572 * Someone else got to it first...
4573 * wait for them to finish initializing the ports
4574 */
4575 while (!object->pager_ready) {
4576 vm_object_sleep(object,
4577 VM_OBJECT_EVENT_PAGER_READY,
4578 THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
4579 }
4580 vm_object_paging_end(object);
4581 return;
4582 }
4583
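	/*
	 * The compressor pager cannot represent an object whose size in
	 * pages does not fit in 32 bits; such an object is simply left
	 * without a pager.
	 */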
4584 if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4585 (object->vo_size / PAGE_SIZE)) {
4586 #if DEVELOPMENT || DEBUG
4587 printf("vm_object_compressor_pager_create(%p): "
4588 "object size 0x%llx >= 0x%llx\n",
4589 object,
4590 (uint64_t) object->vo_size,
4591 0x0FFFFFFFFULL * PAGE_SIZE);
4592 #endif /* DEVELOPMENT || DEBUG */
4593 vm_object_paging_end(object);
4594 return;
4595 }
4596
4597 #if HAS_MTE /* TODO: remove this when MTE support in the compressor is finalized */
4598 if (!vm_object_allow_compressor_pager_for_mte && vm_object_is_mte_mappable(object)) {
4599 vm_object_no_compressor_pager_for_mte_count++;
4600 vm_object_paging_end(object);
4601 return;
4602 }
4603 #endif
4604
4605 /*
4606 * Indicate that a memory object has been assigned
4607 * before dropping the lock, to prevent a race.
4608 */
4609
4610 VM_OBJECT_SET_PAGER_CREATED(object, TRUE);
4611 VM_OBJECT_SET_PAGER_TRUSTED(object, TRUE);
4612 object->paging_offset = 0;
4613
4614 vm_object_unlock(object);
4615
4616 /*
4617 * Create the [internal] pager, and associate it with this object.
4618 *
4619 * We make the association here so that vm_object_enter()
4620 * can look up the object to complete initializing it. No
4621 * user will ever map this object.
4622 */
4623 {
4624 /* create our new memory object */
4625 assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4626 (object->vo_size / PAGE_SIZE));
4627 (void) compressor_memory_object_create(
4628 (memory_object_size_t) object->vo_size,
4629 &pager);
4630 if (pager == NULL) {
4631 panic("vm_object_compressor_pager_create(): "
4632 "no pager for object %p size 0x%llx\n",
4633 object, (uint64_t) object->vo_size);
4634 }
4635 }
4636
4637 /*
4638 * A reference was returned by
	 * compressor_memory_object_create(), and it is
4640 * copied by vm_object_memory_object_associate().
4641 */
4642
4643 pager_object = vm_object_memory_object_associate(pager,
4644 object,
4645 object->vo_size,
4646 FALSE);
4647 if (pager_object != object) {
4648 panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4649 }
4650
4651 /*
4652 * Drop the reference we were passed.
4653 */
4654 memory_object_deallocate(pager);
4655
4656 vm_object_lock(object);
4657
4658 /*
4659 * Release the paging reference
4660 */
4661 vm_object_paging_end(object);
4662 }
4663
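/*
 * Routine:	vm_object_compressor_pager_state_get
 * Purpose:
 *	Report what the compressor pager knows about the page at "offset"
 *	in "object" (e.g. VM_EXTERNAL_STATE_EXISTS or _ABSENT), or
 *	VM_EXTERNAL_STATE_UNKNOWN if the object has no live internal
 *	pager to ask.
 * Conditions:
 *	The object must be locked (the assertion is skipped when called
 *	from the kernel debugger).
 */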
4664 vm_external_state_t
4665 vm_object_compressor_pager_state_get(
4666 vm_object_t object,
4667 vm_object_offset_t offset)
4668 {
4669 if (__probable(not_in_kdp)) {
4670 vm_object_lock_assert_held(object);
4671 }
4672 if (object->internal &&
4673 object->pager != NULL &&
4674 !object->terminating &&
4675 object->alive) {
4676 return vm_compressor_pager_state_get(object->pager,
4677 offset + object->paging_offset);
4678 } else {
4679 return VM_EXTERNAL_STATE_UNKNOWN;
4680 }
4681 }
4682
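/*
 * Routine:	vm_object_compressor_pager_state_clr
 * Purpose:
 *	Forget any compressed copy of the page at "offset" in "object",
 *	adjusting the compressor's page count and, for purgeable or
 *	ledger-tagged objects, the owner's compressed-page ledger.
 * Conditions:
 *	The object must be locked exclusively.
 */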
4683 void
4684 vm_object_compressor_pager_state_clr(
4685 vm_object_t object,
4686 vm_object_offset_t offset)
4687 {
4688 unsigned int num_pages_cleared;
4689 vm_object_lock_assert_exclusive(object);
4690 if (object->internal &&
4691 object->pager != NULL &&
4692 !object->terminating &&
4693 object->alive) {
4694 num_pages_cleared = vm_compressor_pager_state_clr(object->pager,
4695 offset + object->paging_offset);
4696 if (num_pages_cleared) {
4697 vm_compressor_pager_count(object->pager,
4698 -num_pages_cleared,
4699 FALSE, /* shared */
4700 object);
4701 }
4702 if (num_pages_cleared &&
4703 (object->purgable != VM_PURGABLE_DENY || object->vo_ledger_tag)) {
4704 /* less compressed purgeable/tagged pages */
4705 assert3u(num_pages_cleared, ==, 1);
4706 vm_object_owner_compressed_update(object, -num_pages_cleared);
4707 }
4708 }
4709 }
4710
4711 /*
4712 * Global variables for vm_object_collapse():
4713 *
4714 * Counts for normal collapses and bypasses.
4715 * Debugging variables, to watch or disable collapse.
4716 */
4717 static long object_collapses = 0;
4718 static long object_bypasses = 0;
4719
4720 static boolean_t vm_object_collapse_allowed = TRUE;
4721 static boolean_t vm_object_bypass_allowed = TRUE;
4722
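/*
 * Routine:	vm_object_do_collapse_compressor
 * Purpose:
 *	As part of collapsing "backing_object" into "object", transfer the
 *	compressed pages that fall within object's shadow window from
 *	backing_object's pager to object's pager, skipping offsets where
 *	object already has the page resident or compressed.
 * Conditions:
 *	Both objects are locked exclusively.
 */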
4723 void vm_object_do_collapse_compressor(vm_object_t object,
4724 vm_object_t backing_object);
4725 void
4726 vm_object_do_collapse_compressor(
4727 vm_object_t object,
4728 vm_object_t backing_object)
4729 {
4730 vm_object_offset_t new_offset, backing_offset;
4731 vm_object_size_t size;
4732
4733 vm_counters.do_collapse_compressor++;
4734
4735 vm_object_lock_assert_exclusive(object);
4736 vm_object_lock_assert_exclusive(backing_object);
4737
4738 size = object->vo_size;
4739
4740 /*
4741 * Move all compressed pages from backing_object
4742 * to the parent.
4743 */
4744
4745 for (backing_offset = object->vo_shadow_offset;
4746 backing_offset < object->vo_shadow_offset + object->vo_size;
4747 backing_offset += PAGE_SIZE) {
4748 memory_object_offset_t backing_pager_offset;
4749
4750 /* find the next compressed page at or after this offset */
4751 backing_pager_offset = (backing_offset +
4752 backing_object->paging_offset);
4753 backing_pager_offset = vm_compressor_pager_next_compressed(
4754 backing_object->pager,
4755 backing_pager_offset);
4756 if (backing_pager_offset == (memory_object_offset_t) -1) {
4757 /* no more compressed pages */
4758 break;
4759 }
4760 backing_offset = (backing_pager_offset -
4761 backing_object->paging_offset);
4762
4763 new_offset = backing_offset - object->vo_shadow_offset;
4764
4765 if (new_offset >= object->vo_size) {
4766 /* we're out of the scope of "object": done */
4767 break;
4768 }
4769
4770 if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4771 (vm_compressor_pager_state_get(object->pager,
4772 (new_offset +
4773 object->paging_offset)) ==
4774 VM_EXTERNAL_STATE_EXISTS)) {
4775 /*
4776 * This page already exists in object, resident or
4777 * compressed.
4778 * We don't need this compressed page in backing_object
4779 * and it will be reclaimed when we release
4780 * backing_object.
4781 */
4782 continue;
4783 }
4784
4785 /*
4786 * backing_object has this page in the VM compressor and
4787 * we need to transfer it to object.
4788 */
4789 vm_counters.do_collapse_compressor_pages++;
4790 vm_compressor_pager_transfer(
4791 /* destination: */
4792 object->pager,
4793 (new_offset + object->paging_offset),
4794 /* source: */
4795 backing_object->pager,
4796 (backing_offset + backing_object->paging_offset));
4797 }
4798 }
4799
4800 /*
4801 * Routine: vm_object_do_collapse
4802 * Purpose:
4803 * Collapse an object with the object backing it.
4804 * Pages in the backing object are moved into the
4805 * parent, and the backing object is deallocated.
4806 * Conditions:
4807 * Both objects and the cache are locked; the page
4808 * queues are unlocked.
4809 *
4810 */
4811 static void
4812 vm_object_do_collapse(
4813 vm_object_t object,
4814 vm_object_t backing_object)
4815 {
4816 vm_page_t p, pp;
4817 vm_object_offset_t new_offset, backing_offset;
4818 vm_object_size_t size;
4819
4820 vm_object_lock_assert_exclusive(object);
4821 vm_object_lock_assert_exclusive(backing_object);
4822
4823 assert(object->purgable == VM_PURGABLE_DENY);
4824 assert(backing_object->purgable == VM_PURGABLE_DENY);
4825
4826 backing_offset = object->vo_shadow_offset;
4827 size = object->vo_size;
4828
4829 /*
4830 * Move all in-memory pages from backing_object
4831 * to the parent. Pages that have been paged out
4832 * will be overwritten by any of the parent's
4833 * pages that shadow them.
4834 */
4835
4836 while (!vm_page_queue_empty(&backing_object->memq)) {
4837 p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4838
4839 new_offset = (p->vmp_offset - backing_offset);
4840
4841 assert(!p->vmp_busy || p->vmp_absent);
4842
4843 /*
4844 * If the parent has a page here, or if
4845 * this page falls outside the parent,
4846 * dispose of it.
4847 *
4848 * Otherwise, move it as planned.
4849 */
4850
4851 if (p->vmp_offset < backing_offset || new_offset >= size) {
4852 VM_PAGE_FREE(p);
4853 } else {
4854 pp = vm_page_lookup(object, new_offset);
4855 if (pp == VM_PAGE_NULL) {
4856 if (vm_object_compressor_pager_state_get(object,
4857 new_offset)
4858 == VM_EXTERNAL_STATE_EXISTS) {
4859 /*
4860 * Parent object has this page
4861 * in the VM compressor.
4862 * Throw away the backing
4863 * object's page.
4864 */
4865 VM_PAGE_FREE(p);
4866 } else {
4867 /*
4868 * Parent now has no page.
4869 * Move the backing object's page
4870 * up.
4871 */
4872 vm_page_rename(p, object, new_offset);
4873 }
4874 } else {
4875 assert(!pp->vmp_absent);
4876
4877 /*
4878 * Parent object has a real page.
4879 * Throw away the backing object's
4880 * page.
4881 */
4882 VM_PAGE_FREE(p);
4883 }
4884 }
4885 }
4886
4887 if (vm_object_collapse_compressor_allowed &&
4888 object->pager != MEMORY_OBJECT_NULL &&
4889 backing_object->pager != MEMORY_OBJECT_NULL) {
4890 /* move compressed pages from backing_object to object */
4891 vm_object_do_collapse_compressor(object, backing_object);
4892 } else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4893 assert((!object->pager_created &&
4894 (object->pager == MEMORY_OBJECT_NULL)) ||
4895 (!backing_object->pager_created &&
4896 (backing_object->pager == MEMORY_OBJECT_NULL)));
4897 /*
4898 * Move the pager from backing_object to object.
4899 *
4900 * XXX We're only using part of the paging space
4901 * for keeps now... we ought to discard the
4902 * unused portion.
4903 */
4904
4905 assert(!object->paging_in_progress);
4906 assert(!object->activity_in_progress);
4907 assert(!object->pager_created);
4908 assert(object->pager == NULL);
4909 object->pager = backing_object->pager;
4910
4911 VM_OBJECT_SET_PAGER_CREATED(object, backing_object->pager_created);
4912 object->pager_control = backing_object->pager_control;
4913 VM_OBJECT_SET_PAGER_READY(object, backing_object->pager_ready);
4914 VM_OBJECT_SET_PAGER_INITIALIZED(object, backing_object->pager_initialized);
4915 object->paging_offset =
4916 backing_object->paging_offset + backing_offset;
4917 if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4918 memory_object_control_collapse(&object->pager_control,
4919 object);
4920 }
4921 /* the backing_object has lost its pager: reset all fields */
4922 VM_OBJECT_SET_PAGER_CREATED(backing_object, FALSE);
4923 backing_object->pager_control = NULL;
4924 VM_OBJECT_SET_PAGER_READY(backing_object, FALSE);
4925 backing_object->paging_offset = 0;
4926 backing_object->pager = NULL;
4927 }
4928 /*
4929 * Object now shadows whatever backing_object did.
4930 * Note that the reference to backing_object->shadow
4931 * moves from within backing_object to within object.
4932 */
4933
4934 assert(!object->phys_contiguous);
4935 assert(!backing_object->phys_contiguous);
4936 object->shadow = backing_object->shadow;
4937 if (object->shadow) {
4938 assertf(page_aligned(object->vo_shadow_offset),
4939 "object %p shadow_offset 0x%llx",
4940 object, object->vo_shadow_offset);
4941 assertf(page_aligned(backing_object->vo_shadow_offset),
4942 "backing_object %p shadow_offset 0x%llx",
4943 backing_object, backing_object->vo_shadow_offset);
4944 object->vo_shadow_offset += backing_object->vo_shadow_offset;
4945 /* "backing_object" gave its shadow to "object" */
4946 backing_object->shadow = VM_OBJECT_NULL;
4947 backing_object->vo_shadow_offset = 0;
4948 } else {
4949 /* no shadow, therefore no shadow offset... */
4950 object->vo_shadow_offset = 0;
4951 }
4952 assert((object->shadow == VM_OBJECT_NULL) ||
4953 (object->shadow->vo_copy != backing_object));
4954
4955 /*
4956 * Discard backing_object.
4957 *
4958 * Since the backing object has no pages, no
4959 * pager left, and no object references within it,
4960 * all that is necessary is to dispose of it.
4961 */
4962 object_collapses++;
4963
4964 assert(os_ref_get_count_raw(&backing_object->ref_count) == 1);
4965 assert(backing_object->resident_page_count == 0);
4966 assert(backing_object->paging_in_progress == 0);
4967 assert(backing_object->activity_in_progress == 0);
4968 assert(backing_object->shadow == VM_OBJECT_NULL);
4969 assert(backing_object->vo_shadow_offset == 0);
4970
4971 if (backing_object->pager != MEMORY_OBJECT_NULL) {
4972 /* ... unless it has a pager; need to terminate pager too */
4973 vm_counters.do_collapse_terminate++;
4974 if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4975 vm_counters.do_collapse_terminate_failure++;
4976 }
4977 return;
4978 }
4979
4980 assert(backing_object->pager == NULL);
4981
4982 VM_OBJECT_SET_ALIVE(backing_object, FALSE);
4983 vm_object_unlock(backing_object);
4984
4985 #if VM_OBJECT_TRACKING
4986 if (vm_object_tracking_btlog) {
4987 btlog_erase(vm_object_tracking_btlog, backing_object);
4988 }
4989 #endif /* VM_OBJECT_TRACKING */
4990
4991 vm_object_lock_destroy(backing_object);
4992
4993 zfree(vm_object_zone, backing_object);
4994 }
4995
4996 static void
4997 vm_object_do_bypass(
4998 vm_object_t object,
4999 vm_object_t backing_object)
5000 {
5001 /*
5002 * Make the parent shadow the next object
5003 * in the chain.
5004 */
5005
5006 vm_object_lock_assert_exclusive(object);
5007 vm_object_lock_assert_exclusive(backing_object);
5008
5009 vm_object_reference(backing_object->shadow);
5010
5011 assert(!object->phys_contiguous);
5012 assert(!backing_object->phys_contiguous);
5013 object->shadow = backing_object->shadow;
5014 if (object->shadow) {
5015 assertf(page_aligned(object->vo_shadow_offset),
5016 "object %p shadow_offset 0x%llx",
5017 object, object->vo_shadow_offset);
5018 assertf(page_aligned(backing_object->vo_shadow_offset),
5019 "backing_object %p shadow_offset 0x%llx",
5020 backing_object, backing_object->vo_shadow_offset);
5021 object->vo_shadow_offset += backing_object->vo_shadow_offset;
5022 } else {
5023 /* no shadow, therefore no shadow offset... */
5024 object->vo_shadow_offset = 0;
5025 }
5026
5027 /*
5028 * Backing object might have had a copy pointer
5029 * to us. If it did, clear it.
5030 */
5031 if (backing_object->vo_copy == object) {
5032 VM_OBJECT_COPY_SET(backing_object, VM_OBJECT_NULL);
5033 }
5034
5035 /*
5036 * Drop the reference count on backing_object.
5037 #if TASK_SWAPPER
5038 * Since its ref_count was at least 2, it
5039 * will not vanish; so we don't need to call
5040 * vm_object_deallocate.
5041 * [with a caveat for "named" objects]
5042 *
5043 * The res_count on the backing object is
5044 * conditionally decremented. It's possible
5045 * (via vm_pageout_scan) to get here with
5046 * a "swapped" object, which has a 0 res_count,
5047 * in which case, the backing object res_count
5048 * is already down by one.
5049 #else
5050 * Don't call vm_object_deallocate unless
5051 * ref_count drops to zero.
5052 *
5053 * The ref_count can drop to zero here if the
5054 * backing object could be bypassed but not
5055 * collapsed, such as when the backing object
5056 * is temporary and cachable.
5057 #endif
5058 */
5059 if (os_ref_get_count_raw(&backing_object->ref_count) > 2 ||
5060 (!backing_object->named &&
5061 os_ref_get_count_raw(&backing_object->ref_count) > 1)) {
5062 vm_object_lock_assert_exclusive(backing_object);
5063 os_ref_release_live_locked_raw(&backing_object->ref_count,
5064 &vm_object_refgrp);
5065 vm_object_unlock(backing_object);
5066 } else {
5067 /*
5068 * Drop locks so that we can deallocate
5069 * the backing object.
5070 */
5071
5072 /*
5073 * vm_object_collapse (the caller of this function) is
5074 * now called from contexts that may not guarantee that a
5075 * valid reference is held on the object... w/o a valid
5076 * reference, it is unsafe and unwise (you will definitely
5077 * regret it) to unlock the object and then retake the lock
5078 * since the object may be terminated and recycled in between.
5079 * The "activity_in_progress" reference will keep the object
5080 * 'stable'.
5081 */
5082 vm_object_activity_begin(object);
5083 vm_object_unlock(object);
5084
5085 vm_object_unlock(backing_object);
5086 vm_object_deallocate(backing_object);
5087
5088 /*
5089 * Relock object. We don't have to reverify
5090 * its state since vm_object_collapse will
5091 * do that for us as it starts at the
5092 * top of its loop.
5093 */
5094
5095 vm_object_lock(object);
5096 vm_object_activity_end(object);
5097 }
5098
5099 object_bypasses++;
5100 }
5101
5102
5103 /*
5104 * vm_object_collapse:
5105 *
5106 * Perform an object collapse or an object bypass if appropriate.
5107 * The real work of collapsing and bypassing is performed in
5108 * the routines vm_object_do_collapse and vm_object_do_bypass.
5109 *
5110 * Requires that the object be locked and the page queues be unlocked.
5111 *
5112 */
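/* statistics for vm_object_collapse() */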
5113 static unsigned long vm_object_collapse_calls = 0;
5114 static unsigned long vm_object_collapse_objects = 0;
5115 static unsigned long vm_object_collapse_do_collapse = 0;
5116 static unsigned long vm_object_collapse_do_bypass = 0;
5117
5118 __private_extern__ void
5119 vm_object_collapse(
5120 vm_object_t object,
5121 vm_object_offset_t hint_offset,
5122 boolean_t can_bypass)
5123 {
5124 vm_object_t backing_object;
5125 vm_object_size_t object_vcount, object_rcount;
5126 vm_object_t original_object;
5127 int object_lock_type;
5128 int backing_object_lock_type;
5129
5130 vm_object_collapse_calls++;
5131
5132 assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
5133
5134 if (!vm_object_collapse_allowed &&
5135 !(can_bypass && vm_object_bypass_allowed)) {
5136 return;
5137 }
5138
5139 if (object == VM_OBJECT_NULL) {
5140 return;
5141 }
5142
5143 original_object = object;
5144
5145 /*
5146 * The top object was locked "exclusive" by the caller.
5147 * In the first pass, to determine if we can collapse the shadow chain,
5148 * take a "shared" lock on the shadow objects. If we can collapse,
5149 * we'll have to go down the chain again with exclusive locks.
5150 */
5151 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5152 backing_object_lock_type = OBJECT_LOCK_SHARED;
5153
5154 retry:
5155 object = original_object;
5156 vm_object_lock_assert_exclusive(object);
5157
5158 while (TRUE) {
5159 vm_object_collapse_objects++;
5160 /*
5161 * Verify that the conditions are right for either
5162 * collapse or bypass:
5163 */
5164
5165 /*
5166 * There is a backing object, and
5167 */
5168
5169 backing_object = object->shadow;
5170 if (backing_object == VM_OBJECT_NULL) {
5171 if (object != original_object) {
5172 vm_object_unlock(object);
5173 }
5174 return;
5175 }
5176 if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
5177 vm_object_lock_shared(backing_object);
5178 } else {
5179 vm_object_lock(backing_object);
5180 }
5181
5182 /*
5183 * No pages in the object are currently
5184 * being paged out, and
5185 */
5186 if (object->paging_in_progress != 0 ||
5187 object->activity_in_progress != 0) {
5188 /* try and collapse the rest of the shadow chain */
5189 if (object != original_object) {
5190 vm_object_unlock(object);
5191 }
5192 object = backing_object;
5193 object_lock_type = backing_object_lock_type;
5194 continue;
5195 }
5196
5197 /*
5198 * ...
5199 * The backing object is not read_only,
5200 * and no pages in the backing object are
5201 * currently being paged out.
5202 * The backing object is internal.
5203 *
5204 */
5205
5206 if (!backing_object->internal ||
5207 backing_object->paging_in_progress != 0 ||
5208 backing_object->activity_in_progress != 0) {
5209 /* try and collapse the rest of the shadow chain */
5210 if (object != original_object) {
5211 vm_object_unlock(object);
5212 }
5213 object = backing_object;
5214 object_lock_type = backing_object_lock_type;
5215 continue;
5216 }
5217
5218 /*
5219 * Purgeable objects are not supposed to engage in
5220 * copy-on-write activities, so should not have
5221 * any shadow objects or be a shadow object to another
5222 * object.
5223 * Collapsing a purgeable object would require some
5224 * updates to the purgeable compressed ledgers.
5225 */
5226 if (object->purgable != VM_PURGABLE_DENY ||
5227 backing_object->purgable != VM_PURGABLE_DENY) {
5228 panic("vm_object_collapse() attempting to collapse "
5229 "purgeable object: %p(%d) %p(%d)\n",
5230 object, object->purgable,
5231 backing_object, backing_object->purgable);
5232 /* try and collapse the rest of the shadow chain */
5233 if (object != original_object) {
5234 vm_object_unlock(object);
5235 }
5236 object = backing_object;
5237 object_lock_type = backing_object_lock_type;
5238 continue;
5239 }
5240
5241 /*
5242 * The backing object can't be a copy-object:
5243 * the shadow_offset for the copy-object must stay
5244 * as 0. Furthermore (for the 'we have all the
5245 * pages' case), if we bypass backing_object and
5246 * just shadow the next object in the chain, old
5247 * pages from that object would then have to be copied
5248 * BOTH into the (former) backing_object and into the
5249 * parent object.
5250 */
5251 if (backing_object->shadow != VM_OBJECT_NULL &&
5252 backing_object->shadow->vo_copy == backing_object) {
5253 /* try and collapse the rest of the shadow chain */
5254 if (object != original_object) {
5255 vm_object_unlock(object);
5256 }
5257 object = backing_object;
5258 object_lock_type = backing_object_lock_type;
5259 continue;
5260 }
5261
5262 /*
5263 * We can now try to either collapse the backing
5264 * object (if the parent is the only reference to
5265 * it) or (perhaps) remove the parent's reference
5266 * to it.
5267 *
5268 * If there is exactly one reference to the backing
5269 * object, we may be able to collapse it into the
5270 * parent.
5271 *
5272 * As long as one of the objects is still not known
5273 * to the pager, we can collapse them.
5274 */
5275 if (os_ref_get_count_raw(&backing_object->ref_count) == 1 &&
5276 (vm_object_collapse_compressor_allowed ||
5277 !object->pager_created
5278 || (!backing_object->pager_created)
5279 ) && vm_object_collapse_allowed) {
5280 /*
5281 * We need the exclusive lock on the VM objects.
5282 */
5283 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5284 /*
5285 * We have an object and its shadow locked
5286 * "shared". We can't just upgrade the locks
5287 * to "exclusive", as some other thread might
5288 * also have these objects locked "shared" and
5289 * attempt to upgrade one or the other to
5290 * "exclusive". The upgrades would block
5291 * forever waiting for the other "shared" locks
5292 * to get released.
5293 * So we have to release the locks and go
5294 * down the shadow chain again (since it could
5295 * have changed) with "exclusive" locking.
5296 */
5297 vm_object_unlock(backing_object);
5298 if (object != original_object) {
5299 vm_object_unlock(object);
5300 }
5301 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5302 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5303 goto retry;
5304 }
5305
5306 /*
5307 * Collapse the object with its backing
5308 * object, and try again with the object's
5309 * new backing object.
5310 */
5311
5312 vm_object_do_collapse(object, backing_object);
5313 vm_object_collapse_do_collapse++;
5314 continue;
5315 }
5316
5317 /*
5318 * Collapsing the backing object was not possible
5319 * or permitted, so let's try bypassing it.
5320 */
5321
5322 if (!(can_bypass && vm_object_bypass_allowed)) {
5323 /* try and collapse the rest of the shadow chain */
5324 if (object != original_object) {
5325 vm_object_unlock(object);
5326 }
5327 object = backing_object;
5328 object_lock_type = backing_object_lock_type;
5329 continue;
5330 }
5331
5332
5333 /*
5334 * If the object doesn't have all its pages present,
5335 * we have to make sure no pages in the backing object
5336 * "show through" before bypassing it.
5337 */
5338 object_vcount = object->vo_size >> PAGE_SHIFT;
5339 object_rcount = (vm_object_size_t)object->resident_page_count;
5340
5341 if (object_rcount != object_vcount) {
5342 vm_object_offset_t offset;
5343 vm_object_offset_t backing_offset;
5344 vm_object_size_t backing_rcount, backing_vcount;
5345
5346 /*
5347 * If the backing object has a pager but no pagemap,
5348 * then we cannot bypass it, because we don't know
5349 * what pages it has.
5350 */
5351 if (backing_object->pager_created) {
5352 /* try and collapse the rest of the shadow chain */
5353 if (object != original_object) {
5354 vm_object_unlock(object);
5355 }
5356 object = backing_object;
5357 object_lock_type = backing_object_lock_type;
5358 continue;
5359 }
5360
5361 /*
5362 * If the object has a pager but no pagemap,
5363 * then we cannot bypass it, because we don't know
5364 * what pages it has.
5365 */
5366 if (object->pager_created) {
5367 /* try and collapse the rest of the shadow chain */
5368 if (object != original_object) {
5369 vm_object_unlock(object);
5370 }
5371 object = backing_object;
5372 object_lock_type = backing_object_lock_type;
5373 continue;
5374 }
5375
5376 backing_offset = object->vo_shadow_offset;
5377 backing_vcount = backing_object->vo_size >> PAGE_SHIFT;
5378 backing_rcount = (vm_object_size_t)backing_object->resident_page_count;
5379 assert(backing_vcount >= object_vcount);
5380
5381 if (backing_rcount > (backing_vcount - object_vcount) &&
5382 backing_rcount - (backing_vcount - object_vcount) > object_rcount) {
5383 /*
5384 * we have enough pages in the backing object to guarantee that
5385 * at least 1 of them must be 'uncovered' by a resident page
5386 * in the object we're evaluating, so move on and
5387 * try to collapse the rest of the shadow chain
5388 */
5389 if (object != original_object) {
5390 vm_object_unlock(object);
5391 }
5392 object = backing_object;
5393 object_lock_type = backing_object_lock_type;
5394 continue;
5395 }
5396
5397 /*
5398 * If all of the pages in the backing object are
5399 * shadowed by the parent object, the parent
5400 * object no longer has to shadow the backing
5401 * object; it can shadow the next one in the
5402 * chain.
5403 *
* If the backing object has existence info,
* we must examine that existence info
* as well.
5407 *
5408 */
5409
5410 #define EXISTS_IN_OBJECT(obj, off, rc) \
5411 ((vm_object_compressor_pager_state_get((obj), (off)) \
5412 == VM_EXTERNAL_STATE_EXISTS) || \
5413 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
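/*
 * Note on EXISTS_IN_OBJECT: the first arm catches pages that live in the
 * compressor pager, the second arm catches resident pages. The "(rc)--"
 * side effect decrements the caller's running count of resident pages each
 * time one is found, so once every resident page has been accounted for the
 * "(rc) &&" short-circuit skips any further vm_page_lookup() calls.
 */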
5414
5415 /*
5416 * Check the hint location first
5417 * (since it is often the quickest way out of here).
5418 */
5419 if (object->cow_hint != ~(vm_offset_t)0) {
5420 hint_offset = (vm_object_offset_t)object->cow_hint;
5421 } else {
5422 hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
5423 (hint_offset - 8 * PAGE_SIZE_64) : 0;
5424 }
5425
5426 if (EXISTS_IN_OBJECT(backing_object, hint_offset +
5427 backing_offset, backing_rcount) &&
5428 !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) {
5429 /* dependency right at the hint */
5430 object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
5431 /* try and collapse the rest of the shadow chain */
5432 if (object != original_object) {
5433 vm_object_unlock(object);
5434 }
5435 object = backing_object;
5436 object_lock_type = backing_object_lock_type;
5437 continue;
5438 }
5439
5440 /*
5441 * If the object's window onto the backing_object
5442 * is large compared to the number of resident
5443 * pages in the backing object, it makes sense to
5444 * walk the backing_object's resident pages first.
5445 *
5446 * NOTE: Pages may be in both the existence map and/or
5447 * resident, so if we don't find a dependency while
5448 * walking the backing object's resident page list
5449 * directly, and there is an existence map, we'll have
5450 * to run the offset based 2nd pass. Because we may
5451 * have to run both passes, we need to be careful
5452 * not to decrement 'rcount' in the 1st pass
5453 */
5454 if (backing_rcount && backing_rcount < (object_vcount / 8)) {
5455 vm_object_size_t rc = object_rcount;
5456 vm_page_t p;
5457
5458 backing_rcount = backing_object->resident_page_count;
5459 p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
5460 do {
5461 offset = (p->vmp_offset - backing_offset);
5462
5463 if (offset < object->vo_size &&
5464 offset != hint_offset &&
5465 !EXISTS_IN_OBJECT(object, offset, rc)) {
5466 /* found a dependency */
5467 object->cow_hint = (vm_offset_t) offset; /* atomic */
5468
5469 break;
5470 }
5471 p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5472 } while (--backing_rcount);
5473 if (backing_rcount != 0) {
5474 /* try and collapse the rest of the shadow chain */
5475 if (object != original_object) {
5476 vm_object_unlock(object);
5477 }
5478 object = backing_object;
5479 object_lock_type = backing_object_lock_type;
5480 continue;
5481 }
5482 }
5483
5484 /*
5485 * Walk through the offsets looking for pages in the
5486 * backing object that show through to the object.
5487 */
5488 if (backing_rcount) {
5489 offset = hint_offset;
5490
5491 while ((offset =
5492 (offset + PAGE_SIZE_64 < object->vo_size) ?
5493 (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5494 if (EXISTS_IN_OBJECT(backing_object, offset +
5495 backing_offset, backing_rcount) &&
5496 !EXISTS_IN_OBJECT(object, offset, object_rcount)) {
5497 /* found a dependency */
5498 object->cow_hint = (vm_offset_t) offset; /* atomic */
5499 break;
5500 }
5501 }
5502 if (offset != hint_offset) {
5503 /* try and collapse the rest of the shadow chain */
5504 if (object != original_object) {
5505 vm_object_unlock(object);
5506 }
5507 object = backing_object;
5508 object_lock_type = backing_object_lock_type;
5509 continue;
5510 }
5511 }
5512 }
5513
5514 /*
5515 * We need "exclusive" locks on the 2 VM objects.
5516 */
5517 if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5518 vm_object_unlock(backing_object);
5519 if (object != original_object) {
5520 vm_object_unlock(object);
5521 }
5522 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5523 backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5524 goto retry;
5525 }
5526
5527 /* reset the offset hint for any objects deeper in the chain */
5528 object->cow_hint = (vm_offset_t)0;
5529
5530 /*
5531 * All interesting pages in the backing object
5532 * already live in the parent or its pager.
5533 * Thus we can bypass the backing object.
5534 */
5535
5536 vm_object_do_bypass(object, backing_object);
5537 vm_object_collapse_do_bypass++;
5538
5539 /*
5540 * Try again with this object's new backing object.
5541 */
5542
5543 continue;
5544 }
5545
5546 /* NOT REACHED */
5547 /*
5548 * if (object != original_object) {
5549 * vm_object_unlock(object);
5550 * }
5551 */
5552 }
5553
5554 /*
5555 * Routine: vm_object_page_remove: [internal]
5556 * Purpose:
5557 * Removes all physical pages in the specified
5558 * object range from the object's list of pages.
5559 *
5560 * In/out conditions:
5561 * The object must be locked.
5562 * The object must not have paging_in_progress, usually
5563 * guaranteed by not having a pager.
5564 */
5565 unsigned int vm_object_page_remove_lookup = 0;
5566 unsigned int vm_object_page_remove_iterate = 0;
5567
5568 __private_extern__ void
5569 vm_object_page_remove(
5570 vm_object_t object,
5571 vm_object_offset_t start,
5572 vm_object_offset_t end)
5573 {
5574 vm_page_t p, next;
5575
5576 /*
5577 * One and two page removals are most popular.
5578 * The factor of 16 here is somewhat arbitrary.
* It balances vm_page_lookup vs. iteration.
5580 */
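/*
 * Illustrative numbers: with 4096 resident pages, a removal covering
 * fewer than 256 pages takes the per-offset vm_page_lookup() path below,
 * while anything larger walks the object's resident page queue once.
 */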
5581
5582 if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5583 vm_object_page_remove_lookup++;
5584
5585 for (; start < end; start += PAGE_SIZE_64) {
5586 p = vm_page_lookup(object, start);
5587 if (p != VM_PAGE_NULL) {
5588 assert(!p->vmp_cleaning && !p->vmp_laundry);
5589 if (!vm_page_is_fictitious(p) && p->vmp_pmapped) {
5590 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5591 }
5592 VM_PAGE_FREE(p);
5593 }
5594 }
5595 } else {
5596 vm_object_page_remove_iterate++;
5597
5598 p = (vm_page_t) vm_page_queue_first(&object->memq);
5599 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5600 next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5601 if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5602 assert(!p->vmp_cleaning && !p->vmp_laundry);
5603 if (!vm_page_is_fictitious(p) && p->vmp_pmapped) {
5604 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5605 }
5606 VM_PAGE_FREE(p);
5607 }
5608 p = next;
5609 }
5610 }
5611 }
5612
5613
5614 /*
5615 * Routine: vm_object_coalesce
5616 * Function: Coalesces two objects backing up adjoining
5617 * regions of memory into a single object.
5618 *
5619 * returns TRUE if objects were combined.
5620 *
5621 * NOTE: Only works at the moment if the second object is NULL -
5622 * if it's not, which object do we lock first?
5623 *
5624 * Parameters:
5625 * prev_object First object to coalesce
5626 * prev_offset Offset into prev_object
* next_object Second object to coalesce
5628 * next_offset Offset into next_object
5629 *
5630 * prev_size Size of reference to prev_object
5631 * next_size Size of reference to next_object
5632 *
5633 * Conditions:
5634 * The object(s) must *not* be locked. The map must be locked
5635 * to preserve the reference to the object(s).
5636 */
5637 static int vm_object_coalesce_count = 0;
5638
5639 __private_extern__ boolean_t
5640 vm_object_coalesce(
5641 vm_object_t prev_object,
5642 vm_object_t next_object,
5643 vm_object_offset_t prev_offset,
5644 __unused vm_object_offset_t next_offset,
5645 vm_object_size_t prev_size,
5646 vm_object_size_t next_size)
5647 {
5648 vm_object_size_t newsize;
5649
5650 #ifdef lint
5651 next_offset++;
5652 #endif /* lint */
5653
5654 if (next_object != VM_OBJECT_NULL) {
5655 return FALSE;
5656 }
5657
5658 if (prev_object == VM_OBJECT_NULL) {
5659 return TRUE;
5660 }
5661
5662 vm_object_lock(prev_object);
5663
5664 /*
5665 * Try to collapse the object first
5666 */
5667 vm_object_collapse(prev_object, prev_offset, TRUE);
5668
5669 /*
* Can't coalesce if pages not mapped to
* prev_entry may be in use in any way:
5672 * . more than one reference
5673 * . paged out
5674 * . shadows another object
5675 * . has a copy elsewhere
5676 * . is purgeable
5677 * . paging references (pages might be in page-list)
5678 */
5679
5680 if ((os_ref_get_count_raw(&prev_object->ref_count) > 1) ||
5681 prev_object->pager_created ||
5682 prev_object->phys_contiguous ||
5683 (prev_object->shadow != VM_OBJECT_NULL) ||
5684 (prev_object->vo_copy != VM_OBJECT_NULL) ||
5685 (prev_object->true_share != FALSE) ||
5686 (prev_object->purgable != VM_PURGABLE_DENY) ||
5687 (prev_object->paging_in_progress != 0) ||
5688 (prev_object->activity_in_progress != 0)) {
5689 vm_object_unlock(prev_object);
5690 return FALSE;
5691 }
5692 /* newsize = prev_offset + prev_size + next_size; */
5693 if (__improbable(os_add3_overflow(prev_offset, prev_size, next_size,
5694 &newsize))) {
5695 vm_object_unlock(prev_object);
5696 return FALSE;
5697 }
5698
5699 vm_object_coalesce_count++;
5700
5701 /*
5702 * Remove any pages that may still be in the object from
5703 * a previous deallocation.
5704 */
5705 vm_object_page_remove(prev_object,
5706 prev_offset + prev_size,
5707 prev_offset + prev_size + next_size);
5708
5709 /*
5710 * Extend the object if necessary.
5711 */
5712 if (newsize > prev_object->vo_size) {
5713 assertf(page_aligned(newsize),
5714 "object %p size 0x%llx",
5715 prev_object, (uint64_t)newsize);
5716 prev_object->vo_size = newsize;
5717 }
5718
5719 vm_object_unlock(prev_object);
5720 return TRUE;
5721 }
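/*
 * Illustrative sketch (hypothetical caller, not from this file): a caller
 * extending an existing anonymous mapping by "grow_size" bytes would try to
 * reuse the entry's existing object rather than allocate a new one. Only
 * vm_object_coalesce() above is the real interface; the other names are
 * placeholders.
 *
 *	if (vm_object_coalesce(entry_object,            // prev_object
 *	                       VM_OBJECT_NULL,          // next_object must be NULL
 *	                       entry_offset,            // prev_offset
 *	                       (vm_object_offset_t)0,   // next_offset (unused)
 *	                       entry_size,              // prev_size
 *	                       grow_size)) {            // next_size
 *		// success: grow the map entry in place over the same object
 *	} else {
 *		// couldn't combine: fall back to a separate object
 *	}
 */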
5722
5723 kern_return_t
5724 vm_object_populate_with_private(
5725 vm_object_t object,
5726 vm_object_offset_t offset,
5727 ppnum_t phys_page,
5728 vm_size_t size)
5729 {
5730 ppnum_t base_page;
5731 vm_object_offset_t base_offset;
5732
5733
5734 if (!object->private) {
5735 return KERN_FAILURE;
5736 }
5737
5738 base_page = phys_page;
5739
5740 vm_object_lock(object);
5741
5742 if (!object->phys_contiguous) {
5743 vm_page_t m;
5744
5745 if ((base_offset = trunc_page_64(offset)) != offset) {
5746 vm_object_unlock(object);
5747 return KERN_FAILURE;
5748 }
5749 base_offset += object->paging_offset;
5750
5751 while (size) {
5752 m = vm_page_lookup(object, base_offset);
5753
5754 if (m != VM_PAGE_NULL) {
5755 ppnum_t m_phys_page = VM_PAGE_GET_PHYS_PAGE(m);
5756
5757 if (m_phys_page == vm_page_guard_addr) {
5758 /* nothing to do */
5759 } else if (m_phys_page == vm_page_fictitious_addr) {
5760 vm_page_lockspin_queues();
5761 vm_page_make_private(m, base_page);
5762 vm_page_unlock_queues();
5763 } else if (m_phys_page != base_page) {
5764 if (!vm_page_is_private(m)) {
5765 /*
5766 * we'd leak a real page... that can't be right
5767 */
5768 panic("vm_object_populate_with_private - %p not private", m);
5769 }
5770 if (m->vmp_pmapped) {
5771 /*
5772 * pmap call to clear old mapping
5773 */
5774 pmap_disconnect(m_phys_page);
5775 }
5776 VM_PAGE_SET_PHYS_PAGE(m, base_page);
5777 }
5778 } else {
5779 m = vm_page_create_private(base_page);
5780
5781 m->vmp_unusual = TRUE;
5782 m->vmp_busy = FALSE;
5783
5784 vm_page_insert(m, object, base_offset);
5785 }
5786 base_page++; /* Go to the next physical page */
5787 base_offset += PAGE_SIZE;
5788 size -= PAGE_SIZE;
5789 }
5790 } else {
5791 /* NOTE: we should check the original settings here */
5792 /* if we have a size > zero a pmap call should be made */
5793 /* to disable the range */
5794
5795 /* pmap_? */
5796
5797 /* shadows on contiguous memory are not allowed */
5798 /* we therefore can use the offset field */
5799 object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5800 assertf(page_aligned(size),
5801 "object %p size 0x%llx",
5802 object, (uint64_t)size);
5803 object->vo_size = size;
5804 }
5805 vm_object_unlock(object);
5806
5807 return KERN_SUCCESS;
5808 }
5809
5810
5811 kern_return_t
5812 memory_object_create_named(
5813 memory_object_t pager,
5814 memory_object_offset_t size,
5815 memory_object_control_t *control)
5816 {
5817 vm_object_t object;
5818
5819 *control = MEMORY_OBJECT_CONTROL_NULL;
5820 if (pager == MEMORY_OBJECT_NULL) {
5821 return KERN_INVALID_ARGUMENT;
5822 }
5823
5824 object = vm_object_memory_object_associate(pager,
5825 VM_OBJECT_NULL,
5826 size,
5827 TRUE);
5828 if (object == VM_OBJECT_NULL) {
5829 return KERN_INVALID_OBJECT;
5830 }
5831
5832 /* wait for object (if any) to be ready */
5833 if (object != VM_OBJECT_NULL) {
5834 vm_object_lock(object);
5835 VM_OBJECT_SET_NAMED(object, TRUE);
5836 while (!object->pager_ready) {
5837 vm_object_sleep(object,
5838 VM_OBJECT_EVENT_PAGER_READY,
5839 THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
5840 }
5841 *control = object->pager_control;
5842 vm_object_unlock(object);
5843 }
5844 return KERN_SUCCESS;
5845 }
5846
5847
5848 __private_extern__ kern_return_t
5849 vm_object_lock_request(
5850 vm_object_t object,
5851 vm_object_offset_t offset,
5852 vm_object_size_t size,
5853 memory_object_return_t should_return,
5854 int flags,
5855 vm_prot_t prot)
5856 {
5857 __unused boolean_t should_flush;
5858
5859 should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5860
5861 /*
5862 * Check for bogus arguments.
5863 */
5864 if (object == VM_OBJECT_NULL) {
5865 return KERN_INVALID_ARGUMENT;
5866 }
5867
5868 if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5869 return KERN_INVALID_ARGUMENT;
5870 }
5871
5872 /*
5873 * XXX TODO4K
5874 * extend range for conservative operations (copy-on-write, sync, ...)
5875 * truncate range for destructive operations (purge, ...)
5876 */
5877 size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5878 offset = vm_object_trunc_page(offset);
5879
5880 /*
5881 * Lock the object, and acquire a paging reference to
5882 * prevent the memory_object reference from being released.
5883 */
5884 vm_object_lock(object);
5885 vm_object_paging_begin(object);
5886
5887 (void)vm_object_update(object,
5888 offset, size, NULL, NULL, should_return, flags, prot);
5889
5890 vm_object_paging_end(object);
5891 vm_object_unlock(object);
5892
5893 return KERN_SUCCESS;
5894 }
5895
5896 /*
5897 * Empty a purgeable object by grabbing the physical pages assigned to it and
5898 * putting them on the free queue without writing them to backing store, etc.
5899 * When the pages are next touched they will be demand zero-fill pages. We
5900 * skip pages which are busy, being paged in/out, wired, etc. We do _not_
5901 * skip referenced/dirty pages, pages on the active queue, etc. We're more
5902 * than happy to grab these since this is a purgeable object. We mark the
5903 * object as "empty" after reaping its pages.
5904 *
5905 * On entry the object must be locked and it must be
5906 * purgeable with no delayed copies pending.
5907 */
5908 uint64_t
5909 vm_object_purge(vm_object_t object, int flags)
5910 {
5911 unsigned int object_page_count = 0, pgcount = 0;
5912 uint64_t total_purged_pgcount = 0;
5913 boolean_t skipped_object = FALSE;
5914
5915 vm_object_lock_assert_exclusive(object);
5916
5917 if (object->purgable == VM_PURGABLE_DENY) {
5918 return 0;
5919 }
5920
5921 assert(object->vo_copy == VM_OBJECT_NULL);
5922 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5923
5924 /*
5925 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5926 * reaping its pages. We update vm_page_purgeable_count in bulk
5927 * and we don't want vm_page_remove() to update it again for each
5928 * page we reap later.
5929 *
5930 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5931 * are all accounted for in the "volatile" ledgers, so this does not
5932 * make any difference.
5933 * If we transitioned directly from NONVOLATILE to EMPTY,
5934 * vm_page_purgeable_count must have been updated when the object
5935 * was dequeued from its volatile queue and the purgeable ledgers
5936 * must have also been updated accordingly at that time (in
5937 * vm_object_purgable_control()).
5938 */
5939 if (object->purgable == VM_PURGABLE_VOLATILE) {
5940 unsigned int delta;
5941 assert(object->resident_page_count >=
5942 object->wired_page_count);
5943 delta = (object->resident_page_count -
5944 object->wired_page_count);
5945 if (delta != 0) {
5946 assert(vm_page_purgeable_count >=
5947 delta);
5948 OSAddAtomic(-delta,
5949 (SInt32 *)&vm_page_purgeable_count);
5950 }
5951 if (object->wired_page_count != 0) {
5952 assert(vm_page_purgeable_wired_count >=
5953 object->wired_page_count);
5954 OSAddAtomic(-object->wired_page_count,
5955 (SInt32 *)&vm_page_purgeable_wired_count);
5956 }
5957 VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
5958 }
5959 assert(object->purgable == VM_PURGABLE_EMPTY);
5960
5961 object_page_count = object->resident_page_count;
5962
5963 vm_object_reap_pages(object, REAP_PURGEABLE);
5964
5965 if (object->resident_page_count >= object_page_count) {
5966 total_purged_pgcount = 0;
5967 } else {
5968 total_purged_pgcount = object_page_count - object->resident_page_count;
5969 }
5970
5971 if (object->pager != NULL) {
5972 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5973
5974 if (object->activity_in_progress == 0 &&
5975 object->paging_in_progress == 0) {
5976 /*
5977 * Also reap any memory coming from this object
5978 * in the VM compressor.
5979 *
5980 * There are no operations in progress on the VM object
5981 * and no operation can start while we're holding the
5982 * VM object lock, so it's safe to reap the compressed
5983 * pages and update the page counts.
5984 */
5985 pgcount = vm_compressor_pager_get_count(object->pager);
5986 if (pgcount) {
5987 pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
5988 vm_compressor_pager_count(object->pager,
5989 -pgcount,
5990 FALSE, /* shared */
5991 object);
5992 vm_object_owner_compressed_update(object,
5993 -pgcount);
5994 }
5995 if (!(flags & C_DONT_BLOCK)) {
5996 assert(vm_compressor_pager_get_count(object->pager)
5997 == 0);
5998 }
5999 } else {
6000 /*
6001 * There's some kind of paging activity in progress
6002 * for this object, which could result in a page
6003 * being compressed or decompressed, possibly while
6004 * the VM object is not locked, so it could race
6005 * with us.
6006 *
6007 * We can't really synchronize this without possibly
6008 * causing a deadlock when the compressor needs to
6009 * allocate or free memory while compressing or
6010 * decompressing a page from a purgeable object
6011 * mapped in the kernel_map...
6012 *
6013 * So let's not attempt to purge the compressor
6014 * pager if there's any kind of operation in
6015 * progress on the VM object.
6016 */
6017 skipped_object = TRUE;
6018 }
6019 }
6020
6021 vm_object_lock_assert_exclusive(object);
6022
6023 total_purged_pgcount += pgcount;
6024
6025 KDBG_RELEASE(VMDBG_CODE(DBG_VM_PURGEABLE_OBJECT_PURGE_ONE) | DBG_FUNC_NONE,
6026 VM_KERNEL_UNSLIDE_OR_PERM(object), /* purged object */
6027 object_page_count,
6028 total_purged_pgcount,
6029 skipped_object);
6030
6031 return total_purged_pgcount;
6032 }
6033
6034
6035 /*
6036 * vm_object_purgeable_control() allows the caller to control and investigate the
6037 * state of a purgeable object. A purgeable object is created via a call to
6038 * vm_allocate() with VM_FLAGS_PURGABLE specified. A purgeable object will
6039 * never be coalesced with any other object -- even other purgeable objects --
6040 * and will thus always remain a distinct object. A purgeable object has
6041 * special semantics when its reference count is exactly 1. If its reference
6042 * count is greater than 1, then a purgeable object will behave like a normal
6043 * object and attempts to use this interface will result in an error return
6044 * of KERN_INVALID_ARGUMENT.
6045 *
6046 * A purgeable object may be put into a "volatile" state which will make the
* object's pages eligible for being reclaimed without paging to backing
6048 * store if the system runs low on memory. If the pages in a volatile
6049 * purgeable object are reclaimed, the purgeable object is said to have been
6050 * "emptied." When a purgeable object is emptied the system will reclaim as
6051 * many pages from the object as it can in a convenient manner (pages already
6052 * en route to backing store or busy for other reasons are left as is). When
6053 * a purgeable object is made volatile, its pages will generally be reclaimed
6054 * before other pages in the application's working set. This semantic is
6055 * generally used by applications which can recreate the data in the object
6056 * faster than it can be paged in. One such example might be media assets
6057 * which can be reread from a much faster RAID volume.
6058 *
6059 * A purgeable object may be designated as "non-volatile" which means it will
6060 * behave like all other objects in the system with pages being written to and
6061 * read from backing store as needed to satisfy system memory needs. If the
6062 * object was emptied before the object was made non-volatile, that fact will
6063 * be returned as the old state of the purgeable object (see
6064 * VM_PURGABLE_SET_STATE below). In this case, any pages of the object which
6065 * were reclaimed as part of emptying the object will be refaulted in as
6066 * zero-fill on demand. It is up to the application to note that an object
* was emptied and recreate the object's contents if necessary. When a
6068 * purgeable object is made non-volatile, its pages will generally not be paged
6069 * out to backing store in the immediate future. A purgeable object may also
6070 * be manually emptied.
6071 *
6072 * Finally, the current state (non-volatile, volatile, volatile & empty) of a
6073 * volatile purgeable object may be queried at any time. This information may
6074 * be used as a control input to let the application know when the system is
6075 * experiencing memory pressure and is reclaiming memory.
6076 *
6077 * The specified address may be any address within the purgeable object. If
6078 * the specified address does not represent any object in the target task's
6079 * virtual address space, then KERN_INVALID_ADDRESS will be returned. If the
6080 * object containing the specified address is not a purgeable object, then
6081 * KERN_INVALID_ARGUMENT will be returned. Otherwise, KERN_SUCCESS will be
6082 * returned.
6083 *
6084 * The control parameter may be any one of VM_PURGABLE_SET_STATE or
6085 * VM_PURGABLE_GET_STATE. For VM_PURGABLE_SET_STATE, the in/out parameter
6086 * state is used to set the new state of the purgeable object and return its
6087 * old state. For VM_PURGABLE_GET_STATE, the current state of the purgeable
6088 * object is returned in the parameter state.
6089 *
6090 * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
6091 * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY. These, respectively, represent
6092 * the non-volatile, volatile and volatile/empty states described above.
6093 * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
6094 * immediately reclaim as many pages in the object as can be conveniently
6095 * collected (some may have already been written to backing store or be
6096 * otherwise busy).
6097 *
6098 * The process of making a purgeable object non-volatile and determining its
6099 * previous state is atomic. Thus, if a purgeable object is made
6100 * VM_PURGABLE_NONVOLATILE and the old state is returned as
6101 * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
6102 * completely intact and will remain so until the object is made volatile
6103 * again. If the old state is returned as VM_PURGABLE_EMPTY then the object
6104 * was reclaimed while it was in a volatile state and its previous contents
6105 * have been lost.
6106 */
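/*
 * A minimal user-level sketch of the interface described above (error
 * handling omitted; the user-space call ultimately reaches
 * vm_object_purgable_control() below):
 *
 *	vm_address_t addr = 0;
 *	int state;
 *
 *	// create a purgeable region
 *	vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *	// ... fill the region with recreatable data ...
 *
 *	// allow the system to reclaim the pages under memory pressure
 *	state = VM_PURGABLE_VOLATILE;
 *	vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);
 *
 *	// before reusing the data, make it non-volatile and check whether
 *	// the contents survived
 *	state = VM_PURGABLE_NONVOLATILE;
 *	vm_purgable_control(mach_task_self(), addr, VM_PURGABLE_SET_STATE, &state);
 *	if (state == VM_PURGABLE_EMPTY) {
 *		// contents were reclaimed: regenerate them
 *	}
 */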
6107 /*
6108 * The object must be locked.
6109 */
6110 kern_return_t
6111 vm_object_purgable_control(
6112 vm_object_t object,
6113 vm_purgable_t control,
6114 int *state)
6115 {
6116 int old_state;
6117 int new_state;
6118
6119 if (object == VM_OBJECT_NULL) {
6120 /*
6121 * Object must already be present or it can't be purgeable.
6122 */
6123 return KERN_INVALID_ARGUMENT;
6124 }
6125
6126 vm_object_lock_assert_exclusive(object);
6127
6128 /*
6129 * Get current state of the purgeable object.
6130 */
6131 old_state = object->purgable;
6132 if (old_state == VM_PURGABLE_DENY) {
6133 return KERN_INVALID_ARGUMENT;
6134 }
6135
/* purgeable objects can't have delayed copies - now or in the future */
6137 assert(object->vo_copy == VM_OBJECT_NULL);
6138 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6139
6140 /*
6141 * Execute the desired operation.
6142 */
6143 if (control == VM_PURGABLE_GET_STATE) {
6144 *state = old_state;
6145 return KERN_SUCCESS;
6146 }
6147
6148 if (control == VM_PURGABLE_SET_STATE &&
6149 object->purgeable_only_by_kernel) {
6150 return KERN_PROTECTION_FAILURE;
6151 }
6152
6153 if (control != VM_PURGABLE_SET_STATE &&
6154 control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
6155 return KERN_INVALID_ARGUMENT;
6156 }
6157
6158 if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
6159 object->volatile_empty = TRUE;
6160 }
6161 if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
6162 object->volatile_fault = TRUE;
6163 }
6164
6165 new_state = *state & VM_PURGABLE_STATE_MASK;
6166 if (new_state == VM_PURGABLE_VOLATILE) {
6167 if (old_state == VM_PURGABLE_EMPTY) {
6168 /* what's been emptied must stay empty */
6169 new_state = VM_PURGABLE_EMPTY;
6170 }
6171 if (object->volatile_empty) {
6172 /* debugging mode: go straight to empty */
6173 new_state = VM_PURGABLE_EMPTY;
6174 }
6175 }
6176
6177 switch (new_state) {
6178 case VM_PURGABLE_DENY:
6179 /*
6180 * Attempting to convert purgeable memory to non-purgeable:
6181 * not allowed.
6182 */
6183 return KERN_INVALID_ARGUMENT;
6184 case VM_PURGABLE_NONVOLATILE:
6185 VM_OBJECT_SET_PURGABLE(object, new_state);
6186
6187 if (old_state == VM_PURGABLE_VOLATILE) {
6188 unsigned int delta;
6189
6190 assert(object->resident_page_count >=
6191 object->wired_page_count);
6192 delta = (object->resident_page_count -
6193 object->wired_page_count);
6194
6195 assert(vm_page_purgeable_count >= delta);
6196
6197 if (delta != 0) {
6198 OSAddAtomic(-delta,
6199 (SInt32 *)&vm_page_purgeable_count);
6200 }
6201 if (object->wired_page_count != 0) {
6202 assert(vm_page_purgeable_wired_count >=
6203 object->wired_page_count);
6204 OSAddAtomic(-object->wired_page_count,
6205 (SInt32 *)&vm_page_purgeable_wired_count);
6206 }
6207
6208 vm_page_lock_queues();
6209
6210 /* object should be on a queue */
6211 assert(object->objq.next != NULL &&
6212 object->objq.prev != NULL);
6213 purgeable_q_t queue;
6214
6215 /*
6216 * Move object from its volatile queue to the
6217 * non-volatile queue...
6218 */
6219 queue = vm_purgeable_object_remove(object);
6220 assert(queue);
6221
6222 if (object->purgeable_when_ripe) {
6223 vm_purgeable_token_delete_last(queue);
6224 }
6225 assert(queue->debug_count_objects >= 0);
6226
6227 vm_page_unlock_queues();
6228 }
6229 if (old_state == VM_PURGABLE_VOLATILE ||
6230 old_state == VM_PURGABLE_EMPTY) {
6231 /*
6232 * Transfer the object's pages from the volatile to
6233 * non-volatile ledgers.
6234 */
6235 vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
6236 }
6237
6238 break;
6239
6240 case VM_PURGABLE_VOLATILE:
6241 if (object->volatile_fault) {
6242 vm_page_t p;
6243 int refmod;
6244
6245 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6246 if (p->vmp_busy ||
6247 VM_PAGE_WIRED(p) ||
6248 vm_page_is_fictitious(p)) {
6249 continue;
6250 }
6251 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6252 if ((refmod & VM_MEM_MODIFIED) &&
6253 !p->vmp_dirty) {
6254 SET_PAGE_DIRTY(p, FALSE);
6255 }
6256 }
6257 }
6258
6259 assert(old_state != VM_PURGABLE_EMPTY);
6260
6261 purgeable_q_t queue;
6262
6263 /* find the correct queue */
6264 if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
6265 queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
6266 } else {
6267 if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
6268 queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6269 } else {
6270 queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
6271 }
6272 }
6273
6274 if (old_state == VM_PURGABLE_NONVOLATILE ||
6275 old_state == VM_PURGABLE_EMPTY) {
6276 unsigned int delta;
6277
6278 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6279 VM_PURGABLE_NO_AGING) {
6280 VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, FALSE);
6281 } else {
6282 VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, TRUE);
6283 }
6284
6285 if (object->purgeable_when_ripe) {
6286 kern_return_t result;
6287
6288 /* try to add token... this can fail */
6289 vm_page_lock_queues();
6290
6291 result = vm_purgeable_token_add(queue);
6292 if (result != KERN_SUCCESS) {
6293 vm_page_unlock_queues();
6294 return result;
6295 }
6296 vm_page_unlock_queues();
6297 }
6298
6299 assert(object->resident_page_count >=
6300 object->wired_page_count);
6301 delta = (object->resident_page_count -
6302 object->wired_page_count);
6303
6304 if (delta != 0) {
6305 OSAddAtomic(delta,
6306 &vm_page_purgeable_count);
6307 }
6308 if (object->wired_page_count != 0) {
6309 OSAddAtomic(object->wired_page_count,
6310 &vm_page_purgeable_wired_count);
6311 }
6312
6313 VM_OBJECT_SET_PURGABLE(object, new_state);
6314
6315 /* object should be on "non-volatile" queue */
6316 assert(object->objq.next != NULL);
6317 assert(object->objq.prev != NULL);
6318 } else if (old_state == VM_PURGABLE_VOLATILE) {
6319 purgeable_q_t old_queue;
6320 boolean_t purgeable_when_ripe;
6321
6322 /*
6323 * if reassigning priorities / purgeable groups, we don't change the
6324 * token queue. So moving priorities will not make pages stay around longer.
* The reasoning is that the algorithm gives most priority to the most important
* object. If a new token is added, the most important object's priority is boosted.
* This already biases the system in favor of purgeable queues that move a lot.
* More biasing doesn't seem necessary in this case, where no new object is added.
6329 */
6330 assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6331
6332 old_queue = vm_purgeable_object_remove(object);
6333 assert(old_queue);
6334
6335 if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6336 VM_PURGABLE_NO_AGING) {
6337 purgeable_when_ripe = FALSE;
6338 } else {
6339 purgeable_when_ripe = TRUE;
6340 }
6341
6342 if (old_queue != queue ||
6343 (purgeable_when_ripe !=
6344 object->purgeable_when_ripe)) {
6345 kern_return_t result;
6346
6347 /* Changing queue. Have to move token. */
6348 vm_page_lock_queues();
6349 if (object->purgeable_when_ripe) {
6350 vm_purgeable_token_delete_last(old_queue);
6351 }
6352 VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, purgeable_when_ripe);
6353 if (object->purgeable_when_ripe) {
6354 result = vm_purgeable_token_add(queue);
6355 assert(result == KERN_SUCCESS); /* this should never fail since we just freed a token */
6356 }
6357 vm_page_unlock_queues();
6358 }
6359 }
6361 vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT );
6362 if (old_state == VM_PURGABLE_NONVOLATILE) {
6363 vm_purgeable_accounting(object,
6364 VM_PURGABLE_NONVOLATILE);
6365 }
6366
6367 assert(queue->debug_count_objects >= 0);
6368
6369 break;
6370
6371
6372 case VM_PURGABLE_EMPTY:
6373 if (object->volatile_fault) {
6374 vm_page_t p;
6375 int refmod;
6376
6377 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6378 if (p->vmp_busy ||
6379 VM_PAGE_WIRED(p) ||
6380 vm_page_is_fictitious(p)) {
6381 continue;
6382 }
6383 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6384 if ((refmod & VM_MEM_MODIFIED) &&
6385 !p->vmp_dirty) {
6386 SET_PAGE_DIRTY(p, FALSE);
6387 }
6388 }
6389 }
6390
6391 if (old_state == VM_PURGABLE_VOLATILE) {
6392 purgeable_q_t old_queue;
6393
6394 /* object should be on a queue */
6395 assert(object->objq.next != NULL &&
6396 object->objq.prev != NULL);
6397
6398 old_queue = vm_purgeable_object_remove(object);
6399 assert(old_queue);
6400 if (object->purgeable_when_ripe) {
6401 vm_page_lock_queues();
6402 vm_purgeable_token_delete_first(old_queue);
6403 vm_page_unlock_queues();
6404 }
6405 }
6406
6407 if (old_state == VM_PURGABLE_NONVOLATILE) {
6408 /*
6409 * This object's pages were previously accounted as
6410 * "non-volatile" and now need to be accounted as
6411 * "volatile".
6412 */
6413 vm_purgeable_accounting(object,
6414 VM_PURGABLE_NONVOLATILE);
6415 /*
6416 * Set to VM_PURGABLE_EMPTY because the pages are no
6417 * longer accounted in the "non-volatile" ledger
6418 * and are also not accounted for in
6419 * "vm_page_purgeable_count".
6420 */
6421 VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
6422 }
6423
6424 (void) vm_object_purge(object, 0);
6425 assert(object->purgable == VM_PURGABLE_EMPTY);
6426
6427 break;
6428 }
6429
6430 *state = old_state;
6431
6432 vm_object_lock_assert_exclusive(object);
6433
6434 return KERN_SUCCESS;
6435 }
6436
6437 kern_return_t
6438 vm_object_get_page_counts(
6439 vm_object_t object,
6440 vm_object_offset_t offset,
6441 vm_object_size_t size,
6442 uint64_t *resident_page_count,
6443 uint64_t *dirty_page_count,
6444 uint64_t *swapped_page_count)
6445 {
6446 vm_page_t p = VM_PAGE_NULL;
6447 unsigned int local_resident_count = 0;
6448 unsigned int local_dirty_count = 0;
6449 unsigned int local_swapped_count = 0;
6450 vm_object_offset_t cur_offset = 0;
6451 vm_object_offset_t end_offset = 0;
6452
6453 if (object == VM_OBJECT_NULL) {
6454 return KERN_INVALID_ARGUMENT;
6455 }
6456
6457 cur_offset = offset;
6458 end_offset = offset + size;
6459
6460 vm_object_lock_assert_exclusive(object);
6461
6462 if (resident_page_count != NULL &&
6463 dirty_page_count == NULL &&
6464 offset == 0 &&
6465 object->vo_size == size) {
6466 /*
6467 * Fast path when:
6468 * - we only want the resident page count, and,
6469 * - the entire object is exactly covered by the request.
6470 */
6471 local_resident_count = object->resident_page_count;
6472 if (object->internal && object->pager != NULL) {
6473 local_swapped_count = vm_compressor_pager_get_count(object->pager);
6474 }
6475 goto out;
6476 }
6477
6478 if (object->resident_page_count <= (size >> PAGE_SHIFT) &&
6479 swapped_page_count == NULL) {
6480 /*
6481 * Faster path when we don't care about non-resident pages and the object has
6482 * fewer resident pages than the requested range.
6483 */
6484 vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6485 if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
6486 local_resident_count++;
6487 if (p->vmp_dirty ||
6488 (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6489 local_dirty_count++;
6490 }
6491 }
6492 }
6493 goto out;
6494 }
6495
6496 for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6497 p = vm_page_lookup(object, cur_offset);
6498
6499 if (p != VM_PAGE_NULL) {
6500 local_resident_count++;
6501 if (p->vmp_dirty ||
6502 (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6503 local_dirty_count++;
6504 }
6505 } else if (page_is_paged_out(object, cur_offset)) {
6506 local_swapped_count++;
6507 }
6508 }
6509
6510 out:
6511 if (resident_page_count != NULL) {
6512 *resident_page_count = local_resident_count;
6513 }
6514
6515 if (dirty_page_count != NULL) {
6516 *dirty_page_count = local_dirty_count;
6517 }
6518
6519 if (swapped_page_count != NULL) {
6520 *swapped_page_count = local_swapped_count;
6521 }
6522
6523 return KERN_SUCCESS;
6524 }
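/*
 * Minimal in-kernel usage sketch (hypothetical caller; the object must be
 * locked exclusively, per the assertion above):
 *
 *	uint64_t resident, dirty, swapped;
 *	kern_return_t kr;
 *
 *	vm_object_lock(object);
 *	kr = vm_object_get_page_counts(object, 0, object->vo_size,
 *	    &resident, &dirty, &swapped);
 *	vm_object_unlock(object);
 */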
6525
6526
6527 /*
6528 * vm_object_reference:
6529 *
6530 * Gets another reference to the given object.
6531 */
6532 #ifdef vm_object_reference
6533 #undef vm_object_reference
6534 #endif
6535 __private_extern__ void
6536 vm_object_reference(
6537 vm_object_t object)
6538 {
6539 if (object == VM_OBJECT_NULL) {
6540 return;
6541 }
6542
6543 vm_object_lock(object);
6544 vm_object_reference_locked(object);
6545 vm_object_unlock(object);
6546 }
6547
6548 /*
6549 * vm_object_transpose
6550 *
6551 * This routine takes two VM objects of the same size and exchanges
6552 * their backing store.
6553 * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6554 * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6555 *
6556 * The VM objects must not be locked by caller.
6557 */
6558 unsigned int vm_object_transpose_count = 0;
6559 kern_return_t
6560 vm_object_transpose(
6561 vm_object_t object1,
6562 vm_object_t object2,
6563 vm_object_size_t transpose_size)
6564 {
6565 vm_object_t tmp_object;
6566 kern_return_t retval;
6567 boolean_t object1_locked, object2_locked;
6568 vm_page_t page;
6569 vm_object_offset_t page_offset;
6570
6571 tmp_object = VM_OBJECT_NULL;
6572 object1_locked = FALSE; object2_locked = FALSE;
6573
6574 if (object1 == object2 ||
6575 object1 == VM_OBJECT_NULL ||
6576 object2 == VM_OBJECT_NULL) {
6577 /*
* If the 2 VM objects are the same, or if either is
* missing, there's no point in exchanging their
* backing store.
6580 */
6581 retval = KERN_INVALID_VALUE;
6582 goto done;
6583 }
6584
6585 /*
6586 * Since we need to lock both objects at the same time,
6587 * make sure we always lock them in the same order to
6588 * avoid deadlocks.
6589 */
6590 if (object1 > object2) {
6591 tmp_object = object1;
6592 object1 = object2;
6593 object2 = tmp_object;
6594 }
6595
6596 /*
6597 * Allocate a temporary VM object to hold object1's contents
6598 * while we copy object2 to object1.
6599 */
6600 tmp_object = vm_object_allocate(transpose_size, object1->vmo_provenance);
6601 vm_object_lock(tmp_object);
6602 VM_OBJECT_SET_CAN_PERSIST(tmp_object, FALSE);
6603
6604
6605 /*
6606 * Grab control of the 1st VM object.
6607 */
6608 vm_object_lock(object1);
6609 object1_locked = TRUE;
6610 if (!object1->alive || object1->terminating ||
6611 object1->vo_copy || object1->shadow || object1->shadowed ||
6612 object1->purgable != VM_PURGABLE_DENY) {
6613 /*
6614 * We don't deal with copy or shadow objects (yet).
6615 */
6616 retval = KERN_INVALID_VALUE;
6617 goto done;
6618 }
6619 /*
6620 * We're about to mess with the object's backing store and
6621 * taking a "paging_in_progress" reference wouldn't be enough
6622 * to prevent any paging activity on this object, so the caller should
6623 * have "quiesced" the objects beforehand, via a UPL operation with
6624 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6625 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6626 *
6627 * Wait for any paging operation to complete (but only paging, not
6628 * other kind of activities not linked to the pager). After we're
* satisfied that there's no more paging in progress, we keep the
6630 * object locked, to guarantee that no one tries to access its pager.
6631 */
6632 vm_object_paging_only_wait(object1, THREAD_UNINT);
6633
6634 /*
6635 * Same as above for the 2nd object...
6636 */
6637 vm_object_lock(object2);
6638 object2_locked = TRUE;
6639 if (!object2->alive || object2->terminating ||
6640 object2->vo_copy || object2->shadow || object2->shadowed ||
6641 object2->purgable != VM_PURGABLE_DENY) {
6642 retval = KERN_INVALID_VALUE;
6643 goto done;
6644 }
6645 vm_object_paging_only_wait(object2, THREAD_UNINT);
6646
6647
6648 if (object1->vo_size != object2->vo_size ||
6649 object1->vo_size != transpose_size) {
6650 /*
6651 * If the 2 objects don't have the same size, we can't
6652 * exchange their backing stores or one would overflow.
6653 * If their size doesn't match the caller's
6654 * "transpose_size", we can't do it either because the
6655 * transpose operation will affect the entire span of
6656 * the objects.
6657 */
6658 retval = KERN_INVALID_VALUE;
6659 goto done;
6660 }
6661
6662
6663 /*
6664 * Transpose the lists of resident pages.
6665 * This also updates the resident_page_count and the memq_hint.
6666 */
6667 if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6668 /*
6669 * No pages in object1, just transfer pages
6670 * from object2 to object1. No need to go through
6671 * an intermediate object.
6672 */
6673 while (!vm_page_queue_empty(&object2->memq)) {
6674 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6675 vm_page_rename(page, object1, page->vmp_offset);
6676 }
6677 assert(vm_page_queue_empty(&object2->memq));
6678 } else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6679 /*
6680 * No pages in object2, just transfer pages
6681 * from object1 to object2. No need to go through
6682 * an intermediate object.
6683 */
6684 while (!vm_page_queue_empty(&object1->memq)) {
6685 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6686 vm_page_rename(page, object2, page->vmp_offset);
6687 }
6688 assert(vm_page_queue_empty(&object1->memq));
6689 } else {
6690 /* transfer object1's pages to tmp_object */
6691 while (!vm_page_queue_empty(&object1->memq)) {
6692 page = (vm_page_t) vm_page_queue_first(&object1->memq);
6693 page_offset = page->vmp_offset;
6694 vm_page_remove(page, TRUE);
6695 page->vmp_offset = page_offset;
6696 vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6697 }
6698 assert(vm_page_queue_empty(&object1->memq));
6699 /* transfer object2's pages to object1 */
6700 while (!vm_page_queue_empty(&object2->memq)) {
6701 page = (vm_page_t) vm_page_queue_first(&object2->memq);
6702 vm_page_rename(page, object1, page->vmp_offset);
6703 }
6704 assert(vm_page_queue_empty(&object2->memq));
6705 /* transfer tmp_object's pages to object2 */
6706 while (!vm_page_queue_empty(&tmp_object->memq)) {
6707 page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6708 vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6709 vm_page_insert(page, object2, page->vmp_offset);
6710 }
6711 assert(vm_page_queue_empty(&tmp_object->memq));
6712 }
6713
6714 #define __TRANSPOSE_FIELD(field) \
6715 MACRO_BEGIN \
6716 tmp_object->field = object1->field; \
6717 object1->field = object2->field; \
6718 object2->field = tmp_object->field; \
6719 MACRO_END
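/*
 * Three-way swap through tmp_object: each field is staged in tmp_object so
 * that object1 and object2 can exchange values without clobbering either
 * one. The last value staged this way lingers in tmp_object, which is why
 * the cleanup path below re-initializes tmp_object before deallocating it,
 * so that a real pager reference isn't torn down by mistake.
 */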
6720
6721 /* "Lock" refers to the object not its contents */
6722 /* "size" should be identical */
6723 assert(object1->vo_size == object2->vo_size);
6724 /* "memq_hint" was updated above when transposing pages */
6725 /* "ref_count" refers to the object not its contents */
6726 assert(os_ref_get_count_raw(&object1->ref_count) >= 1);
6727 assert(os_ref_get_count_raw(&object2->ref_count) >= 1);
6728 /* "resident_page_count" was updated above when transposing pages */
6729 /* "wired_page_count" was updated above when transposing pages */
6730 #if !VM_TAG_ACTIVE_UPDATE
6731 /* "wired_objq" was dealt with along with "wired_page_count" */
6732 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6733 /* "reusable_page_count" was updated above when transposing pages */
6734 /* there should be no "copy" */
6735 assert(!object1->vo_copy);
6736 assert(!object2->vo_copy);
6737 /* there should be no "shadow" */
6738 assert(!object1->shadow);
6739 assert(!object2->shadow);
6740 __TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6741 __TRANSPOSE_FIELD(pager);
6742 __TRANSPOSE_FIELD(paging_offset);
6743 __TRANSPOSE_FIELD(pager_control);
6744 /* update the memory_objects' pointers back to the VM objects */
6745 if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6746 memory_object_control_collapse(&object1->pager_control,
6747 object1);
6748 }
6749 if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6750 memory_object_control_collapse(&object2->pager_control,
6751 object2);
6752 }
6753 __TRANSPOSE_FIELD(copy_strategy);
6754 /* "paging_in_progress" refers to the object not its contents */
6755 assert(!object1->paging_in_progress);
6756 assert(!object2->paging_in_progress);
6757 assert(object1->activity_in_progress);
6758 assert(object2->activity_in_progress);
6759 /* "all_wanted" refers to the object not its contents */
6760 __TRANSPOSE_FIELD(pager_created);
6761 __TRANSPOSE_FIELD(pager_initialized);
6762 __TRANSPOSE_FIELD(pager_ready);
6763 __TRANSPOSE_FIELD(pager_trusted);
6764 __TRANSPOSE_FIELD(can_persist);
6765 __TRANSPOSE_FIELD(internal);
6766 __TRANSPOSE_FIELD(private);
6767 __TRANSPOSE_FIELD(pageout);
6768 /* "alive" should be set */
6769 assert(object1->alive);
6770 assert(object2->alive);
6771 /* "purgeable" should be non-purgeable */
6772 assert(object1->purgable == VM_PURGABLE_DENY);
6773 assert(object2->purgable == VM_PURGABLE_DENY);
6774 /* "shadowed" refers to the the object not its contents */
6775 __TRANSPOSE_FIELD(purgeable_when_ripe);
6776 __TRANSPOSE_FIELD(true_share);
6777 /* "terminating" should not be set */
6778 assert(!object1->terminating);
6779 assert(!object2->terminating);
6780 /* transfer "named" reference if needed */
6781 if (object1->named && !object2->named) {
6782 os_ref_release_live_locked_raw(&object1->ref_count, &vm_object_refgrp);
6783 os_ref_retain_locked_raw(&object2->ref_count, &vm_object_refgrp);
6784 } else if (!object1->named && object2->named) {
6785 os_ref_retain_locked_raw(&object1->ref_count, &vm_object_refgrp);
6786 os_ref_release_live_locked_raw(&object2->ref_count, &vm_object_refgrp);
6787 }
6788 __TRANSPOSE_FIELD(named);
6789 /* "shadow_severed" refers to the object not its contents */
6790 __TRANSPOSE_FIELD(phys_contiguous);
6791 __TRANSPOSE_FIELD(nophyscache);
6792 __TRANSPOSE_FIELD(no_pager_reason);
6793 /* "cached_list.next" points to transposed object */
6794 object1->cached_list.next = (queue_entry_t) object2;
6795 object2->cached_list.next = (queue_entry_t) object1;
6796 /* "cached_list.prev" should be NULL */
6797 assert(object1->cached_list.prev == NULL);
6798 assert(object2->cached_list.prev == NULL);
6799 __TRANSPOSE_FIELD(last_alloc);
6800 __TRANSPOSE_FIELD(sequential);
6801 __TRANSPOSE_FIELD(pages_created);
6802 __TRANSPOSE_FIELD(pages_used);
6803 __TRANSPOSE_FIELD(scan_collisions);
6804 __TRANSPOSE_FIELD(cow_hint);
6805 __TRANSPOSE_FIELD(wimg_bits);
6806 __TRANSPOSE_FIELD(set_cache_attr);
6807 __TRANSPOSE_FIELD(code_signed);
6808 object1->transposed = TRUE;
6809 object2->transposed = TRUE;
6810 __TRANSPOSE_FIELD(mapping_in_progress);
6811 __TRANSPOSE_FIELD(volatile_empty);
6812 __TRANSPOSE_FIELD(volatile_fault);
6813 __TRANSPOSE_FIELD(all_reusable);
6814 assert(object1->blocked_access);
6815 assert(object2->blocked_access);
6816 __TRANSPOSE_FIELD(set_cache_attr);
6817 assert(!object1->object_is_shared_cache);
6818 assert(!object2->object_is_shared_cache);
6819 /* ignore purgeable_queue_type and purgeable_queue_group */
6820 assert(!object1->io_tracking);
6821 assert(!object2->io_tracking);
6822 #if VM_OBJECT_ACCESS_TRACKING
6823 assert(!object1->access_tracking);
6824 assert(!object2->access_tracking);
6825 #endif /* VM_OBJECT_ACCESS_TRACKING */
6826 __TRANSPOSE_FIELD(no_tag_update);
6827 #if CONFIG_SECLUDED_MEMORY
6828 assert(!object1->eligible_for_secluded);
6829 assert(!object2->eligible_for_secluded);
6830 assert(!object1->can_grab_secluded);
6831 assert(!object2->can_grab_secluded);
6832 #else /* CONFIG_SECLUDED_MEMORY */
6833 assert(object1->__object3_unused_bits == 0);
6834 assert(object2->__object3_unused_bits == 0);
6835 #endif /* CONFIG_SECLUDED_MEMORY */
6836 #if UPL_DEBUG
6837 /* "uplq" refers to the object not its contents (see upl_transpose()) */
6838 #endif
6839 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6840 assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6841 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6842 assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6843 __TRANSPOSE_FIELD(vmo_provenance);
6844
6845 #undef __TRANSPOSE_FIELD
6846
6847 retval = KERN_SUCCESS;
6848
6849 done:
6850 /*
6851 * Cleanup.
6852 */
6853 if (tmp_object != VM_OBJECT_NULL) {
6854 vm_object_unlock(tmp_object);
6855 /*
6856 * Re-initialize the temporary object to avoid
6857 * deallocating a real pager.
6858 */
6859 _vm_object_allocate(
6860 transpose_size,
6861 tmp_object,
6862 /*
6863 * Since we're reallocating purely to deallocate,
6864 * don't bother trying to set a sensible provenance.
6865 */
6866 VM_MAP_SERIAL_NONE
6867 );
6868 vm_object_deallocate(tmp_object);
6869 tmp_object = VM_OBJECT_NULL;
6870 }
6871
6872 if (object1_locked) {
6873 vm_object_unlock(object1);
6874 object1_locked = FALSE;
6875 }
6876 if (object2_locked) {
6877 vm_object_unlock(object2);
6878 object2_locked = FALSE;
6879 }
6880
6881 vm_object_transpose_count++;
6882
6883 return retval;
6884 }
6885
6886
6887 /*
6888 * vm_object_cluster_size
6889 *
6890 * Determine how big a cluster we should issue an I/O for...
6891 *
6892 * Inputs: *start == offset of page needed
6893 * *length == maximum cluster pager can handle
6894 * Outputs: *start == beginning offset of cluster
6895 * *length == length of cluster to try
6896 *
6897 * The original *start will be encompassed by the cluster
6898 *
6899 */
6900 extern int speculative_reads_disabled;
6901
6902 /*
6903 * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
* to derive min_ph_size and max_ph_size (IMP: bytes, not # of pages) and expect those values to
6905 * always be page-aligned. The derivation could involve operations (e.g. division)
6906 * that could give us non-page-size aligned values if we start out with values that
6907 * are odd multiples of PAGE_SIZE.
6908 */
6909 #if !XNU_TARGET_OS_OSX
6910 unsigned int preheat_max_bytes = (1024 * 512);
6911 #else /* !XNU_TARGET_OS_OSX */
6912 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6913 #endif /* !XNU_TARGET_OS_OSX */
6914 unsigned int preheat_min_bytes = (1024 * 32);
6915
6916
6917 __private_extern__ void
6918 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6919 vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6920 {
6921 vm_size_t pre_heat_size;
6922 vm_size_t tail_size;
6923 vm_size_t head_size;
6924 vm_size_t max_length;
6925 vm_size_t cluster_size;
6926 vm_object_offset_t object_size;
6927 vm_object_offset_t orig_start;
6928 vm_object_offset_t target_start;
6929 vm_object_offset_t offset;
6930 vm_behavior_t behavior;
6931 boolean_t look_behind = TRUE;
6932 boolean_t look_ahead = TRUE;
6933 boolean_t isSSD = FALSE;
6934 uint32_t throttle_limit;
6935 int sequential_run;
6936 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6937 vm_size_t max_ph_size;
6938 vm_size_t min_ph_size;
6939
6940 assert( !(*length & PAGE_MASK));
6941 assert( !(*start & PAGE_MASK_64));
6942
6943 /*
* remember maximum length of run requested
6945 */
6946 max_length = *length;
6947 /*
6948 * we'll always return a cluster size of at least
6949 * 1 page, since the original fault must always
6950 * be processed
6951 */
6952 *length = PAGE_SIZE;
6953 *io_streaming = 0;
6954
6955 if (speculative_reads_disabled || fault_info == NULL) {
6956 /*
6957 * no cluster... just fault the page in
6958 */
6959 return;
6960 }
6961 orig_start = *start;
6962 target_start = orig_start;
6963 cluster_size = round_page(fault_info->cluster_size);
6964 behavior = fault_info->behavior;
6965
6966 vm_object_lock(object);
6967
6968 if (object->pager == MEMORY_OBJECT_NULL) {
6969 goto out; /* pager is gone for this object, nothing more to do */
6970 }
6971 vnode_pager_get_isSSD(object->pager, &isSSD);
6972
6973 min_ph_size = round_page(preheat_min_bytes);
6974 max_ph_size = round_page(preheat_max_bytes);
6975
6976 #if XNU_TARGET_OS_OSX
6977 /*
6978 * If we're paging from an SSD, we cut the minimum cluster size in half
6979 * and reduce the maximum size by a factor of 8. We do this because the
6980 * latency to issue an I/O is a couple of orders of magnitude smaller than
6981 * on spinning media, so being overly aggressive on the cluster size (to
6982 * try and reduce cumulative seek penalties) isn't a good trade off over
6983 * the increased memory pressure caused by the larger speculative I/Os.
6984 * However, the latency isn't 0, so a small amount of clustering is still
6985 * a win.
6986 *
6987 * If an explicit cluster size has already been provided, then we're
6988 * receiving a strong hint that the entire range will be needed (e.g.
6989 * wiring, willneed). In these cases, we want to maximize the I/O size
6990 * to minimize the number of I/Os issued.
6991 */
6992 if (isSSD && cluster_size <= PAGE_SIZE) {
6993 min_ph_size /= 2;
6994 max_ph_size /= 8;
6995
6996 if (min_ph_size & PAGE_MASK_64) {
6997 min_ph_size = trunc_page(min_ph_size);
6998 }
6999
7000 if (max_ph_size & PAGE_MASK_64) {
7001 max_ph_size = trunc_page(max_ph_size);
7002 }
7003 }
7004 #endif /* XNU_TARGET_OS_OSX */
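/*
 * With the defaults above on macOS, an SSD-backed object with no explicit
 * cluster size ends up with a min_ph_size of 16KB (32KB / 2) and a
 * max_ph_size of one eighth of MAX_UPL_TRANSFER_BYTES; both are normally
 * already page-aligned, so the trunc_page() fixups are usually no-ops.
 */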
7005
7006 if (min_ph_size < PAGE_SIZE) {
7007 min_ph_size = PAGE_SIZE;
7008 }
7009
7010 if (max_ph_size < PAGE_SIZE) {
7011 max_ph_size = PAGE_SIZE;
7012 } else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
7013 max_ph_size = MAX_UPL_TRANSFER_BYTES;
7014 }
7015
7016 if (max_length > max_ph_size) {
7017 max_length = max_ph_size;
7018 }
7019
7020 if (max_length <= PAGE_SIZE) {
7021 goto out;
7022 }
7023
7024 if (object->internal) {
7025 object_size = object->vo_size;
7026 } else {
7027 vnode_pager_get_object_size(object->pager, &object_size);
7028 }
7029
7030 object_size = round_page_64(object_size);
7031
7032 if (orig_start >= object_size) {
7033 /*
7034 * fault occurred beyond the EOF...
7035 * we need to punt w/o changing the
7036 * starting offset
7037 */
7038 goto out;
7039 }
7040 if (object->pages_used > object->pages_created) {
7041 /*
7042 * must have wrapped our 32 bit counters
7043 * so reset
7044 */
7045 object->pages_used = object->pages_created = 0;
7046 }
7047 if ((sequential_run = object->sequential)) {
7048 if (sequential_run < 0) {
7049 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
7050 sequential_run = 0 - sequential_run;
7051 } else {
7052 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
7053 }
7054 }
7055 switch (behavior) {
7056 default:
7057 behavior = VM_BEHAVIOR_DEFAULT;
7058 OS_FALLTHROUGH;
7059
7060 case VM_BEHAVIOR_DEFAULT:
7061 if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
7062 goto out;
7063 }
7064
7065 if (sequential_run >= (3 * PAGE_SIZE)) {
7066 pre_heat_size = sequential_run + PAGE_SIZE;
7067
7068 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
7069 look_behind = FALSE;
7070 } else {
7071 look_ahead = FALSE;
7072 }
7073
7074 *io_streaming = 1;
7075 } else {
7076 if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
7077 /*
7078 * prime the pump
7079 */
7080 pre_heat_size = min_ph_size;
7081 } else {
7082 /*
7083 * Linear growth in PH size: The maximum size is max_length...
* this calculation will result in a size that is neither a
7085 * power of 2 nor a multiple of PAGE_SIZE... so round
7086 * it up to the nearest PAGE_SIZE boundary
7087 */
7088 pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
7089
7090 if (pre_heat_size < min_ph_size) {
7091 pre_heat_size = min_ph_size;
7092 } else {
7093 pre_heat_size = round_page(pre_heat_size);
7094 }
7095 }
7096 }
7097 break;
7098
7099 case VM_BEHAVIOR_RANDOM:
7100 if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
7101 goto out;
7102 }
7103 break;
7104
7105 case VM_BEHAVIOR_SEQUENTIAL:
7106 if ((pre_heat_size = cluster_size) == 0) {
7107 pre_heat_size = sequential_run + PAGE_SIZE;
7108 }
7109 look_behind = FALSE;
7110 *io_streaming = 1;
7111
7112 break;
7113
7114 case VM_BEHAVIOR_RSEQNTL:
7115 if ((pre_heat_size = cluster_size) == 0) {
7116 pre_heat_size = sequential_run + PAGE_SIZE;
7117 }
7118 look_ahead = FALSE;
7119 *io_streaming = 1;
7120
7121 break;
7122 }
7123 throttle_limit = (uint32_t) max_length;
7124 assert(throttle_limit == max_length);
7125
7126 if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
7127 if (max_length > throttle_limit) {
7128 max_length = throttle_limit;
7129 }
7130 }
7131 if (pre_heat_size > max_length) {
7132 pre_heat_size = max_length;
7133 }
7134
7135 if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
7136 unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
7137
7138 if (consider_free < vm_page_throttle_limit) {
7139 pre_heat_size = trunc_page(pre_heat_size / 16);
7140 } else if (consider_free < vm_page_free_target) {
7141 pre_heat_size = trunc_page(pre_heat_size / 4);
7142 }
7143
7144 if (pre_heat_size < min_ph_size) {
7145 pre_heat_size = min_ph_size;
7146 }
7147 }
7148 if (look_ahead == TRUE) {
7149 if (look_behind == TRUE) {
7150 /*
* if we get here it's due to a random access...
7152 * so we want to center the original fault address
7153 * within the cluster we will issue... make sure
7154 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7155 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7156 * necessarily an even number of pages so we need to truncate
7157 * the result to a PAGE_SIZE boundary
7158 */
7159 head_size = trunc_page(pre_heat_size / 2);
7160
7161 if (target_start > head_size) {
7162 target_start -= head_size;
7163 } else {
7164 target_start = 0;
7165 }
7166
7167 /*
7168 * 'target_start' at this point represents the beginning offset
7169 * of the cluster we are considering... 'orig_start' will be in
7170 * the center of this cluster if we didn't have to clip the start
7171 * due to running into the start of the file
7172 */
7173 }
7174 if ((target_start + pre_heat_size) > object_size) {
7175 pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
7176 }
7177 /*
7178 * at this point calculate the number of pages beyond the original fault
7179 * address that we want to consider... this is guaranteed not to extend beyond
7180 * the current EOF...
7181 */
7182 assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
7183 tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
7184 } else {
7185 if (pre_heat_size > target_start) {
7186 /*
7187 * since pre_heat_size is always smaller than 2^32,
7188 * if it is larger than target_start (a 64 bit value)
7189 * it is safe to clip target_start to 32 bits
7190 */
7191 pre_heat_size = (vm_size_t) target_start;
7192 }
7193 tail_size = 0;
7194 }
7195 assert( !(target_start & PAGE_MASK_64));
7196 assert( !(pre_heat_size & PAGE_MASK_64));
7197
7198 if (pre_heat_size <= PAGE_SIZE) {
7199 goto out;
7200 }
7201
7202 if (look_behind == TRUE) {
7203 /*
7204 * take a look at the pages before the original
7205 * faulting offset... recalculate this in case
7206 * we had to clip 'pre_heat_size' above to keep
7207 * from running past the EOF.
7208 */
7209 head_size = pre_heat_size - tail_size - PAGE_SIZE;
7210
7211 for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7212 /*
7213 * don't poke below the lowest offset
7214 */
7215 if (offset < fault_info->lo_offset) {
7216 break;
7217 }
7218 /*
7219 * for external objects or internal objects w/o a pager,
7220 * vm_object_compressor_pager_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7221 */
7222 if (vm_object_compressor_pager_state_get(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7223 break;
7224 }
7225 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7226 /*
7227 * don't bridge resident pages
7228 */
7229 break;
7230 }
7231 *start = offset;
7232 *length += PAGE_SIZE;
7233 }
7234 }
7235 if (look_ahead == TRUE) {
7236 for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7237 /*
7238 * don't poke above the highest offset
7239 */
7240 if (offset >= fault_info->hi_offset) {
7241 break;
7242 }
7243 assert(offset < object_size);
7244
7245 /*
7246 * for external objects or internal objects w/o a pager,
7247 * vm_object_compressor_pager_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7248 */
7249 if (vm_object_compressor_pager_state_get(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7250 break;
7251 }
7252 if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7253 /*
7254 * don't bridge resident pages
7255 */
7256 break;
7257 }
7258 *length += PAGE_SIZE;
7259 }
7260 }
7261 out:
7262 if (*length > max_length) {
7263 *length = max_length;
7264 }
7265
7266 vm_object_unlock(object);
7267
7268 DTRACE_VM1(clustersize, vm_size_t, *length);
7269 }
7270
7271
7272 /*
7273 * Allow manipulation of individual page state. This is actually part of
7274 * the UPL regimen but takes place on the VM object rather than on a UPL
7275 */
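/*
 * Illustrative usage sketch (hypothetical caller, not copied from an actual
 * call site): a caller typically marks the page busy first so nobody else
 * manipulates it, operates on it, then clears busy to wake any waiters:
 *
 *	vm_object_page_op(object, offset, UPL_POP_SET | UPL_POP_BUSY, &phys, &flags);
 *	... use the physical page ...
 *	vm_object_page_op(object, offset, UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 */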
7276
7277 kern_return_t
7278 vm_object_page_op(
7279 vm_object_t object,
7280 vm_object_offset_t offset,
7281 int ops,
7282 ppnum_t *phys_entry,
7283 int *flags)
7284 {
7285 vm_page_t dst_page;
7286
7287 vm_object_lock(object);
7288
7289 if (ops & UPL_POP_PHYSICAL) {
7290 if (object->phys_contiguous) {
7291 if (phys_entry) {
7292 *phys_entry = (ppnum_t)
7293 (object->vo_shadow_offset >> PAGE_SHIFT);
7294 }
7295 vm_object_unlock(object);
7296 return KERN_SUCCESS;
7297 } else {
7298 vm_object_unlock(object);
7299 return KERN_INVALID_OBJECT;
7300 }
7301 }
7302 if (object->phys_contiguous) {
7303 vm_object_unlock(object);
7304 return KERN_INVALID_OBJECT;
7305 }
7306
7307 while (TRUE) {
7308 if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
7309 vm_object_unlock(object);
7310 return KERN_FAILURE;
7311 }
7312
7313 /* Sync up on getting the busy bit */
7314 if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
7315 (((ops & UPL_POP_SET) &&
7316 (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7317 /* someone else is playing with the page, we will */
7318 /* have to wait */
7319 vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_DEFAULT);
7320 continue;
7321 }
7322
7323 if (ops & UPL_POP_DUMP) {
7324 if (dst_page->vmp_pmapped == TRUE) {
7325 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7326 }
7327
7328 VM_PAGE_FREE(dst_page);
7329 break;
7330 }
7331
7332 if (flags) {
7333 *flags = 0;
7334
7335 /* Get the condition of flags before requested ops */
7336 /* are undertaken */
7337
7338 if (dst_page->vmp_dirty) {
7339 *flags |= UPL_POP_DIRTY;
7340 }
7341 if (dst_page->vmp_free_when_done) {
7342 *flags |= UPL_POP_PAGEOUT;
7343 }
7344 if (dst_page->vmp_precious) {
7345 *flags |= UPL_POP_PRECIOUS;
7346 }
7347 if (dst_page->vmp_absent) {
7348 *flags |= UPL_POP_ABSENT;
7349 }
7350 if (dst_page->vmp_busy) {
7351 *flags |= UPL_POP_BUSY;
7352 }
7353 }
7354
7355 /* The caller should have set UPL_POP_BUSY, either as part of */
7356 /* this call or in a prior call. */
7357 if (ops & UPL_POP_SET) {
7358 /* The protection granted with this assert will */
7359 /* not be complete. If the caller violates the */
7360 /* convention and attempts to change page state */
7361 /* without first setting busy we may not see it */
7362 /* because the page may already be busy. However */
7363 /* if such violations occur we will assert sooner */
7364 /* or later. */
7365 assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
7366 if (ops & UPL_POP_DIRTY) {
7367 SET_PAGE_DIRTY(dst_page, FALSE);
7368 }
7369 if (ops & UPL_POP_PAGEOUT) {
7370 dst_page->vmp_free_when_done = TRUE;
7371 }
7372 if (ops & UPL_POP_PRECIOUS) {
7373 dst_page->vmp_precious = TRUE;
7374 }
7375 if (ops & UPL_POP_ABSENT) {
7376 dst_page->vmp_absent = TRUE;
7377 }
7378 if (ops & UPL_POP_BUSY) {
7379 dst_page->vmp_busy = TRUE;
7380 }
7381 }
7382
7383 if (ops & UPL_POP_CLR) {
7384 assert(dst_page->vmp_busy);
7385 if (ops & UPL_POP_DIRTY) {
7386 dst_page->vmp_dirty = FALSE;
7387 }
7388 if (ops & UPL_POP_PAGEOUT) {
7389 dst_page->vmp_free_when_done = FALSE;
7390 }
7391 if (ops & UPL_POP_PRECIOUS) {
7392 dst_page->vmp_precious = FALSE;
7393 }
7394 if (ops & UPL_POP_ABSENT) {
7395 dst_page->vmp_absent = FALSE;
7396 }
7397 if (ops & UPL_POP_BUSY) {
7398 dst_page->vmp_busy = FALSE;
7399 vm_page_wakeup(object, dst_page);
7400 }
7401 }
7402 if (phys_entry) {
7403 /*
7404 * The physical page number will remain valid
7405 * only if the page is kept busy.
7406 */
7407 assert(dst_page->vmp_busy);
7408 *phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
7409 }
7410
7411 break;
7412 }
7413
7414 vm_object_unlock(object);
7415 return KERN_SUCCESS;
7416 }
7417
7418 /*
7419 * vm_object_range_op offers a performance enhancement over
7420 * vm_object_page_op for page_op functions which do not require page
7421 * level state to be returned from the call. page_op was created to provide
7422 * a low-cost alternative to page manipulation via UPLs when only a single
7423 * page was involved. range_op extends the _op family of functions to work
7424 * on multiple pages; because no page-level state is returned, the caller
7425 * avoids the overhead of the upl structures.
7426 */
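/*
 * Illustrative usage sketch (hypothetical caller): dump every resident page
 * in a range and learn how far the walk got before stopping:
 *
 *	uint32_t range = 0;
 *	vm_object_range_op(object, start, end, UPL_ROP_DUMP, &range);
 */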
7427
7428 kern_return_t
7429 vm_object_range_op(
7430 vm_object_t object,
7431 vm_object_offset_t offset_beg,
7432 vm_object_offset_t offset_end,
7433 int ops,
7434 uint32_t *range)
7435 {
7436 vm_object_offset_t offset;
7437 vm_page_t dst_page;
7438
7439 if (object->resident_page_count == 0) {
7440 if (range) {
7441 if (ops & UPL_ROP_PRESENT) {
7442 *range = 0;
7443 } else {
7444 *range = (uint32_t) (offset_end - offset_beg);
7445 assert(*range == (offset_end - offset_beg));
7446 }
7447 }
7448 return KERN_SUCCESS;
7449 }
7450 vm_object_lock(object);
7451
7452 if (object->phys_contiguous) {
7453 vm_object_unlock(object);
7454 return KERN_INVALID_OBJECT;
7455 }
7456
7457 offset = offset_beg & ~PAGE_MASK_64;
7458
7459 while (offset < offset_end) {
7460 dst_page = vm_page_lookup(object, offset);
7461 if (dst_page != VM_PAGE_NULL) {
7462 if (ops & UPL_ROP_DUMP) {
7463 if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
7464 /*
7465 * someone else is playing with the
7466 * page, we will have to wait
7467 */
7468 vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_DEFAULT);
7469 /*
7470 * need to look the page up again since its
7471 * state may have changed while we slept...
7472 * it might even belong to a different object
7473 * at this point
7474 */
7475 continue;
7476 }
7477 if (dst_page->vmp_laundry) {
7478 vm_pageout_steal_laundry(dst_page, FALSE);
7479 }
7480
7481 if (dst_page->vmp_pmapped == TRUE) {
7482 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7483 }
7484
7485 VM_PAGE_FREE(dst_page);
7486 } else if ((ops & UPL_ROP_ABSENT)
7487 && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
7488 break;
7489 }
7490 } else if (ops & UPL_ROP_PRESENT) {
7491 break;
7492 }
7493
7494 offset += PAGE_SIZE;
7495 }
7496 vm_object_unlock(object);
7497
7498 if (range) {
7499 if (offset > offset_end) {
7500 offset = offset_end;
7501 }
7502 if (offset > offset_beg) {
7503 *range = (uint32_t) (offset - offset_beg);
7504 assert(*range == (offset - offset_beg));
7505 } else {
7506 *range = 0;
7507 }
7508 }
7509 return KERN_SUCCESS;
7510 }
7511
7512 /*
7513 * Used to point a pager directly to a range of memory (when the pager may be associated
7514 * with a non-device vnode). Takes a virtual address, an offset, and a size. We currently
7515 * expect that the virtual address will denote the start of a range that is physically contiguous.
7516 */
7517 kern_return_t
7518 pager_map_to_phys_contiguous(
7519 memory_object_control_t object,
7520 memory_object_offset_t offset,
7521 addr64_t base_vaddr,
7522 vm_size_t size)
7523 {
7524 ppnum_t page_num;
7525 boolean_t clobbered_private;
7526 kern_return_t retval;
7527 vm_object_t pager_object;
7528
7529 page_num = pmap_find_phys(kernel_pmap, base_vaddr);
7530
7531 if (!page_num) {
7532 retval = KERN_FAILURE;
7533 goto out;
7534 }
7535
7536 pager_object = memory_object_control_to_vm_object(object);
7537
7538 if (!pager_object) {
7539 retval = KERN_FAILURE;
7540 goto out;
7541 }
7542
7543 clobbered_private = pager_object->private;
7544 if (pager_object->private != TRUE) {
7545 vm_object_lock(pager_object);
7546 VM_OBJECT_SET_PRIVATE(pager_object, TRUE);
7547 vm_object_unlock(pager_object);
7548 }
7549 retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7550
7551 if (retval != KERN_SUCCESS) {
7552 if (pager_object->private != clobbered_private) {
7553 vm_object_lock(pager_object);
7554 VM_OBJECT_SET_PRIVATE(pager_object, clobbered_private);
7555 vm_object_unlock(pager_object);
7556 }
7557 }
7558
7559 out:
7560 return retval;
7561 }
7562
7563 uint32_t scan_object_collision = 0;
7564
7565 void
7566 vm_object_lock(vm_object_t object)
7567 {
7568 if (object == vm_pageout_scan_wants_object) {
7569 scan_object_collision++;
7570 mutex_pause(2);
7571 }
7572 DTRACE_VM(vm_object_lock_w);
7573 lck_rw_lock_exclusive(&object->Lock);
7574 }
7575
7576 boolean_t
7577 vm_object_lock_avoid(vm_object_t object)
7578 {
7579 if (object == vm_pageout_scan_wants_object) {
7580 scan_object_collision++;
7581 return TRUE;
7582 }
7583 return FALSE;
7584 }
7585
7586 boolean_t
7587 _vm_object_lock_try(vm_object_t object)
7588 {
7589 boolean_t retval;
7590
7591 retval = lck_rw_try_lock_exclusive(&object->Lock);
7592 #if DEVELOPMENT || DEBUG
7593 if (retval == TRUE) {
7594 DTRACE_VM(vm_object_lock_w);
7595 }
7596 #endif
7597 return retval;
7598 }
7599
7600 boolean_t
7601 vm_object_lock_try(vm_object_t object)
7602 {
7603 /*
7604 * Called from hibernate path so check before blocking.
7605 */
7606 if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7607 mutex_pause(2);
7608 }
7609 return _vm_object_lock_try(object);
7610 }
7611
7612 /*
7613 * Lock the object exclusive.
7614 *
7615 * Returns true iff the thread had to spin or block before
7616 * acquiring the lock.
7617 */
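/*
 * Illustrative use (hypothetical, not an actual call site): a caller that
 * batches work under the object lock can use the contention hint to decide
 * whether to shorten its batch and drop the lock sooner:
 *
 *	if (vm_object_lock_check_contended(object)) {
 *		batch_size = MIN(batch_size, small_batch);	// hypothetical tuning
 *	}
 */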
7618 bool
7619 vm_object_lock_check_contended(vm_object_t object)
7620 {
7621 if (object == vm_pageout_scan_wants_object) {
7622 scan_object_collision++;
7623 mutex_pause(2);
7624 }
7625 DTRACE_VM(vm_object_lock_w);
7626 return lck_rw_lock_exclusive_check_contended(&object->Lock);
7627 }
7628
7629 void
7630 vm_object_lock_shared(vm_object_t object)
7631 {
7632 if (vm_object_lock_avoid(object)) {
7633 mutex_pause(2);
7634 }
7635 DTRACE_VM(vm_object_lock_r);
7636 lck_rw_lock_shared(&object->Lock);
7637 }
7638
7639 boolean_t
7640 vm_object_lock_yield_shared(vm_object_t object)
7641 {
7642 boolean_t retval = FALSE, force_yield = FALSE;
7643
7644 vm_object_lock_assert_shared(object);
7645
7646 force_yield = vm_object_lock_avoid(object);
7647
7648 retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7649 if (retval) {
7650 DTRACE_VM(vm_object_lock_yield);
7651 }
7652
7653 return retval;
7654 }
7655
7656 boolean_t
7657 vm_object_lock_try_shared(vm_object_t object)
7658 {
7659 boolean_t retval;
7660
7661 if (vm_object_lock_avoid(object)) {
7662 mutex_pause(2);
7663 }
7664 retval = lck_rw_try_lock_shared(&object->Lock);
7665 if (retval) {
7666 DTRACE_VM(vm_object_lock_r);
7667 }
7668 return retval;
7669 }
7670
7671 boolean_t
7672 vm_object_lock_upgrade(vm_object_t object)
7673 {
7674 boolean_t retval;
7675
7676 retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7677 #if DEVELOPMENT || DEBUG
7678 if (retval == TRUE) {
7679 DTRACE_VM(vm_object_lock_w);
7680 }
7681 #endif
7682 return retval;
7683 }
7684
7685 void
7686 vm_object_unlock(vm_object_t object)
7687 {
7688 #if DEVELOPMENT || DEBUG
7689 DTRACE_VM(vm_object_unlock);
7690 #endif
7691 lck_rw_done(&object->Lock);
7692 }
7693
7694
7695 unsigned int vm_object_change_wimg_mode_count = 0;
7696
7697 /*
7698 * The object must be locked
7699 */
7700 void
7701 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7702 {
7703 vm_object_lock_assert_exclusive(object);
7704
7705 vm_object_paging_only_wait(object, THREAD_UNINT);
7706
7707 #if HAS_MTE
7708 if (vm_object_is_mte_mappable(object)) {
7709 panic("Changing WIMG mode on tagged VM object: %d", wimg_mode);
7710 } else if (wimg_mode == VM_WIMG_MTE) {
7711 panic("Changing untagged VM object to VM_WIMG_MTE: %d", object->wimg_bits);
7712 }
7713 #endif /* HAS_MTE */
7714
7715 const unified_page_list_t pmap_batch_list = {
7716 .pageq = &object->memq,
7717 .type = UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q,
7718 };
7719 pmap_batch_set_cache_attributes(&pmap_batch_list, wimg_mode);
7720 object->set_cache_attr = !HAS_DEFAULT_CACHEABILITY(wimg_mode);
7721
7722 object->wimg_bits = wimg_mode;
7723
7724 vm_object_change_wimg_mode_count++;
7725 }
7726
7727 #if CONFIG_FREEZE
7728
7729 extern struct freezer_context freezer_context_global;
7730
7731 /*
7732 * This routine does the "relocation" of previously
7733 * compressed pages belonging to this object that are
7734 * residing in a number of compressed segments into
7735 * a set of compressed segments dedicated to hold
7736 * compressed pages belonging to this object.
7737 */
7738
7739 extern AbsoluteTime c_freezer_last_yield_ts;
7740
7741 #define MAX_FREE_BATCH 32
7742 #define FREEZER_DUTY_CYCLE_ON_MS 5
7743 #define FREEZER_DUTY_CYCLE_OFF_MS 5
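/*
 * The freezer runs on a simple duty cycle: after roughly
 * FREEZER_DUTY_CYCLE_ON_MS of work (see c_freezer_should_yield() below)
 * it yields the CPU for FREEZER_DUTY_CYCLE_OFF_MS via
 * thread_yield_internal() so it does not monopolize the processor.
 */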
7744
7745 static int c_freezer_should_yield(void);
7746
7747
7748 static int
7749 c_freezer_should_yield()
7750 {
7751 AbsoluteTime cur_time;
7752 uint64_t nsecs;
7753
7754 assert(c_freezer_last_yield_ts);
7755 clock_get_uptime(&cur_time);
7756
7757 SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7758 absolutetime_to_nanoseconds(cur_time, &nsecs);
7759
7760 if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7761 return 1;
7762 }
7763 return 0;
7764 }
7765
7766
7767 void
7768 vm_object_compressed_freezer_done()
7769 {
7770 vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7771 }
7772
7773
7774 uint32_t
7775 vm_object_compressed_freezer_pageout(
7776 vm_object_t object, uint32_t dirty_budget)
7777 {
7778 vm_page_t p;
7779 vm_page_t local_freeq = NULL;
7780 int local_freed = 0;
7781 kern_return_t retval = KERN_SUCCESS;
7782 int obj_resident_page_count_snapshot = 0;
7783 uint32_t paged_out_count = 0;
7784
7785 assert(object != VM_OBJECT_NULL);
7786 assert(object->internal);
7787
7788 vm_object_lock(object);
7789
7790 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7791 if (!object->pager_initialized) {
7792 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7793
7794 if (!object->pager_initialized) {
7795 vm_object_compressor_pager_create(object);
7796 }
7797 }
7798
7799 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7800 vm_object_unlock(object);
7801 return paged_out_count;
7802 }
7803 }
7804
7805 /*
7806 * We could be freezing a shared internal object that might
7807 * be part of some other thread's current VM operations.
7808 * We skip it if there is paging or activity in progress
7809 * because we could be here a long time with the map lock held.
7810 *
7811 * Note: We are holding the map locked while we wait.
7812 * This is fine in the freezer path because the task
7813 * is suspended and so this latency is acceptable.
7814 */
7815 if (object->paging_in_progress || object->activity_in_progress) {
7816 vm_object_unlock(object);
7817 return paged_out_count;
7818 }
7819
7820 if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7821 vm_object_offset_t curr_offset = 0;
7822
7823 /*
7824 * Go through the object and make sure that any
7825 * previously compressed pages are relocated into
7826 * a compressed segment associated with our "freezer_chead".
7827 */
7828 while (curr_offset < object->vo_size) {
7829 curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7830
7831 if (curr_offset == (vm_object_offset_t) -1) {
7832 break;
7833 }
7834
7835 retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7836
7837 if (retval != KERN_SUCCESS) {
7838 break;
7839 }
7840
7841 curr_offset += PAGE_SIZE_64;
7842 }
7843 }
7844
7845 /*
7846 * We can't hold the object lock while heading down into the compressed pager
7847 * layer because we might need the kernel map lock down there to allocate new
7848 * compressor data structures. And if this same object is mapped in the kernel
7849 * and there's a fault on it, then that thread will want the object lock while
7850 * holding the kernel map lock.
7851 *
7852 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7853 * we won't be stuck in an infinite loop if the same page(s) keep getting
7854 * decompressed. So we grab a snapshot of the number of pages in the object and
7855 * we won't process any more than that number of pages.
7856 */
7857
7858 obj_resident_page_count_snapshot = object->resident_page_count;
7859
7860 vm_object_activity_begin(object);
7861
7862 while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7863 p = (vm_page_t)vm_page_queue_first(&object->memq);
7864
7865 KDBG_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed);
7866
7867 vm_page_lockspin_queues();
7868
7869 if (p->vmp_cleaning || vm_page_is_fictitious(p) ||
7870 p->vmp_busy || p->vmp_absent || p->vmp_unusual ||
7871 VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) {
7872 vm_page_unlock_queues();
7873
7874 KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1);
7875
7876 vm_page_queue_remove(&object->memq, p, vmp_listq);
7877 vm_page_queue_enter(&object->memq, p, vmp_listq);
7878
7879 continue;
7880 }
7881
7882 if (p->vmp_pmapped == TRUE) {
7883 int refmod_state, pmap_flags;
7884
7885 if (p->vmp_dirty || p->vmp_precious) {
7886 pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7887 } else {
7888 pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7889 }
7890
7891 vm_page_lockconvert_queues();
7892 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7893 if (refmod_state & VM_MEM_MODIFIED) {
7894 SET_PAGE_DIRTY(p, FALSE);
7895 }
7896 }
7897
7898 if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7899 /*
7900 * Clean and non-precious page.
7901 */
7902 vm_page_unlock_queues();
7903 VM_PAGE_FREE(p);
7904
7905 KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2);
7906 continue;
7907 }
7908
7909 if (p->vmp_laundry) {
7910 vm_pageout_steal_laundry(p, TRUE);
7911 }
7912
7913 vm_page_queues_remove(p, TRUE);
7914
7915 vm_page_unlock_queues();
7916
7917
7918 /*
7919 * In case the compressor fails to compress this page, we need it at
7920 * the back of the object memq so that we don't keep trying to process it.
7921 * Make the move here while we have the object lock held.
7922 */
7923
7924 vm_page_queue_remove(&object->memq, p, vmp_listq);
7925 vm_page_queue_enter(&object->memq, p, vmp_listq);
7926
7927 /*
7928 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7929 *
7930 * Mark the page busy so no one messes with it while we have the object lock dropped.
7931 */
7932 p->vmp_busy = TRUE;
7933
7934 vm_object_activity_begin(object);
7935
7936 vm_object_unlock(object);
7937
7938 if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7939 (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7940 p) == KERN_SUCCESS) {
7941 /*
7942 * page has already been un-tabled from the object via 'vm_page_remove'
7943 */
7944 p->vmp_snext = local_freeq;
7945 local_freeq = p;
7946 local_freed++;
7947 paged_out_count++;
7948
7949 if (local_freed >= MAX_FREE_BATCH) {
7950 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7951
7952 vm_page_free_list(local_freeq, TRUE);
7953
7954 local_freeq = NULL;
7955 local_freed = 0;
7956 }
7957 freezer_context_global.freezer_ctx_uncompressed_pages++;
7958 }
7959 KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed);
7960
7961 if (local_freed == 0 && c_freezer_should_yield()) {
7962 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7963 clock_get_uptime(&c_freezer_last_yield_ts);
7964 }
7965
7966 vm_object_lock(object);
7967 }
7968
7969 if (local_freeq) {
7970 OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7971
7972 vm_page_free_list(local_freeq, TRUE);
7973
7974 local_freeq = NULL;
7975 local_freed = 0;
7976 }
7977
7978 vm_object_activity_end(object);
7979
7980 vm_object_unlock(object);
7981
7982 if (c_freezer_should_yield()) {
7983 thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7984 clock_get_uptime(&c_freezer_last_yield_ts);
7985 }
7986 return paged_out_count;
7987 }
7988
7989 #endif /* CONFIG_FREEZE */
7990
7991
7992 uint64_t vm_object_pageout_not_on_queue = 0;
7993 uint64_t vm_object_pageout_not_pageable = 0;
7994 uint64_t vm_object_pageout_pageable = 0;
7995 uint64_t vm_object_pageout_active_local = 0;
7996 void
7997 vm_object_pageout(
7998 vm_object_t object)
7999 {
8000 vm_page_t p, next;
8001 struct vm_pageout_queue *iq;
8002
8003 if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
8004 return;
8005 }
8006
8007 iq = &vm_pageout_queue_internal;
8008
8009 assert(object != VM_OBJECT_NULL );
8010
8011 vm_object_lock(object);
8012
8013 if (!object->internal ||
8014 object->terminating ||
8015 !object->alive) {
8016 vm_object_unlock(object);
8017 return;
8018 }
8019
8020 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8021 if (!object->pager_initialized) {
8022 vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8023
8024 if (!object->pager_initialized) {
8025 vm_object_compressor_pager_create(object);
8026 }
8027 }
8028
8029 if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8030 vm_object_unlock(object);
8031 return;
8032 }
8033 }
8034
8035 ReScan:
8036 next = (vm_page_t)vm_page_queue_first(&object->memq);
8037
8038 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
8039 p = next;
8040 next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
8041
8042 vm_page_lockspin_queues();
8043
8044 assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
8045 assert(p->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8046
8047 if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
8048 p->vmp_cleaning ||
8049 p->vmp_laundry ||
8050 p->vmp_busy ||
8051 p->vmp_absent ||
8052 VMP_ERROR_GET(p) ||
8053 vm_page_is_fictitious(p) ||
8054 VM_PAGE_WIRED(p)) {
8055 /*
8056 * Page is already being cleaned or can't be cleaned.
8057 */
8058 vm_page_unlock_queues();
8059 continue;
8060 }
8061 if (p->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8062 // printf("FBDP %s:%d page %p object %p offset 0x%llx state %d not on queue\n", __FUNCTION__, __LINE__, p, VM_PAGE_OBJECT(p), p->vmp_offset, p->vmp_q_state);
8063 vm_object_pageout_not_on_queue++;
8064 vm_page_unlock_queues();
8065 continue;
8066 }
8067 if (!VM_PAGE_PAGEABLE(p)) {
8068 if (p->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
8069 vm_object_pageout_active_local++;
8070 } else {
8071 vm_object_pageout_not_pageable++;
8072 vm_page_unlock_queues();
8073 continue;
8074 }
8075 } else {
8076 vm_object_pageout_pageable++;
8077 }
8078
8079 if (vm_compressor_low_on_space()) {
8080 vm_page_unlock_queues();
8081 break;
8082 }
8083
8084 /* Throw to the pageout queue */
8085
8086 if (VM_PAGE_Q_THROTTLED(iq)) {
8087 iq->pgo_draining = TRUE;
8088
8089 assert_wait((event_t) (&iq->pgo_laundry + 1),
8090 THREAD_INTERRUPTIBLE);
8091 vm_page_unlock_queues();
8092 vm_object_unlock(object);
8093
8094 thread_block(THREAD_CONTINUE_NULL);
8095
8096 vm_object_lock(object);
8097 goto ReScan;
8098 }
8099
8100 assert(!vm_page_is_fictitious(p));
8101 assert(!p->vmp_busy);
8102 assert(!p->vmp_absent);
8103 assert(!p->vmp_unusual);
8104 assert(!VMP_ERROR_GET(p)); /* XXX there's a window here where we could have an ECC error! */
8105 assert(!VM_PAGE_WIRED(p));
8106 assert(!p->vmp_cleaning);
8107
8108 if (p->vmp_pmapped == TRUE) {
8109 int refmod_state;
8110 int pmap_options;
8111
8112 /*
8113 * Tell pmap the page should be accounted
8114 * for as "compressed" if it's been modified.
8115 */
8116 pmap_options =
8117 PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8118 if (p->vmp_dirty || p->vmp_precious) {
8119 /*
8120 * We already know it's been modified,
8121 * so tell pmap to account for it
8122 * as "compressed".
8123 */
8124 pmap_options = PMAP_OPTIONS_COMPRESSOR;
8125 }
8126 vm_page_lockconvert_queues();
8127 refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
8128 pmap_options,
8129 NULL);
8130 if (refmod_state & VM_MEM_MODIFIED) {
8131 SET_PAGE_DIRTY(p, FALSE);
8132 }
8133 }
8134
8135 if (!p->vmp_dirty && !p->vmp_precious) {
8136 vm_page_unlock_queues();
8137 VM_PAGE_FREE(p);
8138 continue;
8139 }
8140 vm_page_queues_remove(p, TRUE);
8141
8142 vm_pageout_cluster(p);
8143
8144 vm_page_unlock_queues();
8145 }
8146 vm_object_unlock(object);
8147 }
8148
8149
8150 #if CONFIG_IOSCHED
8151
8152 void
8153 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
8154 {
8155 io_reprioritize_req_t req;
8156 struct vnode *devvp = NULL;
8157
8158 if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8159 return;
8160 }
8161
8162 /*
8163 * Create the request for I/O reprioritization.
8164 * We use the noblock variant of zalloc because we're holding the object
8165 * lock here and we could cause a deadlock in low memory conditions.
8166 */
8167 req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
8168 if (req == NULL) {
8169 return;
8170 }
8171 req->blkno = blkno;
8172 req->len = len;
8173 req->priority = prio;
8174 req->devvp = devvp;
8175
8176 /* Insert request into the reprioritization list */
8177 mpsc_daemon_enqueue(&io_reprioritize_q, &req->iorr_elm, MPSC_QUEUE_DISABLE_PREEMPTION);
8178
8179 return;
8180 }
8181
8182 void
8183 vm_decmp_upl_reprioritize(upl_t upl, int prio)
8184 {
8185 int offset;
8186 vm_object_t object;
8187 io_reprioritize_req_t req;
8188 struct vnode *devvp = NULL;
8189 uint64_t blkno;
8190 uint32_t len;
8191 upl_t io_upl;
8192 uint64_t *io_upl_reprio_info;
8193 int io_upl_size;
8194
8195 if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
8196 return;
8197 }
8198
8199 /*
8200 * We don't want to perform any allocations with the upl lock held since that might
8201 * result in a deadlock. If the system is low on memory, the pageout thread would
8202 * try to page things out and might wait on this lock. If we are waiting for memory to
8203 * be freed up by the pageout thread, it would be a deadlock.
8204 */
8205
8206
8207 /* First step is just to get the size of the upl to find out how big the reprio info is */
8208 if (!upl_try_lock(upl)) {
8209 return;
8210 }
8211
8212 if (upl->decmp_io_upl == NULL) {
8213 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8214 upl_unlock(upl);
8215 return;
8216 }
8217
8218 io_upl = upl->decmp_io_upl;
8219 assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
8220 assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
8221 "upl %p offset 0x%llx size 0x%x\n",
8222 io_upl, io_upl->u_offset, io_upl->u_size);
8223 io_upl_size = io_upl->u_size;
8224 upl_unlock(upl);
8225
8226 /* Now perform the allocation */
8227 io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
8228 if (io_upl_reprio_info == NULL) {
8229 return;
8230 }
8231
8232 /* Now again take the lock, recheck the state and grab out the required info */
8233 if (!upl_try_lock(upl)) {
8234 goto out;
8235 }
8236
8237 if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
8238 /* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8239 upl_unlock(upl);
8240 goto out;
8241 }
8242 memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
8243 sizeof(uint64_t) * atop(io_upl_size));
8244
8245 /* Get the VM object for this UPL */
8246 if (io_upl->flags & UPL_SHADOWED) {
8247 object = io_upl->map_object->shadow;
8248 } else {
8249 object = io_upl->map_object;
8250 }
8251
8252 /* Get the dev vnode ptr for this object */
8253 if (!object || !object->pager ||
8254 vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8255 upl_unlock(upl);
8256 goto out;
8257 }
8258
8259 upl_unlock(upl);
8260
8261 /* Now we have all the information needed to do the expedite */
8262
8263 offset = 0;
8264 while (offset < io_upl_size) {
8265 blkno = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
8266 len = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
8267
8268 /*
8269 * This implementation may cause some spurious expedites because we
8270 * don't clean up the blkno & len in the upl_reprio_info
8271 * even after the I/O is complete.
8272 */
8273
8274 if (blkno != 0 && len != 0) {
8275 /* Create the request for I/O reprioritization */
8276 req = zalloc_flags(io_reprioritize_req_zone,
8277 Z_WAITOK | Z_NOFAIL);
8278 req->blkno = blkno;
8279 req->len = len;
8280 req->priority = prio;
8281 req->devvp = devvp;
8282
8283 /* Insert request into the reprioritization list */
8284 mpsc_daemon_enqueue(&io_reprioritize_q, &req->iorr_elm, MPSC_QUEUE_DISABLE_PREEMPTION);
8285
8286 offset += len;
8287 } else {
8288 offset += PAGE_SIZE;
8289 }
8290 }
8291
8292 out:
8293 kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size));
8294 }
8295
8296 void
8297 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
8298 {
8299 upl_t upl;
8300 upl_page_info_t *pl;
8301 unsigned int i, num_pages;
8302 int cur_tier;
8303
8304 cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
8305
8306 /*
8307 * Scan through all UPLs associated with the object to find the
8308 * UPL containing the contended page.
8309 */
8310 queue_iterate(&o->uplq, upl, upl_t, uplq) {
8311 if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
8312 continue;
8313 }
8314 pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
8315 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
8316 "upl %p offset 0x%llx size 0x%x\n",
8317 upl, upl->u_offset, upl->u_size);
8318 num_pages = (upl->u_size / PAGE_SIZE);
8319
8320 /*
8321 * For each page in the UPL page list, see if it matches the contended
8322 * page and was issued as a low prio I/O.
8323 */
8324 for (i = 0; i < num_pages; i++) {
8325 if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
8326 if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
8327 KDBG((VMDBG_CODE(DBG_VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8328 VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority);
8329 vm_decmp_upl_reprioritize(upl, cur_tier);
8330 break;
8331 }
8332 KDBG((VMDBG_CODE(DBG_VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8333 upl->upl_reprio_info[i], upl->upl_priority);
8334 if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
8335 vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
8336 }
8337 break;
8338 }
8339 }
8340 /* Check if we found any hits */
8341 if (i != num_pages) {
8342 break;
8343 }
8344 }
8345
8346 return;
8347 }
8348
8349 void
8350 kdp_vm_object_sleep_find_owner(
8351 event64_t wait_event,
8352 block_hint_t wait_type,
8353 thread_waitinfo_t *waitinfo)
8354 {
8355 assert(wait_type >= kThreadWaitPagerInit && wait_type <= kThreadWaitPageInThrottle);
8356 vm_object_wait_reason_t wait_reason = wait_type - kThreadWaitPagerInit;
8357 vm_object_t object = (vm_object_t)((uintptr_t)wait_event - wait_reason);
8358 waitinfo->context = VM_KERNEL_ADDRPERM(object);
8359 /*
8360 * There is currently no non-trivial way to ascertain the thread(s)
8361 * currently operating on this object.
8362 */
8363 waitinfo->owner = 0;
8364 }
8365
8366
8367 wait_result_t
8368 vm_object_sleep(
8369 vm_object_t object,
8370 vm_object_wait_reason_t reason,
8371 wait_interrupt_t interruptible,
8372 lck_sleep_action_t action)
8373 {
8374 wait_result_t wr;
8375 block_hint_t block_hint;
8376 event_t wait_event;
8377
8378 vm_object_lock_assert_exclusive(object);
8379 assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);
8380 switch (reason) {
8381 case VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS:
8382 block_hint = kThreadWaitPagerInit; /* XXX change that */
8383 break;
8384 case VM_OBJECT_EVENT_PAGER_READY:
8385 block_hint = kThreadWaitPagerReady;
8386 break;
8387 case VM_OBJECT_EVENT_PAGING_IN_PROGRESS:
8388 block_hint = kThreadWaitPagingActivity;
8389 break;
8390 case VM_OBJECT_EVENT_MAPPING_IN_PROGRESS:
8391 block_hint = kThreadWaitMappingInProgress;
8392 break;
8393 case VM_OBJECT_EVENT_UNBLOCKED:
8394 block_hint = kThreadWaitMemoryBlocked;
8395 break;
8396 case VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS:
8397 block_hint = kThreadWaitPagingInProgress;
8398 break;
8399 case VM_OBJECT_EVENT_PAGEIN_THROTTLE:
8400 block_hint = kThreadWaitPageInThrottle;
8401 break;
8402 default:
8403 panic("Unexpected wait reason %u", reason);
8404 }
8405 thread_set_pending_block_hint(current_thread(), block_hint);
8406
8407 KDBG_FILTERED(VMDBG_CODE(DBG_VM_OBJECT_SLEEP) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(object), reason);
8408
8409 vm_object_set_wanted(object, reason);
8410 wait_event = (event_t)((uintptr_t)object + (uintptr_t)reason);
8411 wr = lck_rw_sleep(&object->Lock, LCK_SLEEP_PROMOTED_PRI | action, wait_event, interruptible);
8412
8413 KDBG_FILTERED(VMDBG_CODE(DBG_VM_OBJECT_SLEEP) | DBG_FUNC_END, VM_KERNEL_ADDRHIDE(object), reason, wr);
8414 return wr;
8415 }
8416
8417 wait_result_t
8418 vm_object_pl_req_wait(vm_object_t object, wait_interrupt_t interruptible)
8419 {
8420 wait_result_t wr = THREAD_NOT_WAITING;
8421 vm_object_lock_assert_exclusive(object);
8422 while (object->vmo_pl_req_in_progress != 0) {
8423 wr = vm_object_sleep(object,
8424 VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS,
8425 interruptible,
8426 LCK_SLEEP_EXCLUSIVE);
8427 if (wr != THREAD_AWAKENED) {
8428 break;
8429 }
8430 }
8431 return wr;
8432 }
8433
8434 wait_result_t
8435 vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible)
8436 {
8437 wait_result_t wr = THREAD_NOT_WAITING;
8438 vm_object_lock_assert_exclusive(object);
8439 while (object->paging_in_progress != 0 ||
8440 object->activity_in_progress != 0) {
8441 wr = vm_object_sleep((object),
8442 VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
8443 interruptible,
8444 LCK_SLEEP_EXCLUSIVE);
8445 if (wr != THREAD_AWAKENED) {
8446 break;
8447 }
8448 }
8449 return wr;
8450 }
8451
8452 wait_result_t
8453 vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible)
8454 {
8455 wait_result_t wr = THREAD_NOT_WAITING;
8456 vm_object_lock_assert_exclusive(object);
8457 while (object->paging_in_progress != 0) {
8458 wr = vm_object_sleep(object,
8459 VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,
8460 interruptible,
8461 LCK_SLEEP_EXCLUSIVE);
8462 if (wr != THREAD_AWAKENED) {
8463 break;
8464 }
8465 }
8466 return wr;
8467 }
8468
8469 wait_result_t
8470 vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible)
8471 {
8472 wait_result_t wr = THREAD_NOT_WAITING;
8473 vm_object_lock_assert_exclusive(object);
8474 /*
8475 * TODO: consider raising the throttle limit specifically for
8476 * shared-cache objects, which are expected to be highly contended.
8477 * (rdar://127899888)
8478 */
8479 while (object->paging_in_progress >= vm_object_pagein_throttle) {
8480 wr = vm_object_sleep(object,
8481 VM_OBJECT_EVENT_PAGEIN_THROTTLE,
8482 interruptible,
8483 LCK_SLEEP_EXCLUSIVE);
8484 if (wr != THREAD_AWAKENED) {
8485 break;
8486 }
8487 }
8488 return wr;
8489 }
8490
8491 wait_result_t
8492 vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible)
8493 {
8494 wait_result_t wr = THREAD_NOT_WAITING;
8495 vm_object_lock_assert_exclusive(object);
8496 while (object->mapping_in_progress) {
8497 wr = vm_object_sleep(object,
8498 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,
8499 interruptible,
8500 LCK_SLEEP_EXCLUSIVE);
8501 if (wr != THREAD_AWAKENED) {
8502 break;
8503 }
8504 }
8505 return wr;
8506 }
8507
8508 void
8509 vm_object_wakeup(
8510 vm_object_t object,
8511 vm_object_wait_reason_t reason)
8512 {
8513 vm_object_lock_assert_exclusive(object);
8514 assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);
8515
8516 if (vm_object_wanted(object, reason)) {
8517 thread_wakeup((event_t)((uintptr_t)object + (uintptr_t)reason));
8518 }
8519 object->all_wanted &= ~(1 << reason);
8520 }
8521
8522
8523 void
8524 kdp_vm_page_sleep_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
8525 {
8526 vm_page_t m = (vm_page_t)wait_event;
8527 waitinfo->context = VM_KERNEL_ADDRPERM(m);
8528 /*
8529 * There is not currently a non-trivial way to identify the thread
8530 * holding a page busy.
8531 */
8532 waitinfo->owner = 0;
8533 }
8534
8535 #if PAGE_SLEEP_WITH_INHERITOR
8536 static wait_result_t vm_page_sleep_with_inheritor(lck_rw_t *lck, lck_sleep_action_t lck_sleep_action, event_t event, wait_interrupt_t interruptible);
8537 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8538
8539 wait_result_t
8540 vm_page_sleep(vm_object_t object, vm_page_t m, wait_interrupt_t interruptible, lck_sleep_action_t action)
8541 {
8542 wait_result_t ret;
8543
8544 KDBG_FILTERED((VMDBG_CODE(DBG_VM_PAGE_SLEEP)) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(object), m->vmp_offset, VM_KERNEL_ADDRHIDE(m));
8545 #if CONFIG_IOSCHED
8546 if (object->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
8547 /*
8548 * Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
8549 */
8550 vm_page_handle_prio_inversion(object, m);
8551 }
8552 #endif /* CONFIG_IOSCHED */
8553 m->vmp_wanted = TRUE;
8554 thread_set_pending_block_hint(current_thread(), kThreadWaitPageBusy);
8555 #if PAGE_SLEEP_WITH_INHERITOR
8556 ret = vm_page_sleep_with_inheritor(&object->Lock, action, (event_t)m, interruptible);
8557 #else
8558 ret = lck_rw_sleep(&object->Lock, LCK_SLEEP_PROMOTED_PRI | action, (event_t)m, interruptible);
8559 #endif
8560 KDBG_FILTERED((VMDBG_CODE(DBG_VM_PAGE_SLEEP)) | DBG_FUNC_END, VM_KERNEL_ADDRHIDE(object), m->vmp_offset, VM_KERNEL_ADDRHIDE(m));
8561 return ret;
8562 }
8563
8564 void
8565 vm_page_wakeup(vm_object_t object, vm_page_t m)
8566 {
8567 assert(m);
8568 /*
8569 * The page may have been freed from its object before this wakeup is issued
8570 */
8571 if (object != VM_OBJECT_NULL) {
8572 vm_object_lock_assert_exclusive(object);
8573 }
8574
8575 if (m->vmp_wanted) {
8576 KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP) | DBG_FUNC_NONE,
8577 VM_KERNEL_ADDRHIDE(object), m->vmp_offset,
8578 VM_KERNEL_ADDRHIDE(m));
8579 m->vmp_wanted = false;
8580 thread_wakeup((event_t)m);
8581 }
8582 }
8583
8584 void
8585 vm_page_wakeup_done(__assert_only vm_object_t object, vm_page_t m)
8586 {
8587 assert(object);
8588 assert(m->vmp_busy);
8589 vm_object_lock_assert_exclusive(object);
8590
8591 KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP_DONE) | DBG_FUNC_NONE,
8592 VM_KERNEL_ADDRHIDE(object), m->vmp_offset,
8593 VM_KERNEL_ADDRHIDE(m), m->vmp_wanted);
8594 m->vmp_busy = false;
8595 vm_page_wakeup(object, m);
8596 }
8597
8598 #if PAGE_SLEEP_WITH_INHERITOR
8599 static bool page_worker_unregister_worker(event_t event, thread_t expect_th, page_worker_token_t *token);
8600 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8601
8602 /* This function does everything vm_page_wakeup_done() does, and additionally handles the case where
8603 * we're being called from vm_fault_page() for a page that is possibly boosted due to being an inheritor. */
8604 void
8605 vm_page_wakeup_done_with_inheritor(vm_object_t object __unused, vm_page_t m, page_worker_token_t *token __unused)
8606 {
8607 #if PAGE_SLEEP_WITH_INHERITOR
8608 assert(object);
8609 assert(m->vmp_busy);
8610 vm_object_lock_assert_exclusive(object);
8611
8612 bool had_inheritor = page_worker_unregister_worker((event_t)m, current_thread(), token);
8613
8614 KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP_DONE) | DBG_FUNC_NONE,
8615 VM_KERNEL_ADDRHIDE(object), VM_KERNEL_ADDRHIDE(m),
8616 m->vmp_wanted, had_inheritor);
8617 m->vmp_busy = FALSE;
8618
8619 if (m->vmp_wanted) {
8620 m->vmp_wanted = FALSE;
8621 if (had_inheritor) {
8622 wakeup_all_with_inheritor((event_t)m, THREAD_AWAKENED);
8623 } else {
8624 thread_wakeup((event_t)m);
8625 }
8626 }
8627 #else /* PAGE_SLEEP_WITH_INHERITOR */
8628 vm_page_wakeup_done(object, m);
8629 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8630 }
8631
8632 #if PAGE_SLEEP_WITH_INHERITOR
8633
8634 /*
8635 * vm_page_sleep_with_inheritor:
8636 * The goal of this functionality is to prevent the priority inversion that can occur when a low-priority
8637 * thread is stuck in the compressor and a higher-priority thread waits for the same page.
8638 * Just before vm_fault_page() calls into the compressor, it calls page_worker_register_worker(),
8639 * which registers the calling thread as the "page worker" of this page.
8640 * When another thread then tries to vm_page_sleep() on that page (i.e. wait for it to un-busy), the worker is found
8641 * and, instead of a plain thread_block() (in lck_rw_sleep()), we do lck_rw_sleep_with_inheritor() and name the
8642 * registered worker thread as the inheritor of the priority boost.
8643 * The worker thread might have started its work at a low priority; once a waiter is added, it gets boosted.
8644 * When the worker is done getting the page, it calls vm_page_wakeup_done_with_inheritor() instead of
8645 * vm_page_wakeup_done(). This unregisters the thread, clears the page's busy bit (so that other threads can
8646 * use the page), and wakes up any waiters waiting for that page with wakeup_all_with_inheritor(), which
8647 * removes the priority boost.
8648 *
8649 * The worker registration is done in a simple single-entry-per-bucket hash table. A hash collision may occur
8650 * if two faulting pages end up in the same entry. In that case, the registration of the second one will
8651 * fail, and the only repercussion is that it would not get the possible boost if anyone waits
8652 * on it. This implementation was selected over a full hash table to keep it simple and fast.
8653 */
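/*
 * Lifecycle sketch (illustrative, simplified from the real call sites):
 *
 *	page_worker_token_t token;
 *	page_worker_register_worker((event_t)m, &token);	// worker thread, before compressor work
 *	... decompress / fill the page ...
 *	vm_page_wakeup_done_with_inheritor(object, m, &token);	// unregister, un-busy, wake waiters
 *
 * Meanwhile a thread that finds the page busy calls vm_page_sleep(), which
 * looks up the registered worker and, if one exists, sleeps with it as the
 * inheritor of the waiter's priority.
 */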
8654
8655 struct page_worker {
8656 lck_ticket_t pw_entry_lock;
8657 event_t pw_owner_event;
8658 thread_t pw_current_worker;
8659 };
8660
8661 SECURITY_READ_ONLY_LATE(uint32_t) page_worker_table_size = 0;
8662 SECURITY_READ_ONLY_LATE(static struct page_worker *)page_worker_table = NULL;
8663 SCALABLE_COUNTER_DEFINE(page_worker_hash_collisions);
8664 SCALABLE_COUNTER_DEFINE(page_worker_inheritor_sleeps);
8665
8666 LCK_GRP_DECLARE(page_worker_table_lock_grp, "page_worker_table_locks");
8667
8668 #define page_worker_entry_unlock(entry) \
8669 lck_ticket_unlock(&entry->pw_entry_lock);
8670
8671 #define PAGE_WORKER_TABLE_BUCKETS (256)
8672
8673 void
8674 page_worker_init(void)
8675 {
8676 page_worker_table_size = PAGE_WORKER_TABLE_BUCKETS;
8677 #if DEVELOPMENT || DEBUG
8678 PE_parse_boot_argn("page_worker_table_size", &page_worker_table_size, sizeof(page_worker_table_size));
8679 #endif /* DEVELOPMENT || DEBUG */
8680 /* This checks that the size is a positive power of 2, needed for the hash function */
8681 assert(page_worker_table_size > 0 && !(page_worker_table_size & (page_worker_table_size - 1)));
8682
8683 page_worker_table = zalloc_permanent(page_worker_table_size * sizeof(struct page_worker), ZALIGN_PTR);
8684 if (page_worker_table == NULL) {
8685 panic("Page events hash table memory allocation failed!");
8686 }
8687 for (uint32_t i = 0; i < page_worker_table_size; ++i) {
8688 struct page_worker* we = &(page_worker_table[i]);
8689 lck_ticket_init(&we->pw_entry_lock, &page_worker_table_lock_grp);
8690 }
8691 }
8692
8693 static struct page_worker *
8694 page_worker_lock_table_entry(event_t event)
8695 {
8696 if (page_worker_table == NULL) {
8697 return NULL;
8698 }
8699 uint32_t hash = os_hash_kernel_pointer((void *)event);
8700 uint32_t index = hash & (page_worker_table_size - 1);
8701
8702 struct page_worker *entry = &page_worker_table[index];
8703
8704 lck_ticket_lock(&entry->pw_entry_lock, &page_worker_table_lock_grp);
8705 return entry;
8706 }
8707
8708 /* returns a locked entry if found or added, otherwise returns NULL */
8709 static struct page_worker *
8710 page_worker_lookup(event_t event, bool try_add_missing)
8711 {
8712 assert(event != NULL);
8713 struct page_worker *entry = page_worker_lock_table_entry(event);
8714 if (entry == NULL) {
8715 /* table not initialized */
8716 return NULL;
8717 }
8718 if (entry->pw_owner_event == event) {
8719 /* found existing entry and it belongs to this event */
8720 return entry;
8721 }
8722
8723 if (try_add_missing) {
8724 if (entry->pw_owner_event == NULL) {
8725 /* found empty entry, take over it */
8726 entry->pw_owner_event = event;
8727 return entry;
8728 }
8729 /* didn't find the event, need to add it, but can't because it's occupied */
8730 counter_inc(&page_worker_hash_collisions);
8731 }
8732 page_worker_entry_unlock(entry);
8733 return NULL;
8734 }
8735
8736 /* registers current_thread() as the worker for 'event'; out_token records whether the registration succeeded */
8737 void
8738 page_worker_register_worker(event_t event __unused, page_worker_token_t *out_token)
8739 {
8740 out_token->pwt_did_register_inheritor = false;
8741 out_token->pwt_floor_token.thread = THREAD_NULL;
8742
8743 struct page_worker* entry = page_worker_lookup(event, TRUE);
8744 if (entry == NULL) {
8745 /* failed registration due to a hash collision */
8746 out_token->pwt_floor_token = thread_priority_floor_start();
8747 return;
8748 }
8749 entry->pw_current_worker = current_thread();
8750 /* no need to take a thread reference because this is going to get cleared in the same call to vm_fault_page() */
8751 page_worker_entry_unlock(entry);
8752 out_token->pwt_did_register_inheritor = true;
8753 }
8754
8755 static bool
8756 page_worker_unregister_worker(event_t event, thread_t expect_th __unused, page_worker_token_t *token)
8757 {
8758 struct page_worker *entry = page_worker_lookup(event, FALSE);
8759 if (entry == NULL) {
8760 assert(!token->pwt_did_register_inheritor);
8761 /* did we do thread_priority_floor_start() ? */
8762 if (token->pwt_floor_token.thread != THREAD_NULL) {
8763 thread_priority_floor_end(&token->pwt_floor_token);
8764 }
8765 return false;
8766 }
8767 assert(token->pwt_did_register_inheritor);
8768 assert(token->pwt_floor_token.thread == THREAD_NULL); /* we shouldn't have done thread_priority_floor_start() */
8769 assert(entry->pw_owner_event != 0);
8770 assert(entry->pw_current_worker == expect_th);
8771 entry->pw_owner_event = 0;
8772 entry->pw_current_worker = THREAD_NULL;
8773 page_worker_entry_unlock(entry); /* was locked in page_worker_lookup() */
8774 return true;
8775 }
8776
8777 static wait_result_t
8778 vm_page_sleep_with_inheritor(lck_rw_t *lck, lck_sleep_action_t action, event_t event, wait_interrupt_t interruptible)
8779 {
8780 struct page_worker *entry = page_worker_lookup(event, FALSE);
8781 thread_t inheritor = THREAD_NULL;
8782 if (entry != NULL) {
8783 inheritor = entry->pw_current_worker;
8784 page_worker_entry_unlock(entry);
8785 }
8786
8787 wait_result_t ret;
8788 if (inheritor == THREAD_NULL) {
8789 /* no worker was found */
8790 ret = lck_rw_sleep(lck, LCK_SLEEP_PROMOTED_PRI | action, event, interruptible);
8791 } else {
8792 counter_inc(&page_worker_inheritor_sleeps);
8793 ret = lck_rw_sleep_with_inheritor(lck, action, event, inheritor, interruptible, TIMEOUT_WAIT_FOREVER);
8794 }
8795
8796 return ret;
8797 }
8798 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8799
8800 static void
8801 io_reprioritize(mpsc_queue_chain_t elm, __assert_only mpsc_daemon_queue_t dq)
8802 {
8803 assert3p(dq, ==, &io_reprioritize_q);
8804 io_reprioritize_req_t req = mpsc_queue_element(elm, struct io_reprioritize_req, iorr_elm);
8805 vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
8806 zfree(io_reprioritize_req_zone, req);
8807 }
8808
8809 #endif /* CONFIG_IOSCHED */
8810
8811 #if VM_OBJECT_ACCESS_TRACKING
8812 void
8813 vm_object_access_tracking(
8814 vm_object_t object,
8815 int *access_tracking_p,
8816 uint32_t *access_tracking_reads_p,
8817 uint32_t *access_tracking_writes_p)
8818 {
8819 int access_tracking;
8820
8821 access_tracking = !!*access_tracking_p;
8822
8823 vm_object_lock(object);
8824 *access_tracking_p = object->access_tracking;
8825 if (access_tracking_reads_p) {
8826 *access_tracking_reads_p = object->access_tracking_reads;
8827 }
8828 if (access_tracking_writes_p) {
8829 *access_tracking_writes_p = object->access_tracking_writes;
8830 }
8831 object->access_tracking = access_tracking;
8832 object->access_tracking_reads = 0;
8833 object->access_tracking_writes = 0;
8834 vm_object_unlock(object);
8835
8836 if (access_tracking) {
8837 vm_object_pmap_protect_options(object,
8838 0,
8839 object->vo_size,
8840 PMAP_NULL,
8841 PAGE_SIZE,
8842 0,
8843 VM_PROT_NONE,
8844 0);
8845 }
8846 }
8847 #endif /* VM_OBJECT_ACCESS_TRACKING */
8848
8849 void
8850 vm_object_ledger_tag_ledgers(
8851 vm_object_t object,
8852 int *ledger_idx_volatile,
8853 int *ledger_idx_nonvolatile,
8854 int *ledger_idx_volatile_compressed,
8855 int *ledger_idx_nonvolatile_compressed,
8856 int *ledger_idx_composite,
8857 int *ledger_idx_external_wired,
8858 boolean_t *do_footprint)
8859 {
8860 assert(object->shadow == VM_OBJECT_NULL);
8861
8862 *ledger_idx_volatile = -1;
8863 *ledger_idx_nonvolatile = -1;
8864 *ledger_idx_volatile_compressed = -1;
8865 *ledger_idx_nonvolatile_compressed = -1;
8866 *ledger_idx_composite = -1;
8867 *ledger_idx_external_wired = -1;
8868 *do_footprint = !object->vo_no_footprint;
8869
8870 if (!object->internal) {
8871 switch (object->vo_ledger_tag) {
8872 case VM_LEDGER_TAG_DEFAULT:
8873 if (*do_footprint) {
8874 *ledger_idx_external_wired = task_ledgers.tagged_footprint;
8875 } else {
8876 *ledger_idx_external_wired = task_ledgers.tagged_nofootprint;
8877 }
8878 break;
8879 case VM_LEDGER_TAG_NETWORK:
8880 *do_footprint = FALSE;
8881 *ledger_idx_external_wired = task_ledgers.network_nonvolatile;
8882 break;
8883 case VM_LEDGER_TAG_MEDIA:
8884 if (*do_footprint) {
8885 *ledger_idx_external_wired = task_ledgers.media_footprint;
8886 } else {
8887 *ledger_idx_external_wired = task_ledgers.media_nofootprint;
8888 }
8889 break;
8890 case VM_LEDGER_TAG_GRAPHICS:
8891 if (*do_footprint) {
8892 *ledger_idx_external_wired = task_ledgers.graphics_footprint;
8893 } else {
8894 *ledger_idx_external_wired = task_ledgers.graphics_nofootprint;
8895 }
8896 break;
8897 case VM_LEDGER_TAG_NEURAL:
8898 *ledger_idx_composite = task_ledgers.neural_nofootprint_total;
8899 if (*do_footprint) {
8900 *ledger_idx_external_wired = task_ledgers.neural_footprint;
8901 } else {
8902 *ledger_idx_external_wired = task_ledgers.neural_nofootprint;
8903 }
8904 break;
8905 case VM_LEDGER_TAG_NONE:
8906 default:
8907 panic("%s: external object %p has unsupported ledger_tag %d",
8908 __FUNCTION__, object, object->vo_ledger_tag);
8909 }
8910 return;
8911 }
8912
8913 assert(object->internal);
8914 switch (object->vo_ledger_tag) {
8915 case VM_LEDGER_TAG_NONE:
8916 /*
8917 * Regular purgeable memory:
8918 * counts in footprint only when nonvolatile.
8919 */
8920 *do_footprint = TRUE;
8921 assert(object->purgable != VM_PURGABLE_DENY);
8922 *ledger_idx_volatile = task_ledgers.purgeable_volatile;
8923 *ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
8924 *ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
8925 *ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
8926 break;
8927 case VM_LEDGER_TAG_DEFAULT:
8928 /*
8929 * "default" tagged memory:
8930 * counts in footprint only when nonvolatile and not marked
8931 * as "no_footprint".
8932 */
8933 *ledger_idx_volatile = task_ledgers.tagged_nofootprint;
8934 *ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8935 if (*do_footprint) {
8936 *ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
8937 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
8938 } else {
8939 *ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
8940 *ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8941 }
8942 break;
8943 case VM_LEDGER_TAG_NETWORK:
8944 /*
8945 * "network" tagged memory:
8946 * never counts in footprint.
8947 */
8948 *do_footprint = FALSE;
8949 *ledger_idx_volatile = task_ledgers.network_volatile;
8950 *ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
8951 *ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
8952 *ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
8953 break;
8954 case VM_LEDGER_TAG_MEDIA:
8955 /*
8956 * "media" tagged memory:
8957 * counts in footprint only when nonvolatile and not marked
8958 * as "no footprint".
8959 */
8960 *ledger_idx_volatile = task_ledgers.media_nofootprint;
8961 *ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
8962 if (*do_footprint) {
8963 *ledger_idx_nonvolatile = task_ledgers.media_footprint;
8964 *ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
8965 } else {
8966 *ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
8967 *ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
8968 }
8969 break;
8970 case VM_LEDGER_TAG_GRAPHICS:
8971 /*
8972 * "graphics" tagged memory:
8973 * counts in footprint only when nonvolatile and not marked
8974 * as "no footprint".
8975 */
8976 *ledger_idx_volatile = task_ledgers.graphics_nofootprint;
8977 *ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8978 if (*do_footprint) {
8979 *ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
8980 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
8981 } else {
8982 *ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
8983 *ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8984 }
8985 break;
	case VM_LEDGER_TAG_NEURAL:
		/*
		 * "neural" tagged memory:
		 * counts in footprint only when nonvolatile and not marked
		 * as "no footprint"; when it does not count in the footprint,
		 * it is tracked in the composite "neural_nofootprint_total"
		 * ledger instead.
		 */
		*ledger_idx_composite = task_ledgers.neural_nofootprint_total;
		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
		if (*do_footprint) {
			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
		} else {
			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
		}
		break;
	default:
		panic("%s: object %p has unsupported ledger_tag %d",
		    __FUNCTION__, object, object->vo_ledger_tag);
	}
}

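/*
 * Illustrative sketch, not part of the original source: how a caller is
 * expected to consume the ledger indices produced by
 * vm_object_ledger_tag_ledgers() above.  The helper name and its "npages"
 * parameter are hypothetical; the ledger arithmetic mirrors the credit path
 * in vm_object_ownership_change() below.
 */
#if 0 /* example only, never compiled */
static void
sketch_charge_resident_pages(
	vm_object_t     object,
	task_t          owner,
	unsigned int    npages)
{
	int             ledger_idx_volatile;
	int             ledger_idx_nonvolatile;
	int             ledger_idx_volatile_compressed;
	int             ledger_idx_nonvolatile_compressed;
	int             ledger_idx_composite;
	int             ledger_idx_external_wired;
	int             ledger_idx;
	boolean_t       do_footprint;

	/* ask which ledgers this object's tag maps to */
	vm_object_ledger_tag_ledgers(object,
	    &ledger_idx_volatile,
	    &ledger_idx_nonvolatile,
	    &ledger_idx_volatile_compressed,
	    &ledger_idx_nonvolatile_compressed,
	    &ledger_idx_composite,
	    &ledger_idx_external_wired,
	    &do_footprint);

	/* volatile and "empty" objects are charged to the volatile ledger */
	if (object->purgable == VM_PURGABLE_VOLATILE ||
	    object->purgable == VM_PURGABLE_EMPTY) {
		ledger_idx = ledger_idx_volatile;
	} else {
		ledger_idx = ledger_idx_nonvolatile;
	}
	ledger_credit(owner->ledger, ledger_idx, ptoa_64(npages));

	/* nonvolatile pages may additionally count in the footprint ... */
	if (object->purgable != VM_PURGABLE_VOLATILE &&
	    object->purgable != VM_PURGABLE_EMPTY) {
		if (do_footprint) {
			ledger_credit(owner->ledger,
			    task_ledgers.phys_footprint,
			    ptoa_64(npages));
		} else if (ledger_idx_composite != -1) {
			/* ... or in a composite "nofootprint total" ledger */
			ledger_credit(owner->ledger,
			    ledger_idx_composite,
			    ptoa_64(npages));
		}
	}
}
#endif
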
kern_return_t
vm_object_ownership_change(
	vm_object_t     object,
	int             new_ledger_tag,
	task_t          new_owner,
	int             new_ledger_flags,
	boolean_t       old_task_objq_locked)
{
	int             old_ledger_tag;
	task_t          old_owner;
	int             resident_count, wired_count;
	unsigned int    compressed_count;
	int             ledger_idx_volatile;
	int             ledger_idx_nonvolatile;
	int             ledger_idx_volatile_compressed;
	int             ledger_idx_nonvolatile_compressed;
	int             ledger_idx;
	int             ledger_idx_compressed;
	int             ledger_idx_composite;
	int             ledger_idx_external_wired;
	boolean_t       do_footprint, old_no_footprint, new_no_footprint;
	boolean_t       new_task_objq_locked;

	vm_object_lock_assert_exclusive(object);

	if (new_owner != VM_OBJECT_OWNER_DISOWNED &&
	    new_owner != TASK_NULL) {
		if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
		    object->purgable == VM_PURGABLE_DENY) {
			/* non-purgeable memory must have a valid non-zero ledger tag */
			return KERN_INVALID_ARGUMENT;
		}
		if (!object->internal &&
		    !memory_object_is_vnode_pager(object->pager)) {
			/* non-file-backed "external" objects can't be owned */
			return KERN_INVALID_ARGUMENT;
		}
	}
	if (new_owner == VM_OBJECT_OWNER_UNCHANGED) {
		/* leave owner unchanged */
		new_owner = VM_OBJECT_OWNER(object);
	}
	if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
		/* leave ledger_tag unchanged */
		new_ledger_tag = object->vo_ledger_tag;
	}
	if (new_ledger_tag < 0 ||
	    new_ledger_tag > VM_LEDGER_TAG_MAX) {
		return KERN_INVALID_ARGUMENT;
	}
	if (new_ledger_flags & ~VM_LEDGER_FLAGS_ALL) {
		return KERN_INVALID_ARGUMENT;
	}
	if (object->internal &&
	    object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
	    object->purgable == VM_PURGABLE_DENY) {
		/*
		 * This VM object is neither ledger-tagged nor purgeable.
		 * We can convert it to "ledger tag" ownership iff it
		 * has not been used at all yet (no resident pages and
		 * no pager) and it's going to be assigned to a valid task.
		 */
		if (object->resident_page_count != 0 ||
		    object->pager != NULL ||
		    object->pager_created ||
		    os_ref_get_count_raw(&object->ref_count) != 1 ||
		    object->vo_owner != TASK_NULL ||
		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
		    new_owner == TASK_NULL) {
			return KERN_FAILURE;
		}
	}

	if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
		new_no_footprint = TRUE;
	} else {
		new_no_footprint = FALSE;
	}
#if __arm64__
	if (!new_no_footprint &&
	    object->purgable != VM_PURGABLE_DENY &&
	    new_owner != TASK_NULL &&
	    new_owner != VM_OBJECT_OWNER_DISOWNED &&
	    new_owner->task_legacy_footprint) {
		/*
		 * This task has been granted "legacy footprint" and should
		 * not be charged for its IOKit purgeable memory.  Since we
		 * might now change the accounting of such memory to the
		 * "graphics" ledger, for example, give it the "no footprint"
		 * option.
		 */
		new_no_footprint = TRUE;
	}
#endif /* __arm64__ */
	assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
	assert(object->shadow == VM_OBJECT_NULL);
	if (object->internal) {
		assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
		assert(object->vo_copy == VM_OBJECT_NULL);
	}

	old_ledger_tag = object->vo_ledger_tag;
	old_no_footprint = object->vo_no_footprint;
	old_owner = VM_OBJECT_OWNER(object);

	if (__improbable(vm_debug_events)) {
		DTRACE_VM8(object_ownership_change,
		    vm_object_t, object,
		    task_t, old_owner,
		    int, old_ledger_tag,
		    int, old_no_footprint,
		    task_t, new_owner,
		    int, new_ledger_tag,
		    int, new_no_footprint,
		    int, VM_OBJECT_ID(object));
	}

	resident_count = object->resident_page_count - object->wired_page_count;
	wired_count = object->wired_page_count;
	if (object->internal) {
		compressed_count = vm_compressor_pager_get_count(object->pager);
	} else {
		compressed_count = 0;
	}

	/*
	 * Deal with the old owner and/or ledger tag, if needed.
	 */
	if (old_owner != TASK_NULL &&
	    ((old_owner != new_owner)                   /* new owner ... */
	    ||                                          /* ... or ... */
	    (old_no_footprint != new_no_footprint)      /* new "no_footprint" */
	    ||                                          /* ... or ... */
	    old_ledger_tag != new_ledger_tag)) {        /* ... new ledger */
		/*
		 * Take this object off of the old owner's ledgers.
		 */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &ledger_idx_composite,
		    &ledger_idx_external_wired,
		    &do_footprint);
		if (object->internal) {
			if (object->purgable == VM_PURGABLE_VOLATILE ||
			    object->purgable == VM_PURGABLE_EMPTY) {
				ledger_idx = ledger_idx_volatile;
				ledger_idx_compressed = ledger_idx_volatile_compressed;
			} else {
				ledger_idx = ledger_idx_nonvolatile;
				ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
			}
			if (resident_count) {
				/*
				 * Adjust the appropriate old owner's ledgers by the
				 * number of resident pages.
				 */
				ledger_debit(old_owner->ledger,
				    ledger_idx,
				    ptoa_64(resident_count));
				/* adjust old owner's footprint */
				if (object->purgable != VM_PURGABLE_VOLATILE &&
				    object->purgable != VM_PURGABLE_EMPTY) {
					if (do_footprint) {
						ledger_debit(old_owner->ledger,
						    task_ledgers.phys_footprint,
						    ptoa_64(resident_count));
					} else if (ledger_idx_composite != -1) {
						ledger_debit(old_owner->ledger,
						    ledger_idx_composite,
						    ptoa_64(resident_count));
					}
				}
			}
			if (wired_count) {
				/* wired pages are always nonvolatile */
				ledger_debit(old_owner->ledger,
				    ledger_idx_nonvolatile,
				    ptoa_64(wired_count));
				if (do_footprint) {
					ledger_debit(old_owner->ledger,
					    task_ledgers.phys_footprint,
					    ptoa_64(wired_count));
				} else if (ledger_idx_composite != -1) {
					ledger_debit(old_owner->ledger,
					    ledger_idx_composite,
					    ptoa_64(wired_count));
				}
			}
			if (compressed_count) {
				/*
				 * Adjust the appropriate old owner's ledgers
				 * by the number of compressed pages.
				 */
				ledger_debit(old_owner->ledger,
				    ledger_idx_compressed,
				    ptoa_64(compressed_count));
				if (object->purgable != VM_PURGABLE_VOLATILE &&
				    object->purgable != VM_PURGABLE_EMPTY) {
					if (do_footprint) {
						ledger_debit(old_owner->ledger,
						    task_ledgers.phys_footprint,
						    ptoa_64(compressed_count));
					} else if (ledger_idx_composite != -1) {
						ledger_debit(old_owner->ledger,
						    ledger_idx_composite,
						    ptoa_64(compressed_count));
					}
				}
			}
		} else {
			/* external but owned object: count wired pages */
			if (wired_count) {
				ledger_debit(old_owner->ledger,
				    ledger_idx_external_wired,
				    ptoa_64(wired_count));
				if (do_footprint) {
					ledger_debit(old_owner->ledger,
					    task_ledgers.phys_footprint,
					    ptoa_64(wired_count));
				} else if (ledger_idx_composite != -1) {
					ledger_debit(old_owner->ledger,
					    ledger_idx_composite,
					    ptoa_64(wired_count));
				}
			}
		}
		if (old_owner != new_owner) {
			/* remove object from old_owner's list of owned objects */
			DTRACE_VM2(object_owner_remove,
			    vm_object_t, object,
			    task_t, old_owner);
			if (!old_task_objq_locked) {
				task_objq_lock(old_owner);
			}
			old_owner->task_owned_objects--;
			queue_remove(&old_owner->task_objq, object,
			    vm_object_t, task_objq);
			switch (object->purgable) {
			case VM_PURGABLE_NONVOLATILE:
			case VM_PURGABLE_EMPTY:
				vm_purgeable_nonvolatile_owner_update(old_owner,
				    -1);
				break;
			case VM_PURGABLE_VOLATILE:
				vm_purgeable_volatile_owner_update(old_owner,
				    -1);
				break;
			default:
				break;
			}
			if (!old_task_objq_locked) {
				task_objq_unlock(old_owner);
			}
		}
	}

	/*
	 * Switch to new ledger tag and/or owner.
	 */

	new_task_objq_locked = FALSE;
	if (new_owner != old_owner &&
	    new_owner != TASK_NULL &&
	    new_owner != VM_OBJECT_OWNER_DISOWNED) {
		/*
		 * If the new owner is not accepting new objects ("disowning"),
		 * the object becomes "disowned" and will be added to
		 * the kernel's task_objq.
		 *
		 * Check first without locking, to avoid blocking while the
		 * task is disowning its objects.
		 */
		if (new_owner->task_objects_disowning) {
			new_owner = VM_OBJECT_OWNER_DISOWNED;
		} else {
			task_objq_lock(new_owner);
			/* check again now that we have the lock */
			if (new_owner->task_objects_disowning) {
				new_owner = VM_OBJECT_OWNER_DISOWNED;
				task_objq_unlock(new_owner);
			} else {
				new_task_objq_locked = TRUE;
			}
		}
	}

	object->vo_ledger_tag = new_ledger_tag;
	object->vo_owner = new_owner;
	object->vo_no_footprint = new_no_footprint;

	if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
		/*
		 * Disowned objects are added to the kernel's task_objq but
		 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
		 * differentiate them from objects intentionally owned by
		 * the kernel.
		 */
		assert(old_owner != kernel_task);
		new_owner = kernel_task;
		assert(!new_task_objq_locked);
		task_objq_lock(new_owner);
		new_task_objq_locked = TRUE;
	}

	/*
	 * Deal with the new owner and/or ledger tag, if needed.
	 */
	if (new_owner != TASK_NULL &&
	    ((new_owner != old_owner)                   /* new owner ... */
	    ||                                          /* ... or ... */
	    (new_no_footprint != old_no_footprint)      /* ... new "no_footprint" */
	    ||                                          /* ... or ... */
	    new_ledger_tag != old_ledger_tag)) {        /* ... new ledger */
		/*
		 * Add this object to the new owner's ledgers.
		 */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &ledger_idx_composite,
		    &ledger_idx_external_wired,
		    &do_footprint);
		if (object->internal) {
			if (object->purgable == VM_PURGABLE_VOLATILE ||
			    object->purgable == VM_PURGABLE_EMPTY) {
				ledger_idx = ledger_idx_volatile;
				ledger_idx_compressed = ledger_idx_volatile_compressed;
			} else {
				ledger_idx = ledger_idx_nonvolatile;
				ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
			}
			if (resident_count) {
				/*
				 * Adjust the appropriate new owner's ledgers by the
				 * number of resident pages.
				 */
				ledger_credit(new_owner->ledger,
				    ledger_idx,
				    ptoa_64(resident_count));
				/* adjust new owner's footprint */
				if (object->purgable != VM_PURGABLE_VOLATILE &&
				    object->purgable != VM_PURGABLE_EMPTY) {
					if (do_footprint) {
						ledger_credit(new_owner->ledger,
						    task_ledgers.phys_footprint,
						    ptoa_64(resident_count));
					} else if (ledger_idx_composite != -1) {
						ledger_credit(new_owner->ledger,
						    ledger_idx_composite,
						    ptoa_64(resident_count));
					}
				}
			}
			if (wired_count) {
				/* wired pages are always nonvolatile */
				ledger_credit(new_owner->ledger,
				    ledger_idx_nonvolatile,
				    ptoa_64(wired_count));
				if (do_footprint) {
					ledger_credit(new_owner->ledger,
					    task_ledgers.phys_footprint,
					    ptoa_64(wired_count));
				} else if (ledger_idx_composite != -1) {
					ledger_credit(new_owner->ledger,
					    ledger_idx_composite,
					    ptoa_64(wired_count));
				}
			}
			if (compressed_count) {
				/*
				 * Adjust the new owner's ledgers by the number of
				 * compressed pages.
				 */
				ledger_credit(new_owner->ledger,
				    ledger_idx_compressed,
				    ptoa_64(compressed_count));
				if (object->purgable != VM_PURGABLE_VOLATILE &&
				    object->purgable != VM_PURGABLE_EMPTY) {
					if (do_footprint) {
						ledger_credit(new_owner->ledger,
						    task_ledgers.phys_footprint,
						    ptoa_64(compressed_count));
					} else if (ledger_idx_composite != -1) {
						ledger_credit(new_owner->ledger,
						    ledger_idx_composite,
						    ptoa_64(compressed_count));
					}
				}
			}
		} else {
			/* external but owned object: count wired pages */
			if (wired_count) {
				ledger_credit(new_owner->ledger,
				    ledger_idx_external_wired,
				    ptoa_64(wired_count));
				if (do_footprint) {
					ledger_credit(new_owner->ledger,
					    task_ledgers.phys_footprint,
					    ptoa_64(wired_count));
				} else if (ledger_idx_composite != -1) {
					ledger_credit(new_owner->ledger,
					    ledger_idx_composite,
					    ptoa_64(wired_count));
				}
			}
		}
		if (new_owner != old_owner) {
			/* add object to new_owner's list of owned objects */
			DTRACE_VM2(object_owner_add,
			    vm_object_t, object,
			    task_t, new_owner);
			assert(new_task_objq_locked);
			new_owner->task_owned_objects++;
			queue_enter(&new_owner->task_objq, object,
			    vm_object_t, task_objq);
			switch (object->purgable) {
			case VM_PURGABLE_NONVOLATILE:
			case VM_PURGABLE_EMPTY:
				vm_purgeable_nonvolatile_owner_update(new_owner,
				    +1);
				break;
			case VM_PURGABLE_VOLATILE:
				vm_purgeable_volatile_owner_update(new_owner,
				    +1);
				break;
			default:
				break;
			}
		}
	}

	if (new_task_objq_locked) {
		task_objq_unlock(new_owner);
	}

	return KERN_SUCCESS;
}

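/*
 * Illustrative sketch, not part of the original source: a minimal calling
 * sequence for vm_object_ownership_change().  The helper name and its
 * "new_task" parameter are hypothetical; the locking and argument
 * conventions follow vm_owned_objects_disown() below.
 */
#if 0 /* example only, never compiled */
static kern_return_t
sketch_retag_object(
	vm_object_t     object,
	task_t          new_task)
{
	kern_return_t   kr;

	/* the object lock must be held exclusively across the call */
	vm_object_lock(object);
	kr = vm_object_ownership_change(
		object,
		VM_LEDGER_TAG_MEDIA,            /* charge the "media" ledgers */
		new_task,                       /* new owning task */
		VM_LEDGER_FLAG_NO_FOOTPRINT,    /* keep it out of phys_footprint */
		FALSE);                         /* old owner's task_objq not locked */
	vm_object_unlock(object);
	return kr;
}
#endif
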
void
vm_owned_objects_disown(
	task_t          task)
{
	vm_object_t     next_object;
	vm_object_t     object;
	int             collisions;
	kern_return_t   kr;

	if (task == NULL) {
		return;
	}

	collisions = 0;

again:
	if (task->task_objects_disowned) {
		/* task has already disowned its owned objects */
		assert(task->task_volatile_objects == 0);
		assert(task->task_nonvolatile_objects == 0);
		assert(task->task_owned_objects == 0);
		return;
	}

	task_objq_lock(task);

	task->task_objects_disowning = TRUE;

	for (object = (vm_object_t) queue_first(&task->task_objq);
	    !queue_end(&task->task_objq, (queue_entry_t) object);
	    object = next_object) {
		if (task->task_nonvolatile_objects == 0 &&
		    task->task_volatile_objects == 0 &&
		    task->task_owned_objects == 0) {
			/* no more objects owned by "task" */
			break;
		}

		next_object = (vm_object_t) queue_next(&object->task_objq);

#if DEBUG
		assert(object->vo_purgeable_volatilizer == NULL);
#endif /* DEBUG */
		assert(object->vo_owner == task);
		if (!vm_object_lock_try(object)) {
			task_objq_unlock(task);
			mutex_pause(collisions++);
			goto again;
		}
		/* transfer ownership to the kernel */
		assert(VM_OBJECT_OWNER(object) != kernel_task);
		kr = vm_object_ownership_change(
			object,
			object->vo_ledger_tag,          /* unchanged */
			VM_OBJECT_OWNER_DISOWNED,       /* new owner */
			0,                              /* new_ledger_flags */
			TRUE);                          /* old_owner->task_objq locked */
		assert(kr == KERN_SUCCESS);
		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
		vm_object_unlock(object);
	}

	if (__improbable(task->task_owned_objects != 0)) {
		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
		    __FUNCTION__,
		    task,
		    task->task_volatile_objects,
		    task->task_nonvolatile_objects,
		    task->task_owned_objects,
		    &task->task_objq,
		    queue_first(&task->task_objq),
		    queue_last(&task->task_objq));
	}

	/* there shouldn't be any objects owned by task now */
	assert(task->task_volatile_objects == 0);
	assert(task->task_nonvolatile_objects == 0);
	assert(task->task_owned_objects == 0);
	assert(task->task_objects_disowning);

	/* and we don't need to try and disown again */
	task->task_objects_disowned = TRUE;

	task_objq_unlock(task);
}

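/*
 * Illustrative sketch, not part of the original source: the try-lock /
 * back-off / retry pattern used above to take an object lock while holding
 * the task_objq lock without risking a lock-ordering deadlock.  The helper
 * name is hypothetical.
 */
#if 0 /* example only, never compiled */
static void
sketch_lock_owned_object(
	task_t          task,
	vm_object_t     object)
{
	int             collisions = 0;

retry:
	task_objq_lock(task);
	if (!vm_object_lock_try(object)) {
		/*
		 * Can't sleep for the object lock while holding the
		 * task_objq lock: drop the outer lock, pause (backing off
		 * longer on each collision) and start over.
		 */
		task_objq_unlock(task);
		mutex_pause(collisions++);
		goto retry;
	}
	/* ... both locks held here ... */
	vm_object_unlock(object);
	task_objq_unlock(task);
}
#endif
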
void
vm_object_wired_page_update_ledgers(
	vm_object_t     object,
	int64_t         wired_delta)
{
	task_t          owner;

	vm_object_lock_assert_exclusive(object);
	if (wired_delta == 0) {
		/* no change in number of wired pages */
		return;
	}
	if (object->internal) {
		/* no extra accounting needed for internal objects */
		return;
	}
	if (!object->vo_ledger_tag) {
		/* external object without a ledger tag: no extra accounting */
		return;
	}

	/*
	 * For an explicitly-owned external VM object, account for
	 * wired pages in one of the owner's ledgers.
	 */
	owner = VM_OBJECT_OWNER(object);
	if (owner) {
		int             ledger_idx_volatile;
		int             ledger_idx_nonvolatile;
		int             ledger_idx_volatile_compressed;
		int             ledger_idx_nonvolatile_compressed;
		int             ledger_idx_composite;
		int             ledger_idx_external_wired;
		boolean_t       do_footprint;

		/* ask which ledgers need an update */
		vm_object_ledger_tag_ledgers(object,
		    &ledger_idx_volatile,
		    &ledger_idx_nonvolatile,
		    &ledger_idx_volatile_compressed,
		    &ledger_idx_nonvolatile_compressed,
		    &ledger_idx_composite,
		    &ledger_idx_external_wired,
		    &do_footprint);
		if (wired_delta > 0) {
			/* more external wired bytes */
			ledger_credit(owner->ledger,
			    ledger_idx_external_wired,
			    ptoa(wired_delta));
			if (do_footprint) {
				/* more footprint */
				ledger_credit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa(wired_delta));
			} else if (ledger_idx_composite != -1) {
				ledger_credit(owner->ledger,
				    ledger_idx_composite,
				    ptoa(wired_delta));
			}
		} else {
			/* less external wired bytes */
			ledger_debit(owner->ledger,
			    ledger_idx_external_wired,
			    ptoa(-wired_delta));
			if (do_footprint) {
				/* less footprint */
				ledger_debit(owner->ledger,
				    task_ledgers.phys_footprint,
				    ptoa(-wired_delta));
			} else if (ledger_idx_composite != -1) {
				ledger_debit(owner->ledger,
				    ledger_idx_composite,
				    ptoa(-wired_delta));
			}
		}
	}
}

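/*
 * Illustrative sketch, not part of the original source: how a wiring path
 * might report a change in an external object's wired page count.  The
 * helper name and its parameters are hypothetical; the real wiring code
 * lives elsewhere in the VM subsystem.  Internal objects and untagged
 * external objects are no-ops, as enforced above.
 */
#if 0 /* example only, never compiled */
static void
sketch_account_wire_change(
	vm_object_t     object,
	unsigned int    npages,
	boolean_t       wiring)
{
	vm_object_lock_assert_exclusive(object);

	/* positive delta when wiring pages, negative when unwiring them */
	vm_object_wired_page_update_ledgers(object,
	    wiring ? (int64_t)npages : -(int64_t)npages);
}
#endif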