1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_page.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Resident memory management module.
63 */
64
65 #include <debug.h>
66 #include <libkern/OSDebug.h>
67
68 #include <mach/clock_types.h>
69 #include <mach/vm_prot.h>
70 #include <mach/vm_statistics.h>
71 #include <mach/sdt.h>
72 #include <kern/counter.h>
73 #include <kern/host_statistics.h>
74 #include <kern/sched_prim.h>
75 #include <kern/policy_internal.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/kalloc.h>
79 #include <kern/zalloc_internal.h>
80 #include <kern/ledger.h>
81 #include <kern/ecc.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_init_xnu.h>
84 #include <vm/vm_map_internal.h>
85 #include <vm/vm_page_internal.h>
86 #include <vm/vm_pageout_internal.h>
87 #include <vm/vm_kern_xnu.h> /* kmem_alloc() */
88 #include <vm/vm_compressor_pager_internal.h>
89 #include <kern/misc_protos.h>
90 #include <mach_debug/zone_info.h>
91 #include <vm/cpm_internal.h>
92 #include <pexpert/pexpert.h>
93 #include <pexpert/device_tree.h>
94 #include <san/kasan.h>
95 #include <os/log.h>
96
97 #include <libkern/coreanalytics/coreanalytics.h>
98 #include <kern/backtrace.h>
99 #include <kern/telemetry.h>
100
101 #include <vm/vm_protos_internal.h>
102 #include <vm/memory_object.h>
103 #include <vm/vm_purgeable_internal.h>
104 #include <vm/vm_compressor_internal.h>
105 #include <vm/vm_iokit.h>
106 #include <vm/vm_object_internal.h>
107
108
109 #if defined (__x86_64__)
110 #include <i386/misc_protos.h>
111 #endif
112
113 #if CONFIG_SPTM
114 #include <arm64/sptm/sptm.h>
115 #endif
116
117 #if CONFIG_PHANTOM_CACHE
118 #include <vm/vm_phantom_cache_internal.h>
119 #endif
120
121 #if HIBERNATION
122 #include <IOKit/IOHibernatePrivate.h>
123 #include <machine/pal_hibernate.h>
124 #endif /* HIBERNATION */
125
126 #if CONFIG_SECLUDED_MEMORY
127 static_assert(!XNU_VM_HAS_LOPAGE,
128 "VM_PAGE_ON_SECLUDED_Q and VM_PAGE_ON_FREE_LOPAGE_Q alias");
129 #endif
130
131 #include <sys/kdebug.h>
132
133 #if defined(HAS_APPLE_PAC)
134 #include <ptrauth.h>
135 #endif
136 #if defined(__arm64__)
137 #include <arm/cpu_internal.h>
138 #endif /* defined(__arm64__) */
139
140 /*
141 * During single threaded early boot we don't initialize all pages.
142 * This avoids some delay during boot. They'll be initialized and
143 * added to the free list as needed or after we are multithreaded by
144 * what becomes the pageout thread.
145 *
146 * Filling pages with a debug pattern slows down booting the DEBUG kernel,
147 * particularly on large memory systems, but is worthwhile for
148 * deterministically trapping uses of uninitialized memory.
149 */
150 #if DEBUG
151 static TUNABLE(uint32_t, fillval, "fill", 0xDEB8F177);
152 #else
153 static TUNABLE(uint32_t, fillval, "fill", 0);
154 #endif
155
156 #if MACH_ASSERT
157
158 TUNABLE(bool, vm_check_refs_on_alloc, "vm_check_refs_on_alloc", false);
159 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
160
161 #else /* MACH_ASSERT */
162
163 #define ASSERT_PMAP_FREE(mem) /* nothing */
164
165 #endif /* MACH_ASSERT */
166
167 extern boolean_t vm_pageout_running;
168 extern thread_t vm_pageout_scan_thread;
169 extern bool vps_dynamic_priority_enabled;
170
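/*
 * Masks of vmp_q_state values (one bit per queue state), so that membership
 * in a whole family of page queues can be checked with a single bit test,
 * e.g. (illustrative): (vm_page_pageable_states & BIT(mem->vmp_q_state)).
 */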
171 const uint16_t vm_page_inactive_states =
172 BIT(VM_PAGE_ON_INACTIVE_INTERNAL_Q) |
173 BIT(VM_PAGE_ON_INACTIVE_EXTERNAL_Q) |
174 BIT(VM_PAGE_ON_INACTIVE_CLEANED_Q);
175
176 const uint16_t vm_page_active_or_inactive_states =
177 vm_page_inactive_states |
178 #if CONFIG_SECLUDED_MEMORY
179 BIT(VM_PAGE_ON_SECLUDED_Q) |
180 #endif /* CONFIG_SECLUDED_MEMORY */
181 BIT(VM_PAGE_ON_ACTIVE_Q);
182
183 const uint16_t vm_page_non_speculative_pageable_states =
184 vm_page_active_or_inactive_states |
185 BIT(VM_PAGE_ON_THROTTLED_Q);
186
187 const uint16_t vm_page_pageable_states =
188 vm_page_non_speculative_pageable_states |
189 BIT(VM_PAGE_ON_SPECULATIVE_Q);
190
191 #if CONFIG_SECLUDED_MEMORY
192 struct vm_page_secluded_data vm_page_secluded;
193 #endif /* CONFIG_SECLUDED_MEMORY */
194 #if HIBERNATION
195 static bool hibernate_rebuild_needed = false;
196 #endif /* HIBERNATION */
197
198 #if DEVELOPMENT || DEBUG
199 extern struct memory_object_pager_ops shared_region_pager_ops;
200 unsigned int shared_region_pagers_resident_count = 0;
201 unsigned int shared_region_pagers_resident_peak = 0;
202 #endif /* DEVELOPMENT || DEBUG */
203
204
205
206 unsigned int PERCPU_DATA(start_color);
207 vm_page_t PERCPU_DATA(free_pages);
208 SCALABLE_COUNTER_DEFINE(vm_cpu_free_count);
209 boolean_t hibernate_cleaning_in_progress = FALSE;
210
211 atomic_counter_t vm_guard_count;
212
213 #if XNU_VM_HAS_LOPAGE
214 /*
215 * this interface exists to support hardware controllers
216 * incapable of generating DMAs with more than 32 bits
217 * of address on platforms with physical memory > 4G...
218 */
219 vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
220 uint32_t vm_lopage_free_count = 0;
221 uint32_t vm_lopage_free_limit = 0;
222 uint32_t vm_lopage_lowater = 0;
223 bool vm_lopage_refill = false;
224 bool vm_lopage_needed = false;
225 unsigned int vm_lopages_allocated_q = 0;
226 unsigned int vm_lopages_allocated_cpm_success = 0;
227 unsigned int vm_lopages_allocated_cpm_failed = 0;
228 #endif /* XNU_VM_HAS_LOPAGE */
229
230
231 int speculative_age_index = 0;
232 int speculative_steal_index = 0;
233 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];
234
235 boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
236 * Updated and checked behind the vm_page_queues_lock. */
237
238 static void vm_page_free_prepare(vm_page_t page);
239
240
241 static void vm_tag_init(void);
242
243 /* for debugging purposes */
244 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
245 VM_PAGE_PACKED_FROM_ARRAY;
246 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
247 VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
248
249 /*
250 * Associated with each page of user-allocatable memory is a
251 * page structure.
252 */
253
254 /*
255 * These variables record the values returned by vm_page_bootstrap,
256 * for debugging purposes. The implementation of pmap_steal_memory
257 * and pmap_startup here also uses them internally.
258 */
259
260 vm_offset_t virtual_space_start;
261 vm_offset_t virtual_space_end;
262 uint32_t vm_page_pages;
263
264 /*
265 * The vm_page_lookup() routine, which provides for fast
266 * (virtual memory object, offset) to page lookup, employs
267 * the following hash table. The vm_page_{insert,remove}
268 * routines install and remove associations in the table.
269 * [This table is often called the virtual-to-physical,
270 * or VP, table.]
271 */
272 typedef struct {
273 vm_page_packed_t page_list;
274 #if MACH_PAGE_HASH_STATS
275 int cur_count; /* current count */
276 int hi_count; /* high water mark */
277 #endif /* MACH_PAGE_HASH_STATS */
278 } vm_page_bucket_t;
279
280
281 #define BUCKETS_PER_LOCK 16
282
283 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets; /* Array of buckets */
284 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_count = 0; /* How big is array? */
285 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_mask; /* Mask for hash function */
286 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_shift; /* Shift for hash function */
287 SECURITY_READ_ONLY_LATE(uint32_t) vm_page_bucket_hash; /* Basic bucket hash */
288 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_lock_count = 0; /* How big is array of locks? */
289
290 #ifndef VM_TAG_ACTIVE_UPDATE
291 #error VM_TAG_ACTIVE_UPDATE
292 #endif
293 #ifndef VM_TAG_SIZECLASSES
294 #error VM_TAG_SIZECLASSES
295 #endif
296
297 /* for debugging */
298 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
299 SECURITY_READ_ONLY_LATE(lck_ticket_t *) vm_page_bucket_locks;
300
301 vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
302 vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
303 #if VM_TAG_SIZECLASSES
304 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
305 #endif /* VM_TAG_SIZECLASSES */
306
307 vm_tag_t vm_allocation_tag_highest;
308
309 #if VM_PAGE_BUCKETS_CHECK
310 boolean_t vm_page_buckets_check_ready = FALSE;
311 #if VM_PAGE_FAKE_BUCKETS
312 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
313 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
314 #endif /* VM_PAGE_FAKE_BUCKETS */
315 #endif /* VM_PAGE_BUCKETS_CHECK */
316
317 #if MACH_PAGE_HASH_STATS
318 /* This routine is only for debug. It is intended to be called by
319 * hand by a developer using a kernel debugger. This routine prints
320 * out vm_page_hash table statistics to the kernel debug console.
321 */
322 void
323 hash_debug(void)
324 {
325 int i;
326 int numbuckets = 0;
327 int highsum = 0;
328 int maxdepth = 0;
329
330 for (i = 0; i < vm_page_bucket_count; i++) {
331 if (vm_page_buckets[i].hi_count) {
332 numbuckets++;
333 highsum += vm_page_buckets[i].hi_count;
334 if (vm_page_buckets[i].hi_count > maxdepth) {
335 maxdepth = vm_page_buckets[i].hi_count;
336 }
337 }
338 }
339 printf("Total number of buckets: %d\n", vm_page_bucket_count);
340 printf("Number used buckets: %d = %d%%\n",
341 numbuckets, 100 * numbuckets / vm_page_bucket_count);
342 printf("Number unused buckets: %d = %d%%\n",
343 vm_page_bucket_count - numbuckets,
344 100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
345 printf("Sum of bucket max depth: %d\n", highsum);
346 printf("Average bucket depth: %d.%2d\n",
347 highsum / vm_page_bucket_count,
348 highsum % vm_page_bucket_count);
349 printf("Maximum bucket depth: %d\n", maxdepth);
350 }
351 #endif /* MACH_PAGE_HASH_STATS */
352
353 /*
354 * The virtual page size is currently implemented as a runtime
355 * variable, but is constant once initialized using vm_set_page_size.
356 * This initialization must be done in the machine-dependent
357 * bootstrap sequence, before calling other machine-independent
358 * initializations.
359 *
360 * All references to the virtual page size outside this
361 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
362 * constants.
363 */
364 #if defined(__arm64__)
365 vm_size_t page_size;
366 vm_size_t page_mask;
367 int page_shift;
368 #else
369 vm_size_t page_size = PAGE_SIZE;
370 vm_size_t page_mask = PAGE_MASK;
371 int page_shift = PAGE_SHIFT;
372 #endif
373
374 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages;
375 #if XNU_VM_HAS_DELAYED_PAGES
376 vm_page_t vm_pages_end;
377 uint32_t vm_pages_count;
378 #else
379 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages_end;
380 SECURITY_READ_ONLY_LATE(uint32_t) vm_pages_count;
381 #endif /* XNU_VM_HAS_DELAYED_PAGES */
382 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
383 SECURITY_READ_ONLY_LATE(ppnum_t) vm_pages_first_pnum;
384 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
385 #if CONFIG_SPTM
386 /*
387 * When used, these 128bit (MAX_COLORS bits) masks represent a "cluster"
388 * of contiguous free physical pages.
389 *
390 * For each cluster, there is an enqueue "index", which is -1 when there is no
391 * free page in the cluster, or the index in [0, 128) of the page that is
392 * enqueued on the vm_page_free_queue to represent the entire cluster.
393 *
394 * Grouping pages this way has the double benefit of reducing doubly linked
395 * list manipulations (the worst data structure known to man when considering
396 * cache misses), and of mechanically making the VM serve more "contiguous"
397 * pages naturally.
398 */
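/*
 * Illustrative example: if, in the cluster covering physical pages
 * [P, P + 128), only pages P + 3 and P + 70 are currently free, then bits
 * 3 and 70 are set in that cluster's mask, and the enqueue index names
 * whichever of those two pages sits on the free queue on behalf of the
 * whole cluster; the other page is represented by its mask bit alone.
 */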
399 static_assert(XNU_VM_HAS_LINEAR_PAGES_ARRAY);
400 SECURITY_READ_ONLY_LATE(__uint128_t *) _vm_pages_free_masks;
401 SECURITY_READ_ONLY_LATE(int8_t *) _vm_pages_free_enqueue_idx;
402 #endif /* CONFIG_SPTM */
403
404
405 /*
406 * Resident pages that represent real memory
407 * are allocated from a set of free lists,
408 * one per color.
409 */
410 SECURITY_READ_ONLY_LATE(unsigned int) vm_colors;
411 SECURITY_READ_ONLY_LATE(unsigned int) vm_color_mask; /* mask is == (vm_colors-1) */
412 unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
413 unsigned int vm_free_magazine_refill_limit = 0;
414
415 struct vm_page_free_queue vm_page_queue_free;
416
417 unsigned int vm_page_free_wanted;
418 unsigned int vm_page_free_wanted_privileged;
419 #if CONFIG_SECLUDED_MEMORY
420 unsigned int vm_page_free_wanted_secluded;
421 #endif /* CONFIG_SECLUDED_MEMORY */
422 unsigned int vm_page_free_count;
423
424 unsigned int vm_page_realtime_count;
425
426 /*
427 * Occasionally, the virtual memory system uses
428 * resident page structures that do not refer to
429 * real pages, for example to leave a page with
430 * important state information in the VP table.
431 *
432 * These page structures are allocated the way
433 * most other kernel structures are.
434 */
435 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
436 vm_locks_array_t vm_page_locks;
437
438 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
439 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
440 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
441 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
442 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
443 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
444 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
445 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
446 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
447
448 unsigned int vm_page_local_q_soft_limit = 250;
449 unsigned int vm_page_local_q_hard_limit = 500;
450 struct vpl *__zpercpu vm_page_local_q;
451
452 /* N.B. Guard and fictitious pages must not
453 * be assigned a zero phys_page value.
454 */
455 /*
456 * Fictitious pages don't have a physical address,
457 * but we must initialize phys_page to something.
458 * For debugging, this should be a strange value
459 * that the pmap module can recognize in assertions.
460 */
461 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
462
463 /*
464 * Guard pages are not accessible so they don't
465 * need a physical address, but we need to enter
466 * one in the pmap.
467 * Let's make it recognizable and make sure that
468 * we don't use a real physical page with that
469 * physical address.
470 */
471 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
472
473 /*
474 * Resident page structures are also chained on
475 * queues that are used by the page replacement
476 * system (pageout daemon). These queues are
477 * defined here, but are shared by the pageout
478 * module. The inactive queue is broken into
479 * file backed and anonymous for convenience as the
480 * pageout daemon often assigns a higher
481 * importance to anonymous pages (less likely to pick)
482 */
483 vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
484 vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
485 #if CONFIG_SECLUDED_MEMORY
486 vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
487 #endif /* CONFIG_SECLUDED_MEMORY */
488 vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */
489 vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
490
491 queue_head_t vm_objects_wired;
492
493 vm_page_queue_head_t vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
494 uint32_t vm_page_donate_mode;
495 uint32_t vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
496 uint32_t vm_page_donate_count;
497 bool vm_page_donate_queue_ripe;
498
499
500 vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
501 uint32_t vm_page_background_target;
502 uint32_t vm_page_background_target_snapshot;
503 uint32_t vm_page_background_count;
504 uint64_t vm_page_background_promoted_count;
505
506 uint32_t vm_page_background_internal_count;
507 uint32_t vm_page_background_external_count;
508
509 uint32_t vm_page_background_mode;
510 uint32_t vm_page_background_exclude_external;
511
512 unsigned int vm_page_active_count;
513 unsigned int vm_page_inactive_count;
514 unsigned int vm_page_kernelcache_count;
515 #if CONFIG_SECLUDED_MEMORY
516 unsigned int vm_page_secluded_count;
517 unsigned int vm_page_secluded_count_free;
518 unsigned int vm_page_secluded_count_inuse;
519 unsigned int vm_page_secluded_count_over_target;
520 #endif /* CONFIG_SECLUDED_MEMORY */
521 unsigned int vm_page_anonymous_count;
522 unsigned int vm_page_throttled_count;
523 unsigned int vm_page_speculative_count;
524
525 unsigned int vm_page_wire_count;
526 unsigned int vm_page_wire_count_on_boot = 0;
527 unsigned int vm_page_stolen_count = 0;
528 unsigned int vm_page_wire_count_initial;
529 unsigned int vm_page_gobble_count = 0;
530 unsigned int vm_page_kern_lpage_count = 0;
531
532 uint64_t booter_size; /* external so it can be found in core dumps */
533
534 #define VM_PAGE_WIRE_COUNT_WARNING 0
535 #define VM_PAGE_GOBBLE_COUNT_WARNING 0
536
537 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
538 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
539 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
540
541 unsigned int vm_page_xpmapped_external_count = 0;
542 unsigned int vm_page_external_count = 0;
543 unsigned int vm_page_internal_count = 0;
544 unsigned int vm_page_pageable_external_count = 0;
545 unsigned int vm_page_pageable_internal_count = 0;
546
547 #if DEVELOPMENT || DEBUG
548 unsigned int vm_page_speculative_recreated = 0;
549 unsigned int vm_page_speculative_created = 0;
550 unsigned int vm_page_speculative_used = 0;
551 #endif
552
553 vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
554
555 unsigned int vm_page_cleaned_count = 0;
556
557 uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
558 ppnum_t max_valid_low_ppnum = PPNUM_MAX;
559
560
561 /*
562 * Several page replacement parameters are also
563 * shared with this module, so that page allocation
564 * (done here in vm_page_alloc) can trigger the
565 * pageout daemon.
566 */
567 unsigned int vm_page_free_target = 0;
568 unsigned int vm_page_free_min = 0;
569 unsigned int vm_page_throttle_limit = 0;
570 unsigned int vm_page_inactive_target = 0;
571 #if CONFIG_SECLUDED_MEMORY
572 unsigned int vm_page_secluded_target = 0;
573 #endif /* CONFIG_SECLUDED_MEMORY */
574 unsigned int vm_page_anonymous_min = 0;
575 unsigned int vm_page_free_reserved = 0;
576
577
578 /*
579 * The VM system has a couple of heuristics for deciding
580 * that pages are "uninteresting" and should be placed
581 * on the inactive queue as likely candidates for replacement.
582 * These variables let the heuristics be controlled at run-time
583 * to make experimentation easier.
584 */
585
586 boolean_t vm_page_deactivate_hint = TRUE;
587
588 struct vm_page_stats_reusable vm_page_stats_reusable;
589
590 /*
591 * vm_set_page_size:
592 *
593 * Sets the page size, perhaps based upon the memory
594 * size. Must be called before any use of page-size
595 * dependent functions.
596 *
597 * Sets page_shift and page_mask from page_size.
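 *
 * For example (illustrative), with a 16KB PAGE_SIZE this yields
 * page_shift == 14 and page_mask == 0x3fff.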
598 */
599 void
600 vm_set_page_size(void)
601 {
602 page_size = PAGE_SIZE;
603 page_mask = PAGE_MASK;
604 page_shift = PAGE_SHIFT;
605
606 if ((page_mask & page_size) != 0) {
607 panic("vm_set_page_size: page size not a power of two");
608 }
609
610 for (page_shift = 0;; page_shift++) {
611 if ((1U << page_shift) == page_size) {
612 break;
613 }
614 }
615 }
616
617
618 /*!
619 * @abstract
620 * Given a page, returns the memory class of that page.
621 */
622 static vm_memory_class_t
623 vm_page_get_memory_class(vm_page_t mem __unused, ppnum_t pnum __unused)
624 {
625 assert(!vm_page_is_fictitious(mem));
626
627 #if XNU_VM_HAS_LOPAGE
628 if (mem->vmp_lopage) {
629 return VM_MEMORY_CLASS_LOPAGE;
630 }
631 #endif /* XNU_VM_HAS_LOPAGE */
632 return VM_MEMORY_CLASS_REGULAR;
633 }
634
635 /*
636 * vm_page_validate_no_references:
637 *
638 * Make sure the physical page has no refcounts.
639 *
640 */
641 static inline void
642 vm_page_validate_no_references(
643 vm_page_t mem)
644 {
645 bool is_freed;
646
647 pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
648
649 #if CONFIG_SPTM
650 is_freed = pmap_is_page_free(paddr);
651 #else
652 is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
653 #endif /* CONFIG_SPTM */
654
655 if (!is_freed) {
656 /*
657 * There is a redundancy here, but we are going to panic anyway,
658 * and ASSERT_PMAP_FREE traces useful information. So, we keep this
659 * behavior.
660 */
661 ASSERT_PMAP_FREE(mem);
662 panic("%s: page 0x%llx is referenced", __func__, paddr);
663 }
664 }
665
666 /*
667 * vm_page_is_restricted:
668 *
669 * Checks if a given vm_page_t is a restricted page.
670 */
671 inline bool
672 vm_page_is_restricted(vm_page_t mem)
673 {
674 ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(mem);
675 return pmap_is_page_restricted(pn);
676 }
677
678 #ifdef __x86_64__
679
680 #define MAX_CLUMP_SIZE 16
681 #define DEFAULT_CLUMP_SIZE 4
682
683 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
684
685 #if DEVELOPMENT || DEBUG
686 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
687 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
688
689 static inline void
690 vm_clump_update_stats(unsigned int c)
691 {
692 assert(c <= vm_clump_size);
693 if (c > 0 && c <= vm_clump_size) {
694 vm_clump_stats[c] += c;
695 }
696 vm_clump_allocs += c;
697 }
698 #endif /* if DEVELOPMENT || DEBUG */
699
700 /* Called once to set up the VM clump knobs */
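/*
 * A "clump" is a small power-of-two-sized run of physically contiguous
 * pages. The x86_64 free path tries to keep whole clumps adjacent on the
 * free queue (see the vm_page_queue_enter_clump() usage below). With the
 * default clump_size of 4, vm_clump_mask == 3 and vm_clump_shift == 2.
 */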
701 static void
702 vm_page_setup_clump( void )
703 {
704 unsigned int override, n;
705
706 vm_clump_size = DEFAULT_CLUMP_SIZE;
707 if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
708 vm_clump_size = override;
709 }
710
711 if (vm_clump_size > MAX_CLUMP_SIZE) {
712 panic("vm_page_setup_clump:: clump_size is too large!");
713 }
714 if (vm_clump_size < 1) {
715 panic("vm_page_setup_clump:: clump_size must be >= 1");
716 }
717 if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
718 panic("vm_page_setup_clump:: clump_size must be a power of 2");
719 }
720
721 vm_clump_promote_threshold = vm_clump_size;
722 vm_clump_mask = vm_clump_size - 1;
723 for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
724 ;
725 }
726
727 #if DEVELOPMENT || DEBUG
728 bzero(vm_clump_stats, sizeof(vm_clump_stats));
729 vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
730 #endif /* if DEVELOPMENT || DEBUG */
731 }
732
733 #endif /* __x86_64__ */
734
735 void
736 vm_page_free_queue_init(vm_page_free_queue_t free_queue)
737 {
738 for (unsigned int color = 0; color < MAX_COLORS; color++) {
739 vm_page_queue_init(&free_queue->vmpfq_queues[color].qhead);
740 }
741 }
742
743 /*!
744 * @function vm_page_free_queue_for_class()
745 *
746 * @abstract
747 * Returns the appropriate free queue for the given class and page color.
748 */
749 __pure2
750 static vm_page_queue_t
751 vm_page_free_queue_for_class(vm_memory_class_t mem_class, unsigned int color)
752 {
753 switch (mem_class) {
754 case VM_MEMORY_CLASS_REGULAR:
755 return &vm_page_queue_free.vmpfq_queues[color].qhead;
756 #if XNU_VM_HAS_LOPAGE
757 case VM_MEMORY_CLASS_LOPAGE:
758 return &vm_lopage_queue_free;
759 #endif /* XNU_VM_HAS_LOPAGE */
760 #if CONFIG_SECLUDED_MEMORY
761 case VM_MEMORY_CLASS_SECLUDED:
762 return &vm_page_queue_secluded;
763 #endif
764 }
765 }
766
767 __pure2
768 static bool
769 vm_page_free_queue_has_colors(vm_memory_class_t mem_class)
770 {
771 switch (mem_class) {
772 case VM_MEMORY_CLASS_REGULAR:
773 return true;
774 #if XNU_VM_HAS_LOPAGE
775 case VM_MEMORY_CLASS_LOPAGE:
776 return false;
777 #endif /* XNU_VM_HAS_LOPAGE */
778 #if CONFIG_SECLUDED_MEMORY
779 case VM_MEMORY_CLASS_SECLUDED:
780 return false;
781 #endif
782 }
783 }
784
785
786 #if CONFIG_SECLUDED_MEMORY
787
788 static bool
789 vm_page_secluded_pool_eligible(vm_memory_class_t class)
790 {
791 switch (class) {
792 #if XNU_VM_HAS_LOPAGE
793 case VM_MEMORY_CLASS_LOPAGE:
794 return false;
795 #endif /* XNU_VM_HAS_LOPAGE */
796 default:
797 return true;
798 }
799 }
800
801 static bool
802 vm_page_secluded_pool_depleted(void)
803 {
804 if (vm_page_free_count <= vm_page_free_reserved) {
805 return false;
806 }
807 if (num_tasks_can_use_secluded_mem) {
808 return false;
809 }
810 return vm_page_secluded_count < vm_page_secluded_target;
811 }
812
813 #endif /* CONFIG_SECLUDED_MEMORY */
814 #if HIBERNATION
815
816 __attribute__((overloadable))
817 static void
818 vm_page_free_queue_foreach(vm_page_queue_t queue, void (^block)(vm_page_t))
819 {
820 vm_page_t page;
821
822 vm_page_queue_iterate(queue, page, vmp_pageq) {
823 block(page);
824 }
825 }
826
827 __attribute__((overloadable))
828 static void
829 vm_page_free_queue_foreach(vm_page_free_queue_t queue, void (^block)(vm_page_t))
830 {
831 for (unsigned int color = 0; color < vm_colors; color++) {
832 vm_page_free_queue_foreach(&queue->vmpfq_queues[color].qhead, block);
833 }
834 }
835
836 #endif /* HIBERNATION */
837 #if CONFIG_SPTM
838
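/*
 * Number of 128-page clusters (one free mask / enqueue index pair each)
 * needed to cover every managed physical page.
 */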
839 static inline uint32_t
840 vm_pages_free_mask_len(void)
841 {
842 extern pmap_paddr_t real_avail_end;
843
844 uint64_t pnums = atop(real_avail_end) - pmap_first_pnum;
845 static_assert(8 * sizeof(__uint128_t) == MAX_COLORS);
846 return (uint32_t)((pnums + MAX_COLORS - 1) / MAX_COLORS);
847 }
848
849 static inline int8_t
850 vm_pages_free_mask_bit(ppnum_t pnum)
851 {
852 return (int8_t)(pnum & (MAX_COLORS - 1));
853 }
854
855 static inline uint32_t
856 vm_pages_free_mask_index(ppnum_t pnum)
857 {
858 return (pnum - pmap_first_pnum) / MAX_COLORS;
859 }
860
861 __pure2
862 static inline __uint128_t *
863 vm_pages_free_masks(void)
864 {
865 return _vm_pages_free_masks;
866 }
867
868 __pure2
869 static inline bitmap_t *
870 vm_pages_free_masks_as_bitmap(uint32_t index)
871 {
872 /*
873 * this conversion is gross but helps with codegen for bit-wise
874 * accesses where the __uint128_t type is really yielding poor code.
875 *
876 * This conversion is only legal on little endian architectures.
877 */
878 #ifndef __LITTLE_ENDIAN__
879 #error unsupported configuration
880 #endif
881 return (bitmap_t *)(_vm_pages_free_masks + index);
882 }
883
884 __pure2
885 static inline int8_t *
886 vm_pages_free_enqueue_idx(uint32_t index)
887 {
888 return &_vm_pages_free_enqueue_idx[index];
889 }
890
891 /*!
892 * @brief
893 * Return the position of the next bit in "circular" order for a given cluster
894 * of pages, starting at and including @c bit.
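 *
 * Illustrative example: with bits {2, 9, 100} set in the cluster's mask,
 * a starting @c bit of 10 returns 100, while a starting @c bit of 101
 * wraps around and returns 2; an empty mask returns -1.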
895 */
896 static inline int8_t
897 vm_pages_free_mask_next_bit(uint32_t index, int8_t bit)
898 {
899 __uint128_t value = vm_pages_free_masks()[index];
900 __uint128_t mask = ((__uint128_t)1 << bit) - 1;
901
902 if (value == 0) {
903 return -1;
904 }
905
906 if (value & ~mask) {
907 value &= ~mask;
908 }
909 if ((uint64_t)value) {
910 return (int8_t)__builtin_ctzll((uint64_t)value);
911 }
912 return 64 + (int8_t)__builtin_ctzll((uint64_t)(value >> 64));
913 }
914
915 static inline bool
916 vm_pages_free_mask_test(uint32_t index, int8_t bit)
917 {
918 return bitmap_test(vm_pages_free_masks_as_bitmap(index), bit);
919 }
920
921 static inline void
922 vm_pages_free_mask_set(uint32_t index, int8_t bit)
923 {
924 assert(!vm_pages_free_mask_test(index, bit));
925 bitmap_set(vm_pages_free_masks_as_bitmap(index), bit);
926 }
927
928 static inline void
929 vm_pages_free_mask_clear(uint32_t index, int8_t bit)
930 {
931 assert(vm_pages_free_mask_test(index, bit));
932 bitmap_clear(vm_pages_free_masks_as_bitmap(index), bit);
933 }
934
935 #endif /* CONFIG_SPTM */
936
937 __attribute__((always_inline))
938 void
939 vm_page_free_queue_enter(vm_memory_class_t class, vm_page_t mem, ppnum_t pnum)
940 {
941 bool enter_first;
942 unsigned int color;
943 vm_page_queue_t queue;
944
945 if (startup_phase >= STARTUP_SUB_KMEM) {
946 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
947 }
948
949 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
950 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0 &&
951 mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0 &&
952 mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0 &&
953 mem->vmp_next_m == 0 &&
954 mem->vmp_object == 0 &&
955 mem->vmp_wire_count == 0 &&
956 mem->vmp_busy &&
957 !mem->vmp_tabled &&
958 !mem->vmp_laundry &&
959 !mem->vmp_pmapped &&
960 !mem->vmp_wpmapped &&
961 !mem->vmp_realtime);
962
963 switch (class) {
964 #if XNU_VM_HAS_LOPAGE
965 case VM_MEMORY_CLASS_LOPAGE:
966 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
967 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
968 mem->vmp_lopage = true;
969 mem->vmp_canonical = true;
970 enter_first = true;
971 break;
972 #endif /* XNU_VM_HAS_LOPAGE */
973 #if CONFIG_SECLUDED_MEMORY
974 case VM_MEMORY_CLASS_SECLUDED:
975 if (startup_phase >= STARTUP_SUB_KMEM) {
976 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
977 }
978 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
979 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
980 mem->vmp_lopage = false;
981 mem->vmp_canonical = true;
982 enter_first = true;
983 break;
984 #endif
985 default:
986 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
987 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
988 mem->vmp_lopage = false;
989 mem->vmp_canonical = true;
990 enter_first = false;
991 break;
992 }
993
994
995 color = VM_PAGE_GET_COLOR_PNUM(pnum);
996 queue = vm_page_free_queue_for_class(class, color);
997 #if CONFIG_SPTM
998 if (class == VM_MEMORY_CLASS_REGULAR && vm_pages_free_masks()) {
999 uint32_t index = vm_pages_free_mask_index(pnum);
1000 int8_t bit = vm_pages_free_mask_bit(pnum);
1001
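/*
 * Only the first page freed into an otherwise empty cluster is
 * physically linked on the free queue; later frees into the same
 * cluster are tracked by their mask bit alone.
 */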
1002 if (vm_pages_free_masks()[index] == 0) {
1003 vm_page_queue_enter(queue, mem, vmp_pageq);
1004 *vm_pages_free_enqueue_idx(index) = bit;
1005 }
1006 vm_pages_free_mask_set(index, bit);
1007 } else
1008 #endif /* CONFIG_SPTM */
1009 if (enter_first) {
1010 vm_page_queue_enter_first(queue, mem, vmp_pageq);
1011 } else {
1012 #if defined(__x86_64__)
1013 vm_page_queue_enter_clump(queue, mem);
1014 #else
1015 vm_page_queue_enter(queue, mem, vmp_pageq);
1016 #endif
1017 }
1018
1019 switch (class) {
1020 case VM_MEMORY_CLASS_REGULAR:
1021 VM_COUNTER_INC(&vm_page_queue_free.vmpfq_count);
1022 VM_COUNTER_INC(&vm_page_free_count);
1023 break;
1024 #if XNU_VM_HAS_LOPAGE
1025 case VM_MEMORY_CLASS_LOPAGE:
1026 VM_COUNTER_INC(&vm_lopage_free_count);
1027 if (vm_lopage_free_count >= vm_lopage_free_limit) {
1028 vm_lopage_refill = false;
1029 }
1030 break;
1031 #endif /* XNU_VM_HAS_LOPAGE */
1032 #if CONFIG_SECLUDED_MEMORY
1033 case VM_MEMORY_CLASS_SECLUDED:
1034 vm_page_secluded_count++;
1035 vm_page_secluded_count_free++;
1036 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
1037 break;
1038 #endif /* CONFIG_SECLUDED_MEMORY */
1039 default:
1040 __builtin_unreachable();
1041 }
1042 }
1043
1044 /*!
1045 * @typedef vmp_free_list_result_t
1046 *
1047 * @discussion
1048 * This data structure is used by vm_page_free_queue_add_list to track
1049 * how many pages were freed to which free lists, so that it can then drive
1050 * which waiters we are going to wake up.
1051 *
1052 * uint8_t counters are enough because we never free more than 64 pages at
1053 * a time, and this allows for the data structure to be passed by register.
1054 */
1055 typedef struct {
1056 uint8_t vmpr_regular;
1057 uint8_t vmpr_lopage;
1058 #if CONFIG_SECLUDED_MEMORY
1059 uint8_t vmpr_secluded;
1060 #endif /* CONFIG_SECLUDED_MEMORY */
1061 } vmp_free_list_result_t;
1062
1063 /*!
1064 * @abstract
1065 * Returns whether there are any threads blocked in VM_PAGE_WAIT().
1066 *
1067 * @discussion
1068 * The page free queue lock must be held.
1069 */
1070 static bool
1071 vm_page_free_queue_has_any_waiters(void)
1072 {
1073 uint32_t result = 0;
1074
1075 result |= vm_page_free_wanted;
1076 result |= vm_page_free_wanted_privileged;
1077 #if CONFIG_SECLUDED_MEMORY
1078 result |= vm_page_free_wanted_secluded;
1079 #endif /* CONFIG_SECLUDED_MEMORY */
1080
1081 return result != 0;
1082 }
1083
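/*!
 * @abstract
 * Wakes up to @c n threads blocked on @c event (all of them when @c n is
 * UINT32_MAX), using the priority-inheritance aware wakeup primitives when
 * the pageout scan runs with dynamic priority, and plain
 * thread_wakeup_nthreads() otherwise.
 */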
1084 void
1085 vm_page_free_wakeup(event_t event, uint32_t n)
1086 {
1087 if (vps_dynamic_priority_enabled) {
1088 if (n == UINT32_MAX) {
1089 wakeup_all_with_inheritor(event, THREAD_AWAKENED);
1090 } else {
1091 while (n-- > 0) {
1092 wakeup_one_with_inheritor(event, THREAD_AWAKENED,
1093 LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
1094 }
1095 }
1096 } else {
1097 thread_wakeup_nthreads(event, n);
1098 }
1099 }
1100
1101 /*!
1102 * @abstract
1103 * Helper to wakeup threads in VM_PAGE_WAIT() given
1104 * a vm_page_free_queue_enter_list() result.
1105 *
1106 * @discussion
1107 * The page free queue lock must be held, and is unlocked on return.
1108 *
1109 * @param vmpr The result of a vm_page_free_queue_enter_list() call.
1110 */
1111 __attribute__((noinline))
1112 static void
1113 vm_page_free_queue_handle_wakeups_and_unlock(vmp_free_list_result_t vmpr)
1114 {
1115 unsigned int need_wakeup = 0;
1116 unsigned int need_priv_wakeup = 0;
1117 #if CONFIG_SECLUDED_MEMORY
1118 unsigned int need_wakeup_secluded = 0;
1119 #endif /* CONFIG_SECLUDED_MEMORY */
1120 unsigned int unpriv_limit;
1121
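/*
 * Donate up to "limit" of the freshly freed vmpr.count pages to one class
 * of waiters: decrement both the waiter count and the remaining page
 * count, accumulate the wakeups to issue in "wake", and evaluate to the
 * number of pages donated.
 */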
1122 #define DONATE_TO_WAITERS(wake, count, waiters_count, limit) ({ \
1123 uint32_t __n = MIN(MIN(waiters_count, vmpr.count), limit); \
1124 waiters_count -= __n; \
1125 vmpr.count -= __n; \
1126 wake += __n; \
1127 __n; \
1128 })
1129
1130 /*
1131 * Step 1: privileged waiters get to be satisfied first
1132 */
1133 if (vm_page_free_wanted_privileged) {
1134 DONATE_TO_WAITERS(need_priv_wakeup,
1135 vmpr_regular, vm_page_free_wanted_privileged,
1136 UINT32_MAX);
1137 }
1138
1139
1140 /*
1141 * Step 2: the privileged reserve needs to be replenished
1142 *
1143 * Let's make sure that we only wake up regular threads
1144 * for free pages above the reserve threshold.
1145 */
1146 if (vm_page_free_count <= vm_page_free_reserved) {
1147 unpriv_limit = 0;
1148 } else {
1149 unpriv_limit = vm_page_free_count - vm_page_free_reserved;
1150 }
1151
1152 /*
1153 * Step 3: satisfy secluded waiters, using the secluded pool first,
1154 * regular pages second.
1155 */
1156 #if CONFIG_SECLUDED_MEMORY
1157 if (vm_page_free_wanted_secluded) {
1158 DONATE_TO_WAITERS(need_wakeup_secluded,
1159 vmpr_secluded, vm_page_free_wanted_secluded,
1160 UINT32_MAX);
1161 unpriv_limit -= DONATE_TO_WAITERS(need_wakeup_secluded,
1162 vmpr_regular, vm_page_free_wanted_secluded,
1163 unpriv_limit);
1164
1165 if (vm_page_free_wanted_secluded == 0) {
1166 need_wakeup_secluded = UINT32_MAX;
1167 }
1168 }
1169 #endif /* CONFIG_SECLUDED_MEMORY */
1170
1171 /*
1172 * Step 4: satisfy regular demand last.
1173 */
1174 if (vm_page_free_wanted) {
1175 unpriv_limit -= DONATE_TO_WAITERS(need_wakeup,
1176 vmpr_regular, vm_page_free_wanted,
1177 unpriv_limit);
1178 if (vm_page_free_wanted == 0) {
1179 need_wakeup = UINT32_MAX;
1180 }
1181 }
1182
1183 /*
1184 * We have updated waiter counts, and if this page release happens
1185 * from the context of a thread that's super low priority we might
1186 * starve waking up privileged threads.
1187 *
1188 * While we hold the free page lock, such threads would wake us up via
1189 * the mutex priority inheritance mechanism, but as soon as we drop the
1190 * lock all bets are off.
1191 *
1192 * To avoid this priority inversion that could really hurt the VM,
1193 * disable preemption until we've woken up everyone.
1194 */
1195 disable_preemption();
1196 vm_free_page_unlock();
1197
1198 /*
1199 * Dispatch privileged wakeups
1200 *
1201 * There shouldn't be that many VM-privileged threads,
1202 * so let's wake them all up, even if we don't quite
1203 * have enough pages to satisfy them all.
1204 */
1205 if (need_priv_wakeup) {
1206 vm_page_free_wakeup(&vm_page_free_wanted_privileged,
1207 UINT32_MAX);
1208 }
1209 if (need_wakeup) {
1210 vm_page_free_wakeup(&vm_page_free_count, need_wakeup);
1211 }
1212 #if CONFIG_SECLUDED_MEMORY
1213 if (need_wakeup_secluded) {
1214 vm_page_free_wakeup(&vm_page_free_wanted_secluded,
1215 need_wakeup_secluded);
1216 }
1217 #endif /* CONFIG_SECLUDED_MEMORY */
1218
1219 enable_preemption();
1220
1221 #undef DONATE_TO_WAITERS
1222 }
1223
1224 /*!
1225 * @abstract
1226 * Given a list of pages, put each page on whichever global free queue is
1227 * appropriate.
1228 *
1229 * @discussion
1230 * Must be called with the VM free page lock unlocked.
1231 *
1232 * The list must contain less than 255 elements.
1233 */
1234 static void
1235 vm_page_free_queue_enter_list(vm_page_list_t list, vmp_release_options_t opts)
1236 {
1237 bool page_queues_unlock = false;
1238 bool page_queues_locked = false;
1239 bool do_secluded = false;
1240 vmp_free_list_result_t result = { };
1241 vm_page_t mem;
1242
1243 LCK_MTX_ASSERT(&vm_page_queue_lock,
1244 (opts & VMP_RELEASE_Q_LOCKED)
1245 ? LCK_MTX_ASSERT_OWNED
1246 : LCK_MTX_ASSERT_NOTOWNED);
1247
1248 /*
1249 * Hibernation and startup do not really need the lock because
1250 * these are single threaded paths, so from this function's point of view,
1251 * it's as if VMP_RELEASE_Q_LOCKED was passed.
1252 */
1253 page_queues_locked = (opts & (VMP_RELEASE_STARTUP |
1254 VMP_RELEASE_HIBERNATE |
1255 VMP_RELEASE_Q_LOCKED));
1256
1257 #if CONFIG_SECLUDED_MEMORY
1258 do_secluded = vm_page_secluded_pool_depleted();
1259 #endif /* CONFIG_SECLUDED_MEMORY */
1260
1261 if (!page_queues_locked && (list.vmpl_has_realtime || do_secluded)) {
1262 vm_page_lock_queues();
1263 page_queues_locked = true;
1264 page_queues_unlock = true;
1265 }
1266
1267 vm_free_page_lock_spin();
1268
1269 vm_page_list_foreach_consume(mem, &list) {
1270 ppnum_t pnum = VM_PAGE_GET_PHYS_PAGE(mem);
1271 vm_memory_class_t class = vm_page_get_memory_class(mem, pnum);
1272
1273 if (mem->vmp_realtime) {
1274 mem->vmp_realtime = false;
1275 VM_COUNTER_DEC(&vm_page_realtime_count);
1276 }
1277
1278 #if XNU_VM_HAS_LOPAGE
1279 if ((class == VM_MEMORY_CLASS_REGULAR ||
1280 class == VM_MEMORY_CLASS_LOPAGE) &&
1281 vm_lopage_refill &&
1282 vm_lopage_free_count < vm_lopage_free_limit &&
1283 pnum < max_valid_low_ppnum) {
1284 class = VM_MEMORY_CLASS_LOPAGE;
1285 } else {
1286 class = VM_MEMORY_CLASS_REGULAR;
1287 }
1288 #endif /* XNU_VM_HAS_LOPAGE */
1289
1290 #if CONFIG_SECLUDED_MEMORY
1291 /*
1292 * XXX FBDP TODO: also avoid refilling secluded queue
1293 * when some IOKit objects are already grabbing from it...
1294 */
1295 if (page_queues_locked &&
1296 vm_page_secluded_pool_eligible(class) &&
1297 vm_page_secluded_pool_depleted()) {
1298 class = VM_MEMORY_CLASS_SECLUDED;
1299 }
1300 #endif /* CONFIG_SECLUDED_MEMORY */
1301
1302 vm_page_free_queue_enter(class, mem, pnum);
1303
1304 switch (class) {
1305 case VM_MEMORY_CLASS_REGULAR:
1306 result.vmpr_regular++;
1307 break;
1308 #if XNU_VM_HAS_LOPAGE
1309 case VM_MEMORY_CLASS_LOPAGE:
1310 result.vmpr_lopage++;
1311 break;
1312 #endif /* XNU_VM_HAS_LOPAGE */
1313 #if CONFIG_SECLUDED_MEMORY
1314 case VM_MEMORY_CLASS_SECLUDED:
1315 result.vmpr_secluded++;
1316 continue;
1317 #endif /* CONFIG_SECLUDED_MEMORY */
1318 }
1319 }
1320
1321 if (page_queues_unlock) {
1322 vm_page_unlock_queues();
1323 }
1324
1325 vm_pageout_vminfo.vm_page_pages_freed += list.vmpl_count;
1326 VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE,
1327 DBG_FUNC_NONE, list.vmpl_count, 0, 0, 0);
1328
1329 if (vm_page_free_queue_has_any_waiters()) {
1330 vm_page_free_queue_handle_wakeups_and_unlock(result);
1331 } else {
1332 vm_free_page_unlock();
1333 }
1334
1335 VM_CHECK_MEMORYSTATUS;
1336 }
1337
1338 __attribute__((always_inline))
1339 void
1340 vm_page_free_queue_remove(
1341 vm_memory_class_t class,
1342 vm_page_t mem,
1343 ppnum_t pnum,
1344 vm_page_q_state_t q_state)
1345 {
1346 unsigned int color;
1347 vm_page_queue_t queue;
1348
1349 if (startup_phase >= STARTUP_SUB_KMEM) {
1350 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
1351 }
1352
1353 mem->vmp_q_state = q_state;
1354
1355
1356 color = VM_PAGE_GET_COLOR_PNUM(pnum);
1357 queue = vm_page_free_queue_for_class(class, color);
1358 #if CONFIG_SPTM
1359 if (class == VM_MEMORY_CLASS_REGULAR && vm_pages_free_masks()) {
1360 uint32_t index = vm_pages_free_mask_index(pnum);
1361 int8_t bit = vm_pages_free_mask_bit(pnum);
1362
1363 vm_pages_free_mask_clear(index, bit);
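/*
 * If this page was the cluster's representative on the free queue,
 * unlink it and promote the next free page of the cluster (if any)
 * to take its place on the queue.
 */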
1364 if (*vm_pages_free_enqueue_idx(index) == bit) {
1365 vm_page_queue_remove(queue, mem, vmp_pageq);
1366 bit = vm_pages_free_mask_next_bit(index, bit);
1367 *vm_pages_free_enqueue_idx(index) = bit;
1368
1369 if (bit != -1) {
1370 assert(vm_pages_free_mask_test(index, bit));
1371 pnum = (pnum & -MAX_COLORS) + bit;
1372 mem = vm_page_find_canonical(pnum);
1373 color = VM_PAGE_GET_COLOR_PNUM(pnum);
1374 queue = vm_page_free_queue_for_class(class, color);
1375 vm_page_queue_enter(queue, mem, vmp_pageq);
1376 }
1377 }
1378 } else
1379 #endif /* CONFIG_SPTM */
1380 {
1381 vm_page_queue_remove(queue, mem, vmp_pageq);
1382 }
1383
1384 switch (class) {
1385 case VM_MEMORY_CLASS_REGULAR:
1386 VM_COUNTER_DEC(&vm_page_queue_free.vmpfq_count);
1387 VM_COUNTER_DEC(&vm_page_free_count);
1388 break;
1389 #if XNU_VM_HAS_LOPAGE
1390 case VM_MEMORY_CLASS_LOPAGE:
1391 VM_COUNTER_DEC(&vm_lopage_free_count);
1392 vm_lopages_allocated_q += 1;
1393 if (vm_lopage_free_count < vm_lopage_lowater) {
1394 vm_lopage_refill = true;
1395 }
1396 break;
1397 #endif /* XNU_VM_HAS_LOPAGE */
1398 default:
1399 __builtin_unreachable();
1400 }
1401 }
1402
1403 vm_page_list_t
1404 vm_page_free_queue_grab(
1405 vm_grab_options_t options __unused,
1406 vm_memory_class_t class,
1407 unsigned int num_pages,
1408 vm_page_q_state_t q_state)
1409 {
1410 unsigned int *colorp;
1411 unsigned int color;
1412 #if defined(__x86_64__)
1413 unsigned int clump_end = 1;
1414 unsigned int sub_count = 0;
1415 #endif /* __x86_64__ */
1416 vm_page_list_t list = { };
1417
1418 if (startup_phase >= STARTUP_SUB_KMEM) {
1419 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
1420 }
1421 assert(get_preemption_level() != 0);
1422 assert(q_state <= VM_PAGE_Q_STATE_LAST_VALID_VALUE);
1423
1424
1425 colorp = PERCPU_GET(start_color);
1426 color = *colorp;
1427
1428 /* Get the pages. */
1429 while (list.vmpl_count < num_pages) {
1430 uint32_t color_offset = 1;
1431 vm_page_queue_t queue;
1432 vm_page_t mem;
1433
1434 queue = vm_page_free_queue_for_class(class, color);
1435 if (!vm_page_free_queue_has_colors(class)) {
1436 assert(!vm_page_queue_empty(queue));
1437 color_offset = 0;
1438 }
1439 while (vm_page_queue_empty(queue)) {
1440 color = (color + 1) & vm_color_mask;
1441 queue = vm_page_free_queue_for_class(class, color);
1442 }
1443
1444 #if defined(__x86_64__)
1445 if (class == VM_MEMORY_CLASS_REGULAR) {
1446 /*
1447 * x86_64 uses a bespoke free queue scheme, where the free path
1448 * tries to cluster clumps of contiguous pages together on
1449 * the free queue to optimize for the platform's memory
1450 * controller.
1451 */
1452 vm_page_queue_remove_first_with_clump(queue, mem, clump_end);
1453 sub_count++;
1454 if (clump_end) {
1455 #if DEVELOPMENT || DEBUG
1456 vm_clump_update_stats(sub_count);
1457 #endif /* DEVELOPMENT || DEBUG */
1458 sub_count = 0;
1459 } else {
1460 /* Only change colors at the end of a clump. */
1461 color_offset = 0;
1462 }
1463 } else
1464 #endif /* defined(__x86_64__) */
1465 {
1466 /* Other targets default to rotating colors after each pop. */
1467 vm_page_queue_remove_first(queue, mem, vmp_pageq);
1468 }
1469
1470 #if CONFIG_SPTM
1471 if (vm_pages_free_masks()) {
1472 ppnum_t pnum = VM_PAGE_GET_PHYS_PAGE(mem);
1473 ppnum_t first_pnum = pnum & -MAX_COLORS;
1474 uint32_t index = vm_pages_free_mask_index(pnum);
1475 int8_t bit = vm_pages_free_mask_bit(pnum);
1476
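/*
 * Starting with the page just unlinked, consume physically consecutive
 * free pages from this cluster's mask until the request is satisfied
 * or the next page of the cluster is not free.
 */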
1477 for (;;) {
1478 vm_pages_free_mask_clear(index, bit);
1479 mem->vmp_q_state = q_state;
1480 vm_page_list_push(&list, mem);
1481
1482 bit = (bit + 1) & (MAX_COLORS - 1);
1483
1484 if (!vm_pages_free_mask_test(index, bit) ||
1485 num_pages <= list.vmpl_count) {
1486 break;
1487 }
1488 mem = vm_page_find_canonical(first_pnum + bit);
1489 }
1490
1491 color = bit & vm_color_mask;
1492
1493 bit = vm_pages_free_mask_next_bit(index, bit);
1494 *vm_pages_free_enqueue_idx(index) = bit;
1495
1496 if (bit != -1) {
1497 assert(vm_pages_free_mask_test(index, bit));
1498 mem = vm_page_find_canonical(first_pnum + bit);
1499 queue = vm_page_free_queue_for_class(class,
1500 bit & vm_color_mask);
1501 vm_page_queue_enter_first(queue, mem, vmp_pageq);
1502 }
1503 } else
1504 #endif /* CONFIG_SPTM */
1505 {
1506 /* Set the page to the client's desired queue state. */
1507 mem->vmp_q_state = q_state;
1508 vm_page_list_push(&list, mem);
1509
1510 color = (color + color_offset) & vm_color_mask;
1511 }
1512 }
1513
1514 switch (class) {
1515 case VM_MEMORY_CLASS_REGULAR:
1516 VM_COUNTER_SUB(&vm_page_queue_free.vmpfq_count, list.vmpl_count);
1517 VM_COUNTER_SUB(&vm_page_free_count, list.vmpl_count);
1518 break;
1519 #if XNU_VM_HAS_LOPAGE
1520 case VM_MEMORY_CLASS_LOPAGE:
1521 VM_COUNTER_SUB(&vm_lopage_free_count, list.vmpl_count);
1522 vm_lopages_allocated_q += list.vmpl_count;
1523 if (vm_lopage_free_count < vm_lopage_lowater) {
1524 vm_lopage_refill = true;
1525 }
1526 break;
1527 #endif /* XNU_VM_HAS_LOPAGE */
1528 default:
1529 __builtin_unreachable();
1530 }
1531
1532 /* Record the next page color the CPU should try to get. */
1533 *colorp = color;
1534 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
1535 vm_clump_update_stats(sub_count);
1536 #endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
1537
1538 return list;
1539 }
1540
1541
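/*
 * Multiplier used to size vm_free_magazine_refill_limit (see
 * vm_page_set_colors() below): the per-CPU free page magazines are
 * refilled in batches of roughly vm_colors * COLOR_GROUPS_TO_STEAL pages.
 */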
1542 #define COLOR_GROUPS_TO_STEAL 4
1543
1544 /* Called once during startup, once the cache geometry is known.
1545 */
1546 static void
1547 vm_page_set_colors( void )
1548 {
1549 unsigned int n, override;
1550
1551 #if defined (__x86_64__)
1552 /* adjust #colors because we need to color outside the clump boundary */
1553 vm_cache_geometry_colors >>= vm_clump_shift;
1554 #endif
1555 if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */
1556 n = override;
1557 } else if (vm_cache_geometry_colors) { /* do we know what the cache geometry is? */
1558 n = vm_cache_geometry_colors;
1559 } else {
1560 n = DEFAULT_COLORS; /* use default if all else fails */
1561 }
1562 if (n == 0) {
1563 n = 1;
1564 }
1565 if (n > MAX_COLORS) {
1566 n = MAX_COLORS;
1567 }
1568
1569 /* the count must be a power of 2 */
1570 if ((n & (n - 1)) != 0) {
1571 n = DEFAULT_COLORS; /* use default if all else fails */
1572 }
1573 vm_colors = n;
1574 vm_color_mask = n - 1;
1575
1576 vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
1577
1578 #if defined (__x86_64__)
1579 /* adjust for reduction in colors due to clumping and multiple cores */
1580 if (real_ncpus) {
1581 vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
1582 }
1583 #endif
1584 }
1585
1586 #if XNU_VM_HAS_DELAYED_PAGES
1587
1588 static uint32_t vm_delayed_count = 0; /* when non-zero, indicates we may have more pages to init */
1589 static ppnum_t delay_above_pnum = PPNUM_MAX;
1590
1591 /*
1592 * On x86, the first 8 GB initializes quickly and gives us lots of lowmem plus memory above it to start off with.
1593 * If ARM ever uses delayed page initialization, this value may need to be quite different.
1594 */
1595 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
1596
1597 /*
1598 * When we have to dip into more delayed pages due to low memory, free up
1599 * a large chunk to get things back to normal. This avoids contention on the
1600 * delayed code allocating page by page.
1601 */
1602 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
1603
1604 /*
1605 * Get and initialize the next delayed page.
1606 */
1607 __attribute__((noinline))
1608 static vm_page_t
1609 vm_get_delayed_page(vm_grab_options_t grab_options)
1610 {
1611 vm_page_t p;
1612 ppnum_t pnum;
1613
1614 /*
1615 * Get a new page if we have one.
1616 */
1617 vm_free_page_lock();
1618 if (vm_delayed_count == 0) {
1619 vm_free_page_unlock();
1620 return NULL;
1621 }
1622
1623 if (!pmap_next_page(&pnum)) {
1624 vm_delayed_count = 0;
1625 vm_free_page_unlock();
1626 return NULL;
1627 }
1628
1629
1630 assert(vm_delayed_count > 0);
1631 --vm_delayed_count;
1632
1633 #if defined(__x86_64__)
1634 /* x86 cluster code requires increasing phys_page in vm_pages[] */
1635 if (vm_pages_count > 0) {
1636 assert(pnum > vm_page_get(vm_pages_count - 1)->vmp_phys_page);
1637 }
1638 #endif
1639 p = vm_page_get(vm_pages_count);
1640 assert(p < vm_pages_end);
1641 vm_page_init(p, pnum);
1642 ++vm_pages_count;
1643 ++vm_page_pages;
1644 vm_free_page_unlock();
1645
1646 /*
1647 * These pages were initially counted as wired, undo that now.
1648 */
1649 if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
1650 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1651 } else {
1652 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
1653 vm_page_lockspin_queues();
1654 }
1655 --vm_page_wire_count;
1656 --vm_page_wire_count_initial;
1657 if (vm_page_wire_count_on_boot != 0) {
1658 --vm_page_wire_count_on_boot;
1659 }
1660 if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
1661 vm_page_unlock_queues();
1662 }
1663
1664
1665 if (fillval) {
1666 fillPage(pnum, fillval);
1667 }
1668 return p;
1669 }
1670
1671 /*
1672 * Free all remaining delayed pages to the free lists.
1673 */
1674 void
1675 vm_free_delayed_pages(void)
1676 {
1677 vm_page_t p;
1678 vm_page_t list = NULL;
1679 uint_t cnt = 0;
1680 vm_offset_t start_free_va;
1681 int64_t free_size;
1682
1683 while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
1684 if (vm_himemory_mode) {
1685 vm_page_release(p, VMP_RELEASE_NONE);
1686 } else {
1687 p->vmp_snext = list;
1688 list = p;
1689 }
1690 ++cnt;
1691 }
1692
1693 /*
1694 * Free the pages in reverse order when not in himemory mode, so that
1695 * the low memory pages end up first on the free lists (LIFO).
1696 */
1697 while (list != NULL) {
1698 p = list;
1699 list = p->vmp_snext;
1700 p->vmp_snext = NULL;
1701 vm_page_release(p, VMP_RELEASE_NONE);
1702 }
1703 #if DEVELOPMENT || DEBUG
1704 kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
1705 #endif
1706
1707 /*
1708 * Free up any unused full pages at the end of the vm_pages[] array
1709 */
1710 start_free_va = round_page((vm_offset_t)vm_page_get(vm_pages_count));
1711
1712 #if defined(__x86_64__)
1713 /*
1714 * Since x86 might have used large pages for vm_pages[], we can't
1715 * free starting in the middle of a partially used large page.
1716 */
1717 if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
1718 start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
1719 }
1720 #endif
1721 if (start_free_va < (vm_offset_t)vm_pages_end) {
1722 free_size = trunc_page((vm_offset_t)vm_pages_end - start_free_va);
1723 if (free_size > 0) {
1724 ml_static_mfree(start_free_va, (vm_offset_t)free_size);
1725 vm_pages_end = (void *)start_free_va;
1726
1727 /*
1728 * Note there's no locking here, as only this thread will ever change this value.
1729 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
1730 */
1731 vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
1732
1733 #if DEVELOPMENT || DEBUG
1734 kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
1735 (long)free_size, (long)start_free_va);
1736 #endif
1737 }
1738 }
1739 }
1740
1741 /*
1742 * Try and free up enough delayed pages to match a contig memory allocation.
1743 */
1744 static void
1745 vm_free_delayed_pages_contig(
1746 uint_t npages,
1747 ppnum_t max_pnum,
1748 ppnum_t pnum_mask)
1749 {
1750 vm_page_t p;
1751 ppnum_t pnum;
1752 uint_t cnt = 0;
1753
1754 /*
1755 * Treat 0 as the absolute max page number.
1756 */
1757 if (max_pnum == 0) {
1758 max_pnum = PPNUM_MAX;
1759 }
1760
1761 /*
1762 * Free till we get a properly aligned start page
1763 */
1764 for (;;) {
1765 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
1766 if (p == NULL) {
1767 return;
1768 }
1769 pnum = VM_PAGE_GET_PHYS_PAGE(p);
1770 vm_page_release(p, VMP_RELEASE_NONE);
1771 if (pnum >= max_pnum) {
1772 return;
1773 }
1774 if ((pnum & pnum_mask) == 0) {
1775 break;
1776 }
1777 }
1778
1779 /*
1780 * Having a healthy pool of free pages will help performance. We don't
1781 * want to fall back to the delayed code for every page allocation.
1782 */
1783 if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
1784 npages += VM_DELAY_PAGE_CHUNK;
1785 }
1786
1787 /*
1788 * Now free up the pages
1789 */
1790 for (cnt = 1; cnt < npages; ++cnt) {
1791 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
1792 if (p == NULL) {
1793 return;
1794 }
1795 vm_page_release(p, VMP_RELEASE_NONE);
1796 }
1797 }
1798
1799 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1800
1801 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
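/*
 * Illustrative expansion (not from the original source): for a 32-bit X,
 * ROUNDUP_NEXTP2(X) rounds up to the next power of two and leaves exact
 * powers of two unchanged, e.g. ROUNDUP_NEXTP2(24) == 32 and
 * ROUNDUP_NEXTP2(32) == 32. Note that X <= 1 would hand 0 to
 * __builtin_clz(), which is undefined, so callers are expected to pass X > 1.
 */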
1802
1803 void
1804 vm_page_init_local_q(unsigned int num_cpus)
1805 {
1806 struct vpl *t_local_q;
1807
1808 /*
1809 * no point in this for a uni-processor system
1810 */
1811 if (num_cpus >= 2) {
1812 ml_cpu_info_t cpu_info;
1813
1814 /*
1815 * Force the allocation alignment to a cache line:
1816 * the `vpl` struct contains a lock that is taken
1817 * cross-CPU, so we isolate it from the rest of the per-CPU
1818 * data to avoid false sharing when that lock is contended.
1819 */
1820
1821 ml_cpu_get_info(&cpu_info);
1822
1823 t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
1824 cpu_info.cache_line_size - 1);
1825
1826 zpercpu_foreach(lq, t_local_q) {
1827 VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
1828 vm_page_queue_init(&lq->vpl_queue);
1829 }
1830
1831 /* make the initialization visible to all cores */
1832 os_atomic_store(&vm_page_local_q, t_local_q, release);
1833 }
1834 }
1835
1836 /*
1837 * vm_init_before_launchd
1838 *
1839 * This should be called right before launchd is loaded.
1840 */
1841 void
1842 vm_init_before_launchd(void)
1843 {
1844 vm_page_lockspin_queues();
1845 vm_page_wire_count_on_boot = vm_page_wire_count;
1846 vm_page_unlock_queues();
1847 }
1848
1849
1850 /*
1851 * vm_page_bootstrap:
1852 *
1853 * Initializes the resident memory module.
1854 *
1855 * Allocates memory for the page cells, and
1856 * for the object/offset-to-page hash table headers.
1857 * Each page cell is initialized and placed on the free list.
1858 * Returns the range of available kernel virtual memory.
1859 */
1860 __startup_func
1861 void
1862 vm_page_bootstrap(
1863 vm_offset_t *startp,
1864 vm_offset_t *endp)
1865 {
1866 unsigned int i;
1867 unsigned int log1;
1868 unsigned int log2;
1869 unsigned int size;
1870
1871 /*
1872 * Initialize the page queues.
1873 */
1874
1875 lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
1876 lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
1877 lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
1878
1879 for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
1880 int group;
1881
1882 purgeable_queues[i].token_q_head = 0;
1883 purgeable_queues[i].token_q_tail = 0;
1884 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1885 queue_init(&purgeable_queues[i].objq[group]);
1886 }
1887
1888 purgeable_queues[i].type = i;
1889 purgeable_queues[i].new_pages = 0;
1890 #if MACH_ASSERT
1891 purgeable_queues[i].debug_count_tokens = 0;
1892 purgeable_queues[i].debug_count_objects = 0;
1893 #endif
1894 }
1895 ;
1896 purgeable_nonvolatile_count = 0;
1897 queue_init(&purgeable_nonvolatile_queue);
1898
1899 vm_page_free_queue_init(&vm_page_queue_free);
1900 #if XNU_VM_HAS_LOPAGE
1901 vm_page_queue_init(&vm_lopage_queue_free);
1902 #endif /* XNU_VM_HAS_LOPAGE */
1903 vm_page_queue_init(&vm_page_queue_active);
1904 vm_page_queue_init(&vm_page_queue_inactive);
1905 #if CONFIG_SECLUDED_MEMORY
1906 vm_page_queue_init(&vm_page_queue_secluded);
1907 #endif /* CONFIG_SECLUDED_MEMORY */
1908 vm_page_queue_init(&vm_page_queue_cleaned);
1909 vm_page_queue_init(&vm_page_queue_throttled);
1910 vm_page_queue_init(&vm_page_queue_anonymous);
1911 queue_init(&vm_objects_wired);
1912
1913 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
1914 vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
1915
1916 vm_page_queue_speculative[i].age_ts.tv_sec = 0;
1917 vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
1918 }
1919
1920 vm_page_queue_init(&vm_page_queue_donate);
1921 vm_page_queue_init(&vm_page_queue_background);
1922
1923 vm_page_background_count = 0;
1924 vm_page_background_internal_count = 0;
1925 vm_page_background_external_count = 0;
1926 vm_page_background_promoted_count = 0;
1927
1928 vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
1929
1930 if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
1931 vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
1932 }
1933
1934 #if defined(__LP64__)
1935 vm_page_background_mode = VM_PAGE_BG_ENABLED;
1936 vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
1937 #else
1938 vm_page_background_mode = VM_PAGE_BG_DISABLED;
1939 vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1940 #endif
1941 vm_page_background_exclude_external = 0;
1942
1943 PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1944 PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1945 PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1946
1947 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1948 vm_page_background_mode = VM_PAGE_BG_DISABLED;
1949 }
1950
1951 PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1952 if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1953 vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1954 }
1955
1956 vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1957 vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1958 vm_page_donate_target = vm_page_donate_target_high;
1959 vm_page_donate_count = 0;
1960
1961 vm_page_free_wanted = 0;
1962 vm_page_free_wanted_privileged = 0;
1963 #if CONFIG_SECLUDED_MEMORY
1964 vm_page_free_wanted_secluded = 0;
1965 #endif /* CONFIG_SECLUDED_MEMORY */
1966
1967 #if defined (__x86_64__)
1968 /* this must be called before vm_page_set_colors() */
1969 vm_page_setup_clump();
1970 #endif
1971
1972 vm_page_set_colors();
1973
1974 for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1975 vm_allocation_sites_static[t].refcount = 2;
1976 vm_allocation_sites_static[t].tag = t;
1977 vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1978 }
1979 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1980 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1981 vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1982
1983 /*
1984 * Steal memory for the map and zone subsystems.
1985 *
1986 * Make sure initialize_ram_ranges() has run before we steal pages for the first time on ARM.
1987 */
1988 (void)pmap_free_pages();
1989
1990 kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1991
1992 /*
1993 * Allocate (and initialize) the virtual-to-physical
1994 * table hash buckets.
1995 *
1996 * The number of buckets should be a power of two to
1997 * get a good hash function. The following computation
1998 * chooses the first power of two that is not less
1999 * than the number of physical pages in the system.
2000 */
2001
2002 if (vm_page_bucket_count == 0) {
2003 unsigned int npages = pmap_free_pages();
2004
2005 vm_page_bucket_count = 1;
2006 while (vm_page_bucket_count < npages) {
2007 vm_page_bucket_count <<= 1;
2008 }
2009 }
2010 vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
2011
2012 vm_page_hash_mask = vm_page_bucket_count - 1;
2013
2014 /*
2015 * Calculate object shift value for hashing algorithm:
2016 * O = log2(sizeof(struct vm_object))
2017 * B = log2(vm_page_bucket_count)
2018 * hash shifts the object left by
2019 * B/2 - O
2020 */
2021 size = vm_page_bucket_count;
2022 for (log1 = 0; size > 1; log1++) {
2023 size /= 2;
2024 }
2025 size = sizeof(struct vm_object);
2026 for (log2 = 0; size > 1; log2++) {
2027 size /= 2;
2028 }
2029 vm_page_hash_shift = log1 / 2 - log2 + 1;
2030
2031 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
2032 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
2033 vm_page_bucket_hash |= 1; /* Set bit and add 1 - must always be 1 to ensure a unique series */
2034
2035 if (vm_page_hash_mask & vm_page_bucket_count) {
2036 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
2037 }
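/*
 * Worked example (illustrative, not from the original source): with
 * vm_page_bucket_count == 1 << 20 we get log1 == 20, so
 * vm_page_bucket_hash = (1 << 10) | (1 << 5) | 1 == 0x421, i.e. roughly the
 * square root and fourth root of the table size OR'ed with 1 to keep the
 * multiplier odd.
 */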
2038
2039 #if VM_PAGE_BUCKETS_CHECK
2040 #if VM_PAGE_FAKE_BUCKETS
2041 /*
2042 * Allocate a decoy set of page buckets, to detect
2043 * any stomping there.
2044 */
2045 vm_page_fake_buckets = (vm_page_bucket_t *)
2046 pmap_steal_memory(vm_page_bucket_count *
2047 sizeof(vm_page_bucket_t), 0);
2048 vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
2049 vm_page_fake_buckets_end =
2050 vm_map_round_page((vm_page_fake_buckets_start +
2051 (vm_page_bucket_count *
2052 sizeof(vm_page_bucket_t))),
2053 PAGE_MASK);
2054 char *cp;
2055 for (cp = (char *)vm_page_fake_buckets_start;
2056 cp < (char *)vm_page_fake_buckets_end;
2057 cp++) {
2058 *cp = 0x5a;
2059 }
2060 #endif /* VM_PAGE_FAKE_BUCKETS */
2061 #endif /* VM_PAGE_BUCKETS_CHECK */
2062
2063 kernel_debug_string_early("vm_page_buckets");
2064 vm_page_buckets = (vm_page_bucket_t *)
2065 pmap_steal_memory(vm_page_bucket_count *
2066 sizeof(vm_page_bucket_t), 0);
2067
2068 kernel_debug_string_early("vm_page_bucket_locks");
2069 vm_page_bucket_locks = (lck_ticket_t *)
2070 pmap_steal_memory(vm_page_bucket_lock_count *
2071 sizeof(lck_ticket_t), 0);
2072
2073 for (i = 0; i < vm_page_bucket_count; i++) {
2074 vm_page_bucket_t *bucket = &vm_page_buckets[i];
2075
2076 bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
2077 #if MACH_PAGE_HASH_STATS
2078 bucket->cur_count = 0;
2079 bucket->hi_count = 0;
2080 #endif /* MACH_PAGE_HASH_STATS */
2081 }
2082
2083 for (i = 0; i < vm_page_bucket_lock_count; i++) {
2084 lck_ticket_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket);
2085 }
2086
2087 vm_tag_init();
2088
2089 #if VM_PAGE_BUCKETS_CHECK
2090 vm_page_buckets_check_ready = TRUE;
2091 #endif /* VM_PAGE_BUCKETS_CHECK */
2092
2093 /*
2094 * Machine-dependent code allocates the resident page table.
2095 * It uses vm_page_init to initialize the page frames.
2096 * The code also returns to us the virtual space available
2097 * to the kernel. We don't trust the pmap module
2098 * to get the alignment right.
2099 */
2100
2101 kernel_debug_string_early("pmap_startup");
2102 pmap_startup(&virtual_space_start, &virtual_space_end);
2103 virtual_space_start = round_page(virtual_space_start);
2104 virtual_space_end = trunc_page(virtual_space_end);
2105
2106 *startp = virtual_space_start;
2107 *endp = virtual_space_end;
2108
2109 /*
2110 * Compute the initial "wire" count.
2111 * Up until now, the pages which have been set aside are not under
2112 * the VM system's control, so although they aren't explicitly
2113 * wired, they nonetheless can't be moved. At this moment,
2114 * all VM managed pages are "free", courtesy of pmap_startup.
2115 */
2116 assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
2117 vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
2118 vm_page_free_count - vm_lopage_free_count;
2119 #if CONFIG_SECLUDED_MEMORY
2120 vm_page_wire_count -= vm_page_secluded_count;
2121 #endif
2122 vm_page_wire_count_initial = vm_page_wire_count;
2123
2124 /* capture this for later use */
2125 booter_size = ml_get_booter_memory_size();
2126
2127 printf("vm_page_bootstrap: %d free pages, %d wired pages"
2128 #if XNU_VM_HAS_DELAYED_PAGES
2129 ", (up to %d of which are delayed free)"
2130 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2131 "%c",
2132 vm_page_free_count,
2133 vm_page_wire_count,
2134 #if XNU_VM_HAS_DELAYED_PAGES
2135 vm_delayed_count,
2136 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2137 '\n');
2138
2139 kernel_debug_string_early("vm_page_bootstrap complete");
2140 }
2141
2142 #ifndef MACHINE_PAGES
2143 /*
2144 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
2145 * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
2146 * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
2147 */
2148 static void *
2149 pmap_steal_memory_internal(
2150 vm_size_t size,
2151 vm_size_t alignment,
2152 boolean_t might_free,
2153 unsigned int flags,
2154 pmap_mapping_type_t mapping_type)
2155 {
2156 kern_return_t kr;
2157 vm_offset_t addr;
2158 vm_offset_t map_addr;
2159 ppnum_t phys_page;
2160 unsigned int pmap_flags;
2161
2162 /*
2163 * Size needs to be aligned to word size.
2164 */
2165 size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
2166
2167 /*
2168 * Alignment defaults to word size if not specified.
2169 */
2170 if (alignment == 0) {
2171 alignment = sizeof(void*);
2172 }
2173
2174 /*
2175 * Alignment must be no greater than a page and must be a power of two.
2176 */
2177 assert(alignment <= PAGE_SIZE);
2178 assert((alignment & (alignment - 1)) == 0);
2179
2180 /*
2181 * On the first call, get the initial values for virtual address space
2182 * and page align them.
2183 */
2184 if (virtual_space_start == virtual_space_end) {
2185 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
2186 virtual_space_start = round_page(virtual_space_start);
2187 virtual_space_end = trunc_page(virtual_space_end);
2188
2189 #if defined(__x86_64__)
2190 /*
2191 * Release remaining unused section of preallocated KVA and the 4K page tables
2192 * that map it. This makes the VA available for large page mappings.
2193 */
2194 Idle_PTs_release(virtual_space_start, virtual_space_end);
2195 #endif
2196 }
2197
2198 /*
2199 * Allocate the virtual space for this request. On x86, we'll align to a large page
2200 * address if the size is big enough to back with at least 1 large page.
2201 */
2202 #if defined(__x86_64__)
2203 if (size >= I386_LPGBYTES) {
2204 virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
2205 }
2206 #endif
2207 virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
2208 addr = virtual_space_start;
2209 virtual_space_start += size;
2210
2211 //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
2212
2213 /*
2214 * Allocate and map physical pages to back the new virtual space.
2215 */
2216 map_addr = round_page(addr);
2217 while (map_addr < addr + size) {
2218 #if defined(__x86_64__)
2219 /*
2220 * Back with a large page if properly aligned on x86
2221 */
2222 if ((map_addr & I386_LPGMASK) == 0 &&
2223 map_addr + I386_LPGBYTES <= addr + size &&
2224 pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
2225 pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
2226 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
2227 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
2228 VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);
2229
2230 if (kr != KERN_SUCCESS) {
2231 panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
2232 (unsigned long)map_addr, phys_page);
2233 }
2234 map_addr += I386_LPGBYTES;
2235 vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
2236 vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
2237 vm_page_kern_lpage_count++;
2238 continue;
2239 }
2240 #endif
2241
2242 if (!pmap_next_page_hi(&phys_page, might_free)) {
2243 panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
2244 }
2245
2246 #if defined(__x86_64__)
2247 pmap_pre_expand(kernel_pmap, map_addr);
2248 #endif
2249 pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
2250
2251
2252 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
2253 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
2254 pmap_flags, FALSE, mapping_type);
2255
2256 if (kr != KERN_SUCCESS) {
2257 panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
2258 (unsigned long)map_addr, phys_page);
2259 }
2260 map_addr += PAGE_SIZE;
2261
2262 /*
2263 * Account for newly stolen memory
2264 */
2265 vm_page_wire_count++;
2266 vm_page_stolen_count++;
2267 }
2268
2269 #if defined(__x86_64__)
2270 /*
2271 * The call with might_free is currently the last use of pmap_steal_memory*().
2272 * Notify the pmap layer to record which high pages were allocated so far.
2273 */
2274 if (might_free) {
2275 pmap_hi_pages_done();
2276 }
2277 #endif
2278 #if KASAN
2279 kasan_notify_address(round_page(addr), size);
2280 #endif
2281 return (void *) addr;
2282 }
2283
2284 void *
2285 pmap_steal_memory(
2286 vm_size_t size,
2287 vm_size_t alignment)
2288 {
2289 return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
2290 }
2291
2292 void *
2293 pmap_steal_freeable_memory(
2294 vm_size_t size)
2295 {
2296 return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
2297 }
2298
2299
2300
2301 #if CONFIG_SECLUDED_MEMORY
2302 /* boot-args to control secluded memory */
2303 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
2304 /* IOKit can use secluded memory */
2305 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
2306 /* apps can use secluded memory */
2307 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
2308 /* filecache can use secluded memory */
2309 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
2310 uint64_t secluded_shutoff_trigger = 0;
2311 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
2312 #endif /* CONFIG_SECLUDED_MEMORY */
2313
2314
2315 #if defined(__arm64__)
2316 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
2317 #endif
2318
2319 void vm_page_release_startup(vm_page_t mem);
2320 void
2321 pmap_startup(
2322 vm_offset_t *startp,
2323 vm_offset_t *endp)
2324 {
2325 unsigned int npages;
2326 ppnum_t phys_page;
2327 uint64_t mem_sz;
2328 uint64_t start_ns;
2329 uint64_t now_ns;
2330 uint32_t divisor;
2331 #if XNU_VM_HAS_DELAYED_PAGES
2332 uint_t low_page_count = 0;
2333 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2334
2335 /*
2336 * make sure we are aligned on a 64 byte boundary
2337 * for VM_PAGE_PACK_PTR (it clips off the low-order
2338 * 6 bits of the pointer)
2339 */
2340 if (virtual_space_start != virtual_space_end) {
2341 virtual_space_start = round_page(virtual_space_start);
2342 }
2343
2344 /*
2345 * We calculate how many page frames we will have
2346 * and then allocate the page structures in one chunk.
2347 *
2348 * Note that the calculation here doesn't take into account
2349 * the memory needed to map what's being allocated, i.e. the page
2350 * table entries. So the actual number of pages we get will be
2351 * less than this. To do someday: include that in the computation.
2352 *
2353 * Also for ARM, we don't use the count of free_pages, but rather the
2354 * range from last page to first page (ignore holes due to retired pages).
2355 */
2356
2357 /*
2358 * Initialize and release the page frames.
2359 */
2360 kernel_debug_string_early("page_frame_init");
2361 absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
2362 if (fillval) {
2363 kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
2364 }
2365
2366 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2367 mem_sz = ptoa(pmap_free_pages_span());
2368 #if CONFIG_SPTM
2369 {
2370 uint32_t count = vm_pages_free_mask_len();
2371
2372 _vm_pages_free_masks = pmap_steal_memory(count *
2373 sizeof(__uint128_t), sizeof(__uint128_t));
2374 _vm_pages_free_enqueue_idx = pmap_steal_memory(count, sizeof(uint8_t));
2375 bzero(_vm_pages_free_masks, count * sizeof(__uint128_t));
2376 memset(_vm_pages_free_enqueue_idx, 0xff, count);
2377 }
2378 #endif /* CONFIG_SPTM */
2379 #else
2380 mem_sz = ptoa(pmap_free_pages());
2381 #endif
2382 mem_sz += round_page(virtual_space_start) - virtual_space_start; /* Account for any slop */
2383 divisor = PAGE_SIZE + sizeof(struct vm_page);
2384 npages = (uint32_t)((mem_sz + divisor - 1) / divisor); /* scaled to include the vm_page_ts */
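/*
 * Illustrative arithmetic (assumes a 16K page and a struct vm_page of
 * roughly 80 bytes, neither of which is guaranteed here): every usable page
 * of RAM also costs sizeof(struct vm_page) of metadata, so npages solves
 * npages * (PAGE_SIZE + sizeof(struct vm_page)) ~= mem_sz, e.g.
 * 1 GiB / (16384 + 80) is about 65,218 pages rather than 65,536.
 */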
2385
2386
2387 vm_pages = pmap_steal_freeable_memory(npages * sizeof(struct vm_page));
2388 vm_pages_end = vm_page_get(npages);
2389
2390 #if CONFIG_SECLUDED_MEMORY
2391 /*
2392 * Figure out how much secluded memory to have before we start
2393 * releasing pages to the free lists.
2394 * The default, if specified nowhere else, is no secluded mem.
2395 */
2396 vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
2397
2398 /*
2399 * Allow a really large app to effectively use secluded memory until it exits.
2400 */
2401 if (vm_page_secluded_target != 0) {
2402 /*
2403 * Get an amount from boot-args, else use 1/2 of max_mem.
2404 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
2405 * used munch to induce jetsam thrashing of false idle daemons on N56.
2406 */
2407 int secluded_shutoff_mb;
2408 if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
2409 sizeof(secluded_shutoff_mb))) {
2410 secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
2411 } else {
2412 secluded_shutoff_trigger = max_mem / 2;
2413 }
2414
2415 /* ensure the headroom value is sensible and avoid underflows */
2416 assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
2417 }
2418 #endif /* CONFIG_SECLUDED_MEMORY */
2419
2420 #if defined(__x86_64__)
2421
2422 /*
2423 * Decide how much memory we delay freeing at boot time.
2424 */
2425 uint32_t delay_above_gb;
2426 if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
2427 delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
2428 }
2429
2430 if (delay_above_gb == 0) {
2431 delay_above_pnum = PPNUM_MAX;
2432 } else {
2433 delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
2434 }
2435
2436 /* make sure we have sane breathing room: 1G above low memory */
2437 if (delay_above_pnum <= max_valid_low_ppnum) {
2438 delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
2439 }
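/*
 * Illustrative conversion (assuming 4K pages on x86_64): delay_above_gb == 4
 * yields delay_above_pnum == 4 * (1 GB / 4 KB) == 0x100000, so only page
 * frames above 4 GB of physical memory get the delayed treatment.
 */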
2440
2441 if (delay_above_pnum < PPNUM_MAX) {
2442 printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
2443 }
2444
2445 #endif /* defined(__x86_64__) */
2446
2447
2448 for (uint32_t i = 0; i < npages && pmap_next_page(&phys_page); i++) {
2449 #if XNU_VM_HAS_DELAYED_PAGES
2450 if (phys_page < max_valid_low_ppnum) {
2451 ++low_page_count;
2452 }
2453
2454 /* Are we at high enough pages to delay the rest? */
2455 if (low_page_count > vm_lopage_free_limit &&
2456 phys_page > delay_above_pnum) {
2457 vm_delayed_count = pmap_free_pages();
2458 assert3u(vm_pages_count + vm_delayed_count, <=, npages);
2459 break;
2460 }
2461 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2462
2463 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2464 if (i == 0) {
2465 vm_pages_first_pnum = phys_page;
2466 patch_low_glo_vm_page_info(vm_pages, vm_pages_end,
2467 vm_pages_first_pnum);
2468 }
2469 #else
2470 /* The x86 clump freeing code requires increasing ppn's to work correctly */
2471 if (i > 0) {
2472 assert(phys_page > vm_page_get(i - 1)->vmp_phys_page);
2473 }
2474 #endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2475
2476 ++vm_pages_count;
2477 vm_page_init(vm_page_get(i), phys_page);
2478 if (fillval) {
2479 fillPage(phys_page, fillval);
2480 }
2481 if (vm_himemory_mode) {
2482 vm_page_release_startup(vm_page_get(i));
2483 }
2484 }
2485
2486 vm_page_pages = vm_pages_count; /* used to report to user space */
2487
2488 if (!vm_himemory_mode) {
2489 for (uint32_t i = npages; i-- > 0;) {
2490 /* skip retired pages */
2491 if (!VMP_ERROR_GET(vm_page_get(i))) {
2492 vm_page_release_startup(vm_page_get(i));
2493 }
2494 }
2495 }
2496
2497 absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
2498 printf("pmap_startup() init/release time: %lld microsec\n",
2499 (now_ns - start_ns) / NSEC_PER_USEC);
2500 #if XNU_VM_HAS_DELAYED_PAGES
2501 printf("pmap_startup() delayed init/release of %d pages\n",
2502 vm_delayed_count);
2503 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2504
2505 /*
2506 * Validate packing will work properly. This needs to be done last
2507 * after vm_pages_count has been computed.
2508 */
2509 if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
2510 panic("pmap_startup(): too many pages to support vm_page packing");
2511 }
2512 if ((vm_page_t)VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(vm_pages)) != vm_pages) {
2513 panic("VM_PAGE_PACK_PTR failed on vm_pages - %p", vm_pages);
2514 }
2515 if ((vm_page_t)VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(vm_page_get(vm_pages_count - 1))) !=
2516 vm_page_get(vm_pages_count - 1)) {
2517 panic("VM_PAGE_PACK_PTR failed on vm_pages_end - %p",
2518 vm_page_get(vm_pages_count - 1));
2519 }
2520
2521 VM_CHECK_MEMORYSTATUS;
2522
2523 /*
2524 * We have to re-align virtual_space_start,
2525 * because pmap_steal_memory has been using it.
2526 */
2527 virtual_space_start = round_page(virtual_space_start);
2528 *startp = virtual_space_start;
2529 *endp = virtual_space_end;
2530 }
2531 #endif /* MACHINE_PAGES */
2532
2533 /*
2534 * Create the zone that represents the vm_pages[] array. Nothing ever allocates
2535 * or frees to this zone. It's just here for reporting purposes via the zprint command.
2536 * This needs to be done after all initially delayed pages are put on the free lists.
2537 */
2538 void
2539 vm_pages_array_finalize(void)
2540 {
2541 (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
2542 ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
2543 uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
2544
2545 zone_set_exhaustible(z, 0, true);
2546 /*
2547 * Reflect size and usage information for vm_pages[].
2548 */
2549
2550 z->z_elems_avail = (uint32_t)(vm_pages_end - vm_pages);
2551 z->z_elems_free = z->z_elems_avail - vm_pages_count;
2552 zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
2553 vm_pages_count * sizeof(struct vm_page);
2554 vm_page_array_zone_data_size = (uint64_t)vm_pages_end - (uint64_t)vm_pages;
2555 vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
2556 z->z_wired_cur += vm_page_zone_pages;
2557 z->z_wired_hwm = z->z_wired_cur;
2558 z->z_va_cur = z->z_wired_cur;
2559 /* since zone accounts for these, take them out of stolen */
2560 VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
2561 });
2562 }
2563
2564 /*
2565 * Create the vm_pages zone. This is used for the vm_page structures for the pages
2566 * that are scavenged from other boot-time usages by ml_static_mfree(). As such,
2567 * this needs to happen in early VM bootstrap.
2568 */
2569
2570 __startup_func
2571 static void
2572 vm_page_module_init(void)
2573 {
2574 vm_size_t vm_page_with_ppnum_size;
2575
2576 /*
2577 * Since the pointers to elements in this zone will be packed, the elements
2578 * must be padded to the packed-pointer alignment, not strictly what sizeof() reports.
2579 */
2580 vm_page_with_ppnum_size =
2581 (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
2582 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
2583
2584 vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
2585 ZC_ALIGNMENT_REQUIRED | ZC_VM,
2586 ZONE_ID_ANY, ^(zone_t z) {
2587 /*
2588 * The number "10" is a small number that is larger than the number
2589 * of fictitious pages that any single caller will attempt to allocate
2590 * without blocking.
2591 *
2592 * The largest such number at the moment is kmem_alloc()
2593 * when 2 guard pages are requested. 10 is simply a somewhat larger number,
2594 * taking into account the 50% hysteresis the zone allocator uses.
2595 *
2596 * Note: this works at all because the zone allocator
2597 * doesn't ever allocate fictitious pages.
2598 */
2599 zone_raise_reserve(z, 10);
2600 });
2601 }
2602 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
2603
2604 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2605 /*
2606 * Radix tree of pages within the [pmap_first_pnum, vm_pages_first_pnum) range,
2607 * in order to support page lookup by pnum (@see vm_page_find_canonical()),
2608 * which corresponds to pages returned to the VM via @c ml_static_mfree().
2609 *
2610 * Kernel vm pages are never freed, which means that this data structure
2611 * is insert only.
2612 *
2613 * Empirically we have about 4-5k such pages, typically in only a few rather dense
2614 * contiguous spans, inside a range of roughly 32k pnums.
2615 *
2616 * A radix tree works well with the distribution of keys, but also allows for
2617 * a straightforward lockless lookup path.
2618 */
2619
2620 #define VM_PAGE_RADIX_FANOUT_SHIFT 8
2621 #define VM_PAGE_RADIX_FANOUT (1u << VM_PAGE_RADIX_FANOUT_SHIFT)
2622
2623 typedef uint32_t vm_page_radix_ptr_t;
2624
2625 typedef struct vm_page_radix_node {
2626 vm_page_radix_ptr_t vmpr_array[VM_PAGE_RADIX_FANOUT];
2627 } *vm_page_radix_node_t;
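/*
 * Illustrative layout (not from the original source): with
 * VM_PAGE_RADIX_FANOUT_SHIFT == 8, an index of up to 16 bits needs a
 * two-level tree; bits [15:8] pick the slot in the level-1 node and
 * bits [7:0] pick the leaf slot holding the packed vm_page pointer.
 */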
2628
2629 static LCK_GRP_DECLARE(vm_pages_radix_lock_grp, "VM pages radix");
2630 static LCK_MTX_DECLARE(vm_pages_radix_lock, &vm_pages_radix_lock_grp);
2631
2632 static SECURITY_READ_ONLY_LATE(uintptr_t) vm_pages_radix_root;
2633 static uint32_t vm_pages_radix_count;
2634
2635 static vm_page_radix_node_t
2636 vm_page_radix_node_unpack(vm_page_radix_ptr_t ptr)
2637 {
2638 return (vm_page_radix_node_t)VM_UNPACK_POINTER(ptr, VM_PAGE_PACKED_PTR);
2639 }
2640
2641 static vm_page_radix_ptr_t
2642 vm_page_radix_node_pack(vm_page_radix_node_t node)
2643 {
2644 vm_offset_t ptr = (vm_offset_t)node;
2645
2646 VM_ASSERT_POINTER_PACKABLE(ptr, VM_PAGE_PACKED_PTR);
2647 return (vm_page_radix_ptr_t)VM_PACK_POINTER(ptr, VM_PAGE_PACKED_PTR);
2648 }
2649
2650 static uint32_t
2651 vm_page_radix_key(uint32_t level, uint32_t index)
2652 {
2653 uint32_t key = index >> (VM_PAGE_RADIX_FANOUT_SHIFT * level);
2654
2655 return key & (VM_PAGE_RADIX_FANOUT - 1);
2656 }
2657
2658 static vm_page_radix_ptr_t *
2659 vm_page_radix_slot(vm_page_radix_node_t node, uint32_t level, uint32_t index)
2660 {
2661 return node->vmpr_array + vm_page_radix_key(level, index);
2662 }
2663
2664 __startup_func
2665 __attribute__((noinline))
2666 static vm_page_radix_node_t
2667 vm_pages_radix_init_root(uint32_t *levelp)
2668 {
2669 uint32_t max_index = vm_pages_first_pnum - pmap_first_pnum - 1;
2670 vm_page_radix_node_t root;
2671 uint32_t level;
2672 vm_size_t size;
2673
2674 /*
2675 * Init a top-level node right away, to cover any index within
2676 * [0, vm_pages_first_pnum - pmap_first_pnum)
2677 */
2678 level = (fls(max_index | 1) - 1) / VM_PAGE_RADIX_FANOUT_SHIFT;
2679 size = (vm_page_radix_key(level, max_index) + 1) *
2680 sizeof(vm_page_radix_ptr_t);
2681
2682 root = zalloc_permanent(size, ZALIGN_64);
2683
2684 /*
2685 * Pack the level into the root pointer low bits,
2686 * so that pointer and level can be read atomically.
2687 *
2688 * See vm_pages_radix_load_root().
2689 */
2690 os_atomic_store(&vm_pages_radix_root, (uintptr_t)root | level, release);
2691
2692 *levelp = level;
2693 return root;
2694 }
2695
2696 static vm_page_radix_node_t
2697 vm_pages_radix_node_alloc(vm_page_radix_ptr_t *slot)
2698 {
2699 vm_page_radix_node_t node;
2700
2701 node = zalloc_permanent(sizeof(struct vm_page_radix_node),
2702 VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
2703 os_atomic_store(slot, vm_page_radix_node_pack(node), release);
2704 return node;
2705 }
2706
2707 static vm_page_radix_node_t
2708 vm_pages_radix_load_root(uint32_t *level)
2709 {
2710 const uintptr_t VM_PAGE_RADIX_LEVEL_MASK = 0x7ul;
2711
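/*
 * The dependency-ordered load below pairs with the release store in
 * vm_pages_radix_init_root(), so lockless readers that follow the root
 * pointer observe a fully initialized top-level node.
 */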
2712 uintptr_t root = os_atomic_load(&vm_pages_radix_root, dependency);
2713
2714 *level = root & VM_PAGE_RADIX_LEVEL_MASK;
2715 root &= ~VM_PAGE_RADIX_LEVEL_MASK;
2716 return (vm_page_radix_node_t)root;
2717 }
2718
2719 vm_page_t
2720 vm_pages_radix_next(uint32_t *cursor, ppnum_t *pnum)
2721 {
2722 const uint32_t max_index = vm_pages_first_pnum - pmap_first_pnum;
2723 vm_page_radix_node_t node;
2724 uint32_t level, index;
2725
2726 index = *cursor;
2727 node = vm_pages_radix_load_root(&level);
2728
2729 while (index < max_index) {
2730 vm_page_radix_ptr_t *slot = vm_page_radix_slot(node, level, index);
2731 vm_page_radix_ptr_t ptr = os_atomic_load(slot, dependency);
2732
2733 if (ptr == 0) {
2734 uint32_t stride = 1 << (VM_PAGE_RADIX_FANOUT_SHIFT * level);
2735
2736 index = (index + stride) & -stride;
2737 if (vm_page_radix_key(level, index) == 0) {
2738 /* restart lookup at the top */
2739 node = vm_pages_radix_load_root(&level);
2740 }
2741 } else if (level > 0) {
2742 node = vm_page_radix_node_unpack(ptr);
2743 level -= 1;
2744 } else {
2745 *cursor = index + 1;
2746 if (pnum) {
2747 *pnum = pmap_first_pnum + index;
2748 }
2749 return (vm_page_t)VM_PAGE_UNPACK_PTR(ptr);
2750 }
2751 }
2752
2753 if (pnum) {
2754 *pnum = 0;
2755 }
2756 return VM_PAGE_NULL;
2757 }
2758
2759 #if DEBUG || DEVELOPMENT
2760
2761 static int
2762 vm_page_radix_verify_test(int64_t in __unused, int64_t *out)
2763 {
2764 uint32_t count = 0;
2765 vm_page_t mem;
2766
2767 lck_mtx_lock(&vm_pages_radix_lock);
2768
2769 vm_pages_radix_for_each(mem) {
2770 count++;
2771 assert(mem == vm_page_find_canonical(VM_PAGE_GET_PHYS_PAGE(mem)));
2772 }
2773
2774 assert(count == vm_pages_radix_count);
2775
2776 lck_mtx_unlock(&vm_pages_radix_lock);
2777
2778 *out = 1;
2779 return 0;
2780 }
2781 SYSCTL_TEST_REGISTER(vm_page_radix_verify, vm_page_radix_verify_test);
2782
2783 #endif /* DEBUG || DEVELOPMENT */
2784
2785 __attribute__((noinline))
2786 static void
2787 vm_pages_radix_insert(ppnum_t pnum, vm_page_t page)
2788 {
2789 vm_page_radix_ptr_t *slot;
2790 vm_page_radix_node_t node;
2791 uint32_t level, index;
2792
2793 assert(!vm_page_in_array(page));
2794 index = pnum - pmap_first_pnum;
2795
2796 lck_mtx_lock(&vm_pages_radix_lock);
2797
2798 node = vm_pages_radix_load_root(&level);
2799 if (node == NULL) {
2800 node = vm_pages_radix_init_root(&level);
2801 }
2802
2803 for (; level > 0; level--) {
2804 slot = vm_page_radix_slot(node, level, index);
2805 if (*slot == 0) {
2806 node = vm_pages_radix_node_alloc(slot);
2807 } else {
2808 node = vm_page_radix_node_unpack(*slot);
2809 }
2810 }
2811
2812 slot = vm_page_radix_slot(node, 0, index);
2813 assert(*slot == 0);
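/*
 * Publish the leaf with release ordering so that lockless readers in
 * vm_page_find_canonical() and vm_pages_radix_next(), which load slots
 * with dependency ordering, never observe a partially initialized path.
 */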
2814 os_atomic_store(slot, VM_PAGE_PACK_PTR(page), release);
2815 vm_pages_radix_count++;
2816
2817 lck_mtx_unlock(&vm_pages_radix_lock);
2818 }
2819
2820 __abortlike
2821 static void
2822 vm_page_for_ppnum_panic(ppnum_t pnum)
2823 {
2824 if (pnum < pmap_first_pnum) {
2825 panic("physical page is before the start of DRAM: %#x < %#x)",
2826 pnum, pmap_first_pnum);
2827 }
2828 panic("physical page is beyond the end of managed DRAM: %#x >= %#x)",
2829 pnum, vm_pages_first_pnum + vm_pages_count);
2830 }
2831
2832 vm_page_t
2833 vm_page_find_canonical(ppnum_t pnum)
2834 {
2835 vm_page_radix_ptr_t *slot;
2836 vm_page_radix_node_t node;
2837 vm_page_radix_ptr_t ptr;
2838 uint32_t level, index;
2839
2840 if (pnum < pmap_first_pnum) {
2841 vm_page_for_ppnum_panic(pnum);
2842 }
2843
2844 if (pnum >= vm_pages_first_pnum + vm_pages_count) {
2845 /*
2846 * We could receive requests for pages beyond xnu's managed space (e.g. ECC errors).
2847 * These need to be handled gracefully, so we return VM_PAGE_NULL here.
2848 */
2849 return VM_PAGE_NULL;
2850 }
2851
2852 if (__probable(pnum >= vm_pages_first_pnum)) {
2853 return vm_page_get(pnum - vm_pages_first_pnum);
2854 }
2855
2856 index = pnum - pmap_first_pnum;
2857 node = vm_pages_radix_load_root(&level);
2858
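/*
 * Lockless descent: each interior slot was published with a release store
 * by vm_pages_radix_insert(), so the dependency-ordered loads below are
 * sufficient to walk the tree without taking vm_pages_radix_lock.
 */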
2859 for (; node && level > 0; level--) {
2860 slot = vm_page_radix_slot(node, level, index);
2861 ptr = os_atomic_load(slot, dependency);
2862 node = vm_page_radix_node_unpack(ptr);
2863 }
2864
2865 if (__probable(node)) {
2866 slot = vm_page_radix_slot(node, 0, index);
2867 ptr = os_atomic_load(slot, dependency);
2868 return (vm_page_t)VM_PAGE_UNPACK_PTR(ptr);
2869 }
2870
2871 return VM_PAGE_NULL;
2872 }
2873
2874 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2875
2876 /*!
2877 * @function vm_page_create()
2878 *
2879 * @brief
2880 * Common helper for all vm_page_create* functions.
2881 */
2882 static vm_page_t
2883 vm_page_create(ppnum_t phys_page, bool canonical, zalloc_flags_t flags)
2884 {
2885 vm_page_t m;
2886
2887 m = zalloc_flags(vm_page_zone, flags);
2888 if (m) {
2889 vm_page_init(m, phys_page);
2890 if (phys_page == vm_page_guard_addr) {
2891 counter_inc(&vm_guard_count);
2892 }
2893 }
2894 if (canonical) {
2895 assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
2896 m->vmp_canonical = true;
2897 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2898 vm_pages_radix_insert(phys_page, m);
2899 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2900 vm_free_page_lock();
2901 vm_page_pages++;
2902 vm_free_page_unlock();
2903 }
2904 return m;
2905 }
2906
2907 /*
2908 * Routine: vm_page_create_canonical
2909 * Purpose:
2910 * After the VM system is up, machine-dependent code
2911 * may stumble across more physical memory, for example
2912 * memory that it had been reserving for a frame buffer.
2913 * vm_page_create_canonical turns this memory into available pages.
2914 */
2915
2916 void
2917 vm_page_create_canonical(ppnum_t phys_page)
2918 {
2919 vm_page_t m;
2920
2921 m = vm_page_create(phys_page, true, Z_WAITOK);
2922 vm_page_release(m, VMP_RELEASE_NONE);
2923 }
2924
2925
2926 /*
2927 * vm_page_hash:
2928 *
2929 * Distributes the object/offset key pair among hash buckets.
2930 *
2931 * NOTE: The bucket count must be a power of 2
2932 */
2933 #define vm_page_hash(object, offset) (\
2934 ( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
2935 & vm_page_hash_mask)
2936
2937
2938 /*
2939 * vm_page_insert: [ internal use only ]
2940 *
2941 * Inserts the given mem entry into the object/object-page
2942 * table and object list.
2943 *
2944 * The object must be locked.
2945 */
2946 void
2947 vm_page_insert(
2948 vm_page_t mem,
2949 vm_object_t object,
2950 vm_object_offset_t offset)
2951 {
2952 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
2953 }
2954
2955 void
2956 vm_page_insert_wired(
2957 vm_page_t mem,
2958 vm_object_t object,
2959 vm_object_offset_t offset,
2960 vm_tag_t tag)
2961 {
2962 vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
2963 }
2964
2965 void
2966 vm_page_insert_internal(
2967 vm_page_t mem,
2968 vm_object_t object,
2969 vm_object_offset_t offset,
2970 vm_tag_t tag,
2971 boolean_t queues_lock_held,
2972 boolean_t insert_in_hash,
2973 boolean_t batch_pmap_op,
2974 boolean_t batch_accounting,
2975 uint64_t *delayed_ledger_update)
2976 {
2977 vm_page_bucket_t *bucket;
2978 lck_ticket_t *bucket_lock;
2979 int hash_id;
2980 task_t owner;
2981 int ledger_idx_volatile;
2982 int ledger_idx_nonvolatile;
2983 int ledger_idx_volatile_compressed;
2984 int ledger_idx_nonvolatile_compressed;
2985 int ledger_idx_composite;
2986 int ledger_idx_external_wired;
2987 boolean_t do_footprint;
2988
2989 #if 0
2990 /*
2991 * we may not hold the page queue lock
2992 * so this check isn't safe to make
2993 */
2994 VM_PAGE_CHECK(mem);
2995 #endif
2996
2997 assertf(page_aligned(offset), "0x%llx\n", offset);
2998
2999 assert(!VM_PAGE_WIRED(mem) || !vm_page_is_canonical(mem) ||
3000 (tag != VM_KERN_MEMORY_NONE));
3001
3002 vm_object_lock_assert_exclusive(object);
3003 LCK_MTX_ASSERT(&vm_page_queue_lock,
3004 queues_lock_held ? LCK_MTX_ASSERT_OWNED
3005 : LCK_MTX_ASSERT_NOTOWNED);
3006
3007 if (queues_lock_held == FALSE) {
3008 assert(!VM_PAGE_PAGEABLE(mem));
3009 }
3010
3011 if (insert_in_hash == TRUE) {
3012 #if DEBUG || VM_PAGE_BUCKETS_CHECK
3013 if (mem->vmp_tabled || mem->vmp_object) {
3014 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
3015 "already in (obj=%p,off=0x%llx)",
3016 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
3017 }
3018 #endif
3019 if (object->internal && (offset >= object->vo_size)) {
3020 panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
3021 mem, object, offset, object->vo_size);
3022 }
3023
3024 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
3025
3026 /*
3027 * Record the object/offset pair in this page
3028 */
3029
3030 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
3031 mem->vmp_offset = offset;
3032
3033 #if CONFIG_SECLUDED_MEMORY
3034 if (object->eligible_for_secluded) {
3035 vm_page_secluded.eligible_for_secluded++;
3036 }
3037 #endif /* CONFIG_SECLUDED_MEMORY */
3038
3039 /*
3040 * Insert it into the object/offset hash table
3041 */
3042 hash_id = vm_page_hash(object, offset);
3043 bucket = &vm_page_buckets[hash_id];
3044 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
3045
3046 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
3047
3048 mem->vmp_next_m = bucket->page_list;
3049 bucket->page_list = VM_PAGE_PACK_PTR(mem);
3050 assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
3051
3052 #if MACH_PAGE_HASH_STATS
3053 if (++bucket->cur_count > bucket->hi_count) {
3054 bucket->hi_count = bucket->cur_count;
3055 }
3056 #endif /* MACH_PAGE_HASH_STATS */
3057 mem->vmp_hashed = TRUE;
3058 lck_ticket_unlock(bucket_lock);
3059 }
3060
3061 {
3062 unsigned int cache_attr;
3063
3064 cache_attr = object->wimg_bits & VM_WIMG_MASK;
3065
3066
3067 if (cache_attr != VM_WIMG_USE_DEFAULT) {
3068 PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
3069 }
3070
3071 }
3072
3073 /*
3074 * Now link into the object's list of backed pages.
3075 */
3076 vm_page_queue_enter(&object->memq, mem, vmp_listq);
3077 object->memq_hint = mem;
3078 mem->vmp_tabled = TRUE;
3079
3080 /*
3081 * Show that the object has one more resident page.
3082 */
3083
3084 object->resident_page_count++;
3085 if (VM_PAGE_WIRED(mem)) {
3086 assert(mem->vmp_wire_count > 0);
3087 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
3088 VM_OBJECT_WIRED_PAGE_ADD(object, mem);
3089 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
3090 }
3091 assert(object->resident_page_count >= object->wired_page_count);
3092
3093 #if DEVELOPMENT || DEBUG
3094 if (object->object_is_shared_cache &&
3095 object->pager != NULL &&
3096 object->pager->mo_pager_ops == &shared_region_pager_ops) {
3097 int new, old;
3098 assert(!object->internal);
3099 new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
3100 do {
3101 old = shared_region_pagers_resident_peak;
3102 } while (old < new &&
3103 !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
3104 }
3105 #endif /* DEVELOPMENT || DEBUG */
3106
3107 if (batch_accounting == FALSE) {
3108 if (object->internal) {
3109 OSAddAtomic(1, &vm_page_internal_count);
3110 } else {
3111 OSAddAtomic(1, &vm_page_external_count);
3112 }
3113 }
3114
3115 /*
3116 * It wouldn't make sense to insert a "reusable" page in
3117 * an object (the page would have been marked "reusable" only
3118 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
3119 * in the object at that time).
3120 * But a page could be inserted in an "all_reusable" object, if
3121 * something faults it in (a vm_read() from another task or a
3122 * "use-after-free" issue in user space, for example). It can
3123 * also happen if we're relocating a page from that object to
3124 * a different physical page during a physically-contiguous
3125 * allocation.
3126 */
3127 assert(!mem->vmp_reusable);
3128 if (object->all_reusable) {
3129 OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
3130 }
3131
3132 if (object->purgable == VM_PURGABLE_DENY &&
3133 !object->vo_ledger_tag) {
3134 owner = TASK_NULL;
3135 } else {
3136 owner = VM_OBJECT_OWNER(object);
3137 vm_object_ledger_tag_ledgers(object,
3138 &ledger_idx_volatile,
3139 &ledger_idx_nonvolatile,
3140 &ledger_idx_volatile_compressed,
3141 &ledger_idx_nonvolatile_compressed,
3142 &ledger_idx_composite,
3143 &ledger_idx_external_wired,
3144 &do_footprint);
3145 }
3146 if (owner &&
3147 object->internal &&
3148 (object->purgable == VM_PURGABLE_NONVOLATILE ||
3149 object->purgable == VM_PURGABLE_DENY ||
3150 VM_PAGE_WIRED(mem))) {
3151 if (delayed_ledger_update) {
3152 *delayed_ledger_update += PAGE_SIZE;
3153 } else {
3154 /* more non-volatile bytes */
3155 ledger_credit(owner->ledger,
3156 ledger_idx_nonvolatile,
3157 PAGE_SIZE);
3158 if (do_footprint) {
3159 /* more footprint */
3160 ledger_credit(owner->ledger,
3161 task_ledgers.phys_footprint,
3162 PAGE_SIZE);
3163 } else if (ledger_idx_composite != -1) {
3164 ledger_credit(owner->ledger,
3165 ledger_idx_composite,
3166 PAGE_SIZE);
3167 }
3168 }
3169 } else if (owner &&
3170 object->internal &&
3171 (object->purgable == VM_PURGABLE_VOLATILE ||
3172 object->purgable == VM_PURGABLE_EMPTY)) {
3173 assert(!VM_PAGE_WIRED(mem));
3174 /* more volatile bytes */
3175 ledger_credit(owner->ledger,
3176 ledger_idx_volatile,
3177 PAGE_SIZE);
3178 }
3179
3180 if (object->purgable == VM_PURGABLE_VOLATILE) {
3181 if (VM_PAGE_WIRED(mem)) {
3182 OSAddAtomic(+1, &vm_page_purgeable_wired_count);
3183 } else {
3184 OSAddAtomic(+1, &vm_page_purgeable_count);
3185 }
3186 } else if (object->purgable == VM_PURGABLE_EMPTY &&
3187 mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
3188 /*
3189 * This page belongs to a purged VM object but hasn't
3190 * been purged (because it was "busy").
3191 * It's in the "throttled" queue and hence not
3192 * visible to vm_pageout_scan(). Move it to a pageable
3193 * queue, so that it can eventually be reclaimed, instead
3194 * of lingering in the "empty" object.
3195 */
3196 if (queues_lock_held == FALSE) {
3197 vm_page_lockspin_queues();
3198 }
3199 vm_page_deactivate(mem);
3200 if (queues_lock_held == FALSE) {
3201 vm_page_unlock_queues();
3202 }
3203 }
3204
3205
3206 #if VM_OBJECT_TRACKING_OP_MODIFIED
3207 if (vm_object_tracking_btlog &&
3208 object->internal &&
3209 object->resident_page_count == 0 &&
3210 object->pager == NULL &&
3211 object->shadow != NULL &&
3212 object->shadow->vo_copy == object) {
3213 btlog_record(vm_object_tracking_btlog, object,
3214 VM_OBJECT_TRACKING_OP_MODIFIED,
3215 btref_get(__builtin_frame_address(0), 0));
3216 }
3217 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
3218 }
3219
3220 /*
3221 * vm_page_replace:
3222 *
3223 * Exactly like vm_page_insert, except that we first
3224 * remove any existing page at the given offset in object.
3225 *
3226 * The object must be locked.
3227 */
3228 void
3229 vm_page_replace(
3230 vm_page_t mem,
3231 vm_object_t object,
3232 vm_object_offset_t offset)
3233 {
3234 vm_page_bucket_t *bucket;
3235 vm_page_t found_m = VM_PAGE_NULL;
3236 lck_ticket_t *bucket_lock;
3237 int hash_id;
3238
3239 #if 0
3240 /*
3241 * we don't hold the page queue lock
3242 * so this check isn't safe to make
3243 */
3244 VM_PAGE_CHECK(mem);
3245 #endif
3246 vm_object_lock_assert_exclusive(object);
3247 #if DEBUG || VM_PAGE_BUCKETS_CHECK
3248 if (mem->vmp_tabled || mem->vmp_object) {
3249 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
3250 "already in (obj=%p,off=0x%llx)",
3251 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
3252 }
3253 #endif
3254 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3255
3256 assert(!VM_PAGE_PAGEABLE(mem));
3257
3258 /*
3259 * Record the object/offset pair in this page
3260 */
3261 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
3262 mem->vmp_offset = offset;
3263
3264 /*
3265 * Insert it into the object_object/offset hash table,
3266 * replacing any page that might have been there.
3267 */
3268
3269 hash_id = vm_page_hash(object, offset);
3270 bucket = &vm_page_buckets[hash_id];
3271 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
3272
3273 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
3274
3275 if (bucket->page_list) {
3276 vm_page_packed_t *mp = &bucket->page_list;
3277 vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
3278
3279 do {
3280 /*
3281 * compare packed object pointers
3282 */
3283 if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
3284 /*
3285 * Remove old page from hash list
3286 */
3287 *mp = m->vmp_next_m;
3288 m->vmp_hashed = FALSE;
3289 m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
3290
3291 found_m = m;
3292 break;
3293 }
3294 mp = &m->vmp_next_m;
3295 } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
3296
3297 mem->vmp_next_m = bucket->page_list;
3298 } else {
3299 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
3300 }
3301 /*
3302 * insert new page at head of hash list
3303 */
3304 bucket->page_list = VM_PAGE_PACK_PTR(mem);
3305 mem->vmp_hashed = TRUE;
3306
3307 lck_ticket_unlock(bucket_lock);
3308
3309 if (found_m) {
3310 /*
3311 * there was already a page at the specified
3312 * offset for this object... remove it from
3313 * the object and free it back to the free list
3314 */
3315 vm_page_free_unlocked(found_m, FALSE);
3316 }
3317 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
3318 }
3319
3320 /*
3321 * vm_page_remove: [ internal use only ]
3322 *
3323 * Removes the given mem entry from the object/offset-page
3324 * table and the object page list.
3325 *
3326 * The object must be locked.
3327 */
3328
3329 void
3330 vm_page_remove(
3331 vm_page_t mem,
3332 boolean_t remove_from_hash)
3333 {
3334 vm_page_bucket_t *bucket;
3335 vm_page_t this;
3336 lck_ticket_t *bucket_lock;
3337 int hash_id;
3338 task_t owner;
3339 vm_object_t m_object;
3340 int ledger_idx_volatile;
3341 int ledger_idx_nonvolatile;
3342 int ledger_idx_volatile_compressed;
3343 int ledger_idx_nonvolatile_compressed;
3344 int ledger_idx_composite;
3345 int ledger_idx_external_wired;
3346 int do_footprint;
3347
3348 m_object = VM_PAGE_OBJECT(mem);
3349
3350 vm_object_lock_assert_exclusive(m_object);
3351 assert(mem->vmp_tabled);
3352 assert(!mem->vmp_cleaning);
3353 assert(!mem->vmp_laundry);
3354
3355 if (VM_PAGE_PAGEABLE(mem)) {
3356 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3357 }
3358 #if 0
3359 /*
3360 * we don't hold the page queue lock
3361 * so this check isn't safe to make
3362 */
3363 VM_PAGE_CHECK(mem);
3364 #endif
3365 if (remove_from_hash == TRUE) {
3366 /*
3367 * Remove from the object/offset hash table
3368 */
3369 hash_id = vm_page_hash(m_object, mem->vmp_offset);
3370 bucket = &vm_page_buckets[hash_id];
3371 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
3372
3373 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
3374
3375 if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
3376 /* optimize for common case */
3377
3378 bucket->page_list = mem->vmp_next_m;
3379 } else {
3380 vm_page_packed_t *prev;
3381
3382 for (prev = &this->vmp_next_m;
3383 (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
3384 prev = &this->vmp_next_m) {
3385 continue;
3386 }
3387 *prev = this->vmp_next_m;
3388 }
3389 #if MACH_PAGE_HASH_STATS
3390 bucket->cur_count--;
3391 #endif /* MACH_PAGE_HASH_STATS */
3392 mem->vmp_hashed = FALSE;
3393 this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
3394 lck_ticket_unlock(bucket_lock);
3395 }
3396 /*
3397 * Now remove from the object's list of backed pages.
3398 */
3399
3400 vm_page_remove_internal(mem);
3401
3402 /*
3403 * And show that the object has one fewer resident
3404 * page.
3405 */
3406
3407 assert(m_object->resident_page_count > 0);
3408 m_object->resident_page_count--;
3409
3410 #if DEVELOPMENT || DEBUG
3411 if (m_object->object_is_shared_cache &&
3412 m_object->pager != NULL &&
3413 m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
3414 assert(!m_object->internal);
3415 OSAddAtomic(-1, &shared_region_pagers_resident_count);
3416 }
3417 #endif /* DEVELOPMENT || DEBUG */
3418
3419 if (m_object->internal) {
3420 #if DEBUG
3421 assert(vm_page_internal_count);
3422 #endif /* DEBUG */
3423
3424 OSAddAtomic(-1, &vm_page_internal_count);
3425 } else {
3426 assert(vm_page_external_count);
3427 OSAddAtomic(-1, &vm_page_external_count);
3428
3429 if (mem->vmp_xpmapped) {
3430 assert(vm_page_xpmapped_external_count);
3431 OSAddAtomic(-1, &vm_page_xpmapped_external_count);
3432 }
3433 }
3434 if (!m_object->internal &&
3435 m_object->cached_list.next &&
3436 m_object->cached_list.prev) {
3437 if (m_object->resident_page_count == 0) {
3438 vm_object_cache_remove(m_object);
3439 }
3440 }
3441
3442 if (VM_PAGE_WIRED(mem)) {
3443 assert(mem->vmp_wire_count > 0);
3444 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
3445 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
3446 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
3447 }
3448 assert(m_object->resident_page_count >=
3449 m_object->wired_page_count);
3450 if (mem->vmp_reusable) {
3451 assert(m_object->reusable_page_count > 0);
3452 m_object->reusable_page_count--;
3453 assert(m_object->reusable_page_count <=
3454 m_object->resident_page_count);
3455 mem->vmp_reusable = FALSE;
3456 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
3457 vm_page_stats_reusable.reused_remove++;
3458 } else if (m_object->all_reusable) {
3459 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
3460 vm_page_stats_reusable.reused_remove++;
3461 }
3462
3463 if (m_object->purgable == VM_PURGABLE_DENY &&
3464 !m_object->vo_ledger_tag) {
3465 owner = TASK_NULL;
3466 } else {
3467 owner = VM_OBJECT_OWNER(m_object);
3468 vm_object_ledger_tag_ledgers(m_object,
3469 &ledger_idx_volatile,
3470 &ledger_idx_nonvolatile,
3471 &ledger_idx_volatile_compressed,
3472 &ledger_idx_nonvolatile_compressed,
3473 &ledger_idx_composite,
3474 &ledger_idx_external_wired,
3475 &do_footprint);
3476 }
3477 if (owner &&
3478 m_object->internal &&
3479 (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
3480 m_object->purgable == VM_PURGABLE_DENY ||
3481 VM_PAGE_WIRED(mem))) {
3482 /* less non-volatile bytes */
3483 ledger_debit(owner->ledger,
3484 ledger_idx_nonvolatile,
3485 PAGE_SIZE);
3486 if (do_footprint) {
3487 /* less footprint */
3488 ledger_debit(owner->ledger,
3489 task_ledgers.phys_footprint,
3490 PAGE_SIZE);
3491 } else if (ledger_idx_composite != -1) {
3492 ledger_debit(owner->ledger,
3493 ledger_idx_composite,
3494 PAGE_SIZE);
3495 }
3496 } else if (owner &&
3497 m_object->internal &&
3498 (m_object->purgable == VM_PURGABLE_VOLATILE ||
3499 m_object->purgable == VM_PURGABLE_EMPTY)) {
3500 assert(!VM_PAGE_WIRED(mem));
3501 /* less volatile bytes */
3502 ledger_debit(owner->ledger,
3503 ledger_idx_volatile,
3504 PAGE_SIZE);
3505 }
3506
3507 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
3508 if (VM_PAGE_WIRED(mem)) {
3509 assert(vm_page_purgeable_wired_count > 0);
3510 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
3511 } else {
3512 assert(vm_page_purgeable_count > 0);
3513 OSAddAtomic(-1, &vm_page_purgeable_count);
3514 }
3515 }
3516
3517 if (m_object->set_cache_attr == TRUE) {
3518 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
3519 }
3520
3521 mem->vmp_tabled = FALSE;
3522 mem->vmp_object = 0;
3523 mem->vmp_offset = (vm_object_offset_t) -1;
3524 }
3525
3526
3527 /*
3528 * vm_page_lookup:
3529 *
3530 * Returns the page associated with the object/offset
3531 * pair specified; if none is found, VM_PAGE_NULL is returned.
3532 *
3533 * The object must be locked. No side effects.
3534 */
3535
3536 #define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
3537
3538 #if DEBUG_VM_PAGE_LOOKUP
3539
3540 struct {
3541 uint64_t vpl_total;
3542 uint64_t vpl_empty_obj;
3543 uint64_t vpl_bucket_NULL;
3544 uint64_t vpl_hit_hint;
3545 uint64_t vpl_hit_hint_next;
3546 uint64_t vpl_hit_hint_prev;
3547 uint64_t vpl_fast;
3548 uint64_t vpl_slow;
3549 uint64_t vpl_hit;
3550 uint64_t vpl_miss;
3551
3552 uint64_t vpl_fast_elapsed;
3553 uint64_t vpl_slow_elapsed;
3554 } vm_page_lookup_stats __attribute__((aligned(8)));
3555
3556 #endif
3557
3558 #define KDP_VM_PAGE_WALK_MAX 1000
3559
3560 vm_page_t
3561 kdp_vm_page_lookup(
3562 vm_object_t object,
3563 vm_object_offset_t offset)
3564 {
3565 vm_page_t cur_page;
3566 int num_traversed = 0;
3567
3568 if (not_in_kdp) {
3569 panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
3570 }
3571
3572 vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
3573 if (cur_page->vmp_offset == offset) {
3574 return cur_page;
3575 }
3576 num_traversed++;
3577
3578 if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
3579 return VM_PAGE_NULL;
3580 }
3581 }
3582
3583 return VM_PAGE_NULL;
3584 }
3585
3586 vm_page_t
3587 vm_page_lookup(
3588 vm_object_t object,
3589 vm_object_offset_t offset)
3590 {
3591 vm_page_t mem;
3592 vm_page_bucket_t *bucket;
3593 vm_page_queue_entry_t qe;
3594 lck_ticket_t *bucket_lock = NULL;
3595 int hash_id;
3596 #if DEBUG_VM_PAGE_LOOKUP
3597 uint64_t start, elapsed;
3598
3599 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
3600 #endif
3601
3602 #if KASAN_TBI
3603 if (is_kernel_object(object)) {
3604 offset = vm_memtag_canonicalize_kernel(offset);
3605 }
3606 #endif /* KASAN_TBI */
3607
3608 vm_object_lock_assert_held(object);
3609 assertf(page_aligned(offset), "offset 0x%llx\n", offset);
3610
3611 if (object->resident_page_count == 0) {
3612 #if DEBUG_VM_PAGE_LOOKUP
3613 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
3614 #endif
3615 return VM_PAGE_NULL;
3616 }
3617
3618 mem = object->memq_hint;
3619
3620 if (mem != VM_PAGE_NULL) {
3621 assert(VM_PAGE_OBJECT(mem) == object);
3622
3623 if (mem->vmp_offset == offset) {
3624 #if DEBUG_VM_PAGE_LOOKUP
3625 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
3626 #endif
3627 return mem;
3628 }
3629 qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
3630
3631 if (!vm_page_queue_end(&object->memq, qe)) {
3632 vm_page_t next_page;
3633
3634 next_page = (vm_page_t)((uintptr_t)qe);
3635 assert(VM_PAGE_OBJECT(next_page) == object);
3636
3637 if (next_page->vmp_offset == offset) {
3638 object->memq_hint = next_page; /* new hint */
3639 #if DEBUG_VM_PAGE_LOOKUP
3640 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
3641 #endif
3642 return next_page;
3643 }
3644 }
3645 qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
3646
3647 if (!vm_page_queue_end(&object->memq, qe)) {
3648 vm_page_t prev_page;
3649
3650 prev_page = (vm_page_t)((uintptr_t)qe);
3651 assert(VM_PAGE_OBJECT(prev_page) == object);
3652
3653 if (prev_page->vmp_offset == offset) {
3654 object->memq_hint = prev_page; /* new hint */
3655 #if DEBUG_VM_PAGE_LOOKUP
3656 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
3657 #endif
3658 return prev_page;
3659 }
3660 }
3661 }
3662 /*
3663 * Search the hash table for this object/offset pair
3664 */
3665 hash_id = vm_page_hash(object, offset);
3666 bucket = &vm_page_buckets[hash_id];
3667
3668 /*
3669 * since we hold the object lock, we are guaranteed that no
3670 * new pages can be inserted into this object... this in turn
3671 * guarantees that the page we're looking for can't exist
3672 * if the bucket it hashes to is currently NULL even when looked
3673 * at outside the scope of the hash bucket lock... this is a
3674 * really cheap optimization to avoid taking the lock
3675 */
3676 if (!bucket->page_list) {
3677 #if DEBUG_VM_PAGE_LOOKUP
3678 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
3679 #endif
3680 return VM_PAGE_NULL;
3681 }
3682
3683 #if DEBUG_VM_PAGE_LOOKUP
3684 start = mach_absolute_time();
3685 #endif
3686 if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
3687 /*
3688 * on average, it's roughly 3 times faster to run a short memq list
3689 * than to take the spin lock and go through the hash list
3690 */
3691 mem = (vm_page_t)vm_page_queue_first(&object->memq);
3692
3693 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
3694 if (mem->vmp_offset == offset) {
3695 break;
3696 }
3697
3698 mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
3699 }
3700 if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
3701 mem = NULL;
3702 }
3703 } else {
3704 vm_page_object_t packed_object;
3705
3706 packed_object = VM_PAGE_PACK_OBJECT(object);
3707
3708 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
3709
3710 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
3711
3712 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
3713 mem != VM_PAGE_NULL;
3714 mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
3715 #if 0
3716 /*
3717 * we don't hold the page queue lock
3718 * so this check isn't safe to make
3719 */
3720 VM_PAGE_CHECK(mem);
3721 #endif
3722 if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
3723 break;
3724 }
3725 }
3726 lck_ticket_unlock(bucket_lock);
3727 }
3728
3729 #if DEBUG_VM_PAGE_LOOKUP
3730 elapsed = mach_absolute_time() - start;
3731
3732 if (bucket_lock) {
3733 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
3734 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
3735 } else {
3736 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
3737 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
3738 }
3739 if (mem != VM_PAGE_NULL) {
3740 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
3741 } else {
3742 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
3743 }
3744 #endif
3745 if (mem != VM_PAGE_NULL) {
3746 assert(VM_PAGE_OBJECT(mem) == object);
3747
3748 object->memq_hint = mem;
3749 }
3750 return mem;
3751 }
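
/*
 * Illustrative sketch (added commentary, compiled out): how a hypothetical
 * caller is expected to use vm_page_lookup() -- hold the object lock across
 * the lookup and tolerate a VM_PAGE_NULL result. The function name below is
 * made up for illustration only.
 */
#if 0
static void
vm_page_lookup_example(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t m;

	vm_object_lock(object);
	/* "offset" must be page aligned, per the assertf() in vm_page_lookup() */
	m = vm_page_lookup(object, offset);
	if (m != VM_PAGE_NULL) {
		/* operate on "m" here, while "object" remains locked */
	}
	vm_object_unlock(object);
}
#endif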
3752
3753
3754 /*
3755 * vm_page_rename:
3756 *
3757 * Move the given memory entry from its
3758 * current object to the specified target object/offset.
3759 *
3760 * The object must be locked.
3761 */
3762 void
3763 vm_page_rename(
3764 vm_page_t mem,
3765 vm_object_t new_object,
3766 vm_object_offset_t new_offset)
3767 {
3768 boolean_t internal_to_external, external_to_internal;
3769 vm_tag_t tag;
3770 vm_object_t m_object;
3771
3772 m_object = VM_PAGE_OBJECT(mem);
3773
3774 assert(m_object != new_object);
3775 assert(m_object);
3776
3777 /*
3778 * Changes to mem->vmp_object require the page lock because
3779 * the pageout daemon uses that lock to get the object.
3780 */
3781 vm_page_lockspin_queues();
3782
3783 internal_to_external = FALSE;
3784 external_to_internal = FALSE;
3785
3786 if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
3787 /*
3788 * it's much easier to get the vm_page_pageable_xxx accounting correct
3789 * if we first move the page to the active queue... it's going to end
3790 * up there anyway, and we don't do vm_page_rename's frequently enough
3791 * for this to matter.
3792 */
3793 vm_page_queues_remove(mem, FALSE);
3794 vm_page_activate(mem);
3795 }
3796 if (VM_PAGE_PAGEABLE(mem)) {
3797 if (m_object->internal && !new_object->internal) {
3798 internal_to_external = TRUE;
3799 }
3800 if (!m_object->internal && new_object->internal) {
3801 external_to_internal = TRUE;
3802 }
3803 }
3804
3805 tag = m_object->wire_tag;
3806 vm_page_remove(mem, TRUE);
3807 vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
3808
3809 if (internal_to_external) {
3810 vm_page_pageable_internal_count--;
3811 vm_page_pageable_external_count++;
3812 } else if (external_to_internal) {
3813 vm_page_pageable_external_count--;
3814 vm_page_pageable_internal_count++;
3815 }
3816
3817 vm_page_unlock_queues();
3818 }
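
/*
 * Illustrative sketch (added commentary, compiled out): a hypothetical caller
 * of vm_page_rename(). Based on the vm_page_remove()/vm_page_insert_internal()
 * calls above, both the source and destination objects are assumed to be held
 * exclusively; lock ordering between the two objects is the caller's concern.
 * All names below are made up for illustration only.
 */
#if 0
static void
vm_page_rename_example(vm_page_t mem, vm_object_t old_object,
    vm_object_t new_object, vm_object_offset_t new_offset)
{
	vm_object_lock(old_object);
	vm_object_lock(new_object);
	vm_page_rename(mem, new_object, new_offset);
	vm_object_unlock(new_object);
	vm_object_unlock(old_object);
}
#endif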
3819
3820 /*
3821 * vm_page_init:
3822 *
3823 * Initialize the fields in a new page.
3824 * This takes a structure with random values and initializes it
3825 * so that it can be given to vm_page_release or vm_page_insert.
3826 */
3827 void
3828 vm_page_init(vm_page_t mem, ppnum_t phys_page)
3829 {
3830 assert(phys_page);
3831
3832 #if DEBUG
3833 if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
3834 if (!(pmap_valid_page(phys_page))) {
3835 panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
3836 }
3837 }
3838 #endif /* DEBUG */
3839
3840 /*
3841 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
3842 * try to use initial values which match 0. This minimizes the number of writes
3843 * needed for boot-time initialization.
3844 */
3845 assert(VM_PAGE_NOT_ON_Q == 0);
3846 assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
3847 *mem = (struct vm_page) {
3848 .vmp_offset = (vm_object_offset_t)-1,
3849 .vmp_q_state = VM_PAGE_NOT_ON_Q,
3850 .vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY,
3851 .vmp_canonical = vm_page_in_array(mem),
3852 .vmp_busy = true,
3853 };
3854
3855 VM_PAGE_INIT_PHYS_PAGE(mem, phys_page);
3856
3857 #if 0
3858 /*
3859 * we're leaving this turned off for now... currently pages
3860 * come off the free list and are either immediately dirtied/referenced
3861 * due to zero-fill or COW faults, or are used to read or write files...
3862 * in the file I/O case, the UPL mechanism takes care of clearing
3863 * the state of the HW ref/mod bits in a somewhat fragile way.
3864 * Since we may change the way this works in the future (to toughen it up),
3865 * I'm leaving this as a reminder of where these bits could get cleared
3866 */
3867
3868 /*
3869 * make sure both the h/w referenced and modified bits are
3870 * clear at this point... we are especially dependent on
3871 * not finding a 'stale' h/w modified in a number of spots
3872 * once this page goes back into use
3873 */
3874 pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
3875 #endif
3876 }
3877
3878 vm_page_t
3879 vm_page_create_fictitious(void)
3880 {
3881 return vm_page_create(vm_page_fictitious_addr, false, Z_WAITOK);
3882 }
3883
3884 vm_page_t
3885 vm_page_create_guard(bool canwait)
3886 {
3887 return vm_page_create(vm_page_guard_addr, false, canwait ? Z_WAITOK : Z_NOWAIT);
3888 }
3889
3890 vm_page_t
3891 vm_page_create_private(ppnum_t base_page)
3892 {
3893 assert(base_page != vm_page_fictitious_addr &&
3894 base_page != vm_page_guard_addr);
3895 return vm_page_create(base_page, false, Z_WAITOK);
3896 }
3897
3898 bool
3899 vm_page_is_canonical(const struct vm_page *m)
3900 {
3901 return m->vmp_canonical;
3902 }
3903
3904 bool
3905 vm_page_is_fictitious(const struct vm_page *m)
3906 {
3907 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
3908 if (vm_page_in_array(m)) {
3909 return false;
3910 }
3911 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
3912 switch (VM_PAGE_GET_PHYS_PAGE(m)) {
3913 case vm_page_guard_addr:
3914 case vm_page_fictitious_addr:
3915 return true;
3916 default:
3917 return false;
3918 }
3919 }
3920
3921 bool
3922 vm_page_is_guard(const struct vm_page *m)
3923 {
3924 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
3925 if (vm_page_in_array(m)) {
3926 return false;
3927 }
3928 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
3929 return VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr;
3930 }
3931
3932 bool
3933 vm_page_is_private(const struct vm_page *m)
3934 {
3935 return !vm_page_is_canonical(m) && !vm_page_is_fictitious(m);
3936 }
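
/*
 * Illustrative sketch (added commentary, compiled out): how the predicates
 * above partition pages into canonical, guard, fictitious and private.
 * The helper name is made up for illustration only.
 */
#if 0
static const char *
vm_page_kind_example(const struct vm_page *m)
{
	if (vm_page_is_canonical(m)) {
		return "canonical";     /* backed by the resident page array */
	}
	if (vm_page_is_guard(m)) {
		return "guard";         /* fictitious page at vm_page_guard_addr */
	}
	if (vm_page_is_fictitious(m)) {
		return "fictitious";    /* zone page at vm_page_fictitious_addr */
	}
	return "private";               /* zone page with a real frame attached */
}
#endif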
3937
3938 void
3939 vm_page_make_private(vm_page_t m, ppnum_t base_page)
3940 {
3941 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3942 assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr);
3943
3944 VM_PAGE_SET_PHYS_PAGE(m, base_page);
3945 }
3946
3947 void
3948 vm_page_reset_private(vm_page_t m)
3949 {
3950 assert(vm_page_is_private(m));
3951
3952 VM_PAGE_SET_PHYS_PAGE(m, vm_page_fictitious_addr);
3953 }
3954
3955 /*
3956 * vm_page_release_fictitious:
3957 *
3958 * Release a fictitious page to the zone pool
3959 */
3960 static void
3961 vm_page_release_fictitious(vm_page_t m)
3962 {
3963 assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3964 (m->vmp_q_state == VM_PAGE_IS_WIRED));
3965 assert(vm_page_is_fictitious(m));
3966 assert(!m->vmp_realtime);
3967
3968 if (vm_page_is_guard(m)) {
3969 counter_dec(&vm_guard_count);
3970 }
3971 zfree(vm_page_zone, m);
3972 }
3973
3974 /*
3975 * vm_pool_low():
3976 *
3977 * Return true if it is not likely that a non-vm_privileged thread
3978 * can get memory without blocking. Advisory only, since the
3979 * situation may change under us.
3980 */
3981 bool
3982 vm_pool_low(void)
3983 {
3984 /* No locking, at worst we will fib. */
3985 return vm_page_free_count <= vm_page_free_reserved;
3986 }
3987
3988 boolean_t vm_darkwake_mode = FALSE;
3989
3990 /*
3991 * vm_update_darkwake_mode():
3992 *
3993 * Tells the VM that the system is in / out of darkwake.
3994 *
3995 * Today, the VM only lowers/raises the background queue target
3996 * so as to favor consuming more/less background pages when
3997 * darkwake is ON/OFF.
3998 *
3999 * We might need to do more things in the future.
4000 */
4001
4002 void
4003 vm_update_darkwake_mode(boolean_t darkwake_mode)
4004 {
4005 #if XNU_TARGET_OS_OSX && defined(__arm64__)
4006 #pragma unused(darkwake_mode)
4007 assert(vm_darkwake_mode == FALSE);
4008 /*
4009 * Darkwake mode isn't supported on Apple Silicon macOS.
4010 */
4011 return;
4012 #else /* XNU_TARGET_OS_OSX && __arm64__ */
4013 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4014
4015 vm_page_lockspin_queues();
4016
4017 if (vm_darkwake_mode == darkwake_mode) {
4018 /*
4019 * No change.
4020 */
4021 vm_page_unlock_queues();
4022 return;
4023 }
4024
4025 vm_darkwake_mode = darkwake_mode;
4026
4027 if (vm_darkwake_mode == TRUE) {
4028 /* save background target to restore later */
4029 vm_page_background_target_snapshot = vm_page_background_target;
4030
4031 /* target is set to 0...no protection for background pages */
4032 vm_page_background_target = 0;
4033 } else if (vm_darkwake_mode == FALSE) {
4034 if (vm_page_background_target_snapshot) {
4035 vm_page_background_target = vm_page_background_target_snapshot;
4036 }
4037 }
4038 vm_page_unlock_queues();
4039 #endif
4040 }
4041
4042 void
4043 vm_page_update_special_state(vm_page_t mem)
4044 {
4045 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
4046 return;
4047 }
4048
4049 switch (mem->vmp_on_specialq) {
4050 case VM_PAGE_SPECIAL_Q_BG:
4051 {
4052 task_t my_task = current_task_early();
4053
4054 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
4055 return;
4056 }
4057
4058 if (my_task) {
4059 if (task_get_darkwake_mode(my_task)) {
4060 return;
4061 }
4062 }
4063
4064 if (my_task) {
4065 if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
4066 return;
4067 }
4068 }
4069 vm_page_lockspin_queues();
4070
4071 vm_page_background_promoted_count++;
4072
4073 vm_page_remove_from_specialq(mem);
4074 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
4075
4076 vm_page_unlock_queues();
4077 break;
4078 }
4079
4080 case VM_PAGE_SPECIAL_Q_DONATE:
4081 {
4082 task_t my_task = current_task_early();
4083
4084 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
4085 return;
4086 }
4087
4088 if (my_task->donates_own_pages == false) {
4089 vm_page_lockspin_queues();
4090
4091 vm_page_remove_from_specialq(mem);
4092 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
4093
4094 vm_page_unlock_queues();
4095 }
4096 break;
4097 }
4098
4099 default:
4100 {
4101 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
4102 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
4103 break;
4104 }
4105 }
4106 }
4107
4108
4109 void
4110 vm_page_assign_special_state(vm_page_t mem, vm_page_specialq_t mode)
4111 {
4112 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
4113 return;
4114 }
4115
4116 switch (mode) {
4117 case VM_PAGE_SPECIAL_Q_BG:
4118 {
4119 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
4120 return;
4121 }
4122
4123 task_t my_task = current_task_early();
4124
4125 if (my_task) {
4126 if (task_get_darkwake_mode(my_task)) {
4127 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
4128 return;
4129 }
4130 }
4131
4132 if (my_task) {
4133 mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
4134 }
4135 break;
4136 }
4137
4138 case VM_PAGE_SPECIAL_Q_DONATE:
4139 {
4140 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
4141 return;
4142 }
4143 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
4144 break;
4145 }
4146
4147 default:
4148 break;
4149 }
4150 }
4151
4152
4153 void
4154 vm_page_remove_from_specialq(vm_page_t mem)
4155 {
4156 vm_object_t m_object;
4157
4158 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4159
4160 switch (mem->vmp_on_specialq) {
4161 case VM_PAGE_SPECIAL_Q_BG:
4162 {
4163 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
4164 vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
4165
4166 mem->vmp_specialq.next = 0;
4167 mem->vmp_specialq.prev = 0;
4168
4169 vm_page_background_count--;
4170
4171 m_object = VM_PAGE_OBJECT(mem);
4172
4173 if (m_object->internal) {
4174 vm_page_background_internal_count--;
4175 } else {
4176 vm_page_background_external_count--;
4177 }
4178 }
4179 break;
4180 }
4181
4182 case VM_PAGE_SPECIAL_Q_DONATE:
4183 {
4184 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
4185 vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
4186 mem->vmp_specialq.next = 0;
4187 mem->vmp_specialq.prev = 0;
4188 vm_page_donate_count--;
4189 if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
4190 assert(vm_page_donate_target == vm_page_donate_target_low);
4191 vm_page_donate_target = vm_page_donate_target_high;
4192 vm_page_donate_queue_ripe = false;
4193 }
4194 }
4195
4196 break;
4197 }
4198
4199 default:
4200 {
4201 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
4202 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
4203 break;
4204 }
4205 }
4206 }
4207
4208
4209 void
4210 vm_page_add_to_specialq(vm_page_t mem, boolean_t first)
4211 {
4212 vm_object_t m_object;
4213
4214 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4215
4216 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
4217 return;
4218 }
4219
4220 switch (mem->vmp_on_specialq) {
4221 case VM_PAGE_SPECIAL_Q_BG:
4222 {
4223 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
4224 return;
4225 }
4226
4227 m_object = VM_PAGE_OBJECT(mem);
4228
4229 if (vm_page_background_exclude_external && !m_object->internal) {
4230 return;
4231 }
4232
4233 if (first == TRUE) {
4234 vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
4235 } else {
4236 vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
4237 }
4238 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
4239
4240 vm_page_background_count++;
4241
4242 if (m_object->internal) {
4243 vm_page_background_internal_count++;
4244 } else {
4245 vm_page_background_external_count++;
4246 }
4247 break;
4248 }
4249
4250 case VM_PAGE_SPECIAL_Q_DONATE:
4251 {
4252 if (first == TRUE) {
4253 vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
4254 } else {
4255 vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
4256 }
4257 vm_page_donate_count++;
4258 if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
4259 assert(vm_page_donate_target == vm_page_donate_target_high);
4260 vm_page_donate_target = vm_page_donate_target_low;
4261 vm_page_donate_queue_ripe = true;
4262 }
4263 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
4264 break;
4265 }
4266
4267 default:
4268 break;
4269 }
4270 }
4271
4272 /*!
4273 * @brief
4274 * Prepares a page that has been successfully grabbed for the caller.
4275 *
4276 * @discussion
4277 * This function will update accounting, emit tracepoints, ...
4278 */
4279 static vm_page_t
4280 vm_page_grab_finalize(vm_grab_options_t grab_options __unused, vm_page_t mem)
4281 {
4282 task_t task;
4283
4284 #if MACH_ASSERT
4285 /*
4286 * For all free pages, no matter their provenance...
4287 * ensure they are not referenced anywhere,
4288 * and their state is clean.
4289 */
4290 if (vm_check_refs_on_alloc) {
4291 vm_page_validate_no_references(mem);
4292 }
4293 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
4294 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0 &&
4295 mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0 &&
4296 mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0 &&
4297 mem->vmp_next_m == 0 &&
4298 mem->vmp_object == 0 &&
4299 mem->vmp_wire_count == 0 &&
4300 mem->vmp_busy &&
4301 !mem->vmp_tabled &&
4302 !mem->vmp_laundry &&
4303 !mem->vmp_pmapped &&
4304 !mem->vmp_wpmapped &&
4305 !mem->vmp_realtime);
4306 #endif /* MACH_ASSERT */
4307
4308 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4309 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
4310
4311 {
4312 VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB,
4313 DBG_FUNC_NONE, grab_options, 0, 0, 0);
4314 }
4315
4316 counter_inc(&vm_page_grab_count);
4317
4318 task = current_task_early();
4319 if (task != TASK_NULL) {
4320 counter_inc(&task->pages_grabbed);
4321 }
4322 if (task != TASK_NULL && task != kernel_task) {
4323 /*
4324 * tag:DONATE this is where the donate state of the page
4325 * is decided according to what task grabs it
4326 */
4327 if (task->donates_own_pages) {
4328 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
4329 } else {
4330 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
4331 }
4332 }
4333
4334 return mem;
4335 }
4336
4337 #if __x86_64__
4338 /*
4339 * This can be switched to FALSE to help debug drivers
4340 * that are having problems with memory > 4G.
4341 */
4342 boolean_t vm_himemory_mode = TRUE;
4343 #endif /* __x86_64__ */
4344
4345 #if XNU_VM_HAS_LOPAGE
4346
4347 vm_page_t
4348 vm_page_grablo(vm_grab_options_t grab_options)
4349 {
4350 vm_page_t mem = VM_PAGE_NULL;
4351
4352 if (!vm_lopage_needed) {
4353 return vm_page_grab_options(grab_options);
4354 }
4355
4356 vm_free_page_lock_spin();
4357 if (vm_lopage_free_count) {
4358 #if LCK_MTX_USE_ARCH
4359 /*
4360 * Intel locks do not really always disable preemption
4361 * for lck_mtx_lock_spin(), and vm_page_free_queue_grab()
4362 * really wants that.
4363 */
4364 disable_preemption();
4365 #endif
4366 mem = vm_page_free_queue_grab(grab_options,
4367 VM_MEMORY_CLASS_LOPAGE, 1, VM_PAGE_NOT_ON_Q).vmpl_head;
4368 #if LCK_MTX_USE_ARCH
4369 enable_preemption();
4370 #endif
4371 }
4372 vm_free_page_unlock();
4373
4374 if (mem == VM_PAGE_NULL) {
4375 if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
4376 vm_free_page_lock_spin();
4377 vm_lopages_allocated_cpm_failed++;
4378 vm_free_page_unlock();
4379
4380 return VM_PAGE_NULL;
4381 }
4382 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4383
4384 mem->vmp_busy = TRUE;
4385
4386 vm_page_lockspin_queues();
4387
4388 mem->vmp_gobbled = FALSE;
4389 vm_page_gobble_count--;
4390 vm_page_wire_count--;
4391
4392 vm_lopages_allocated_cpm_success++;
4393 vm_page_unlock_queues();
4394 }
4395
4396 return vm_page_grab_finalize(grab_options, mem);
4397 }
4398
4399 #endif /* XNU_VM_HAS_LOPAGE */
4400 #if CONFIG_SECLUDED_MEMORY
4401
4402 /*!
4403 * @brief
4404 * Attempt to allocate a page from the secluded queue
4405 *
4406 * @discussion
4407 * This function will check that the caller is eligible
4408 * for the secluded pool, and if not, return VM_PAGE_NULL.
4409 */
4410 __attribute__((noinline))
4411 static vm_page_t
4412 vm_page_grab_secluded(vm_grab_options_t grab_options)
4413 {
4414 vm_page_t mem;
4415 vm_object_t object;
4416 int refmod_state;
4417
4418 if (vm_page_secluded_count == 0) {
4419 return VM_PAGE_NULL;
4420 }
4421
4422 if (grab_options & VM_PAGE_GRAB_SECLUDED) {
4423 vm_page_secluded.grab_for_iokit++;
4424 } else if (!task_can_use_secluded_mem(current_task(), TRUE)) {
4425 return VM_PAGE_NULL;
4426 }
4427
4428
4429 /* secluded queue is protected by the VM page queue lock */
4430 vm_page_lock_queues();
4431
4432 if (vm_page_secluded_count == 0) {
4433 /* no secluded pages to grab... */
4434 vm_page_unlock_queues();
4435 return VM_PAGE_NULL;
4436 }
4437
4438 #if 00
4439 /* can we grab from the secluded queue? */
4440 if (vm_page_secluded_count > vm_page_secluded_target ||
4441 (vm_page_secluded_count > 0 &&
4442 task_can_use_secluded_mem(current_task(), TRUE))) {
4443 /* OK */
4444 } else {
4445 /* can't grab from secluded queue... */
4446 vm_page_unlock_queues();
4447 return VM_PAGE_NULL;
4448 }
4449 #endif
4450
4451 /* we can grab a page from secluded queue! */
4452 assert((vm_page_secluded_count_free +
4453 vm_page_secluded_count_inuse) ==
4454 vm_page_secluded_count);
4455 if (current_task()->task_can_use_secluded_mem) {
4456 assert(num_tasks_can_use_secluded_mem > 0);
4457 }
4458 assert(!vm_page_queue_empty(&vm_page_queue_secluded));
4459 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4460 mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
4461 assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
4462 vm_page_queues_remove(mem, TRUE);
4463
4464 object = VM_PAGE_OBJECT(mem);
4465
4466 assert(!vm_page_is_fictitious(mem));
4467 assert(!VM_PAGE_WIRED(mem));
4468 if (object == VM_OBJECT_NULL) {
4469 /* free for grab! */
4470 vm_page_unlock_queues();
4471 vm_page_secluded.grab_success_free++;
4472 goto out_success;
4473 }
4474
4475 assert(!object->internal);
4476 // vm_page_pageable_external_count--;
4477
4478 if (!vm_object_lock_try(object)) {
4479 // printf("SECLUDED: page %p: object %p locked\n", mem, object);
4480 vm_page_secluded.grab_failure_locked++;
4481 reactivate_secluded_page:
4482 vm_page_activate(mem);
4483 vm_page_unlock_queues();
4484 return VM_PAGE_NULL;
4485 }
4486 if (mem->vmp_busy ||
4487 mem->vmp_cleaning ||
4488 mem->vmp_laundry) {
4489 /* can't steal page in this state... */
4490 vm_object_unlock(object);
4491 vm_page_secluded.grab_failure_state++;
4492 goto reactivate_secluded_page;
4493 }
4494 if (mem->vmp_realtime) {
4495 /* don't steal pages used by realtime threads... */
4496 vm_object_unlock(object);
4497 vm_page_secluded.grab_failure_realtime++;
4498 goto reactivate_secluded_page;
4499 }
4500
4501 mem->vmp_busy = TRUE;
4502 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
4503 if (refmod_state & VM_MEM_REFERENCED) {
4504 mem->vmp_reference = TRUE;
4505 }
4506 if (refmod_state & VM_MEM_MODIFIED) {
4507 SET_PAGE_DIRTY(mem, FALSE);
4508 }
4509 if (mem->vmp_dirty || mem->vmp_precious) {
4510 /* can't grab a dirty page; re-activate */
4511 // printf("SECLUDED: dirty page %p\n", mem);
4512 vm_page_wakeup_done(object, mem);
4513 vm_page_secluded.grab_failure_dirty++;
4514 vm_object_unlock(object);
4515 goto reactivate_secluded_page;
4516 }
4517 if (mem->vmp_reference) {
4518 /* it's been used but we do need to grab a page... */
4519 }
4520
4521 vm_page_unlock_queues();
4522
4523 /* finish what vm_page_free() would have done... */
4524 vm_page_free_prepare_object(mem, TRUE);
4525 vm_object_unlock(object);
4526 object = VM_OBJECT_NULL;
4527
4528 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4529 vm_page_secluded.grab_success_other++;
4530
4531 out_success:
4532
4533 if (grab_options & VM_PAGE_GRAB_SECLUDED) {
4534 vm_page_secluded.grab_for_iokit_success++;
4535 }
4536 return mem;
4537 }
4538
4539 uint64_t
4540 vm_page_secluded_drain(void)
4541 {
4542 vm_page_t local_freeq;
4543 int local_freed;
4544 uint64_t num_reclaimed;
4545 unsigned int saved_secluded_count, saved_secluded_target;
4546
4547 num_reclaimed = 0;
4548 local_freeq = NULL;
4549 local_freed = 0;
4550
4551 vm_page_lock_queues();
4552
4553 saved_secluded_count = vm_page_secluded_count;
4554 saved_secluded_target = vm_page_secluded_target;
4555 vm_page_secluded_target = 0;
4556 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4557 while (vm_page_secluded_count) {
4558 vm_page_t secluded_page;
4559
4560 assert((vm_page_secluded_count_free +
4561 vm_page_secluded_count_inuse) ==
4562 vm_page_secluded_count);
4563 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
4564 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
4565
4566 vm_page_queues_remove(secluded_page, FALSE);
4567 assert(!vm_page_is_fictitious(secluded_page));
4568 assert(!VM_PAGE_WIRED(secluded_page));
4569
4570 if (secluded_page->vmp_object == 0) {
4571 /* transfer to free queue */
4572 assert(secluded_page->vmp_busy);
4573 secluded_page->vmp_snext = local_freeq;
4574 local_freeq = secluded_page;
4575 local_freed += 1;
4576 } else {
4577 /* transfer to head of active queue */
4578 vm_page_enqueue_active(secluded_page, FALSE);
4579 secluded_page = VM_PAGE_NULL;
4580 }
4581 num_reclaimed++;
4582 }
4583 vm_page_secluded_target = saved_secluded_target;
4584 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4585
4586 // printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
4587
4588 vm_page_unlock_queues();
4589
4590 if (local_freed) {
4591 vm_page_free_list(local_freeq, TRUE);
4592 local_freeq = NULL;
4593 local_freed = 0;
4594 }
4595
4596 return num_reclaimed;
4597 }
4598
4599 #endif /* CONFIG_SECLUDED_MEMORY */
4600
4601 /*!
4602 * @brief
4603 * Attempts to allocate a page from the specified per-cpu page queue.
4604 */
4605 static vm_page_t
4606 vm_page_grab_from_cpu(vm_page_t *cpu_list, scalable_counter_t *counter)
4607 {
4608 vm_page_t mem = _vm_page_list_pop(cpu_list);
4609
4610 if (mem != VM_PAGE_NULL) {
4611 #if HIBERNATION
4612 if (hibernate_rebuild_needed) {
4613 panic("should not modify cpu->free_pages while hibernating");
4614 }
4615 #endif /* HIBERNATION */
4616 counter_dec_preemption_disabled(counter);
4617 }
4618 return mem;
4619 }
4620
4621
4622 /*!
4623 * @brief
4624 * Attempts to allocate pages from free queues, and to populate the per-cpu
4625 * queue as a side effect.
4626 *
4627 * @discussion
4628 * This function will take the properties of the allocating thread into account
4629 * to decide how many pages it can allocate.
4630 *
4631 * If the free queues are depleted, then it will return VM_PAGE_NULL.
4632 */
4633 __attribute__((noinline))
4634 static vm_page_t
4635 vm_page_grab_slow(vm_grab_options_t grab_options)
4636 {
4637 unsigned int target = vm_free_magazine_refill_limit;
4638 vm_memory_class_t class = VM_MEMORY_CLASS_REGULAR;
4639 vm_page_t mem = VM_PAGE_NULL;
4640 vm_page_list_t list = { };
4641 vm_page_t *cpu_list = NULL;
4642 scalable_counter_t *counter = NULL;
4643
4644 vm_free_page_lock_spin();
4645 #if LCK_MTX_USE_ARCH
4646 /* Intel doesn't disable preemption with vm_free_page_lock_spin() */
4647 disable_preemption();
4648 #endif /* LCK_MTX_USE_ARCH */
4649 cpu_list = PERCPU_GET(free_pages);
4650 counter = &vm_cpu_free_count;
4651 {
4652 mem = vm_page_grab_from_cpu(cpu_list, counter);
4653 }
4654 if (mem != VM_PAGE_NULL) {
4655 #if LCK_MTX_USE_ARCH
4656 enable_preemption();
4657 #endif /* LCK_MTX_USE_ARCH */
4658 vm_free_page_unlock();
4659 return mem;
4660 }
4661
4662 if (vm_page_free_count <= vm_page_free_reserved) {
4663 if ((current_thread()->options & TH_OPT_VMPRIV) == 0) {
4664 target = 0;
4665 } else if (vm_page_free_count == 0) {
4666 target = 0;
4667 } else {
4668 target = 1;
4669 }
4670 } else {
4671 target = MIN(target, vm_page_free_count - vm_page_free_reserved);
4672 }
4673
4674 #if HIBERNATION
4675 if (target > 0 && hibernate_rebuild_needed) {
4676 panic("should not modify CPU free_pages while hibernating");
4677 }
4678 #endif /* HIBERNATION */
4679
4680 /*
4681 * Convert the lock hold into a mutex, to signal to waiters that the
4682 * lock may be held for longer.
4683 */
4684 #if !LCK_MTX_USE_ARCH
4685 disable_preemption();
4686 #endif /* !LCK_MTX_USE_ARCH */
4687 vm_free_page_lock_convert();
4688
4689 if (target != 0) {
4690 list = vm_page_free_queue_grab(grab_options, class, target,
4691 VM_PAGE_ON_FREE_LOCAL_Q);
4692 }
4693
4694 #if VM_PAGE_WIRE_COUNT_WARNING
4695 if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
4696 printf("mk: vm_page_grab(): high wired page count of %d\n",
4697 vm_page_wire_count);
4698 }
4699 #endif
4700 #if VM_PAGE_GOBBLE_COUNT_WARNING
4701 if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
4702 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
4703 vm_page_gobble_count);
4704 }
4705 #endif
4706
4707 if (vm_page_free_count < vm_page_free_min && !vm_pageout_running) {
4708 thread_wakeup(&vm_page_free_wanted);
4709 }
4710
4711 vm_free_page_unlock();
4712
4713 VM_CHECK_MEMORYSTATUS;
4714
4715 if (list.vmpl_head) {
4716 /* Steal a page off the list for the caller. */
4717 mem = vm_page_list_pop(&list);
4718
4719 /* Add the remaining pages to the CPU's free list. */
4720 assert(*cpu_list == VM_PAGE_NULL);
4721 *cpu_list = list.vmpl_head;
4722 counter_add_preemption_disabled(counter, list.vmpl_count);
4723 }
4724
4725 enable_preemption();
4726
4727 return mem;
4728 }
4729
4730 vm_page_t
4731 vm_page_grab_options(vm_grab_options_t options)
4732 {
4733 vm_page_t mem;
4734
4735 restart:
4736
4737 /*
4738 * Step 1: look at the CPU magazines.
4739 */
4740
4741 disable_preemption();
4742 mem = vm_page_grab_from_cpu(PERCPU_GET(free_pages), &vm_cpu_free_count);
4743 enable_preemption();
4744
4745 if (mem != VM_PAGE_NULL) {
4746 return vm_page_grab_finalize(options, mem);
4747 }
4748
4749 #if XNU_VM_HAS_DELAYED_PAGES
4750 /*
4751 * If free count is low and we have delayed pages from early boot,
4752 * get one of those instead.
4753 */
4754 if (__improbable(vm_delayed_count > 0 &&
4755 vm_page_free_count <= vm_page_free_target)) {
4756 mem = vm_get_delayed_page(options);
4757 if (mem != VM_PAGE_NULL) {
4758 return vm_page_grab_finalize(options, mem);
4759 }
4760 }
4761 #endif /* XNU_VM_HAS_DELAYED_PAGES */
4762
4763
4764 /*
4765 * Step 2: Try to promote pages from the free queues,
4766 * or the secluded queue if appropriate.
4767 */
4768
4769 mem = vm_page_grab_slow(options);
4770 if (mem != VM_PAGE_NULL) {
4771 return vm_page_grab_finalize(options, mem);
4772 }
4773
4774 #if CONFIG_SECLUDED_MEMORY
4775 mem = vm_page_grab_secluded(options);
4776 if (mem != VM_PAGE_NULL) {
4777 return vm_page_grab_finalize(options, mem);
4778 }
4779 #endif /* CONFIG_SECLUDED_MEMORY */
4780
4781
4782 /*
4783 * Step 3: Privileged threads block and retry, others fail.
4784 */
4785
4786 if ((options & VM_PAGE_GRAB_NOPAGEWAIT) == 0 &&
4787 (current_thread()->options & TH_OPT_VMPRIV) != 0) {
4788 VM_PAGE_WAIT();
4789 goto restart;
4790 }
4791
4792 return VM_PAGE_NULL;
4793 }
4794
4795 vm_grab_options_t
4796 vm_page_grab_options_for_object(vm_object_t object __unused)
4797 {
4798 vm_grab_options_t options = VM_PAGE_GRAB_OPTIONS_NONE;
4799
4800 #if CONFIG_SECLUDED_MEMORY
4801 if (object->can_grab_secluded) {
4802 options |= VM_PAGE_GRAB_SECLUDED;
4803 }
4804 #endif /* CONFIG_SECLUDED_MEMORY */
4805
4806 return options;
4807 }
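
/*
 * Illustrative sketch (added commentary, compiled out): pairing
 * vm_page_grab_options_for_object() with vm_page_grab_options() when
 * allocating on behalf of a given object. The function name is made up
 * for illustration only.
 */
#if 0
static vm_page_t
vm_page_grab_for_object_example(vm_object_t object)
{
	vm_grab_options_t options;

	options = vm_page_grab_options_for_object(object);
	return vm_page_grab_options(options);
}
#endif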
4808
4809 /*!
4810 * @function vm_page_free_queue_steal()
4811 *
4812 * @abstract
4813 * Steal a given page from the free queues.
4814 *
4815 * @discussion
4816 * The given page must be in the given free queue, or state may be corrupted.
4817 *
4818 * Internally, the free queue is not synchronized, so any locking must be done
4819 * outside of this function.
4820 *
4821 * This function, like vm_page_grab(), takes care of waking up
4822 * the pageout scan as needed.
4823 */
4824 static void
4825 vm_page_free_queue_steal(vm_grab_options_t options, vm_page_t mem)
4826 {
4827 ppnum_t pnum = VM_PAGE_GET_PHYS_PAGE(mem);
4828 vm_memory_class_t class = vm_page_get_memory_class(mem, pnum);
4829
4830 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
4831 assert(!mem->vmp_lopage && mem->vmp_busy);
4832
4833 vm_page_free_queue_remove(class, mem, pnum, VM_PAGE_NOT_ON_Q);
4834 vm_page_grab_finalize(options, mem);
4835
4836 if (vm_page_free_count < vm_page_free_min && !vm_pageout_running) {
4837 thread_wakeup(&vm_page_free_wanted);
4838 }
4839 }
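
/*
 * Illustrative sketch (added commentary, compiled out): since
 * vm_page_free_queue_steal() does no synchronization of its own, a
 * hypothetical caller is assumed to bracket it with the free-page lock
 * (vm_free_page_lock_spin()/vm_free_page_unlock()), as the grab paths
 * above do. The function name is made up for illustration only.
 */
#if 0
static void
vm_page_free_queue_steal_example(vm_grab_options_t options, vm_page_t mem)
{
	vm_free_page_lock_spin();
	vm_page_free_queue_steal(options, mem);
	vm_free_page_unlock();
}
#endif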
4840
4841
4842 /*
4843 * vm_page_wait:
4844 *
4845 * Wait for a page to become available.
4846 * If there are plenty of free pages, then we don't sleep.
4847 *
4848 * Returns:
4849 * TRUE: There may be another page, try again
4850 * FALSE: We were interrupted out of our wait, don't try again
4851 */
4852
4853 boolean_t
4854 vm_page_wait(int interruptible)
4855 {
4856 /*
4857 * We can't use vm_page_free_reserved to make this
4858 * determination. Consider: some thread might
4859 * need to allocate two pages. The first allocation
4860 * succeeds, the second fails. After the first page is freed,
4861 * a call to vm_page_wait must really block.
4862 */
4863 kern_return_t wait_result = THREAD_NOT_WAITING;
4864 thread_t cur_thread = current_thread();
4865 bool is_privileged = cur_thread->options & TH_OPT_VMPRIV;
4866 bool need_wakeup = false;
4867 event_t wait_event = NULL;
4868
4869 vm_free_page_lock_spin();
4870
4871 if (is_privileged) {
4872 if (vm_page_free_count) {
4873 vm_free_page_unlock();
4874 goto out;
4875 }
4876
4877 if (vm_page_free_wanted_privileged++ == 0) {
4878 need_wakeup = true;
4879 }
4880
4881 wait_event = (event_t)&vm_page_free_wanted_privileged;
4882 } else if (vm_page_free_count >= vm_page_free_target) {
4883 vm_free_page_unlock();
4884 goto out;
4885 #if CONFIG_SECLUDED_MEMORY
4886 } else if (secluded_for_apps &&
4887 task_can_use_secluded_mem(current_task(), FALSE)) {
4888 #if 00
4889 /* XXX FBDP: need pageq lock for this... */
4890 /* XXX FBDP: might wait even if pages available, */
4891 /* XXX FBDP: hopefully not for too long... */
4892 if (vm_page_secluded_count > 0) {
4893 vm_free_page_unlock();
4894 goto out;
4895 }
4896 #endif
4897 if (vm_page_free_wanted_secluded++ == 0) {
4898 need_wakeup = true;
4899 }
4900
4901 wait_event = (event_t)&vm_page_free_wanted_secluded;
4902 #endif /* CONFIG_SECLUDED_MEMORY */
4903 } else {
4904 if (vm_page_free_wanted++ == 0) {
4905 need_wakeup = true;
4906 }
4907
4908 wait_event = (event_t)&vm_page_free_count;
4909 }
4910
4911 if (vm_pageout_running) {
4912 need_wakeup = false;
4913 }
4914
4915 /*
4916 * We don't do a vm_pageout_scan wakeup if we already have
4917 * some waiters because vm_pageout_scan checks for waiters
4918 * before it returns and does so behind the vm_page_queue_free_lock,
4919 * which we own when we bump the waiter counts.
4920 */
4921
4922 if (vps_dynamic_priority_enabled) {
4923 /*
4924 * We are waking up vm_pageout_scan here. If it needs
4925 * the vm_page_queue_free_lock before we unlock it
4926 * we'll end up just blocking and incur an extra
4927 * context switch. Could be a perf. issue.
4928 */
4929
4930 if (need_wakeup) {
4931 thread_wakeup((event_t)&vm_page_free_wanted);
4932 }
4933
4934 /*
4935 * LD: This event is going to get recorded every time because
4936 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
4937 * We just block in that routine.
4938 */
4939 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4940 vm_page_free_wanted_privileged,
4941 vm_page_free_wanted,
4942 #if CONFIG_SECLUDED_MEMORY
4943 vm_page_free_wanted_secluded,
4944 #else /* CONFIG_SECLUDED_MEMORY */
4945 0,
4946 #endif /* CONFIG_SECLUDED_MEMORY */
4947 0);
4948 wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
4949 LCK_SLEEP_UNLOCK,
4950 wait_event,
4951 vm_pageout_scan_thread,
4952 interruptible,
4953 0);
4954 } else {
4955 wait_result = assert_wait(wait_event, interruptible);
4956
4957 vm_free_page_unlock();
4958
4959 if (need_wakeup) {
4960 thread_wakeup((event_t)&vm_page_free_wanted);
4961 }
4962
4963 if (wait_result != THREAD_WAITING) {
4964 goto out;
4965 }
4966
4967
4968 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4969 DBG_VM_PAGE_WAIT_BLOCK,
4970 DBG_FUNC_START,
4971 vm_page_free_wanted_privileged,
4972 vm_page_free_wanted,
4973 #if CONFIG_SECLUDED_MEMORY
4974 vm_page_free_wanted_secluded,
4975 #else /* CONFIG_SECLUDED_MEMORY */
4976 0,
4977 #endif /* CONFIG_SECLUDED_MEMORY */
4978 0);
4979 wait_result = thread_block(THREAD_CONTINUE_NULL);
4980 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4981 DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4982 }
4983
4984 out:
4985 return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4986 }
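
/*
 * Illustrative sketch (added commentary, compiled out): how a hypothetical
 * caller consumes vm_page_wait()'s boolean result -- retry after TRUE, give
 * up after FALSE (interrupted wait). THREAD_ABORTSAFE is just an example
 * interruptible value; the function name is made up for illustration only.
 */
#if 0
static vm_page_t
vm_page_grab_blocking_example(void)
{
	vm_page_t m;

	while ((m = vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE)) == VM_PAGE_NULL) {
		if (!vm_page_wait(THREAD_ABORTSAFE)) {
			/* interrupted out of the wait: don't try again */
			return VM_PAGE_NULL;
		}
	}
	return m;
}
#endif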
4987
4988 /*
4989 * vm_page_free_prepare:
4990 *
4991 * Removes page from any queue it may be on
4992 * and disassociates it from its VM object.
4993 *
4994 * Object and page queues must be locked prior to entry.
4995 */
4996 static void
4997 vm_page_free_prepare(
4998 vm_page_t mem)
4999 {
5000 #if CONFIG_SPTM
5001 /**
5002 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
5003 * created and destroyed. In order to catch potential cases where this
5004 * does not happen, add an appropriate assert here. This code should be
5005 * executed on every frame that is about to be released to the VM.
5006 */
5007 const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
5008 __unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
5009
5010 assert(frame_type == XNU_DEFAULT);
5011 #endif /* CONFIG_SPTM */
5012
5013 vm_page_free_prepare_queues(mem);
5014 vm_page_free_prepare_object(mem, TRUE);
5015 }
5016
5017
5018 void
5019 vm_page_free_prepare_queues(
5020 vm_page_t mem)
5021 {
5022 vm_object_t m_object;
5023
5024 VM_PAGE_CHECK(mem);
5025
5026 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
5027 assert(!mem->vmp_cleaning);
5028 m_object = VM_PAGE_OBJECT(mem);
5029
5030 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5031 if (m_object) {
5032 vm_object_lock_assert_exclusive(m_object);
5033 }
5034 if (mem->vmp_laundry) {
5035 /*
5036 * We may have to free a page while it's being laundered
5037 * if we lost its pager (due to a forced unmount, for example).
5038 * We need to call vm_pageout_steal_laundry() before removing
5039 * the page from its VM object, so that we can remove it
5040 * from its pageout queue and adjust the laundry accounting
5041 */
5042 vm_pageout_steal_laundry(mem, TRUE);
5043 }
5044
5045 vm_page_queues_remove(mem, TRUE);
5046
5047 if (mem->vmp_realtime) {
5048 mem->vmp_realtime = false;
5049 VM_COUNTER_DEC(&vm_page_realtime_count);
5050 }
5051
5052 if (VM_PAGE_WIRED(mem)) {
5053 assert(mem->vmp_wire_count > 0);
5054
5055 if (m_object) {
5056 task_t owner;
5057 int ledger_idx_volatile;
5058 int ledger_idx_nonvolatile;
5059 int ledger_idx_volatile_compressed;
5060 int ledger_idx_nonvolatile_compressed;
5061 int ledger_idx_composite;
5062 int ledger_idx_external_wired;
5063 boolean_t do_footprint;
5064
5065 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
5066 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
5067 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
5068
5069 assert(m_object->resident_page_count >=
5070 m_object->wired_page_count);
5071
5072 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
5073 OSAddAtomic(+1, &vm_page_purgeable_count);
5074 assert(vm_page_purgeable_wired_count > 0);
5075 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
5076 }
5077 if (m_object->internal &&
5078 m_object->vo_owner != TASK_NULL &&
5079 (m_object->purgable == VM_PURGABLE_VOLATILE ||
5080 m_object->purgable == VM_PURGABLE_EMPTY)) {
5081 owner = VM_OBJECT_OWNER(m_object);
5082 vm_object_ledger_tag_ledgers(
5083 m_object,
5084 &ledger_idx_volatile,
5085 &ledger_idx_nonvolatile,
5086 &ledger_idx_volatile_compressed,
5087 &ledger_idx_nonvolatile_compressed,
5088 &ledger_idx_composite,
5089 &ledger_idx_external_wired,
5090 &do_footprint);
5091 /*
5092 * While wired, this page was accounted
5093 * as "non-volatile" but it should now
5094 * be accounted as "volatile".
5095 */
5096 /* one less "non-volatile"... */
5097 ledger_debit(owner->ledger,
5098 ledger_idx_nonvolatile,
5099 PAGE_SIZE);
5100 if (do_footprint) {
5101 /* ... and "phys_footprint" */
5102 ledger_debit(owner->ledger,
5103 task_ledgers.phys_footprint,
5104 PAGE_SIZE);
5105 } else if (ledger_idx_composite != -1) {
5106 ledger_debit(owner->ledger,
5107 ledger_idx_composite,
5108 PAGE_SIZE);
5109 }
5110 /* one more "volatile" */
5111 ledger_credit(owner->ledger,
5112 ledger_idx_volatile,
5113 PAGE_SIZE);
5114 }
5115 }
5116 if (vm_page_is_canonical(mem)) {
5117 vm_page_wire_count--;
5118 }
5119
5120
5121 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
5122 mem->vmp_wire_count = 0;
5123 assert(!mem->vmp_gobbled);
5124 } else if (mem->vmp_gobbled) {
5125 if (vm_page_is_canonical(mem)) {
5126 vm_page_wire_count--;
5127 }
5128 vm_page_gobble_count--;
5129 }
5130 }
5131
5132 /*
5133 * like vm_page_init, but we have to preserve fields related to phys page
5134 */
5135 inline static void
5136 vm_page_reset_canonical(vm_page_t mem)
5137 {
5138 *mem = (struct vm_page){
5139 .vmp_offset = (vm_object_offset_t)-1,
5140 .vmp_q_state = VM_PAGE_NOT_ON_Q,
5141 .vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY,
5142 #if XNU_VM_HAS_LOPAGE
5143 .vmp_lopage = mem->vmp_lopage,
5144 #endif /* XNU_VM_HAS_LOPAGE */
5145 .vmp_canonical = true,
5146 .vmp_busy = true,
5147 .vmp_realtime = mem->vmp_realtime,
5148 #if !XNU_VM_HAS_LINEAR_PAGES_ARRAY
5149 .vmp_phys_page = mem->vmp_phys_page,
5150 #endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
5151 };
5152 /* ECC information is out of `struct vm_page` and preserved */
5153 }
5154
5155 void
5156 vm_page_free_prepare_object(vm_page_t mem, boolean_t remove_from_hash)
5157 {
5158 if (mem->vmp_tabled) {
5159 vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */
5160 }
5161 vm_page_wakeup(VM_OBJECT_NULL, mem); /* clears wanted */
5162
5163 if (vm_page_is_private(mem)) {
5164 vm_page_reset_private(mem);
5165 }
5166 if (vm_page_is_canonical(mem)) {
5167 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0 &&
5168 mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0 &&
5169 mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0 &&
5170 mem->vmp_next_m == 0);
5171
5172 vm_page_validate_no_references(mem);
5173
5174 vm_page_reset_canonical(mem);
5175 }
5176 }
5177
5178 /*
5179 * vm_page_release:
5180 *
5181 * Return a page to the free list.
5182 *
5183 * Keep in sync with vm_page_free_list().
5184 */
5185
5186 void
5187 vm_page_release(vm_page_t mem, vmp_release_options_t options)
5188 {
5189 if (options & VMP_RELEASE_Q_LOCKED) {
5190 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5191 } else {
5192 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
5193 }
5194
5195 assert(vm_page_is_canonical(mem));
5196 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
5197
5198 if ((options & VMP_RELEASE_SKIP_FREE_CHECK) == 0) {
5199 vm_page_validate_no_references(mem);
5200 }
5201
5202 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
5203
5204
5205 vm_page_free_queue_enter_list(vm_page_list_for_page(mem), options);
5206 }
5207
5208 /*
5209 * This version of vm_page_release() is used only at startup
5210 * when we are single-threaded and pages are being released
5211 * for the first time. Hence, no locking is needed and unnecessary checks are skipped.
5212 * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
5213 */
5214 void
5215 vm_page_release_startup(vm_page_t mem)
5216 {
5217 vm_page_free_queue_enter_list(vm_page_list_for_page(mem),
5218 VMP_RELEASE_STARTUP);
5219 }
5220
5221 /*
5222 * vm_page_free:
5223 *
5224 * Returns the given page to the free list,
5225 * disassociating it with any VM object.
5226 *
5227 * Object and page queues must be locked prior to entry.
5228 */
5229 void
5230 vm_page_free(vm_page_t mem)
5231 {
5232 vm_page_free_prepare(mem);
5233
5234 if (vm_page_is_canonical(mem)) {
5235 /* page queues are locked */
5236 vm_page_release(mem, VMP_RELEASE_Q_LOCKED |
5237 VMP_RELEASE_SKIP_FREE_CHECK);
5238 } else {
5239 vm_page_release_fictitious(mem);
5240 }
5241 }
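
/*
 * Illustrative sketch (added commentary, compiled out): the locking a
 * hypothetical caller provides around vm_page_free(), per the "Object and
 * page queues must be locked" requirement above. The function name is made
 * up for illustration only.
 */
#if 0
static void
vm_page_free_example(vm_object_t object, vm_page_t mem)
{
	vm_object_lock(object);
	vm_page_lockspin_queues();
	vm_page_free(mem);              /* "mem" must belong to "object" */
	vm_page_unlock_queues();
	vm_object_unlock(object);
}
#endif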
5242
5243
5244 void
5245 vm_page_free_unlocked(vm_page_t mem, boolean_t remove_from_hash)
5246 {
5247 vm_page_lockspin_queues();
5248 vm_page_free_prepare_queues(mem);
5249 vm_page_unlock_queues();
5250
5251 vm_page_free_prepare_object(mem, remove_from_hash);
5252
5253 if (vm_page_is_canonical(mem)) {
5254 /* page queues are not locked */
5255 vm_page_release(mem, VMP_RELEASE_SKIP_FREE_CHECK);
5256 } else {
5257 vm_page_release_fictitious(mem);
5258 }
5259 }
5260
5261
5262 /*
5263 * Free a list of pages. The list can be up to several hundred pages,
5264 * as blocked up by vm_pageout_scan().
5265 * The big win is not having to take the free list lock once
5266 * per page.
5267 *
5268 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
5269 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
5270 *
5271 * Keep in sync with vm_page_release().
5272 */
5273 void
5274 vm_page_free_list(vm_page_t freeq, bool prepare_object)
5275 {
5276 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
5277 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
5278
5279 while (freeq) {
5280 vm_page_list_t list = { };
5281
5282 while (list.vmpl_count < VMP_FREE_BATCH_SIZE && freeq) {
5283 vm_page_t mem = _vm_page_list_pop(&freeq);
5284
5285 assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5286 (mem->vmp_q_state == VM_PAGE_IS_WIRED));
5287
5288 if (prepare_object) {
5289 vm_page_free_prepare_object(mem, TRUE);
5290 }
5291
5292 if (vm_page_is_fictitious(mem)) {
5293 vm_page_release_fictitious(mem);
5294 continue;
5295 }
5296
5297 if (!prepare_object) {
5298 /* vm_page_free_prepare_object() checked it */
5299 vm_page_validate_no_references(mem);
5300 }
5301
5302 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
5303
5304
5305 /*
5306 * IMPORTANT: we can't set the page "free" here
5307 * because that would make the page eligible for
5308 * a physically-contiguous allocation (see
5309 * vm_page_find_contiguous()) right away (we don't
5310 * hold the vm_page_queue_free lock). That would
5311 * cause trouble because the page is not actually
5312 * in the free queue yet...
5313 */
5314
5315 vm_page_list_push(&list, mem);
5316 }
5317
5318 if (list.vmpl_count) {
5319 vm_page_free_queue_enter_list(list, VMP_RELEASE_NONE);
5320 }
5321 }
5322 }
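
/*
 * Illustrative sketch (added commentary, compiled out): building a local
 * free list chained through vmp_snext and freeing it with a single call,
 * the same vmp_snext pattern used by vm_page_secluded_drain() above.
 * Pages are assumed to have already been taken off the paging queues
 * (vmp_q_state == VM_PAGE_NOT_ON_Q), as the asserts above require.
 * The function name is made up for illustration only.
 */
#if 0
static void
vm_page_free_list_example(vm_page_t *pages, unsigned int count)
{
	vm_page_t local_freeq = VM_PAGE_NULL;

	for (unsigned int i = 0; i < count; i++) {
		pages[i]->vmp_snext = local_freeq;
		local_freeq = pages[i];
	}
	if (local_freeq != VM_PAGE_NULL) {
		vm_page_free_list(local_freeq, TRUE);
	}
}
#endif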
5323
5324
5325 /*
5326 * vm_page_wire:
5327 *
5328 * Mark this page as wired down by yet
5329 * another map, removing it from paging queues
5330 * as necessary.
5331 *
5332 * The page's object and the page queues must be locked.
5333 */
5334
5335
5336 void
5337 vm_page_wire(
5338 vm_page_t mem,
5339 vm_tag_t tag,
5340 boolean_t check_memorystatus)
5341 {
5342 vm_object_t m_object;
5343
5344 m_object = VM_PAGE_OBJECT(mem);
5345
5346 // dbgLog(current_thread(), mem->vmp_offset, m_object, 1); /* (TEST/DEBUG) */
5347
5348 VM_PAGE_CHECK(mem);
5349 if (m_object) {
5350 vm_object_lock_assert_exclusive(m_object);
5351 } else {
5352 /*
5353 * In theory, the page should be in an object before it
5354 * gets wired, since we need to hold the object lock
5355 * to update some fields in the page structure.
5356 * However, some code (i386 pmap, for example) might want
5357 * to wire a page before it gets inserted into an object.
5358 * That's somewhat OK, as long as nobody else can get to
5359 * that page and update it at the same time.
5360 */
5361 }
5362 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5363 if (!VM_PAGE_WIRED(mem)) {
5364 if (mem->vmp_laundry) {
5365 vm_pageout_steal_laundry(mem, TRUE);
5366 }
5367
5368 vm_page_queues_remove(mem, TRUE);
5369
5370 assert(mem->vmp_wire_count == 0);
5371 mem->vmp_q_state = VM_PAGE_IS_WIRED;
5372
5373 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5374 if (mem->vmp_unmodified_ro == true) {
5375 /* Object and PageQ locks are held*/
5376 mem->vmp_unmodified_ro = false;
5377 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
5378 vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
5379 }
5380 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5381
5382 if (m_object) {
5383 task_t owner;
5384 int ledger_idx_volatile;
5385 int ledger_idx_nonvolatile;
5386 int ledger_idx_volatile_compressed;
5387 int ledger_idx_nonvolatile_compressed;
5388 int ledger_idx_composite;
5389 int ledger_idx_external_wired;
5390 boolean_t do_footprint;
5391
5392 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
5393 VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
5394 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
5395
5396 assert(m_object->resident_page_count >=
5397 m_object->wired_page_count);
5398 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
5399 assert(vm_page_purgeable_count > 0);
5400 OSAddAtomic(-1, &vm_page_purgeable_count);
5401 OSAddAtomic(1, &vm_page_purgeable_wired_count);
5402 }
5403 if (m_object->internal &&
5404 m_object->vo_owner != TASK_NULL &&
5405 (m_object->purgable == VM_PURGABLE_VOLATILE ||
5406 m_object->purgable == VM_PURGABLE_EMPTY)) {
5407 owner = VM_OBJECT_OWNER(m_object);
5408 vm_object_ledger_tag_ledgers(
5409 m_object,
5410 &ledger_idx_volatile,
5411 &ledger_idx_nonvolatile,
5412 &ledger_idx_volatile_compressed,
5413 &ledger_idx_nonvolatile_compressed,
5414 &ledger_idx_composite,
5415 &ledger_idx_external_wired,
5416 &do_footprint);
5417 /* less volatile bytes */
5418 ledger_debit(owner->ledger,
5419 ledger_idx_volatile,
5420 PAGE_SIZE);
5421 /* more not-quite-volatile bytes */
5422 ledger_credit(owner->ledger,
5423 ledger_idx_nonvolatile,
5424 PAGE_SIZE);
5425 if (do_footprint) {
5426 /* more footprint */
5427 ledger_credit(owner->ledger,
5428 task_ledgers.phys_footprint,
5429 PAGE_SIZE);
5430 } else if (ledger_idx_composite != -1) {
5431 ledger_credit(owner->ledger,
5432 ledger_idx_composite,
5433 PAGE_SIZE);
5434 }
5435 }
5436
5437 if (m_object->all_reusable) {
5438 /*
5439 * Wired pages are not counted as "re-usable"
5440 * in "all_reusable" VM objects, so nothing
5441 * to do here.
5442 */
5443 } else if (mem->vmp_reusable) {
5444 /*
5445 * This page is not "re-usable" when it's
5446 * wired, so adjust its state and the
5447 * accounting.
5448 */
5449 vm_page_lockconvert_queues();
5450 vm_object_reuse_pages(m_object,
5451 mem->vmp_offset,
5452 mem->vmp_offset + PAGE_SIZE_64,
5453 FALSE);
5454 }
5455 }
5456 assert(!mem->vmp_reusable);
5457
5458 if (vm_page_is_canonical(mem) && !mem->vmp_gobbled) {
5459 vm_page_wire_count++;
5460 }
5461 if (mem->vmp_gobbled) {
5462 vm_page_gobble_count--;
5463 }
5464 mem->vmp_gobbled = FALSE;
5465
5466 if (check_memorystatus == TRUE) {
5467 VM_CHECK_MEMORYSTATUS;
5468 }
5469 }
5470 assert(!mem->vmp_gobbled);
5471 assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
5472 mem->vmp_wire_count++;
5473
5474
5475 if (__improbable(mem->vmp_wire_count == 0)) {
5476 panic("vm_page_wire(%p): wire_count overflow", mem);
5477 }
5478 VM_PAGE_CHECK(mem);
5479 }
5480
5481 /*
5482 * vm_page_unwire:
5483 *
5484 * Release one wiring of this page, potentially
5485 * enabling it to be paged again.
5486 *
5487 * The page's object and the page queues must be locked.
5488 */
5489 void
5490 vm_page_unwire(
5491 vm_page_t mem,
5492 boolean_t queueit)
5493 {
5494 vm_object_t m_object;
5495
5496 m_object = VM_PAGE_OBJECT(mem);
5497
5498 // dbgLog(current_thread(), mem->vmp_offset, m_object, 0); /* (TEST/DEBUG) */
5499
5500 VM_PAGE_CHECK(mem);
5501 assert(VM_PAGE_WIRED(mem));
5502 assert(mem->vmp_wire_count > 0);
5503 assert(!mem->vmp_gobbled);
5504 assert(m_object != VM_OBJECT_NULL);
5505 vm_object_lock_assert_exclusive(m_object);
5506 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5507 if (--mem->vmp_wire_count == 0) {
5508 task_t owner;
5509 int ledger_idx_volatile;
5510 int ledger_idx_nonvolatile;
5511 int ledger_idx_volatile_compressed;
5512 int ledger_idx_nonvolatile_compressed;
5513 int ledger_idx_composite;
5514 int ledger_idx_external_wired;
5515 boolean_t do_footprint;
5516
5517 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
5518
5519 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
5520 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
5521 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
5522 if (vm_page_is_canonical(mem)) {
5523 vm_page_wire_count--;
5524 }
5525
5526
5527 assert(m_object->resident_page_count >=
5528 m_object->wired_page_count);
5529 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
5530 OSAddAtomic(+1, &vm_page_purgeable_count);
5531 assert(vm_page_purgeable_wired_count > 0);
5532 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
5533 }
5534 if (m_object->internal &&
5535 m_object->vo_owner != TASK_NULL &&
5536 (m_object->purgable == VM_PURGABLE_VOLATILE ||
5537 m_object->purgable == VM_PURGABLE_EMPTY)) {
5538 owner = VM_OBJECT_OWNER(m_object);
5539 vm_object_ledger_tag_ledgers(
5540 m_object,
5541 &ledger_idx_volatile,
5542 &ledger_idx_nonvolatile,
5543 &ledger_idx_volatile_compressed,
5544 &ledger_idx_nonvolatile_compressed,
5545 &ledger_idx_composite,
5546 &ledger_idx_external_wired,
5547 &do_footprint);
5548 /* more volatile bytes */
5549 ledger_credit(owner->ledger,
5550 ledger_idx_volatile,
5551 PAGE_SIZE);
5552 /* less not-quite-volatile bytes */
5553 ledger_debit(owner->ledger,
5554 ledger_idx_nonvolatile,
5555 PAGE_SIZE);
5556 if (do_footprint) {
5557 /* less footprint */
5558 ledger_debit(owner->ledger,
5559 task_ledgers.phys_footprint,
5560 PAGE_SIZE);
5561 } else if (ledger_idx_composite != -1) {
5562 ledger_debit(owner->ledger,
5563 ledger_idx_composite,
5564 PAGE_SIZE);
5565 }
5566 }
5567 assert(!is_kernel_object(m_object));
5568 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
5569
5570 if (queueit == TRUE) {
5571 if (m_object->purgable == VM_PURGABLE_EMPTY) {
5572 vm_page_deactivate(mem);
5573 } else {
5574 vm_page_activate(mem);
5575 }
5576 }
5577
5578 VM_CHECK_MEMORYSTATUS;
5579 }
5580 VM_PAGE_CHECK(mem);
5581 }
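/*
 * Hedged usage sketch (illustrative only, not an actual call site):
 * a hypothetical caller that temporarily wires a page and then drops
 * the wiring, honoring the locking contract asserted above.  'object',
 * 'mem' and 'tag' are placeholder names.
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(mem, tag, TRUE);
 *	... operate on the pinned page ...
 *	vm_page_unwire(mem, TRUE);	// requeue if this was the last wiring
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */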
5582
5583 /*
5584 * vm_page_deactivate:
5585 *
5586 * Returns the given page to the inactive list,
5587 * indicating that no physical maps have access
5588 * to this page. [Used by the physical mapping system.]
5589 *
5590 * The page queues must be locked.
5591 */
5592 void
5593 vm_page_deactivate(
5594 vm_page_t m)
5595 {
5596 vm_page_deactivate_internal(m, TRUE);
5597 }
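/*
 * Hedged sketch (hypothetical, not an actual call site): the physical
 * mapping system mentioned above would deactivate a page roughly like
 * this, with the page queues lock held as required:
 *
 *	vm_page_lock_queues();
 *	if (!VM_PAGE_WIRED(m)) {
 *		vm_page_deactivate(m);	// also clears the pmap reference bit
 *	}
 *	vm_page_unlock_queues();
 */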
5598
5599
5600 void
5601 vm_page_deactivate_internal(
5602 vm_page_t m,
5603 boolean_t clear_hw_reference)
5604 {
5605 vm_object_t m_object;
5606
5607 m_object = VM_PAGE_OBJECT(m);
5608
5609 VM_PAGE_CHECK(m);
5610 assert(!is_kernel_object(m_object));
5611 assert(!vm_page_is_guard(m));
5612
5613 // dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
5614 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5615 /*
5616 * This page is no longer very interesting. If it was
5617 * interesting (active or inactive/referenced), then we
5618 * clear the reference bit and (re)enter it in the
5619 * inactive queue. Note wired pages should not have
5620 * their reference bit cleared.
5621 */
5622 assert( !(m->vmp_absent && !m->vmp_unusual));
5623
5624 if (m->vmp_gobbled) { /* can this happen? */
5625 assert( !VM_PAGE_WIRED(m));
5626
5627 if (vm_page_is_canonical(m)) {
5628 vm_page_wire_count--;
5629 }
5630 vm_page_gobble_count--;
5631 m->vmp_gobbled = FALSE;
5632 }
5633 /*
5634 * if this page is currently on the pageout queue, we can't do the
5635 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5636 * and we can't remove it manually since we would need the object lock
5637 * (which is not required here) to decrement the activity_in_progress
5638 * reference which is held on the object while the page is in the pageout queue...
5639 * just let the normal laundry processing proceed
5640 */
5641 if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5642 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5643 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5644 VM_PAGE_WIRED(m)) {
5645 return;
5646 }
5647 if (!m->vmp_absent && clear_hw_reference == TRUE) {
5648 vm_page_lockconvert_queues();
5649 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
5650 }
5651
5652 m->vmp_reference = FALSE;
5653 m->vmp_no_cache = FALSE;
5654
5655 if (!VM_PAGE_INACTIVE(m)) {
5656 vm_page_queues_remove(m, FALSE);
5657
5658 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5659 m->vmp_dirty && m_object->internal &&
5660 (m_object->purgable == VM_PURGABLE_DENY ||
5661 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5662 m_object->purgable == VM_PURGABLE_VOLATILE)) {
5663 vm_page_check_pageable_safe(m);
5664 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5665 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5666 vm_page_throttled_count++;
5667 } else {
5668 if (m_object->named &&
5669 os_ref_get_count_raw(&m_object->ref_count) == 1) {
5670 vm_page_speculate(m, FALSE);
5671 #if DEVELOPMENT || DEBUG
5672 vm_page_speculative_recreated++;
5673 #endif
5674 } else {
5675 vm_page_enqueue_inactive(m, FALSE);
5676 }
5677 }
5678 }
5679 }
5680
5681 /*
5682 * vm_page_enqueue_cleaned
5683 *
5684 * Put the page on the cleaned queue, mark it cleaned, etc.
5685 * Being on the cleaned queue (and having m->clean_queue set)
5686 * does ** NOT ** guarantee that the page is clean!
5687 *
5688 * Call with the queues lock held.
5689 */
5690
5691 void
5692 vm_page_enqueue_cleaned(vm_page_t m)
5693 {
5694 vm_object_t m_object;
5695
5696 m_object = VM_PAGE_OBJECT(m);
5697
5698 assert(!vm_page_is_guard(m));
5699 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5700 assert(!(m->vmp_absent && !m->vmp_unusual));
5701
5702 if (VM_PAGE_WIRED(m)) {
5703 return;
5704 }
5705
5706 if (m->vmp_gobbled) {
5707 if (vm_page_is_canonical(m)) {
5708 vm_page_wire_count--;
5709 }
5710 vm_page_gobble_count--;
5711 m->vmp_gobbled = FALSE;
5712 }
5713 /*
5714 * if this page is currently on the pageout queue, we can't do the
5715 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5716 * and we can't remove it manually since we would need the object lock
5717 * (which is not required here) to decrement the activity_in_progress
5718 * reference which is held on the object while the page is in the pageout queue...
5719 * just let the normal laundry processing proceed
5720 */
5721 if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5722 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
5723 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5724 return;
5725 }
5726 vm_page_queues_remove(m, FALSE);
5727
5728 vm_page_check_pageable_safe(m);
5729 vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
5730 m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
5731 vm_page_cleaned_count++;
5732
5733 vm_page_inactive_count++;
5734 if (m_object->internal) {
5735 vm_page_pageable_internal_count++;
5736 } else {
5737 vm_page_pageable_external_count++;
5738 }
5739 vm_page_add_to_specialq(m, TRUE);
5740 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
5741 }
5742
5743 /*
5744 * vm_page_activate:
5745 *
5746 * Put the specified page on the active list (if appropriate).
5747 *
5748 * The page queues must be locked.
5749 */
5750
5751 void
5752 vm_page_activate(
5753 vm_page_t m)
5754 {
5755 vm_object_t m_object;
5756
5757 m_object = VM_PAGE_OBJECT(m);
5758
5759 VM_PAGE_CHECK(m);
5760 #ifdef FIXME_4778297
5761 assert(!is_kernel_object(m_object));
5762 #endif
5763 assert(!vm_page_is_guard(m));
5764 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5765 assert( !(m->vmp_absent && !m->vmp_unusual));
5766
5767 if (m->vmp_gobbled) {
5768 assert( !VM_PAGE_WIRED(m));
5769 if (vm_page_is_canonical(m)) {
5770 vm_page_wire_count--;
5771 }
5772 vm_page_gobble_count--;
5773 m->vmp_gobbled = FALSE;
5774 }
5775 /*
5776 * if this page is currently on the pageout queue, we can't do the
5777 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5778 * and we can't remove it manually since we would need the object lock
5779 * (which is not required here) to decrement the activity_in_progress
5780 * reference which is held on the object while the page is in the pageout queue...
5781 * just let the normal laundry processing proceed
5782 */
5783 if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5784 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5785 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5786 return;
5787 }
5788
5789 #if DEBUG
5790 if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
5791 panic("vm_page_activate: already active");
5792 }
5793 #endif
5794
5795 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5796 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5797 DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5798 }
5799
5800 /*
5801 * A freshly activated page should be promoted in the donation queue.
5802 * So we remove it here while preserving its hint and we will enqueue
5803 * it again in vm_page_enqueue_active.
5804 */
5805 vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5806
5807 if (!VM_PAGE_WIRED(m)) {
5808 vm_page_check_pageable_safe(m);
5809 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5810 m->vmp_dirty && m_object->internal &&
5811 (m_object->purgable == VM_PURGABLE_DENY ||
5812 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5813 m_object->purgable == VM_PURGABLE_VOLATILE)) {
5814 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5815 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5816 vm_page_throttled_count++;
5817 } else {
5818 #if CONFIG_SECLUDED_MEMORY
5819 if (secluded_for_filecache &&
5820 vm_page_secluded_target != 0 &&
5821 num_tasks_can_use_secluded_mem == 0 &&
5822 m_object->eligible_for_secluded &&
5823 !m->vmp_realtime) {
5824 vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5825 m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5826 vm_page_secluded_count++;
5827 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5828 vm_page_secluded_count_inuse++;
5829 assert(!m_object->internal);
5830 // vm_page_pageable_external_count++;
5831 } else
5832 #endif /* CONFIG_SECLUDED_MEMORY */
5833 vm_page_enqueue_active(m, FALSE);
5834 }
5835 m->vmp_reference = TRUE;
5836 m->vmp_no_cache = FALSE;
5837 }
5838 VM_PAGE_CHECK(m);
5839 }
5840
5841
5842 /*
5843 * vm_page_speculate:
5844 *
5845 * Put the specified page on the speculative list (if appropriate).
5846 *
5847 * The page queues must be locked.
5848 */
5849 void
5850 vm_page_speculate(
5851 vm_page_t m,
5852 boolean_t new)
5853 {
5854 struct vm_speculative_age_q *aq;
5855 vm_object_t m_object;
5856
5857 m_object = VM_PAGE_OBJECT(m);
5858
5859 VM_PAGE_CHECK(m);
5860 vm_page_check_pageable_safe(m);
5861
5862 assert(!vm_page_is_guard(m));
5863 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5864 assert(!(m->vmp_absent && !m->vmp_unusual));
5865 assert(m_object->internal == FALSE);
5866
5867 /*
5868 * if this page is currently on the pageout queue, we can't do the
5869 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5870 * and we can't remove it manually since we would need the object lock
5871 * (which is not required here) to decrement the activity_in_progress
5872 * reference which is held on the object while the page is in the pageout queue...
5873 * just let the normal laundry processing proceed
5874 */
5875 if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5876 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5877 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5878 return;
5879 }
5880
5881 vm_page_queues_remove(m, FALSE);
5882
5883 if (!VM_PAGE_WIRED(m)) {
5884 mach_timespec_t ts;
5885 clock_sec_t sec;
5886 clock_nsec_t nsec;
5887
5888 clock_get_system_nanotime(&sec, &nsec);
5889 ts.tv_sec = (unsigned int) sec;
5890 ts.tv_nsec = nsec;
5891
5892 if (vm_page_speculative_count == 0) {
5893 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5894 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5895
5896 aq = &vm_page_queue_speculative[speculative_age_index];
5897
5898 /*
5899 * set the timer to begin a new group
5900 */
5901 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5902 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
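/*
 * Worked example with a hypothetical setting of
 * vm_page_speculative_q_age_ms == 500: tv_sec = 500 / 1000 = 0 and
 * tv_nsec = (500 % 1000) * 1000 * NSEC_PER_USEC = 500,000,000 ns,
 * so the bin ages out half a second after 'ts' once
 * ADD_MACH_TIMESPEC() below folds in the current time.
 */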
5903
5904 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5905 } else {
5906 aq = &vm_page_queue_speculative[speculative_age_index];
5907
5908 if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5909 speculative_age_index++;
5910
5911 if (speculative_age_index > vm_page_max_speculative_age_q) {
5912 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5913 }
5914 if (speculative_age_index == speculative_steal_index) {
5915 speculative_steal_index = speculative_age_index + 1;
5916
5917 if (speculative_steal_index > vm_page_max_speculative_age_q) {
5918 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5919 }
5920 }
5921 aq = &vm_page_queue_speculative[speculative_age_index];
5922
5923 if (!vm_page_queue_empty(&aq->age_q)) {
5924 vm_page_speculate_ageit(aq);
5925 }
5926
5927 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5928 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5929
5930 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5931 }
5932 }
5933 vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5934 m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5935 vm_page_speculative_count++;
5936 vm_page_pageable_external_count++;
5937
5938 if (new == TRUE) {
5939 vm_object_lock_assert_exclusive(m_object);
5940
5941 m_object->pages_created++;
5942 #if DEVELOPMENT || DEBUG
5943 vm_page_speculative_created++;
5944 #endif
5945 }
5946 }
5947 VM_PAGE_CHECK(m);
5948 }
5949
5950
5951 /*
5952 * move pages from the specified aging bin to
5953 * the speculative bin that pageout_scan claims from
5954 *
5955 * The page queues must be locked.
5956 */
5957 void
5958 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5959 {
5960 struct vm_speculative_age_q *sq;
5961 vm_page_t t;
5962
5963 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5964
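/*
 * If the aged bin (sq) is currently empty, hand it aq's entire list
 * and re-point the first and last elements' links at sq's queue head;
 * otherwise splice aq's list onto the tail of the aged bin.  Either
 * way, aq is re-initialized to empty below.
 */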
5965 if (vm_page_queue_empty(&sq->age_q)) {
5966 sq->age_q.next = aq->age_q.next;
5967 sq->age_q.prev = aq->age_q.prev;
5968
5969 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5970 t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5971
5972 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5973 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5974 } else {
5975 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5976 t->vmp_pageq.next = aq->age_q.next;
5977
5978 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5979 t->vmp_pageq.prev = sq->age_q.prev;
5980
5981 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5982 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5983
5984 sq->age_q.prev = aq->age_q.prev;
5985 }
5986 vm_page_queue_init(&aq->age_q);
5987 }
5988
5989
5990 void
5991 vm_page_lru(
5992 vm_page_t m)
5993 {
5994 VM_PAGE_CHECK(m);
5995 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5996 assert(!vm_page_is_guard(m));
5997
5998 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5999
6000 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
6001 /*
6002 * we don't need to do all the other work that
6003 * vm_page_queues_remove and vm_page_enqueue_inactive
6004 * bring along for the ride
6005 */
6006 assert(!m->vmp_laundry);
6007 assert(!vm_page_is_private(m));
6008
6009 m->vmp_no_cache = FALSE;
6010
6011 vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
6012 vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
6013
6014 return;
6015 }
6016 /*
6017 * if this page is currently on the pageout queue, we can't do the
6018 * vm_page_queues_remove (which doesn't handle the pageout queue case)
6019 * and we can't remove it manually since we would need the object lock
6020 * (which is not required here) to decrement the activity_in_progress
6021 * reference which is held on the object while the page is in the pageout queue...
6022 * just let the normal laundry processing proceed
6023 */
6024 if (m->vmp_laundry || vm_page_is_private(m) ||
6025 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
6026 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
6027 VM_PAGE_WIRED(m)) {
6028 return;
6029 }
6030
6031 m->vmp_no_cache = FALSE;
6032
6033 vm_page_queues_remove(m, FALSE);
6034
6035 vm_page_enqueue_inactive(m, FALSE);
6036 }
6037
6038
6039 void
6040 vm_page_reactivate_all_throttled(void)
6041 {
6042 vm_page_t first_throttled, last_throttled;
6043 vm_page_t first_active;
6044 vm_page_t m;
6045 int extra_active_count;
6046 int extra_internal_count, extra_external_count;
6047 vm_object_t m_object;
6048
6049 if (!VM_DYNAMIC_PAGING_ENABLED()) {
6050 return;
6051 }
6052
6053 extra_active_count = 0;
6054 extra_internal_count = 0;
6055 extra_external_count = 0;
6056 vm_page_lock_queues();
6057 if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
6058 /*
6059 * Switch "throttled" pages to "active".
6060 */
6061 vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
6062 VM_PAGE_CHECK(m);
6063 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
6064
6065 m_object = VM_PAGE_OBJECT(m);
6066
6067 extra_active_count++;
6068 if (m_object->internal) {
6069 extra_internal_count++;
6070 } else {
6071 extra_external_count++;
6072 }
6073
6074 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
6075 VM_PAGE_CHECK(m);
6076 vm_page_add_to_specialq(m, FALSE);
6077 }
6078
6079 /*
6080 * Transfer the entire throttled queue to the regular LRU page queues.
6081 * We insert it at the head of the active queue, so that these pages
6082 * get re-evaluated by the LRU algorithm first, since they've been
6083 * completely out of it until now.
6084 */
6085 first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
6086 last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
6087 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
6088 if (vm_page_queue_empty(&vm_page_queue_active)) {
6089 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
6090 } else {
6091 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
6092 }
6093 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
6094 first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
6095 last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
6096
6097 #if DEBUG
6098 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
6099 #endif
6100 vm_page_queue_init(&vm_page_queue_throttled);
6101 /*
6102 * Adjust the global page counts.
6103 */
6104 vm_page_active_count += extra_active_count;
6105 vm_page_pageable_internal_count += extra_internal_count;
6106 vm_page_pageable_external_count += extra_external_count;
6107 vm_page_throttled_count = 0;
6108 }
6109 assert(vm_page_throttled_count == 0);
6110 assert(vm_page_queue_empty(&vm_page_queue_throttled));
6111 vm_page_unlock_queues();
6112 }
6113
6114
6115 /*
6116 * Move pages from the indicated local queue to the global active queue.
6117 * It's OK to fail if we're below the hard limit and force == FALSE;
6118 * the nolocks == TRUE case is to allow this function to be run on
6119 * the hibernate path.
6120 */
6121
6122 void
6123 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
6124 {
6125 struct vpl *lq;
6126 vm_page_t first_local, last_local;
6127 vm_page_t first_active;
6128 vm_page_t m;
6129 uint32_t count = 0;
6130
6131 if (vm_page_local_q == NULL) {
6132 return;
6133 }
6134
6135 lq = zpercpu_get_cpu(vm_page_local_q, lid);
6136
6137 if (nolocks == FALSE) {
6138 if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
6139 if (!vm_page_trylockspin_queues()) {
6140 return;
6141 }
6142 } else {
6143 vm_page_lockspin_queues();
6144 }
6145
6146 VPL_LOCK(&lq->vpl_lock);
6147 }
6148 if (lq->vpl_count) {
6149 /*
6150 * Switch "local" pages to "active".
6151 */
6152 assert(!vm_page_queue_empty(&lq->vpl_queue));
6153
6154 vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
6155 VM_PAGE_CHECK(m);
6156 vm_page_check_pageable_safe(m);
6157 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
6158 assert(!vm_page_is_fictitious(m));
6159
6160 if (m->vmp_local_id != lid) {
6161 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
6162 }
6163
6164 m->vmp_local_id = 0;
6165 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
6166 VM_PAGE_CHECK(m);
6167 vm_page_add_to_specialq(m, FALSE);
6168 count++;
6169 }
6170 if (count != lq->vpl_count) {
6171 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
6172 }
6173
6174 /*
6175 * Transfer the entire local queue to the regular LRU page queues.
6176 */
6177 first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
6178 last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
6179 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
6180
6181 if (vm_page_queue_empty(&vm_page_queue_active)) {
6182 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
6183 } else {
6184 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
6185 }
6186 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
6187 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
6188 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
6189
6190 vm_page_queue_init(&lq->vpl_queue);
6191 /*
6192 * Adjust the global page counts.
6193 */
6194 vm_page_active_count += lq->vpl_count;
6195 vm_page_pageable_internal_count += lq->vpl_internal_count;
6196 vm_page_pageable_external_count += lq->vpl_external_count;
6197 lq->vpl_count = 0;
6198 lq->vpl_internal_count = 0;
6199 lq->vpl_external_count = 0;
6200 }
6201 assert(vm_page_queue_empty(&lq->vpl_queue));
6202
6203 if (nolocks == FALSE) {
6204 VPL_UNLOCK(&lq->vpl_lock);
6205
6206 vm_page_balance_inactive(count / 4);
6207 vm_page_unlock_queues();
6208 }
6209 }
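/*
 * Hedged usage sketch (hypothetical; 'lid' is a placeholder CPU id),
 * contrasting the best-effort and forced variants described above:
 *
 *	vm_page_reactivate_local(lid, FALSE, FALSE);	// below the hard limit: gives up if the queues lock is contended
 *	vm_page_reactivate_local(lid, TRUE, FALSE);	// forced drain: always waits for the locks
 */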
6210
6211 /*
6212 * vm_page_part_zero_fill:
6213 *
6214 * Zero-fill a part of the page.
6215 */
6216 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
6217 void
6218 vm_page_part_zero_fill(
6219 vm_page_t m,
6220 vm_offset_t m_pa,
6221 vm_size_t len)
6222 {
6223 #if 0
6224 /*
6225 * we don't hold the page queue lock
6226 * so this check isn't safe to make
6227 */
6228 VM_PAGE_CHECK(m);
6229 #endif
6230
6231 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
6232 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
6233 #else
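/*
 * Fallback when pmap_zero_part_page() is not available: grab a scratch
 * page, zero it, copy the parts of 'm' outside [m_pa, m_pa + len) into
 * the scratch page around the zeroed range, copy the whole scratch
 * page back over 'm', and free the scratch page.
 */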
6234 vm_page_t tmp;
6235 while (1) {
6236 tmp = vm_page_grab();
6237 if (tmp == VM_PAGE_NULL) {
6238 vm_page_wait(THREAD_UNINT);
6239 continue;
6240 }
6241 break;
6242 }
6243 vm_page_zero_fill(tmp);
6246 if (m_pa != 0) {
6247 vm_page_part_copy(m, 0, tmp, 0, m_pa);
6248 }
6249 if ((m_pa + len) < PAGE_SIZE) {
6250 vm_page_part_copy(m, m_pa + len, tmp,
6251 m_pa + len, PAGE_SIZE - (m_pa + len));
6252 }
6253 vm_page_copy(tmp, m);
6254 VM_PAGE_FREE(tmp);
6255 #endif
6256 }
6257
6258 /*!
6259 * @function vm_page_zero_fill
6260 *
6261 * @abstract
6262 * Zero-fill the specified page.
6263 *
6264 * @param m the page to be zero-filled.
6265 */
6266 void
6267 vm_page_zero_fill(
6268 vm_page_t m
6269 )
6270 {
6271 int options = 0;
6272 #if 0
6273 /*
6274 * we don't hold the page queue lock
6275 * so this check isn't safe to make
6276 */
6277 VM_PAGE_CHECK(m);
6278 #endif
6279
6280 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
6281 pmap_zero_page_with_options(VM_PAGE_GET_PHYS_PAGE(m), options);
6282 }
6283
6284 /*
6285 * vm_page_part_copy:
6286 *
6287 * copy part of one page to another
6288 *
6289 * This function is currently only consumed downstream of a
6290 * vm_map_copy_overwrite(). The implementation has a simpler contract
6291 * than vm_page_copy() as there's a restricted set of cases that
6292 * are allowed to be overwriteable. If vm_map_entry_is_overwriteable()
6293 * is expanded, this function may have to be adjusted.
6294 */
6295 void
6296 vm_page_part_copy(
6297 vm_page_t src_m,
6298 vm_offset_t src_pa,
6299 vm_page_t dst_m,
6300 vm_offset_t dst_pa,
6301 vm_size_t len)
6302 {
6303 #if 0
6304 /*
6305 * we don't hold the page queue lock
6306 * so this check isn't safe to make
6307 */
6308 VM_PAGE_CHECK(src_m);
6309 VM_PAGE_CHECK(dst_m);
6310 #endif
6311
6312 /*
6313 * Copying from/into restricted pages is a security issue,
6314 * as it allows for restricted pages' policies bypass.
6315 */
6316 if (vm_page_is_restricted(src_m)) {
6317 panic("%s: cannot copy from a restricted page", __func__);
6318 }
6319
6320 if (vm_page_is_restricted(dst_m)) {
6321 panic("%s: cannot copy into a restricted page", __func__);
6322 }
6323
6324
6325 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
6326 VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
6327 }
6328
6329 /*
6330 * vm_page_copy:
6331 *
6332 * Copy one page to another
6333 */
6334
6335 int vm_page_copy_cs_validations = 0;
6336 int vm_page_copy_cs_tainted = 0;
6337
6338 void
6339 vm_page_copy(
6340 vm_page_t src_m,
6341 vm_page_t dest_m)
6342 {
6343 vm_object_t src_m_object;
6344 int options = 0;
6345
6346 src_m_object = VM_PAGE_OBJECT(src_m);
6347
6348 #if 0
6349 /*
6350 * we don't hold the page queue lock
6351 * so this check isn't safe to make
6352 */
6353 VM_PAGE_CHECK(src_m);
6354 VM_PAGE_CHECK(dest_m);
6355 #endif
6356 vm_object_lock_assert_held(src_m_object);
6357
6358 /*
6359 * Copying from/into restricted pages is a security issue,
6360 * as it allows for restricted pages' policies bypass.
6361 */
6362 if (vm_page_is_restricted(src_m)) {
6363 panic("%s: cannot copy from a restricted page", __func__);
6364 }
6365
6366 if (vm_page_is_restricted(dest_m)) {
6367 panic("%s: cannot copy into a restricted page", __func__);
6368 }
6369
6370 if (src_m_object != VM_OBJECT_NULL &&
6371 src_m_object->code_signed) {
6372 /*
6373 * We're copying a page from a code-signed object.
6374 * Whoever ends up mapping the copy page might care about
6375 * the original page's integrity, so let's validate the
6376 * source page now.
6377 */
6378 vm_page_copy_cs_validations++;
6379 vm_page_validate_cs(src_m, PAGE_SIZE, 0);
6380 #if DEVELOPMENT || DEBUG
6381 DTRACE_VM4(codesigned_copy,
6382 vm_object_t, src_m_object,
6383 vm_object_offset_t, src_m->vmp_offset,
6384 int, src_m->vmp_cs_validated,
6385 int, src_m->vmp_cs_tainted);
6386 #endif /* DEVELOPMENT || DEBUG */
6387 }
6388
6389 /*
6390 * Propagate the cs_tainted bit to the copy page. Do not propagate
6391 * the cs_validated bit.
6392 */
6393 dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
6394 dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
6395 if (dest_m->vmp_cs_tainted) {
6396 vm_page_copy_cs_tainted++;
6397 }
6398
6399
6400 dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
6401 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m), options);
6402 }
6403
6404 #if MACH_ASSERT
6405 static void
6406 _vm_page_print(
6407 vm_page_t p)
6408 {
6409 printf("vm_page %p: \n", p);
6410 printf(" pageq: next=%p prev=%p\n",
6411 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
6412 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
6413 printf(" listq: next=%p prev=%p\n",
6414 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
6415 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
6416 printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
6417 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
6418 printf(" wire_count=%u\n", p->vmp_wire_count);
6419 printf(" q_state=%u\n", p->vmp_q_state);
6420
6421 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
6422 (p->vmp_laundry ? "" : "!"),
6423 (p->vmp_reference ? "" : "!"),
6424 (p->vmp_gobbled ? "" : "!"),
6425 (vm_page_is_private(p) ? "" : "!"));
6426 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
6427 (p->vmp_busy ? "" : "!"),
6428 (p->vmp_wanted ? "" : "!"),
6429 (p->vmp_tabled ? "" : "!"),
6430 (vm_page_is_fictitious(p) ? "" : "!"),
6431 (p->vmp_pmapped ? "" : "!"),
6432 (p->vmp_wpmapped ? "" : "!"));
6433 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
6434 (p->vmp_free_when_done ? "" : "!"),
6435 (p->vmp_absent ? "" : "!"),
6436 (VMP_ERROR_GET(p) ? "" : "!"),
6437 (p->vmp_dirty ? "" : "!"),
6438 (p->vmp_cleaning ? "" : "!"),
6439 (p->vmp_precious ? "" : "!"),
6440 (p->vmp_clustered ? "" : "!"));
6441 printf(" %soverwriting, %srestart, %sunusual\n",
6442 (p->vmp_overwriting ? "" : "!"),
6443 (p->vmp_restart ? "" : "!"),
6444 (p->vmp_unusual ? "" : "!"));
6445 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
6446 p->vmp_cs_validated,
6447 p->vmp_cs_tainted,
6448 p->vmp_cs_nx,
6449 (p->vmp_no_cache ? "" : "!"));
6450
6451 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
6452 }
6453
6454 /*
6455 * Check that the list of pages is ordered by
6456 * ascending physical address and has no holes.
6457 */
6458 static int
6459 vm_page_verify_contiguous(
6460 vm_page_t pages,
6461 unsigned int npages)
6462 {
6463 vm_page_t m;
6464 unsigned int page_count;
6465 vm_offset_t prev_addr;
6466
6467 prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
6468 page_count = 1;
6469 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
6470 if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
6471 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
6472 m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
6473 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
6474 panic("vm_page_verify_contiguous: not contiguous!");
6475 }
6476 prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
6477 ++page_count;
6478 }
6479 if (page_count != npages) {
6480 printf("pages %p actual count 0x%x but requested 0x%x\n",
6481 pages, page_count, npages);
6482 panic("vm_page_verify_contiguous: count error");
6483 }
6484 return 1;
6485 }
6486
6487
6488 /*
6489 * Check the free lists for proper length etc.
6490 */
6491 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
6492 static unsigned int
6493 vm_page_verify_free_list(
6494 vm_page_queue_head_t *vm_page_queue,
6495 unsigned int color,
6496 vm_page_t look_for_page,
6497 boolean_t expect_page)
6498 {
6499 unsigned int npages;
6500 vm_page_t m;
6501 vm_page_t prev_m;
6502 boolean_t found_page;
6503
6504 if (!vm_page_verify_this_free_list_enabled) {
6505 return 0;
6506 }
6507
6508 found_page = FALSE;
6509 npages = 0;
6510 prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
6511
6512 vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
6513 if (m == look_for_page) {
6514 found_page = TRUE;
6515 }
6516 if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
6517 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
6518 color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
6519 }
6520 if (!m->vmp_busy) {
6521 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
6522 color, npages, m);
6523 }
6524 if (color != (unsigned int) -1) {
6525 if (VM_PAGE_GET_COLOR(m) != color) {
6526 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
6527 color, npages, m, VM_PAGE_GET_COLOR(m), color);
6528 }
6529 if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
6530 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
6531 color, npages, m, m->vmp_q_state);
6532 }
6533 } else {
6534 if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
6535 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
6536 npages, m, m->vmp_q_state);
6537 }
6538 }
6539 ++npages;
6540 prev_m = m;
6541 }
6542 if (look_for_page != VM_PAGE_NULL) {
6543 unsigned int other_color;
6544
6545 if (expect_page && !found_page) {
6546 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
6547 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
6548 _vm_page_print(look_for_page);
6549 for (other_color = 0;
6550 other_color < vm_colors;
6551 other_color++) {
6552 if (other_color == color) {
6553 continue;
6554 }
6555 vm_page_verify_free_list(&vm_page_queue_free.vmpfq_queues[other_color].qhead,
6556 other_color, look_for_page, FALSE);
6557 }
6558 #if XNU_VM_HAS_LOPAGE
6559 if (color == (unsigned int) -1) {
6560 vm_page_verify_free_list(&vm_lopage_queue_free,
6561 (unsigned int) -1, look_for_page, FALSE);
6562 }
6563 #endif /* XNU_VM_HAS_LOPAGE */
6564 panic("vm_page_verify_free_list(color=%u)", color);
6565 }
6566 if (!expect_page && found_page) {
6567 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
6568 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
6569 }
6570 }
6571 return npages;
6572 }
6573
6574 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
6575 static void
6576 vm_page_verify_free_lists( void )
6577 {
6578 unsigned int color, npages, nlopages;
6579 boolean_t toggle = TRUE;
6580
6581 if (!vm_page_verify_all_free_lists_enabled) {
6582 return;
6583 }
6584
6585 npages = 0;
6586 nlopages = 0;
6587
6588 vm_free_page_lock();
6589
6590 if (vm_page_verify_this_free_list_enabled == TRUE) {
6591 /*
6592 * This variable has been set globally for extra checking of
6593 * each free list Q. Since we didn't set it, we don't own it
6594 * and we shouldn't toggle it.
6595 */
6596 toggle = FALSE;
6597 }
6598
6599 if (toggle == TRUE) {
6600 vm_page_verify_this_free_list_enabled = TRUE;
6601 }
6602
6603 for (color = 0; color < vm_colors; color++) {
6604 npages += vm_page_verify_free_list(&vm_page_queue_free.vmpfq_queues[color].qhead,
6605 color, VM_PAGE_NULL, FALSE);
6606 }
6607 #if XNU_VM_HAS_LOPAGE
6608 nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
6609 (unsigned int) -1,
6610 VM_PAGE_NULL, FALSE);
6611 #endif /* XNU_VM_HAS_LOPAGE */
6612 if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
6613 panic("vm_page_verify_free_lists: "
6614 "npages %u free_count %d nlopages %u lo_free_count %u",
6615 npages, vm_page_free_count, nlopages, vm_lopage_free_count);
6616 }
6617
6618 if (toggle == TRUE) {
6619 vm_page_verify_this_free_list_enabled = FALSE;
6620 }
6621
6622 vm_free_page_unlock();
6623 }
6624
6625 #endif /* MACH_ASSERT */
6626
6627 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
6628
6629 /*
6630 * CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
6631 */
6632
6633 /*
6634 * Helper function used to determine if a page can be relocated
6635 * A page is relocatable if it is in a stable, non-transient state
6636 * and if the page being relocated is compatible with the reason for relocation.
6637 * The page queue lock must be held, and the object lock too, if the page
6638 * is in an object.
6639 */
6640 boolean_t
6641 vm_page_is_relocatable(vm_page_t m, vm_relocate_reason_t reloc_reason)
6642 {
6643
6644 if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
6645 m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
6646 /*
6647 * Page is in a transient state
6648 * or a state we don't want to deal with.
6649 */
6650 return FALSE;
6651 } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
6652 (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
6653 #if XNU_VM_HAS_LOPAGE
6654 (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
6655 #endif /* XNU_VM_HAS_LOPAGE */
6656 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6657 /*
6658 * Page needs to be on one of our queues (other than the pageout or special
6659 * free queues) or it needs to belong to the compressor pool (which is now
6660 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
6661 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
6662 * locks we hold at this point...
6663 */
6664 return FALSE;
6665 } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
6666 (!m->vmp_tabled || m->vmp_busy)) {
6667 /*
6668 * pages on the free list are always 'busy'
6669 * so we couldn't test for 'busy' in the check
6670 * for the transient states... pages that are
6671 * 'free' are never 'tabled', so we also couldn't
6672 * test for 'tabled'. So we check here to make
6673 * sure that a non-free page is not busy and is
6674 * tabled on an object...
6675 */
6676 return FALSE;
6677 }
6678
6679 /*
6680 * Lastly, check the page against the relocation reason; the page may
6681 * be in a relocatable state, but not be a page we WANT to relocate for
6682 * the caller's use case.
6683 */
6684 switch (reloc_reason) {
6685 case VM_RELOCATE_REASON_CONTIGUOUS:
6686 {
6687 break;
6688 }
6689
6690 default:
6691 {
6692 panic("Invalid relocation reason %u", reloc_reason);
6693 __builtin_unreachable();
6694 }
6695 }
6696
6697 return TRUE;
6698 }
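/*
 * Hedged sketch of how this helper pairs with vm_page_relocate() below
 * (hypothetical caller with placeholder names; the page queues lock
 * and, when the page is in an object, the object lock are assumed to
 * already be held):
 *
 *	if (vm_page_is_relocatable(m, VM_RELOCATE_REASON_CONTIGUOUS)) {
 *		kr = vm_page_relocate(m, &compressed_pages,
 *		    VM_RELOCATE_REASON_CONTIGUOUS, NULL);
 *	}
 */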
6699
6700 /*
6701 * Free up the given page by possibly relocating its contents to a new page.
6702 * If the page is in an object, the object lock must be held.
6703 *
6704 * Whether or not the page is considered relocatable is contingent on the
6705 * reason it is being relocated.
6706 *
6707 * Return the new page back to the caller if requested, as done in
6708 * vm_object_iopl_wire_full().
6709 *
6710 * The VM page queues lock must also be held.
6711 *
6712 * @returns
6713 * - KERN_SUCCESS if the relocation was successful.
6714 * - KERN_INVALID_OBJECT if @c m1's object is VM_OBJECT_NULL.
6715 * - KERN_FAILURE if the relocation failed due to @c m1's state.
6716 * - KERN_RESOURCE_SHORTAGE if no page could be allocated to relocate @c m1.
6717 */
6718 kern_return_t
6719 vm_page_relocate(
6720 vm_page_t m1,
6721 int *compressed_pages,
6722 vm_relocate_reason_t reloc_reason,
6723 vm_page_t* new_page)
6724 {
6725 int refmod = 0;
6726 vm_object_t object = VM_PAGE_OBJECT(m1);
6727 kern_return_t kr;
6728
6729 switch (reloc_reason) {
6730 case VM_RELOCATE_REASON_CONTIGUOUS:
6731 {
6732 break;
6733 }
6734 default:
6735 {
6736 panic("Unrecognized relocation reason %u\n", reloc_reason);
6737 break;
6738 }
6739 }
6740
6741 if (object == VM_OBJECT_NULL) {
6742 return KERN_INVALID_OBJECT;
6743 }
6744
6745 vm_object_lock_assert_held(object);
6746
6747 if (VM_PAGE_WIRED(m1) ||
6748 m1->vmp_gobbled ||
6749 m1->vmp_laundry ||
6750 m1->vmp_wanted ||
6751 m1->vmp_cleaning ||
6752 m1->vmp_overwriting ||
6753 m1->vmp_free_when_done ||
6754 m1->vmp_busy ||
6755 m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6756 return KERN_FAILURE;
6757 }
6758
6759 boolean_t disconnected = FALSE;
6760 boolean_t reusable = FALSE;
6761
6762 /*
6763 * Pages from reusable objects can be reclaimed directly.
6764 */
6765 if ((m1->vmp_reusable || object->all_reusable) &&
6766 m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6767 !m1->vmp_reference) {
6768 /*
6769 * reusable page...
6770 */
6771
6772 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6773 disconnected = TRUE;
6774 if (refmod == 0) {
6775 /*
6776 * ... not reused: can steal without relocating contents.
6777 */
6778 reusable = TRUE;
6779 }
6780 }
6781
6782 if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6783 vm_grab_options_t grab_options = VM_PAGE_GRAB_Q_LOCK_HELD;
6784 vm_object_offset_t offset;
6785 int copy_page_options = 0;
6786
6787 /* page is not reusable, we need to allocate a new page
6788 * and move its contents there.
6789 */
6790 vm_page_t m2 = vm_page_grab_options(grab_options);
6791
6792 if (m2 == VM_PAGE_NULL) {
6793 return KERN_RESOURCE_SHORTAGE;
6794 }
6795
6796 if (!disconnected) {
6797 if (m1->vmp_pmapped) {
6798 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6799 } else {
6800 refmod = 0;
6801 }
6802 }
6803
6804 /* copy the page's contents */
6805 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2), copy_page_options);
6806
6807 /* copy the page's state */
6808 assert(!VM_PAGE_WIRED(m1));
6809 assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6810 assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6811 assert(!m1->vmp_laundry);
6812 m2->vmp_reference = m1->vmp_reference;
6813 assert(!m1->vmp_gobbled);
6814 m2->vmp_no_cache = m1->vmp_no_cache;
6815 m2->vmp_xpmapped = 0;
6816 assert(!m1->vmp_busy);
6817 assert(!m1->vmp_wanted);
6818 assert(vm_page_is_canonical(m1));
6819 m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6820 m2->vmp_wpmapped = m1->vmp_wpmapped;
6821 assert(!m1->vmp_free_when_done);
6822 m2->vmp_absent = m1->vmp_absent;
6823 m2->vmp_error = VMP_ERROR_GET(m1);
6824 m2->vmp_dirty = m1->vmp_dirty;
6825 assert(!m1->vmp_cleaning);
6826 m2->vmp_precious = m1->vmp_precious;
6827 m2->vmp_clustered = m1->vmp_clustered;
6828 assert(!m1->vmp_overwriting);
6829 m2->vmp_restart = m1->vmp_restart;
6830 m2->vmp_unusual = m1->vmp_unusual;
6831 m2->vmp_cs_validated = m1->vmp_cs_validated;
6832 m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6833 m2->vmp_cs_nx = m1->vmp_cs_nx;
6834
6835 m2->vmp_realtime = m1->vmp_realtime;
6836 m1->vmp_realtime = false;
6837
6838 /*
6839 * If m1 had really been reusable,
6840 * we would have just stolen it, so
6841 * let's not propagate its "reusable"
6842 * bit and assert that m2 is not
6843 * marked as "reusable".
6844 */
6845 // m2->vmp_reusable = m1->vmp_reusable;
6846 assert(!m2->vmp_reusable);
6847
6848 // assert(!m1->vmp_lopage);
6849
6850 if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6851 m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6852 /*
6853 * We just grabbed m2 up above and so it isn't
6854 * going to be on any special Q as yet and so
6855 * we don't need to 'remove' it from the special
6856 * queues. Just resetting the state should be enough.
6857 */
6858 m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6859 }
6860
6861 /*
6862 * page may need to be flushed if
6863 * it is marshalled into a UPL
6864 * that is going to be used by a device
6865 * that doesn't support coherency
6866 */
6867 m2->vmp_written_by_kernel = TRUE;
6868
6869 /*
6870 * make sure we clear the ref/mod state
6871 * from the pmap layer... else we risk
6872 * inheriting state from the last time
6873 * this page was used...
6874 */
6875 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6876 VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6877
6878 if (refmod & VM_MEM_REFERENCED) {
6879 m2->vmp_reference = TRUE;
6880 }
6881 if (refmod & VM_MEM_MODIFIED) {
6882 SET_PAGE_DIRTY(m2, TRUE);
6883 }
6884 offset = m1->vmp_offset;
6885
6886 /*
6887 * completely cleans up the state
6888 * of the page so that it is ready
6889 * to be put onto the free list, or
6890 * for this purpose it looks like it
6891 * just came off of the free list
6892 */
6893 vm_page_free_prepare(m1);
6894
6895 /*
6896 * now put the substitute page on the object
6897 */
6898 vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6899 TRUE, FALSE, FALSE, NULL);
6900
6901 /*
6902 * Return the relocated vm_page_t if the caller wants to know.
6903 */
6904 if (new_page) {
6905 *new_page = m2;
6906 }
6907
6908 if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6909 m2->vmp_pmapped = TRUE;
6910 m2->vmp_wpmapped = TRUE;
6911
6912 kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6913 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, TRUE);
6914
6915 assert(kr == KERN_SUCCESS);
6916
6917 if (compressed_pages) {
6918 ++*compressed_pages;
6919 }
6920 } else {
6921 /* relocated page was not used by the compressor
6922 * put it on either the active or inactive lists */
6923 if (m2->vmp_reference) {
6924 vm_page_activate(m2);
6925 } else {
6926 vm_page_deactivate(m2);
6927 }
6928 }
6929
6930 /* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6931 vm_page_wakeup_done(object, m2);
6932 } else {
6933 assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6934
6935 /*
6936 * completely cleans up the state
6937 * of the page so that it is ready
6938 * to be put onto the free list, or
6939 * for this purpose it looks like it
6940 * just came off of the free list
6941 */
6942 vm_page_free_prepare(m1);
6943
6944 if (new_page) {
6945 vm_page_t m2;
6946 vm_object_offset_t offset;
6947 vm_grab_options_t grab_options = VM_PAGE_GRAB_Q_LOCK_HELD;
6948
6949 /* The caller still wanted a page, so let's give them a new one. */
6950 offset = m1->vmp_offset;
6951 m2 = vm_page_grab_options(grab_options);
6952
6953 if (m2 == VM_PAGE_NULL) {
6954 return KERN_RESOURCE_SHORTAGE;
6955 }
6956
6957 /*
6958 * make sure we clear the ref/mod state
6959 * from the pmap layer... else we risk
6960 * inheriting state from the last time
6961 * this page was used...
6962 */
6963 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6964 VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6965
6966 offset = m1->vmp_offset;
6967
6968 /*
6969 * now put the substitute page on the object
6970 */
6971 vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6972 TRUE, FALSE, FALSE, NULL);
6973
6974 *new_page = m2;
6975 }
6976 }
6977
6978 /* we're done here */
6979 return KERN_SUCCESS;
6980 }
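/*
 * Illustrative handling of the return codes documented above
 * (hypothetical caller, placeholder names):
 *
 *	kr = vm_page_relocate(m1, &compressed_pages,
 *	    VM_RELOCATE_REASON_CONTIGUOUS, NULL);
 *	switch (kr) {
 *	case KERN_SUCCESS:
 *		break;			// m1 was reclaimed or replaced
 *	case KERN_RESOURCE_SHORTAGE:
 *		break;			// no substitute page; back off and retry later
 *	default:
 *		break;			// KERN_INVALID_OBJECT / KERN_FAILURE: skip this page
 *	}
 */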
6981
6982 /*
6983 * CONTIGUOUS PAGE ALLOCATION
6984 *
6985 * Find a region large enough to contain at least n pages
6986 * of contiguous physical memory.
6987 *
6988 * This is done by traversing the vm_page_t array in a linear fashion
6989 * we assume that the vm_page_t array has the available physical pages in an
6990 * ordered, ascending list... this is currently true of all our implementations
6991 * and must remain so... there can be 'holes' in the array... we also can
6992 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6993 * which used to happen via 'vm_page_convert'... that function was no longer
6994 * being called and was removed...
6995 *
6996 * The basic flow consists of stabilizing some of the interesting state of
6997 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6998 * sweep at the beginning of the array looking for pages that meet our criteria
6999 * for a 'stealable' page... currently we are pretty conservative... if the page
7000 * meets these criteria and is physically contiguous to the previous page in the 'run'
7001 * we keep developing it. If we hit a page that doesn't fit, we reset our state
7002 * and start to develop a new run... if at this point we've already considered
7003 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
7004 * and mutex_pause (which will yield the processor), to keep the latency low w/r
7005 * to other threads trying to acquire free pages (or move pages from q to q),
7006 * and then continue from the spot we left off... we only make 1 pass through the
7007 * array. Once we have a 'run' that is long enough, we'll go into the loop
7008 * which steals the pages from the queues they're currently on... pages on the free
7009 * queue can be stolen directly... pages that are on any of the other queues
7010 * must be removed from the object they are tabled on... this requires taking the
7011 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
7012 * or if the state of the page behind the vm_object lock is no longer viable, we'll
7013 * dump the pages we've currently stolen back to the free list, and pick up our
7014 * scan from the point where we aborted the 'current' run.
7015 *
7016 *
7017 * Requirements:
7018 * - neither vm_page_queue nor vm_free_list lock can be held on entry
7019 *
7020 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
7021 *
7022 * Algorithm:
7023 */
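/*
 * In outline (summarizing the description above):
 *
 *	1. scan vm_pages[] from where the previous search left off,
 *	   growing a run of physically contiguous, 'stealable' pages;
 *	2. reset the run whenever a page doesn't qualify, dropping the
 *	   locks and yielding after MAX_CONSIDERED_BEFORE_YIELD pages;
 *	3. once the run is long enough, pull its free pages off the free
 *	   queues, then relocate/steal the in-use ones under a try-lock
 *	   of their object, dumping the run and rescanning on failure.
 */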
7024
7025 #define MAX_CONSIDERED_BEFORE_YIELD 1000
7026
7027
7028 #define RESET_STATE_OF_RUN() \
7029 MACRO_BEGIN \
7030 prevcontaddr = -2; \
7031 start_pnum = -1; \
7032 free_considered = 0; \
7033 substitute_needed = 0; \
7034 npages = 0; \
7035 MACRO_END
7036
7037 /*
7038 * Can we steal in-use (i.e. not free) pages when searching for
7039 * physically-contiguous pages ?
7040 */
7041 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
7042
7043 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
7044 #if DEBUG
7045 int vm_page_find_contig_debug = 0;
7046 #endif
7047
7048 static vm_page_t
7049 vm_page_find_contiguous(
7050 unsigned int contig_pages,
7051 ppnum_t max_pnum,
7052 ppnum_t pnum_mask,
7053 boolean_t wire,
7054 int flags)
7055 {
7056 vm_page_list_t list = { };
7057 ppnum_t prevcontaddr = 0;
7058 ppnum_t start_pnum = 0;
7059 unsigned int npages = 0, considered = 0, scanned = 0;
7060 unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
7061 unsigned int idx_last_contig_page_found = 0;
7062 int free_considered = 0, free_available = 0;
7063 int substitute_needed = 0;
7064 int zone_gc_called = 0;
7065 boolean_t wrapped;
7066 kern_return_t kr;
7067 #if DEBUG
7068 clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
7069 clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
7070 #endif
7071
7072 int yielded = 0;
7073 int dumped_run = 0;
7074 int stolen_pages = 0;
7075 int compressed_pages = 0;
7076
7077
7078 if (contig_pages == 0) {
7079 return VM_PAGE_NULL;
7080 }
7081
7082 full_scan_again:
7083
7084 #if MACH_ASSERT
7085 vm_page_verify_free_lists();
7086 #endif
7087 #if DEBUG
7088 clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
7089 #endif
7090 PAGE_REPLACEMENT_ALLOWED(TRUE);
7091
7092 #if XNU_VM_HAS_DELAYED_PAGES
7093 /*
7094 * If there are still delayed pages, try to free up some that match.
7095 */
7096 if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
7097 vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
7098 }
7099 #endif /* XNU_VM_HAS_DELAYED_PAGES */
7100
7101 vm_page_lock_queues();
7102 vm_free_page_lock();
7103
7104 RESET_STATE_OF_RUN();
7105
7106 scanned = 0;
7107 considered = 0;
7108 free_available = vm_page_free_count - vm_page_free_reserved;
7109
7110 wrapped = FALSE;
7111
7112 if (flags & KMA_LOMEM) {
7113 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
7114 } else {
7115 idx_last_contig_page_found = vm_page_find_contiguous_last_idx;
7116 }
7117
7118 orig_last_idx = idx_last_contig_page_found;
7119 last_idx = orig_last_idx;
7120
7121 for (page_idx = last_idx, start_idx = last_idx;
7122 npages < contig_pages && page_idx < vm_pages_count;
7123 page_idx++) {
7124 vm_page_t m = NULL;
7125
7126 retry:
7127 if (wrapped &&
7128 npages == 0 &&
7129 page_idx >= orig_last_idx) {
7130 /*
7131 * We're back where we started and we haven't
7132 * found any suitable contiguous range. Let's
7133 * give up.
7134 */
7135 break;
7136 }
7137 scanned++;
7138 m = vm_page_get(page_idx);
7139
7140 assert(vm_page_is_canonical(m));
7141
7142 if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
7143 /* no more low pages... */
7144 break;
7145 }
7146 if (!npages & ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
7147 /*
7148 * not aligned
7149 */
7150 RESET_STATE_OF_RUN();
7151 } else if (!vm_page_is_relocatable(m,
7152 VM_RELOCATE_REASON_CONTIGUOUS)) {
7153 /*
7154 * page is not relocatable */
7155 RESET_STATE_OF_RUN();
7156 } else {
7157 if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
7158 if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
7159 RESET_STATE_OF_RUN();
7160 goto did_consider;
7161 } else {
7162 npages = 1;
7163 start_idx = page_idx;
7164 start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
7165 }
7166 } else {
7167 npages++;
7168 }
7169 prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
7170
7171 VM_PAGE_CHECK(m);
7172 if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
7173 free_considered++;
7174 } else {
7175 /*
7176 * This page is not free.
7177 * If we can't steal used pages,
7178 * we have to give up this run
7179 * and keep looking.
7180 * Otherwise, we might need to
7181 * move the contents of this page
7182 * into a substitute page.
7183 */
7184 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
7185 if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
7186 substitute_needed++;
7187 }
7188 #else
7189 RESET_STATE_OF_RUN();
7190 #endif
7191 }
7192
7193 if ((free_considered + substitute_needed) > free_available) {
7194 /*
7195 * if we let this run continue
7196 * we will end up dropping the vm_page_free_count
7197 * below the reserve limit... we need to abort
7198 * this run, but we can at least re-consider this
7199 * page... thus the jump back to 'retry'
7200 */
7201 RESET_STATE_OF_RUN();
7202
7203 if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
7204 considered++;
7205 goto retry;
7206 }
7207 /*
7208 * free_available == 0
7209 * so can't consider any free pages... if
7210 * we went to retry in this case, we'd
7211 * get stuck looking at the same page
7212 * w/o making any forward progress
7213 * we also want to take this path if we've already
7214 * reached our limit that controls the lock latency
7215 */
7216 }
7217 }
7218 did_consider:
7219 if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
7220 PAGE_REPLACEMENT_ALLOWED(FALSE);
7221
7222 vm_free_page_unlock();
7223 vm_page_unlock_queues();
7224
7225 mutex_pause(0);
7226
7227 PAGE_REPLACEMENT_ALLOWED(TRUE);
7228
7229 vm_page_lock_queues();
7230 vm_free_page_lock();
7231
7232 RESET_STATE_OF_RUN();
7233 /*
7234 * reset our free page limit since we
7235 * dropped the lock protecting the vm_page_free_queue
7236 */
7237 free_available = vm_page_free_count - vm_page_free_reserved;
7238 considered = 0;
7239
7240 yielded++;
7241
7242 goto retry;
7243 }
7244 considered++;
7245 } /* main for-loop end */
7246
7247 if (npages != contig_pages) {
7248 if (!wrapped) {
7249 /*
7250 * We didn't find a contiguous range but we didn't
7251 * start from the very first page.
7252 * Start again from the very first page.
7253 */
7254 RESET_STATE_OF_RUN();
7255 if (flags & KMA_LOMEM) {
7256 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
7257 } else {
7258 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
7259 }
7260 last_idx = 0;
7261 page_idx = last_idx;
7262 wrapped = TRUE;
7263 goto retry;
7264 }
7265 vm_free_page_unlock();
7266 } else {
7267 vm_page_t m1;
7268 unsigned int cur_idx;
7269 unsigned int tmp_start_idx;
7270 vm_object_t locked_object = VM_OBJECT_NULL;
7271 bool abort_run = false;
7272
7273 assert(page_idx - start_idx == contig_pages);
7274
7275 tmp_start_idx = start_idx;
7276
7277 /*
7278 * first pass through to pull the free pages
7279 * off of the free queue so that in case we
7280 * need substitute pages, we won't grab any
7281 * of the free pages in the run... we'll clear
7282 * the 'free' bit in the 2nd pass, and even in
7283 * an abort_run case, we'll collect all of the
7284 * free pages in this run and return them to the free list
7285 */
7286 while (start_idx < page_idx) {
7287 vm_grab_options_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7288
7289 m1 = vm_page_get(start_idx++);
7290
7291 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
7292 assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
7293 #endif
7294 if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
7295 vm_page_free_queue_steal(options, m1);
7296 }
7297 }
7298 if (flags & KMA_LOMEM) {
7299 vm_page_lomem_find_contiguous_last_idx = page_idx;
7300 } else {
7301 vm_page_find_contiguous_last_idx = page_idx;
7302 }
7303
7304 /*
7305 * we can drop the free queue lock at this point since
7306 * we've pulled any 'free' candidates off of the list
7307 * we need it dropped so that we can do a vm_page_grab
7308 * when substituting for pmapped/dirty pages
7309 */
7310 vm_free_page_unlock();
7311
7312 start_idx = tmp_start_idx;
7313 cur_idx = page_idx - 1;
7314
7315 while (start_idx++ < page_idx) {
7316 /*
7317 * must go through the list from back to front
7318 * so that the page list is created in the
7319 * correct order - low -> high phys addresses
7320 */
7321 m1 = vm_page_get(cur_idx--);
7322
7323 if (m1->vmp_object == 0) {
7324 /*
7325 * page has already been removed from
7326 * the free list in the 1st pass
7327 */
7328 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
7329 assert(m1->vmp_offset == (vm_object_offset_t) -1);
7330 assert(m1->vmp_busy);
7331 assert(!m1->vmp_wanted);
7332 assert(!m1->vmp_laundry);
7333 } else {
7334 /*
7335 * try to relocate/steal the page
7336 */
7337 if (abort_run) {
7338 continue;
7339 }
7340
7341 assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
7342
7343 vm_object_t object = VM_PAGE_OBJECT(m1);
7344
7345 if (object != locked_object) {
7346 if (locked_object) {
7347 vm_object_unlock(locked_object);
7348 locked_object = VM_OBJECT_NULL;
7349 }
7350 if (vm_object_lock_try(object)) {
7351 locked_object = object;
7352 } else {
7353 /* object must be locked to relocate its pages */
7354 tmp_start_idx = cur_idx;
7355 abort_run = true;
7356 continue;
7357 }
7358 }
7359
7360 kr = vm_page_relocate(m1, &compressed_pages, VM_RELOCATE_REASON_CONTIGUOUS, NULL);
7361 if (kr != KERN_SUCCESS) {
7362 if (locked_object) {
7363 vm_object_unlock(locked_object);
7364 locked_object = VM_OBJECT_NULL;
7365 }
7366 tmp_start_idx = cur_idx;
7367 abort_run = true;
7368 continue;
7369 }
7370
7371 stolen_pages++;
7372 }
7373
7374 /* m1 is ours at this point ... */
7375
7376 if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
7377 /*
7378 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
7379 * change it for pages marked as used-by-compressor.
7380 */
7381 vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
7382 }
7383 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
7384 vm_page_list_push(&list, m1);
7385 }
7386
7387 if (locked_object) {
7388 vm_object_unlock(locked_object);
7389 locked_object = VM_OBJECT_NULL;
7390 }
7391
7392 if (abort_run) {
7393 /*
7394 * want the index of the last
7395 * page in this run that was
7396 * successfully 'stolen', so back
7397 * it up 1 for the auto-decrement on use
7398 * and 1 more to bump back over this page
7399 */
7400 page_idx = tmp_start_idx + 2;
7401 if (page_idx >= vm_pages_count) {
7402 if (wrapped) {
7403 if (list.vmpl_count) {
7404 vm_page_unlock_queues();
7405 vm_page_free_list(list.vmpl_head, FALSE);
7406 vm_page_lock_queues();
7407 list = (vm_page_list_t){ };
7408 }
7409 dumped_run++;
7410 goto done_scanning;
7411 }
7412 page_idx = last_idx = 0;
7413 wrapped = TRUE;
7414 }
7415 abort_run = false;
7416
7417 /*
7418 * the run was aborted... resume the scan just past
7419 * the last page we were able to steal, or from the
7420 * very first page if we had to wrap around
7421 */
7422 RESET_STATE_OF_RUN();
7423
7424 if (flags & KMA_LOMEM) {
7425 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
7426 } else {
7427 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
7428 }
7429
7430 last_idx = page_idx;
7431
7432 if (list.vmpl_count) {
7433 vm_page_unlock_queues();
7434 vm_page_free_list(list.vmpl_head, FALSE);
7435 vm_page_lock_queues();
7436 list = (vm_page_list_t){ };
7437 }
7438 dumped_run++;
7439
7440 vm_free_page_lock();
7441 /*
7442 * reset our free page limit since we
7443 * dropped the lock protecting the vm_page_free_queue
7444 */
7445 free_available = vm_page_free_count - vm_page_free_reserved;
7446 goto retry;
7447 }
7448
7449 vm_page_list_foreach(m1, list) {
7450 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
7451 assert(m1->vmp_wire_count == 0);
7452
7453 if (wire == TRUE) {
7454 m1->vmp_wire_count++;
7455 m1->vmp_q_state = VM_PAGE_IS_WIRED;
7456
7457 } else {
7458 m1->vmp_gobbled = TRUE;
7459 }
7460 }
7461 if (wire == FALSE) {
7462 vm_page_gobble_count += npages;
7463 }
7464
7465 /*
7466 * gobbled pages are also counted as wired pages
7467 */
7468 vm_page_wire_count += npages;
7469
7470 assert(vm_page_verify_contiguous(list.vmpl_head, npages));
7471 }
7472 done_scanning:
7473 PAGE_REPLACEMENT_ALLOWED(FALSE);
7474
7475 vm_page_unlock_queues();
7476
7477 #if DEBUG
7478 clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
7479
7480 tv_end_sec -= tv_start_sec;
7481 if (tv_end_usec < tv_start_usec) {
7482 tv_end_sec--;
7483 tv_end_usec += 1000000;
7484 }
7485 tv_end_usec -= tv_start_usec;
7486 if (tv_end_usec >= 1000000) {
7487 tv_end_sec++;
7488 tv_end_usec -= 1000000;
7489 }
7490 if (vm_page_find_contig_debug) {
7491 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
7492 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
7493 (long)tv_end_sec, tv_end_usec, orig_last_idx,
7494 scanned, yielded, dumped_run, stolen_pages, compressed_pages);
7495 }
7496
7497 #endif
7498 #if MACH_ASSERT
7499 vm_page_verify_free_lists();
7500 #endif
7501 if (list.vmpl_count == 0 && zone_gc_called < 2) {
7502 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
7503 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
7504 scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
7505
7506 if (consider_buffer_cache_collect != NULL) {
7507 (void)(*consider_buffer_cache_collect)(1);
7508 }
7509
7510 zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
7511
7512 zone_gc_called++;
7513
7514 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
7515 goto full_scan_again;
7516 }
7517
7518 return list.vmpl_head;
7519 }
7520
7521 /*
7522 * Allocate a list of contiguous, wired pages.
7523 */
7524 kern_return_t
7525 cpm_allocate(
7526 vm_size_t size,
7527 vm_page_t *list,
7528 ppnum_t max_pnum,
7529 ppnum_t pnum_mask,
7530 boolean_t wire,
7531 int flags)
7532 {
7533 vm_page_t pages;
7534 unsigned int npages;
7535
7536 if (size % PAGE_SIZE != 0) {
7537 return KERN_INVALID_ARGUMENT;
7538 }
7539
7540 npages = (unsigned int) (size / PAGE_SIZE);
7541 if (npages != size / PAGE_SIZE) {
7542 /* 32-bit overflow */
7543 return KERN_INVALID_ARGUMENT;
7544 }
7545
7546 /*
7547 * Obtain a pointer to a subset of the free
7548 * list large enough to satisfy the request;
7549 * the region will be physically contiguous.
7550 */
7551 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
7552
7553 if (pages == VM_PAGE_NULL) {
7554 return KERN_NO_SPACE;
7555 }
7556 /*
7557 * determine need for wakeups
7558 */
7559 if (vm_page_free_count < vm_page_free_min) {
7560 vm_free_page_lock();
7561 if (vm_pageout_running == FALSE) {
7562 vm_free_page_unlock();
7563 thread_wakeup((event_t) &vm_page_free_wanted);
7564 } else {
7565 vm_free_page_unlock();
7566 }
7567 }
7568
7569 VM_CHECK_MEMORYSTATUS;
7570
7571 /*
7572 * The CPM pages should now be available and
7573 * ordered by ascending physical address.
7574 */
7575 assert(vm_page_verify_contiguous(pages, npages));
7576
7577 if (flags & KMA_ZERO) {
7578 for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
7579 vm_page_zero_fill(m);
7582 }
7583 }
7584
7585 *list = pages;
7586 return KERN_SUCCESS;
7587 }
7588
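/*
 * Illustrative sketch only (not part of the original source): how a caller
 * might use cpm_allocate() above to obtain a physically contiguous, wired,
 * zero-filled run of pages.  The variable names are hypothetical, and
 * passing 0 for max_pnum/pnum_mask is assumed here to mean "no constraint".
 *
 *	vm_page_t     contig_list;
 *	kern_return_t kr;
 *
 *	kr = cpm_allocate(1024 * 1024, &contig_list, 0, 0, TRUE, KMA_ZERO);
 *	if (kr == KERN_SUCCESS) {
 *		// pages come back ordered by ascending physical address
 *		for (vm_page_t p = contig_list; p; p = NEXT_PAGE(p)) {
 *			// ... consume VM_PAGE_GET_PHYS_PAGE(p) ...
 *		}
 *	}
 */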
7589
7590 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
7591
7592 /*
7593 * when working on a 'run' of pages, it is necessary to hold
7594 * the vm_page_queue_lock (a hot global lock) for certain operations
7595 * on the page... however, the majority of the work can be done
7596 * while merely holding the object lock... in fact there are certain
7597 * collections of pages that don't require any work brokered by the
7598 * vm_page_queue_lock... to mitigate the time spent behind the global
7599 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
7600 * while doing all of the work that doesn't require the vm_page_queue_lock...
7601 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
7602 * necessary work for each page... we will grab the busy bit on the page
7603 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
7604 * if it can't immediately take the vm_page_queue_lock in order to compete
7605 * for the locks in the same order that vm_pageout_scan takes them.
7606 * the operation names are modeled after the names of the routines that
7607 * need to be called in order to make the changes very obvious in the
7608 * original loop
7609 */
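/*
 * Illustrative sketch only (not part of the original source) of the 2 pass
 * pattern described above; vm_page_do_delayed_work() below does the work
 * that needs the vm_page_queue_lock.  'dw_array', 'm' and 'tag' are
 * hypothetical caller-side names.
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *
 *	vm_object_lock(object);
 *	// pass 1: for each page of interest, do the object-lock-only work,
 *	// then just record what still needs the vm_page_queue_lock
 *	dwp->dw_m = m;
 *	dwp->dw_mask = DW_vm_page_activate | DW_clear_busy | DW_PAGE_WAKEUP;
 *	dwp++;
 *	if (++dw_count >= vm_max_delayed_work_limit) {
 *		// pass 2: take the queue lock once for the whole batch
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 *	...
 *	if (dw_count)
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *	vm_object_unlock(object);
 */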
7610
7611 void
7612 vm_page_do_delayed_work(
7613 vm_object_t object,
7614 vm_tag_t tag,
7615 struct vm_page_delayed_work *dwp,
7616 int dw_count)
7617 {
7618 int j;
7619 vm_page_t m;
7620 vm_page_t local_free_q = VM_PAGE_NULL;
7621
7622 /*
7623 * pageout_scan takes the vm_page_lock_queues first
7624 * then tries for the object lock... to avoid what
7625 * is effectively a lock inversion, we'll go to the
7626 * trouble of taking them in that same order... otherwise
7627 * if this object contains the majority of the pages resident
7628 * in the UBC (or a small set of large objects actively being
7629 * worked on contain the majority of the pages), we could
7630 * cause the pageout_scan thread to 'starve' in its attempt
7631 * to find pages to move to the free queue, since it has to
7632 * successfully acquire the object lock of any candidate page
7633 * before it can steal/clean it.
7634 */
7635 if (!vm_page_trylock_queues()) {
7636 vm_object_unlock(object);
7637
7638 /*
7639 * "Turnstile enabled vm_pageout_scan" can be runnable
7640 * for a very long time without getting on a core.
7641 * If this is a higher priority thread it could be
7642 * waiting here for a very long time respecting the fact
7643 * that pageout_scan would like its object after VPS does
7644 * a mutex_pause(0).
7645 * So we cap the number of yields in the vm_object_lock_avoid()
7646 * case to a single mutex_pause(0) which will give vm_pageout_scan
7647 * 10us to run and grab the object if needed.
7648 */
7649 vm_page_lock_queues();
7650
7651 for (j = 0;; j++) {
7652 if ((!vm_object_lock_avoid(object) ||
7653 (vps_dynamic_priority_enabled && (j > 0))) &&
7654 _vm_object_lock_try(object)) {
7655 break;
7656 }
7657 vm_page_unlock_queues();
7658 mutex_pause(j);
7659 vm_page_lock_queues();
7660 }
7661 }
7662 for (j = 0; j < dw_count; j++, dwp++) {
7663 m = dwp->dw_m;
7664
7665 if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
7666 vm_pageout_throttle_up(m);
7667 }
7668 #if CONFIG_PHANTOM_CACHE
7669 if (dwp->dw_mask & DW_vm_phantom_cache_update) {
7670 vm_phantom_cache_update(m);
7671 }
7672 #endif
7673 if (dwp->dw_mask & DW_vm_page_wire) {
7674 vm_page_wire(m, tag, FALSE);
7675 } else if (dwp->dw_mask & DW_vm_page_unwire) {
7676 boolean_t queueit;
7677
7678 queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
7679
7680 vm_page_unwire(m, queueit);
7681 }
7682 if (dwp->dw_mask & DW_vm_page_free) {
7683 vm_page_free_prepare_queues(m);
7684
7685 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7686 /*
7687 * Add this page to our list of reclaimed pages,
7688 * to be freed later.
7689 */
7690 m->vmp_snext = local_free_q;
7691 local_free_q = m;
7692 } else {
7693 if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
7694 vm_page_deactivate_internal(m, FALSE);
7695 } else if (dwp->dw_mask & DW_vm_page_activate) {
7696 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
7697 vm_page_activate(m);
7698 }
7699 } else if (dwp->dw_mask & DW_vm_page_speculate) {
7700 vm_page_speculate(m, TRUE);
7701 } else if (dwp->dw_mask & DW_enqueue_cleaned) {
7702 /*
7703 * if we didn't hold the object lock and did this,
7704 * we might disconnect the page, then someone might
7705 * soft fault it back in, then we would put it on the
7706 * cleaned queue, and so we would have a referenced (maybe even dirty)
7707 * page on that queue, which we don't want
7708 */
7709 int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7710
7711 if ((refmod_state & VM_MEM_REFERENCED)) {
7712 /*
7713 * this page has been touched since it got cleaned; let's activate it
7714 * if it hasn't already been
7715 */
7716 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
7717 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
7718
7719 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
7720 vm_page_activate(m);
7721 }
7722 } else {
7723 m->vmp_reference = FALSE;
7724 vm_page_enqueue_cleaned(m);
7725 }
7726 } else if (dwp->dw_mask & DW_vm_page_lru) {
7727 vm_page_lru(m);
7728 } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
7729 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7730 vm_page_queues_remove(m, TRUE);
7731 }
7732 }
7733 if (dwp->dw_mask & DW_set_reference) {
7734 m->vmp_reference = TRUE;
7735 } else if (dwp->dw_mask & DW_clear_reference) {
7736 m->vmp_reference = FALSE;
7737 }
7738
7739 if (dwp->dw_mask & DW_move_page) {
7740 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7741 vm_page_queues_remove(m, FALSE);
7742
7743 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
7744
7745 vm_page_enqueue_inactive(m, FALSE);
7746 }
7747 }
7748 if (dwp->dw_mask & DW_clear_busy) {
7749 m->vmp_busy = FALSE;
7750 }
7751
7752 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7753 vm_page_wakeup(object, m);
7754 }
7755 }
7756 }
7757 vm_page_unlock_queues();
7758
7759 if (local_free_q) {
7760 vm_page_free_list(local_free_q, TRUE);
7761 }
7762
7763 VM_CHECK_MEMORYSTATUS;
7764 }
7765
7766 __abortlike
7767 static void
7768 __vm_page_alloc_list_failed_panic(
7769 vm_size_t page_count,
7770 kma_flags_t flags,
7771 kern_return_t kr)
7772 {
7773 panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7774 (size_t)page_count, flags, kr);
7775 }
7776
7777 kern_return_t
7778 vm_page_alloc_list(vm_size_t page_count, kma_flags_t flags, vm_page_t *list)
7779 {
7780 vm_page_t page_list = VM_PAGE_NULL;
7781 vm_page_t mem;
7782 kern_return_t kr = KERN_SUCCESS;
7783 int page_grab_count = 0;
7784 task_t task;
7785
7786 for (vm_size_t i = 0; i < page_count; i++) {
7787 for (;;) {
7788 vm_grab_options_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7789
7790 if (flags & KMA_NOPAGEWAIT) {
7791 options |= VM_PAGE_GRAB_NOPAGEWAIT;
7792 }
7793 if (flags & KMA_LOMEM) {
7794 mem = vm_page_grablo(options);
7795 } else {
7796 mem = vm_page_grab_options(options);
7797 }
7798
7799 if (mem != VM_PAGE_NULL) {
7800 break;
7801 }
7802
7803 if (flags & KMA_NOPAGEWAIT) {
7804 kr = KERN_RESOURCE_SHORTAGE;
7805 goto out;
7806 }
7807 if ((flags & KMA_LOMEM) && vm_lopage_needed) {
7808 kr = KERN_RESOURCE_SHORTAGE;
7809 goto out;
7810 }
7811
7812 /* VM privileged threads should have waited in vm_page_grab() and not get here. */
7813 assert(!(current_thread()->options & TH_OPT_VMPRIV));
7814
7815 if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7816 uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7817 if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7818 kr = KERN_RESOURCE_SHORTAGE;
7819 goto out;
7820 }
7821 }
7822 VM_PAGE_WAIT();
7823 }
7824
7825 page_grab_count++;
7826 mem->vmp_snext = page_list;
7827 page_list = mem;
7828 }
7829
7830 if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7831 for (mem = page_list; mem; mem = mem->vmp_snext) {
7832 vm_page_zero_fill(mem);
7835 }
7836 }
7837
7838 out:
7839 task = current_task_early();
7840 if (task != NULL) {
7841 counter_add(&task->pages_grabbed_kern, page_grab_count);
7842 }
7843
7844 if (kr == KERN_SUCCESS) {
7845 *list = page_list;
7846 } else if (flags & KMA_NOFAIL) {
7847 __vm_page_alloc_list_failed_panic(page_count, flags, kr);
7848 } else {
7849 vm_page_free_list(page_list, FALSE);
7850 }
7851
7852 return kr;
7853 }
7854
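/*
 * Illustrative sketch only (not part of the original source): a minimal use
 * of vm_page_alloc_list() above.  'page_list' is a hypothetical name.
 *
 *	vm_page_t     page_list;
 *	kern_return_t kr;
 *
 *	// without KMA_NOFAIL this can fail and the result must be checked
 *	kr = vm_page_alloc_list(16, KMA_ZERO, &page_list);
 *	if (kr == KERN_SUCCESS) {
 *		for (vm_page_t p = page_list; p; p = vm_page_get_next(p)) {
 *			// ... consume VM_PAGE_GET_PHYS_PAGE(p) ...
 *		}
 *		vm_page_free_list(page_list, FALSE);
 *	}
 */
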
7855 void
7856 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7857 {
7858 page->vmp_offset = offset;
7859 }
7860
7861 vm_page_t
7862 vm_page_get_next(vm_page_t page)
7863 {
7864 return page->vmp_snext;
7865 }
7866
7867 vm_object_offset_t
7868 vm_page_get_offset(vm_page_t page)
7869 {
7870 return page->vmp_offset;
7871 }
7872
7873 ppnum_t
7874 vm_page_get_phys_page(vm_page_t page)
7875 {
7876 return VM_PAGE_GET_PHYS_PAGE(page);
7877 }
7878
7879
7880 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7881
7882 #if HIBERNATION
7883
7884 static vm_page_t hibernate_gobble_queue;
7885
7886 static uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7887
7888 struct hibernate_statistics {
7889 int hibernate_considered;
7890 int hibernate_reentered_on_q;
7891 int hibernate_found_dirty;
7892 int hibernate_skipped_cleaning;
7893 int hibernate_skipped_transient;
7894 int hibernate_skipped_precious;
7895 int hibernate_skipped_external;
7896 int hibernate_queue_nolock;
7897 int hibernate_queue_paused;
7898 int hibernate_throttled;
7899 int hibernate_throttle_timeout;
7900 int hibernate_drained;
7901 int hibernate_drain_timeout;
7902 int cd_lock_failed;
7903 int cd_found_precious;
7904 int cd_found_wired;
7905 int cd_found_busy;
7906 int cd_found_unusual;
7907 int cd_found_cleaning;
7908 int cd_found_laundry;
7909 int cd_found_dirty;
7910 int cd_found_xpmapped;
7911 int cd_skipped_xpmapped;
7912 int cd_local_free;
7913 int cd_total_free;
7914 int cd_vm_page_wire_count;
7915 int cd_vm_struct_pages_unneeded;
7916 int cd_pages;
7917 int cd_discarded;
7918 int cd_count_wire;
7919 } hibernate_stats;
7920
7921 #if CONFIG_SPTM
7922 /**
7923 * On SPTM-based systems don't save any executable pages into the hibernation
7924 * image. The SPTM has stronger guarantees around not allowing write access to
7925 * the executable pages than on older systems, which prevents XNU from being
7926 * able to restore any pages mapped as executable.
7927 */
7928 #define HIBERNATE_XPMAPPED_LIMIT 0ULL
7929 #else /* CONFIG_SPTM */
7930 /*
7931 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7932 * so that we don't overrun the estimated image size, which would
7933 * result in a hibernation failure.
7934 *
7935 * We use a size value instead of pages because we don't want to take up more space
7936 * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7937 * to have that additional space available.
7938 *
7939 * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7940 * xpmapped size.
7941 */
7942 #define HIBERNATE_XPMAPPED_LIMIT ((160 * 1024 * 1024ULL) / PAGE_SIZE)
7943 #endif /* CONFIG_SPTM */
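/*
 * In the non-SPTM case above, the 160MB budget works out to
 * 160MB / 4KB  = 40960 pages on a 4K page size system (close to the
 * historical 40000 page x86 setting) and
 * 160MB / 16KB = 10240 pages on a 16K page size system,
 * so the space consumed in the image stays constant across page sizes.
 */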
7944
7945 static int
7946 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7947 {
7948 wait_result_t wait_result;
7949
7950 vm_page_lock_queues();
7951
7952 while (!vm_page_queue_empty(&q->pgo_pending)) {
7953 q->pgo_draining = TRUE;
7954
7955 assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7956
7957 vm_page_unlock_queues();
7958
7959 wait_result = thread_block(THREAD_CONTINUE_NULL);
7960
7961 if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7962 hibernate_stats.hibernate_drain_timeout++;
7963
7964 if (q == &vm_pageout_queue_external) {
7965 return 0;
7966 }
7967
7968 return 1;
7969 }
7970 vm_page_lock_queues();
7971
7972 hibernate_stats.hibernate_drained++;
7973 }
7974 vm_page_unlock_queues();
7975
7976 return 0;
7977 }
7978
7979
7980 boolean_t hibernate_skip_external = FALSE;
7981
7982 static int
7983 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7984 {
7985 vm_page_t m;
7986 vm_object_t l_object = NULL;
7987 vm_object_t m_object = NULL;
7988 int refmod_state = 0;
7989 int try_failed_count = 0;
7990 int retval = 0;
7991 int current_run = 0;
7992 struct vm_pageout_queue *iq;
7993 struct vm_pageout_queue *eq;
7994 struct vm_pageout_queue *tq;
7995
7996 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7997 VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7998
7999 iq = &vm_pageout_queue_internal;
8000 eq = &vm_pageout_queue_external;
8001
8002 vm_page_lock_queues();
8003
8004 while (qcount && !vm_page_queue_empty(q)) {
8005 if (current_run++ == 1000) {
8006 if (hibernate_should_abort()) {
8007 retval = 1;
8008 break;
8009 }
8010 current_run = 0;
8011 }
8012
8013 m = (vm_page_t) vm_page_queue_first(q);
8014 m_object = VM_PAGE_OBJECT(m);
8015
8016 /*
8017 * check to see if we currently are working
8018 * with the same object... if so, we've
8019 * already got the lock
8020 */
8021 if (m_object != l_object) {
8022 /*
8023 * the object associated with candidate page is
8024 * different from the one we were just working
8025 * with... dump the lock if we still own it
8026 */
8027 if (l_object != NULL) {
8028 vm_object_unlock(l_object);
8029 l_object = NULL;
8030 }
8031 /*
8032 * Try to lock object; since we've already got the
8033 * page queues lock, we can only 'try' for this one.
8034 * if the 'try' fails, we need to do a mutex_pause
8035 * to allow the owner of the object lock a chance to
8036 * run...
8037 */
8038 if (!vm_object_lock_try_scan(m_object)) {
8039 if (try_failed_count > 20) {
8040 hibernate_stats.hibernate_queue_nolock++;
8041
8042 goto reenter_pg_on_q;
8043 }
8044
8045 vm_page_unlock_queues();
8046 mutex_pause(try_failed_count++);
8047 vm_page_lock_queues();
8048
8049 hibernate_stats.hibernate_queue_paused++;
8050 continue;
8051 } else {
8052 l_object = m_object;
8053 }
8054 }
8055 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
8056 /*
8057 * page is not to be cleaned
8058 * put it back on the head of its queue
8059 */
8060 if (m->vmp_cleaning) {
8061 hibernate_stats.hibernate_skipped_cleaning++;
8062 } else {
8063 hibernate_stats.hibernate_skipped_transient++;
8064 }
8065
8066 goto reenter_pg_on_q;
8067 }
8068 if (m_object->vo_copy == VM_OBJECT_NULL) {
8069 if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
8070 /*
8071 * let the normal hibernate image path
8072 * deal with these
8073 */
8074 goto reenter_pg_on_q;
8075 }
8076 }
8077 if (!m->vmp_dirty && m->vmp_pmapped) {
8078 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
8079
8080 if ((refmod_state & VM_MEM_MODIFIED)) {
8081 SET_PAGE_DIRTY(m, FALSE);
8082 }
8083 } else {
8084 refmod_state = 0;
8085 }
8086
8087 if (!m->vmp_dirty) {
8088 /*
8089 * page is not to be cleaned
8090 * put it back on the head of its queue
8091 */
8092 if (m->vmp_precious) {
8093 hibernate_stats.hibernate_skipped_precious++;
8094 }
8095
8096 goto reenter_pg_on_q;
8097 }
8098
8099 if (hibernate_skip_external == TRUE && !m_object->internal) {
8100 hibernate_stats.hibernate_skipped_external++;
8101
8102 goto reenter_pg_on_q;
8103 }
8104 tq = NULL;
8105
8106 if (m_object->internal) {
8107 if (VM_PAGE_Q_THROTTLED(iq)) {
8108 tq = iq;
8109 }
8110 } else if (VM_PAGE_Q_THROTTLED(eq)) {
8111 tq = eq;
8112 }
8113
8114 if (tq != NULL) {
8115 wait_result_t wait_result;
8116 int wait_count = 5;
8117
8118 if (l_object != NULL) {
8119 vm_object_unlock(l_object);
8120 l_object = NULL;
8121 }
8122
8123 while (retval == 0) {
8124 tq->pgo_throttled = TRUE;
8125
8126 assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
8127
8128 vm_page_unlock_queues();
8129
8130 wait_result = thread_block(THREAD_CONTINUE_NULL);
8131
8132 vm_page_lock_queues();
8133
8134 if (wait_result != THREAD_TIMED_OUT) {
8135 break;
8136 }
8137 if (!VM_PAGE_Q_THROTTLED(tq)) {
8138 break;
8139 }
8140
8141 if (hibernate_should_abort()) {
8142 retval = 1;
8143 }
8144
8145 if (--wait_count == 0) {
8146 hibernate_stats.hibernate_throttle_timeout++;
8147
8148 if (tq == eq) {
8149 hibernate_skip_external = TRUE;
8150 break;
8151 }
8152 retval = 1;
8153 }
8154 }
8155 if (retval) {
8156 break;
8157 }
8158
8159 hibernate_stats.hibernate_throttled++;
8160
8161 continue;
8162 }
8163 /*
8164 * we've already factored out pages in the laundry which
8165 * means this page can't be on the pageout queue so it's
8166 * safe to do the vm_page_queues_remove
8167 */
8168 vm_page_queues_remove(m, TRUE);
8169
8170 if (m_object->internal == TRUE) {
8171 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
8172 }
8173
8174 vm_pageout_cluster(m);
8175
8176 hibernate_stats.hibernate_found_dirty++;
8177
8178 goto next_pg;
8179
8180 reenter_pg_on_q:
8181 vm_page_queue_remove(q, m, vmp_pageq);
8182 vm_page_queue_enter(q, m, vmp_pageq);
8183
8184 hibernate_stats.hibernate_reentered_on_q++;
8185 next_pg:
8186 hibernate_stats.hibernate_considered++;
8187
8188 qcount--;
8189 try_failed_count = 0;
8190 }
8191 if (l_object != NULL) {
8192 vm_object_unlock(l_object);
8193 l_object = NULL;
8194 }
8195
8196 vm_page_unlock_queues();
8197
8198 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
8199
8200 return retval;
8201 }
8202
8203
8204 static int
8205 hibernate_flush_dirty_pages(int pass)
8206 {
8207 struct vm_speculative_age_q *aq;
8208 uint32_t i;
8209
8210 if (vm_page_local_q) {
8211 zpercpu_foreach_cpu(lid) {
8212 vm_page_reactivate_local(lid, TRUE, FALSE);
8213 }
8214 }
8215
8216 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8217 int qcount;
8218 vm_page_t m;
8219
8220 aq = &vm_page_queue_speculative[i];
8221
8222 if (vm_page_queue_empty(&aq->age_q)) {
8223 continue;
8224 }
8225 qcount = 0;
8226
8227 vm_page_lockspin_queues();
8228
8229 vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
8230 qcount++;
8231 }
8232 vm_page_unlock_queues();
8233
8234 if (qcount) {
8235 if (hibernate_flush_queue(&aq->age_q, qcount)) {
8236 return 1;
8237 }
8238 }
8239 }
8240 if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
8241 return 1;
8242 }
8243 /* XXX FBDP TODO: flush secluded queue */
8244 if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
8245 return 1;
8246 }
8247 if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
8248 return 1;
8249 }
8250 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
8251 return 1;
8252 }
8253
8254 if (pass == 1) {
8255 vm_compressor_record_warmup_start();
8256 }
8257
8258 if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
8259 if (pass == 1) {
8260 vm_compressor_record_warmup_end();
8261 }
8262 return 1;
8263 }
8264 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
8265 if (pass == 1) {
8266 vm_compressor_record_warmup_end();
8267 }
8268 return 1;
8269 }
8270 if (pass == 1) {
8271 vm_compressor_record_warmup_end();
8272 }
8273
8274 if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
8275 return 1;
8276 }
8277
8278 return 0;
8279 }
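/*
 * Summary of the flush order implemented above: the speculative age queues
 * are flushed first, then the file-backed inactive, anonymous and cleaned
 * queues; the internal (compressor) pageout queue is drained before and
 * after flushing the active queue, and the external pageout queue is only
 * drained at the end if hibernate_skip_external didn't get set because that
 * queue stayed throttled.
 */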
8280
8281
8282 void
8283 hibernate_reset_stats(void)
8284 {
8285 bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
8286 }
8287
8288
8289 int
8290 hibernate_flush_memory(void)
8291 {
8292 int retval;
8293
8294 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
8295
8296 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
8297
8298 hibernate_cleaning_in_progress = TRUE;
8299 hibernate_skip_external = FALSE;
8300
8301 if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
8302 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
8303
8304 vm_compressor_flush();
8305
8306 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
8307
8308 if (consider_buffer_cache_collect != NULL) {
8309 unsigned int orig_wire_count;
8310
8311 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
8312 orig_wire_count = vm_page_wire_count;
8313
8314 (void)(*consider_buffer_cache_collect)(1);
8315 zone_gc(ZONE_GC_DRAIN);
8316
8317 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
8318
8319 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
8320 }
8321 }
8322 hibernate_cleaning_in_progress = FALSE;
8323
8324 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
8325
8326 if (retval) {
8327 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
8328 }
8329
8330
8331 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
8332 hibernate_stats.hibernate_considered,
8333 hibernate_stats.hibernate_reentered_on_q,
8334 hibernate_stats.hibernate_found_dirty);
8335 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
8336 hibernate_stats.hibernate_skipped_cleaning,
8337 hibernate_stats.hibernate_skipped_transient,
8338 hibernate_stats.hibernate_skipped_precious,
8339 hibernate_stats.hibernate_skipped_external,
8340 hibernate_stats.hibernate_queue_nolock);
8341 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
8342 hibernate_stats.hibernate_queue_paused,
8343 hibernate_stats.hibernate_throttled,
8344 hibernate_stats.hibernate_throttle_timeout,
8345 hibernate_stats.hibernate_drained,
8346 hibernate_stats.hibernate_drain_timeout);
8347
8348 return retval;
8349 }
8350
8351
8352 static void
8353 hibernate_page_list_zero(hibernate_page_list_t *list)
8354 {
8355 uint32_t bank;
8356 hibernate_bitmap_t * bitmap;
8357
8358 bitmap = &list->bank_bitmap[0];
8359 for (bank = 0; bank < list->bank_count; bank++) {
8360 uint32_t last_bit;
8361
8362 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
8363 // set out-of-bound bits at end of bitmap.
8364 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
8365 if (last_bit) {
8366 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
8367 }
8368
8369 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
8370 }
8371 }
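/*
 * Worked example of the out-of-bound masking above (illustrative only):
 * for a bank covering 50 pages, last_bit = (50 & 31) = 18, so the final
 * word becomes 0xFFFFFFFF >> 18 = 0x00003FFF... the 18 in-range bit
 * positions stay zero ("needs saving" by default) while the 14 positions
 * beyond the bank are set so they are never treated as pages to save.
 * If the bank size is a multiple of 32, last_bit is 0 and nothing is set.
 */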
8372
8373 void
8374 hibernate_free_gobble_pages(void)
8375 {
8376 vm_page_t m, next;
8377 uint32_t count = 0;
8378
8379 m = (vm_page_t) hibernate_gobble_queue;
8380 while (m) {
8381 next = m->vmp_snext;
8382 vm_page_free(m);
8383 count++;
8384 m = next;
8385 }
8386 hibernate_gobble_queue = VM_PAGE_NULL;
8387
8388 if (count) {
8389 HIBLOG("Freed %d pages\n", count);
8390 }
8391 }
8392
8393 static boolean_t
8394 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
8395 {
8396 vm_object_t object = NULL;
8397 int refmod_state;
8398 boolean_t discard = FALSE;
8399
8400 do {
8401 if (vm_page_is_private(m)) {
8402 panic("hibernate_consider_discard: private");
8403 }
8404
8405 object = VM_PAGE_OBJECT(m);
8406
8407 if (!vm_object_lock_try(object)) {
8408 object = NULL;
8409 if (!preflight) {
8410 hibernate_stats.cd_lock_failed++;
8411 }
8412 break;
8413 }
8414 if (VM_PAGE_WIRED(m)) {
8415 if (!preflight) {
8416 hibernate_stats.cd_found_wired++;
8417 }
8418 break;
8419 }
8420 if (m->vmp_precious) {
8421 if (!preflight) {
8422 hibernate_stats.cd_found_precious++;
8423 }
8424 break;
8425 }
8426 if (m->vmp_busy || !object->alive) {
8427 /*
8428 * Somebody is playing with this page.
8429 */
8430 if (!preflight) {
8431 hibernate_stats.cd_found_busy++;
8432 }
8433 break;
8434 }
8435 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
8436 /*
8437 * If it's unusual in any way, ignore it
8438 */
8439 if (!preflight) {
8440 hibernate_stats.cd_found_unusual++;
8441 }
8442 break;
8443 }
8444 if (m->vmp_cleaning) {
8445 if (!preflight) {
8446 hibernate_stats.cd_found_cleaning++;
8447 }
8448 break;
8449 }
8450 if (m->vmp_laundry) {
8451 if (!preflight) {
8452 hibernate_stats.cd_found_laundry++;
8453 }
8454 break;
8455 }
8456 if (!m->vmp_dirty) {
8457 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
8458
8459 if (refmod_state & VM_MEM_REFERENCED) {
8460 m->vmp_reference = TRUE;
8461 }
8462 if (refmod_state & VM_MEM_MODIFIED) {
8463 SET_PAGE_DIRTY(m, FALSE);
8464 }
8465 }
8466
8467 /*
8468 * If it's clean or purgeable we can discard the page on wakeup.
8469 */
8470 discard = (!m->vmp_dirty)
8471 || (VM_PURGABLE_VOLATILE == object->purgable)
8472 || (VM_PURGABLE_EMPTY == object->purgable);
8473
8474
8475 if (discard == FALSE) {
8476 if (!preflight) {
8477 hibernate_stats.cd_found_dirty++;
8478 }
8479 } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
8480 if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
8481 if (!preflight) {
8482 hibernate_stats.cd_found_xpmapped++;
8483 }
8484 discard = FALSE;
8485 } else {
8486 if (!preflight) {
8487 hibernate_stats.cd_skipped_xpmapped++;
8488 }
8489 }
8490 }
8491 } while (FALSE);
8492
8493 if (object) {
8494 vm_object_unlock(object);
8495 }
8496
8497 return discard;
8498 }
8499
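/*
 * Net effect of the checks above: a page is discardable on wakeup only if
 * its object lock could be taken and the page is not wired, precious, busy,
 * absent/unusual/errored, being cleaned or in the laundry, and it is either
 * clean (after folding the pmap modify bit into vmp_dirty) or belongs to a
 * volatile/empty purgeable object.  Clean, referenced 'xpmapped' pages of
 * external objects are additionally kept in the image, but only up to
 * HIBERNATE_XPMAPPED_LIMIT of them.
 */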
8500
8501 static void
8502 hibernate_discard_page(vm_page_t m)
8503 {
8504 vm_object_t m_object;
8505
8506 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
8507 /*
8508 * If it's unusual in any way, ignore it
8509 */
8510 return;
8511 }
8512
8513 m_object = VM_PAGE_OBJECT(m);
8514
8515 #if MACH_ASSERT || DEBUG
8516 if (!vm_object_lock_try(m_object)) {
8517 panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
8518 }
8519 #else
8520 /* No need to lock page queue for token delete, hibernate_vm_unlock()
8521 * makes sure these locks are uncontended before sleep */
8522 #endif /* MACH_ASSERT || DEBUG */
8523
8524 if (m->vmp_pmapped == TRUE) {
8525 __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8526 }
8527
8528 if (m->vmp_laundry) {
8529 panic("hibernate_discard_page(%p) laundry", m);
8530 }
8531 if (vm_page_is_private(m)) {
8532 panic("hibernate_discard_page(%p) private", m);
8533 }
8534 if (vm_page_is_fictitious(m)) {
8535 panic("hibernate_discard_page(%p) fictitious", m);
8536 }
8537
8538 if (VM_PURGABLE_VOLATILE == m_object->purgable) {
8539 /* object should be on a queue */
8540 assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
8541 purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
8542 assert(old_queue);
8543 if (m_object->purgeable_when_ripe) {
8544 vm_purgeable_token_delete_first(old_queue);
8545 }
8546 vm_object_lock_assert_exclusive(m_object);
8547 VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);
8548
8549 /*
8550 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
8551 * accounted in the "volatile" ledger, so no change here.
8552 * We have to update vm_page_purgeable_count, though, since we're
8553 * effectively purging this object.
8554 */
8555 unsigned int delta;
8556 assert(m_object->resident_page_count >= m_object->wired_page_count);
8557 delta = (m_object->resident_page_count - m_object->wired_page_count);
8558 assert(vm_page_purgeable_count >= delta);
8559 assert(delta > 0);
8560 OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
8561 }
8562
8563 vm_page_free(m);
8564
8565 #if MACH_ASSERT || DEBUG
8566 vm_object_unlock(m_object);
8567 #endif /* MACH_ASSERT || DEBUG */
8568 }
8569
8570 /*
8571 * Grab locks for hibernate_page_list_setall()
8572 */
8573 void
8574 hibernate_vm_lock_queues(void)
8575 {
8576 vm_object_lock(compressor_object);
8577 vm_page_lock_queues();
8578 vm_free_page_lock();
8579 lck_mtx_lock(&vm_purgeable_queue_lock);
8580
8581 if (vm_page_local_q) {
8582 zpercpu_foreach(lq, vm_page_local_q) {
8583 VPL_LOCK(&lq->vpl_lock);
8584 }
8585 }
8586 }
8587
8588 void
8589 hibernate_vm_unlock_queues(void)
8590 {
8591 if (vm_page_local_q) {
8592 zpercpu_foreach(lq, vm_page_local_q) {
8593 VPL_UNLOCK(&lq->vpl_lock);
8594 }
8595 }
8596 lck_mtx_unlock(&vm_purgeable_queue_lock);
8597 vm_free_page_unlock();
8598 vm_page_unlock_queues();
8599 vm_object_unlock(compressor_object);
8600 }
8601
8602 #if CONFIG_SPTM
8603 static bool
8604 hibernate_sptm_should_force_page_to_wired_pagelist(vm_page_t vmp)
8605 {
8606 const sptm_paddr_t paddr = ptoa_64(VM_PAGE_GET_PHYS_PAGE(vmp));
8607 const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
8608 const vm_object_t vmp_objp = VM_PAGE_OBJECT(vmp);
8609
8610 return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG ||
8611 (frame_type == XNU_USER_EXEC && vmp_objp->internal == TRUE);
8612 }
8613 #endif
8614
8615 /*
8616 * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
8617 * pages known to VM to not need saving are subtracted.
8618 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
8619 */
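/*
 * A minimal sketch of that convention (illustrative only): every page starts
 * out with its bit zero, i.e. "save me", and pages the VM knows it does not
 * need preserved are subtracted by setting their bit:
 *
 *	// 'pnum' need not be preserved in the pageable image
 *	hibernate_page_bitset(page_list, TRUE, pnum);
 *
 *	// 'pnum' need not be preserved in the wired image
 *	hibernate_page_bitset(page_list_wired, TRUE, pnum);
 *
 * a page that must be saved simply has neither bit set.
 */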
8620
8621 void
8622 hibernate_page_list_setall(hibernate_page_list_t * page_list,
8623 hibernate_page_list_t * page_list_wired,
8624 hibernate_page_list_t * page_list_pal,
8625 boolean_t preflight,
8626 boolean_t will_discard,
8627 uint32_t * pagesOut)
8628 {
8629 uint64_t start, end, nsec;
8630 vm_page_t m;
8631 vm_page_t next;
8632 __block uint32_t pages = page_list->page_count;
8633 __block uint32_t count_wire = pages;
8634 uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
8635 uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
8636 uint32_t count_discard_active = 0;
8637 uint32_t count_discard_inactive = 0;
8638 uint32_t count_retired = 0;
8639 uint32_t count_discard_cleaned = 0;
8640 uint32_t count_discard_purgeable = 0;
8641 uint32_t count_discard_speculative = 0;
8642 uint32_t count_discard_vm_struct_pages = 0;
8643 uint32_t bank;
8644 hibernate_bitmap_t * bitmap;
8645 hibernate_bitmap_t * bitmap_wired;
8646 boolean_t discard_all;
8647 boolean_t discard = FALSE;
8648
8649 HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
8650
8651 if (preflight) {
8652 page_list = NULL;
8653 page_list_wired = NULL;
8654 page_list_pal = NULL;
8655 discard_all = FALSE;
8656 } else {
8657 discard_all = will_discard;
8658 }
8659
8660 #if MACH_ASSERT || DEBUG
8661 if (!preflight) {
8662 assert(hibernate_vm_locks_are_safe());
8663 vm_page_lock_queues();
8664 if (vm_page_local_q) {
8665 zpercpu_foreach(lq, vm_page_local_q) {
8666 VPL_LOCK(&lq->vpl_lock);
8667 }
8668 }
8669 }
8670 #endif /* MACH_ASSERT || DEBUG */
8671
8672
8673 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
8674
8675 clock_get_uptime(&start);
8676
8677 if (!preflight) {
8678 hibernate_page_list_zero(page_list);
8679 hibernate_page_list_zero(page_list_wired);
8680 hibernate_page_list_zero(page_list_pal);
8681
8682 hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
8683 hibernate_stats.cd_pages = pages;
8684 }
8685
8686 if (vm_page_local_q) {
8687 zpercpu_foreach_cpu(lid) {
8688 vm_page_reactivate_local(lid, TRUE, !preflight);
8689 }
8690 }
8691
8692 if (preflight) {
8693 vm_object_lock(compressor_object);
8694 vm_page_lock_queues();
8695 vm_free_page_lock();
8696 }
8697
8698 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8699
8700 hibernation_vmqueues_inspection = TRUE;
8701
8702 m = (vm_page_t) hibernate_gobble_queue;
8703 while (m) {
8704 pages--;
8705 count_wire--;
8706 if (!preflight) {
8707 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8708 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8709 }
8710 m = m->vmp_snext;
8711 }
8712
8713 __auto_type hib_free_boilerplate = ^(vm_page_t page) {
8714 assert((page->vmp_q_state == VM_PAGE_ON_FREE_Q) ||
8715 #if XNU_VM_HAS_LOPAGE
8716 (page->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
8717 #endif /* XNU_VM_HAS_LOPAGE */
8718 (page->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q));
8719
8720 pages--;
8721 count_wire--;
8722
8723 if (!preflight) {
8724 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(page));
8725 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(page));
8726
8727 hibernate_stats.cd_total_free++;
8728
8729 if (page->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) {
8730 hibernate_stats.cd_local_free++;
8731 }
8732 }
8733 };
8734
8735 if (!preflight) {
8736 percpu_foreach(free_pages_head, free_pages) {
8737 _vm_page_list_foreach(m, *free_pages_head) {
8738 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
8739 hib_free_boilerplate(m);
8740 }
8741 }
8742 }
8743
8744 #if CONFIG_SPTM
8745 if (vm_pages_free_masks()) {
8746 uint32_t bits = vm_pages_free_mask_len() * MAX_COLORS;
8747 bitmap_t *map = vm_pages_free_masks_as_bitmap(0);
8748
8749 for (int bit = bitmap_first(map, bits);
8750 bit >= 0; bit = bitmap_next(map, bit)) {
8751 ppnum_t pnum = pmap_first_pnum + bit;
8752 vm_page_t mem = vm_page_find_canonical(pnum);
8753
8754 hib_free_boilerplate(mem);
8755 }
8756 } else
8757 #endif /* CONFIG_SPTM */
8758 {
8759 vm_page_free_queue_foreach(&vm_page_queue_free, hib_free_boilerplate);
8760 }
8761 #if XNU_VM_HAS_LOPAGE
8762 vm_page_free_queue_foreach(&vm_lopage_queue_free, hib_free_boilerplate);
8763 #endif /* XNU_VM_HAS_LOPAGE */
8764
8765 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
8766 while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
8767 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
8768
8769 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8770 discard = FALSE;
8771 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
8772 && hibernate_consider_discard(m, preflight)) {
8773 if (!preflight) {
8774 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8775 }
8776 count_discard_inactive++;
8777 discard = discard_all;
8778 } else {
8779 count_throttled++;
8780 }
8781 count_wire--;
8782 if (!preflight) {
8783 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8784 }
8785
8786 if (discard) {
8787 hibernate_discard_page(m);
8788 }
8789 m = next;
8790 }
8791
8792 m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
8793 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8794 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8795 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8796 #if CONFIG_SPTM
8797 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8798 #endif
8799 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8800 discard = FALSE;
8801 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8802 hibernate_consider_discard(m, preflight)) {
8803 if (!preflight) {
8804 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8805 }
8806 if (m->vmp_dirty) {
8807 count_discard_purgeable++;
8808 } else {
8809 count_discard_inactive++;
8810 }
8811 discard = discard_all;
8812 } else {
8813 /*
8814 * If the page must be force-added to the wired page list, prevent it from appearing
8815 * in the unwired page list.
8816 */
8817 if (force_to_wired_list) {
8818 if (!preflight) {
8819 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8820 }
8821 } else {
8822 count_anonymous++;
8823 }
8824 }
8825 /*
8826 * If the page is NOT being forced into the wired page list, remove it from the
8827 * wired page list here.
8828 */
8829 if (!force_to_wired_list) {
8830 count_wire--;
8831 if (!preflight) {
8832 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8833 }
8834 }
8835 if (discard) {
8836 hibernate_discard_page(m);
8837 }
8838 m = next;
8839 }
8840
8841 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8842 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8843 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8844
8845 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8846 discard = FALSE;
8847 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8848 hibernate_consider_discard(m, preflight)) {
8849 if (!preflight) {
8850 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8851 }
8852 if (m->vmp_dirty) {
8853 count_discard_purgeable++;
8854 } else {
8855 count_discard_cleaned++;
8856 }
8857 discard = discard_all;
8858 } else {
8859 count_cleaned++;
8860 }
8861 count_wire--;
8862 if (!preflight) {
8863 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8864 }
8865 if (discard) {
8866 hibernate_discard_page(m);
8867 }
8868 m = next;
8869 }
8870
8871 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8872 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8873 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8874 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8875 #if CONFIG_SPTM
8876 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8877 #endif
8878 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8879 discard = FALSE;
8880 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
8881 hibernate_consider_discard(m, preflight)) {
8882 if (!preflight) {
8883 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8884 }
8885 if (m->vmp_dirty) {
8886 count_discard_purgeable++;
8887 } else {
8888 count_discard_active++;
8889 }
8890 discard = discard_all;
8891 } else {
8892 /*
8893 * If the page must be force-added to the wired page list, prevent it from appearing
8894 * in the unwired page list.
8895 */
8896 if (force_to_wired_list) {
8897 if (!preflight) {
8898 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8899 }
8900 } else {
8901 count_active++;
8902 }
8903 }
8904 /*
8905 * If the page is NOT being forced into the wired page list, remove it from the
8906 * wired page list here.
8907 */
8908 if (!force_to_wired_list) {
8909 count_wire--;
8910 if (!preflight) {
8911 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8912 }
8913 }
8914 if (discard) {
8915 hibernate_discard_page(m);
8916 }
8917 m = next;
8918 }
8919
8920 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8921 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8922 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8923 bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
8924 #if CONFIG_SPTM
8925 force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8926 #endif
8927 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8928 discard = FALSE;
8929 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8930 hibernate_consider_discard(m, preflight)) {
8931 if (!preflight) {
8932 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8933 }
8934 if (m->vmp_dirty) {
8935 count_discard_purgeable++;
8936 } else {
8937 count_discard_inactive++;
8938 }
8939 discard = discard_all;
8940 } else {
8941 /*
8942 * If the page must be force-added to the wired page list, prevent it from appearing
8943 * in the unwired page list.
8944 */
8945 if (force_to_wired_list) {
8946 if (!preflight) {
8947 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8948 }
8949 } else {
8950 count_inactive++;
8951 }
8952 }
8953 /*
8954 * If the page is NOT being forced into the wired page list, remove it from the
8955 * wired page list here.
8956 */
8957 if (!force_to_wired_list) {
8958 count_wire--;
8959 if (!preflight) {
8960 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8961 }
8962 }
8963 if (discard) {
8964 hibernate_discard_page(m);
8965 }
8966 m = next;
8967 }
8968 /* XXX FBDP TODO: secluded queue */
8969
8970 for (uint32_t i = 0; i <= vm_page_max_speculative_age_q; i++) {
8971 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8972 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8973 assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
8974 "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
8975 m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
8976
8977 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8978 discard = FALSE;
8979 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8980 hibernate_consider_discard(m, preflight)) {
8981 if (!preflight) {
8982 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8983 }
8984 count_discard_speculative++;
8985 discard = discard_all;
8986 } else {
8987 count_speculative++;
8988 }
8989 count_wire--;
8990 if (!preflight) {
8991 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8992 }
8993 if (discard) {
8994 hibernate_discard_page(m);
8995 }
8996 m = next;
8997 }
8998 }
8999
9000 vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
9001 assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
9002
9003 count_compressor++;
9004 count_wire--;
9005 if (!preflight) {
9006 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
9007 }
9008 }
9009
9010
9011 if (preflight == FALSE && discard_all == TRUE) {
9012 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
9013
9014 HIBLOG("hibernate_teardown started\n");
9015 count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
9016 HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
9017
9018 pages -= count_discard_vm_struct_pages;
9019 count_wire -= count_discard_vm_struct_pages;
9020
9021 hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
9022
9023 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
9024 }
9025
9026 if (!preflight) {
9027 // remove from the pageable bitmap any page that the wired bitmap will save (zero bits mean "needs saving")
9028 bitmap = &page_list->bank_bitmap[0];
9029 bitmap_wired = &page_list_wired->bank_bitmap[0];
9030 for (bank = 0; bank < page_list->bank_count; bank++) {
9031 for (uint32_t i = 0; i < bitmap->bitmapwords; i++) {
9032 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
9033 }
9034 bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
9035 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
9036 }
9037 }
9038
9039 // machine dependent adjustments
9040 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
9041
9042 if (!preflight) {
9043 hibernate_stats.cd_count_wire = count_wire;
9044 hibernate_stats.cd_discarded = count_discard_active +
9045 count_discard_inactive + count_discard_purgeable +
9046 count_discard_speculative + count_discard_cleaned +
9047 count_discard_vm_struct_pages;
9048 }
9049
9050 clock_get_uptime(&end);
9051 absolutetime_to_nanoseconds(end - start, &nsec);
9052 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
9053
9054 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, "
9055 "zf %d, throt %d, compr %d, xpmapped %d\n"
9056 " %s discard act %d inact %d purgeable %d "
9057 "spec %d cleaned %d retired %d\n",
9058 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative,
9059 count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
9060 discard_all ? "did" : "could",
9061 count_discard_active, count_discard_inactive, count_discard_purgeable,
9062 count_discard_speculative, count_discard_cleaned, count_retired);
9063
9064 if (hibernate_stats.cd_skipped_xpmapped) {
9065 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n",
9066 hibernate_stats.cd_skipped_xpmapped);
9067 }
9068
9069 *pagesOut = pages - count_discard_active - count_discard_inactive -
9070 count_discard_purgeable - count_discard_speculative -
9071 count_discard_cleaned - count_retired;
9072
9073 if (preflight && will_discard) {
9074 *pagesOut -= count_compressor + count_throttled +
9075 count_anonymous + count_inactive + count_cleaned +
9076 count_speculative + count_active;
9077
9078 /*
9079 * We try to keep max HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
9080 * even if these are clean and so we need to size the hibernation image accordingly.
9081 *
9082 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
9083 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
9084 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
9085 * clean xpmapped pages.
9086 *
9087 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
9088 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
9089 */
9090 *pagesOut += HIBERNATE_XPMAPPED_LIMIT;
9091 }
9092
9093 hibernation_vmqueues_inspection = FALSE;
9094
9095 #if MACH_ASSERT || DEBUG
9096 if (!preflight) {
9097 if (vm_page_local_q) {
9098 zpercpu_foreach(lq, vm_page_local_q) {
9099 VPL_UNLOCK(&lq->vpl_lock);
9100 }
9101 }
9102 vm_page_unlock_queues();
9103 }
9104 #endif /* MACH_ASSERT || DEBUG */
9105
9106 if (preflight) {
9107 vm_free_page_unlock();
9108 vm_page_unlock_queues();
9109 vm_object_unlock(compressor_object);
9110 }
9111
9112 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
9113 }
9114
9115 void
9116 hibernate_page_list_discard(hibernate_page_list_t * page_list)
9117 {
9118 uint64_t start, end, nsec;
9119 vm_page_t m;
9120 vm_page_t next;
9121 uint32_t i;
9122 uint32_t count_discard_active = 0;
9123 uint32_t count_discard_inactive = 0;
9124 uint32_t count_discard_purgeable = 0;
9125 uint32_t count_discard_cleaned = 0;
9126 uint32_t count_discard_speculative = 0;
9127
9128
9129 #if MACH_ASSERT || DEBUG
9130 vm_page_lock_queues();
9131 if (vm_page_local_q) {
9132 zpercpu_foreach(lq, vm_page_local_q) {
9133 VPL_LOCK(&lq->vpl_lock);
9134 }
9135 }
9136 #endif /* MACH_ASSERT || DEBUG */
9137
9138 clock_get_uptime(&start);
9139
9140 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
9141 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
9142 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
9143
9144 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
9145 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
9146 if (m->vmp_dirty) {
9147 count_discard_purgeable++;
9148 } else {
9149 count_discard_inactive++;
9150 }
9151 hibernate_discard_page(m);
9152 }
9153 m = next;
9154 }
9155
9156 for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
9157 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
9158 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
9159 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
9160
9161 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
9162 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
9163 count_discard_speculative++;
9164 hibernate_discard_page(m);
9165 }
9166 m = next;
9167 }
9168 }
9169
9170 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
9171 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
9172 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
9173
9174 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
9175 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
9176 if (m->vmp_dirty) {
9177 count_discard_purgeable++;
9178 } else {
9179 count_discard_inactive++;
9180 }
9181 hibernate_discard_page(m);
9182 }
9183 m = next;
9184 }
9185 /* XXX FBDP TODO: secluded queue */
9186
9187 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
9188 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
9189 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
9190
9191 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
9192 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
9193 if (m->vmp_dirty) {
9194 count_discard_purgeable++;
9195 } else {
9196 count_discard_active++;
9197 }
9198 hibernate_discard_page(m);
9199 }
9200 m = next;
9201 }
9202
9203 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
9204 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
9205 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
9206
9207 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
9208 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
9209 if (m->vmp_dirty) {
9210 count_discard_purgeable++;
9211 } else {
9212 count_discard_cleaned++;
9213 }
9214 hibernate_discard_page(m);
9215 }
9216 m = next;
9217 }
9218
9219 #if MACH_ASSERT || DEBUG
9220 if (vm_page_local_q) {
9221 zpercpu_foreach(lq, vm_page_local_q) {
9222 VPL_UNLOCK(&lq->vpl_lock);
9223 }
9224 }
9225 vm_page_unlock_queues();
9226 #endif /* MACH_ASSERT || DEBUG */
9227
9228 clock_get_uptime(&end);
9229 absolutetime_to_nanoseconds(end - start, &nsec);
9230 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
9231 nsec / 1000000ULL,
9232 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
9233 }
9234
9235 boolean_t hibernate_paddr_map_inited = FALSE;
9236 unsigned int hibernate_teardown_last_valid_compact_indx = -1;
9237 vm_page_t hibernate_rebuild_hash_list = NULL;
9238
9239 unsigned int hibernate_teardown_found_tabled_pages = 0;
9240 unsigned int hibernate_teardown_found_created_pages = 0;
9241 unsigned int hibernate_teardown_found_free_pages = 0;
9242 unsigned int hibernate_teardown_vm_page_free_count;
9243
9244
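/*
 * The hibernation teardown/rebuild code needs to map an index into the dense
 * vm_pages[] array back to a physical page number without trusting the
 * (possibly stale or relocated) vm_page_t contents.  Each ppnum_mapping
 * records one run of vm_pages[] entries [ppnm_sindx, ppnm_eindx) whose
 * physical pages are contiguous starting at ppnm_base_paddr, so a lookup is
 * simply ppnm_base_paddr + (indx - ppnm_sindx).  For example, a run with
 * ppnm_sindx 100 and ppnm_base_paddr 0x2000 resolves index 103 to physical
 * page 0x2003.
 */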
9245 struct ppnum_mapping {
9246 struct ppnum_mapping *ppnm_next;
9247 ppnum_t ppnm_base_paddr;
9248 unsigned int ppnm_sindx;
9249 unsigned int ppnm_eindx;
9250 };
9251
9252 struct ppnum_mapping *ppnm_head;
9253 struct ppnum_mapping *ppnm_last_found = NULL;
9254
9255
9256 void
9257 hibernate_create_paddr_map(void)
9258 {
9259 unsigned int i;
9260 ppnum_t next_ppnum_in_run = 0;
9261 struct ppnum_mapping *ppnm = NULL;
9262
9263 if (hibernate_paddr_map_inited == FALSE) {
9264 for (i = 0; i < vm_pages_count; i++) {
9265 if (ppnm) {
9266 ppnm->ppnm_eindx = i;
9267 }
9268
9269 if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(vm_page_get(i)) != next_ppnum_in_run) {
9270 ppnm = zalloc_permanent_type(struct ppnum_mapping);
9271
9272 ppnm->ppnm_next = ppnm_head;
9273 ppnm_head = ppnm;
9274
9275 ppnm->ppnm_sindx = i;
9276 ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(vm_page_get(i));
9277 }
9278 next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(vm_page_get(i)) + 1;
9279 }
9280 ppnm->ppnm_eindx = vm_pages_count;
9281
9282 hibernate_paddr_map_inited = TRUE;
9283 }
9284 }
9285
9286 static ppnum_t
9287 hibernate_lookup_paddr(unsigned int indx)
9288 {
9289 struct ppnum_mapping *ppnm = NULL;
9290
9291 ppnm = ppnm_last_found;
9292
9293 if (ppnm) {
9294 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
9295 goto done;
9296 }
9297 }
9298 for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
9299 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
9300 ppnm_last_found = ppnm;
9301 break;
9302 }
9303 }
9304 if (ppnm == NULL) {
9305 panic("hibernate_lookup_paddr of %d failed", indx);
9306 }
9307 done:
9308 return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
9309 }
9310
9311
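/*
 * Set the bit for every page that lies wholly within [saddr, eaddr) in both
 * page_list and page_list_wired, marking those pages as unneeded for the
 * hibernation image.  saddr is rounded up and eaddr rounded down to page
 * boundaries, so a partially covered page at either end is left untouched.
 * Returns the number of pages marked.
 */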
9312 static uint32_t
9313 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
9314 {
9315 addr64_t saddr_aligned;
9316 addr64_t eaddr_aligned;
9317 addr64_t addr;
9318 ppnum_t paddr;
9319 unsigned int mark_as_unneeded_pages = 0;
9320
9321 saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
9322 eaddr_aligned = eaddr & ~PAGE_MASK_64;
9323
9324 for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
9325 paddr = pmap_find_phys(kernel_pmap, addr);
9326
9327 assert(paddr);
9328
9329 hibernate_page_bitset(page_list, TRUE, paddr);
9330 hibernate_page_bitset(page_list_wired, TRUE, paddr);
9331
9332 mark_as_unneeded_pages++;
9333 }
9334 return mark_as_unneeded_pages;
9335 }
9336
9337
9338 static void
9339 hibernate_hash_insert_page(vm_page_t mem)
9340 {
9341 vm_page_bucket_t *bucket;
9342 int hash_id;
9343 vm_object_t m_object;
9344
9345 m_object = VM_PAGE_OBJECT(mem);
9346
9347 assert(mem->vmp_hashed);
9348 assert(m_object);
9349 assert(mem->vmp_offset != (vm_object_offset_t) -1);
9350
9351 /*
9352	 * Insert it into the object/offset hash table
9353 */
9354 hash_id = vm_page_hash(m_object, mem->vmp_offset);
9355 bucket = &vm_page_buckets[hash_id];
9356
9357 mem->vmp_next_m = bucket->page_list;
9358 bucket->page_list = VM_PAGE_PACK_PTR(mem);
9359 }
9360
9361
9362 static void
9363 hibernate_free_range_flush(vm_page_list_t *list)
9364 {
9365 vm_page_free_queue_enter_list(*list, VMP_RELEASE_HIBERNATE);
9366 *list = (vm_page_list_t){ };
9367 }
9368
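/*
 * Reinitialize the vm_page_t entries in [sindx, eindx) with their physical
 * page numbers (recovered via hibernate_lookup_paddr(), since the entries
 * themselves may hold stale data after compaction) and release them to the
 * free queues, flushing in batches of VMP_FREE_BATCH_SIZE.
 */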
9369 static void
9370 hibernate_free_range(vm_page_list_t *list, int sindx, int eindx)
9371 {
9372 for (; sindx < eindx; sindx++) {
9373 vm_page_t mem = vm_page_get(sindx);
9374 ppnum_t pnum = hibernate_lookup_paddr(sindx);
9375
9376 vm_page_init(mem, pnum);
9377 vm_page_list_push(list, mem);
9378
9379 if (list->vmpl_count >= VMP_FREE_BATCH_SIZE) {
9380 hibernate_free_range_flush(list);
9381 }
9382 }
9383 }
9384
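/*
 * Undo hibernate_teardown_vm_structs() once the compacted layout is no
 * longer needed: walk the compacted vm_pages[] entries downward from
 * hibernate_teardown_last_valid_compact_indx, move each entry back to the
 * slot recorded in its vmp_next_m field, re-hash the entries that were
 * hashed, and turn every gap between relocated entries back into a range of
 * free pages.  Fictitious pages unlinked from the hash during teardown are
 * re-inserted from hibernate_rebuild_hash_list at the end.
 */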
9385 void
9386 hibernate_rebuild_vm_structs(void)
9387 {
9388 int cindx, sindx, eindx;
9389 vm_page_list_t list = { };
9390 vm_page_t mem, tmem, mem_next;
9391 AbsoluteTime startTime, endTime;
9392 uint64_t nsec;
9393
9394 if (!hibernate_rebuild_needed) {
9395 return;
9396 }
9397
9398 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
9399 HIBLOG("hibernate_rebuild started\n");
9400
9401 clock_get_uptime(&startTime);
9402
9403 pal_hib_rebuild_pmap_structs();
9404
9405 bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
9406 eindx = vm_pages_count;
9407
9408 /*
9409 * Mark all the vm_pages[] that have not been initialized yet as being
9410	 * transient. This is needed to ensure that the buddy page search is correct.
9411	 * Without this, random data in these vm_pages[] can trip up the buddy search.
9412 */
9413 for (int i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
9414 vm_page_get(i)->vmp_q_state = VM_PAGE_NOT_ON_Q;
9415 }
9416
9417 for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
9418 mem = vm_page_get(cindx);
9419 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
9420 /*
9421		 * hibernate_teardown_vm_structs left the slot where this
9422		 * vm_page_t belongs in its vmp_next_m field.
9423 */
9424 tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9425 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
9426 assert(tmem >= mem);
9427
9428 sindx = (int)(tmem - vm_page_get(0));
9429
9430 if (mem != tmem) {
9431 /*
9432 * this vm_page_t was moved by hibernate_teardown_vm_structs,
9433 * so move it back to its real location
9434 */
9435 *tmem = *mem;
9436 mem = tmem;
9437 }
9438 if (mem->vmp_hashed) {
9439 hibernate_hash_insert_page(mem);
9440 }
9441 /*
9442 * the 'hole' between this vm_page_t and the previous
9443 * vm_page_t we moved needs to be initialized as
9444 * a range of free vm_page_t's
9445 */
9446 hibernate_free_range(&list, sindx + 1, eindx);
9447
9448 eindx = sindx;
9449 }
9450 hibernate_free_range(&list, 0, sindx);
9451 hibernate_free_range_flush(&list);
9452
9453 assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
9454
9455 /*
9456 * process the list of vm_page_t's that were entered in the hash,
9457	 * but were not located in the vm_pages array... these are
9458 * vm_page_t's that were created on the fly (i.e. fictitious)
9459 */
9460 for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
9461 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9462
9463 mem->vmp_next_m = 0;
9464 hibernate_hash_insert_page(mem);
9465 }
9466 hibernate_rebuild_hash_list = NULL;
9467
9468 clock_get_uptime(&endTime);
9469 SUB_ABSOLUTETIME(&endTime, &startTime);
9470 absolutetime_to_nanoseconds(endTime, &nsec);
9471
9472 HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
9473
9474 hibernate_rebuild_needed = false;
9475
9476 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
9477 }
9478
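/*
 * In effect, shrink the footprint of the VM's own bookkeeping before the
 * hibernation image is written: unlink out-of-array (fictitious) pages from
 * the hash so the hash buckets can be dropped, pull every free page off the
 * free queues, and compact the remaining in-use vm_page_t entries toward the
 * front of vm_pages[], recording each entry's original slot in vmp_next_m so
 * that hibernate_rebuild_vm_structs() can put it back.  The hash buckets,
 * everything beyond hibernate_teardown_last_valid_compact_indx, and any pmap
 * ranges reported by pal_hib_teardown_pmap_structs() are then marked
 * unneeded; the total number of pages saved is returned.
 */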
9479 static uint32_t
9480 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
9481 {
9482 unsigned int compact_target_indx;
9483 unsigned int mark_as_unneeded_pages = 0;
9484 unsigned int unneeded_vm_page_bucket_pages = 0;
9485 unsigned int unneeded_vm_pages_pages = 0;
9486 unsigned int unneeded_pmap_pages = 0;
9487 addr64_t start_of_unneeded = 0;
9488 addr64_t end_of_unneeded = 0;
9489
9490
9491 if (hibernate_should_abort()) {
9492 return 0;
9493 }
9494
9495 hibernate_rebuild_needed = true;
9496
9497 HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, "
9498 "active_pages %d, inactive_pages %d, speculative_pages %d, "
9499 "cleaned_pages %d, compressor_pages %d\n",
9500 vm_page_wire_count, vm_page_free_count,
9501 vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
9502 vm_page_cleaned_count, compressor_object->resident_page_count);
9503
9504 for (uint32_t i = 0; i < vm_page_bucket_count; i++) {
9505 vm_page_bucket_t *bucket = &vm_page_buckets[i];
9506 vm_page_t mem, mem_next;
9507
9508 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
9509 assert(mem->vmp_hashed);
9510
9511 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9512
9513 if (!vm_page_in_array(mem)) {
9514 mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
9515 hibernate_rebuild_hash_list = mem;
9516 }
9517 }
9518 }
9519 unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0],
9520 (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
9521 mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
9522
9523 hibernate_teardown_vm_page_free_count = vm_page_free_count;
9524
9525 compact_target_indx = 0;
9526
9527 vm_free_page_lock();
9528
9529 for (uint32_t i = 0; i < vm_pages_count; i++) {
9530 vm_page_t mem = vm_page_get(i);
9531 ppnum_t pnum = VM_PAGE_GET_PHYS_PAGE(mem);
9532 vm_memory_class_t class = vm_page_get_memory_class(mem, pnum);
9533
9534 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
9535 vm_page_free_queue_remove(class, mem, pnum,
9536 VM_PAGE_ON_FREE_Q);
9537 hibernate_teardown_found_free_pages++;
9538
9539 if (vm_page_get(compact_target_indx)->vmp_q_state != VM_PAGE_ON_FREE_Q) {
9540 compact_target_indx = i;
9541 }
9542 } else {
9543 /*
9544			 * record this vm_page_t's original location...
9545			 * we need this even if it doesn't get moved,
9546			 * as an indicator to the rebuild function that
9547			 * we don't have to move it
9548 */
9549 mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
9550
9551 if (vm_page_get(compact_target_indx)->vmp_q_state == VM_PAGE_ON_FREE_Q) {
9552 /*
9553 * we've got a hole to fill, so
9554				 * move this vm_page_t to its new home
9555 */
9556 *vm_page_get(compact_target_indx) = *mem;
9557 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
9558
9559 hibernate_teardown_last_valid_compact_indx = compact_target_indx;
9560 compact_target_indx++;
9561 } else {
9562 hibernate_teardown_last_valid_compact_indx = i;
9563 }
9564 }
9565 }
9566
9567 vm_free_page_unlock();
9568
9569 unneeded_vm_pages_pages = hibernate_mark_as_unneeded(
9570 (addr64_t)vm_page_get(hibernate_teardown_last_valid_compact_indx + 1),
9571 (addr64_t)vm_page_get(vm_pages_count - 1),
9572 page_list, page_list_wired);
9573 mark_as_unneeded_pages += unneeded_vm_pages_pages;
9574
9575 pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
9576
9577 if (start_of_unneeded) {
9578 unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded,
9579 end_of_unneeded, page_list, page_list_wired);
9580 mark_as_unneeded_pages += unneeded_pmap_pages;
9581 }
9582 HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n",
9583 unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
9584
9585 return mark_as_unneeded_pages;
9586 }
9587
9588 #endif /* HIBERNATION */
9589
9590 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9591
9592 #include <mach_vm_debug.h>
9593 #if MACH_VM_DEBUG
9594
9595 #include <mach_debug/hash_info.h>
9596 #include <vm/vm_debug_internal.h>
9597
9598 /*
9599 * Routine: vm_page_info
9600 * Purpose:
9601 * Return information about the global VP table.
9602 * Fills the buffer with as much information as possible
9603 * and returns the desired size of the buffer.
9604 * Conditions:
9605 * Nothing locked. The caller should provide
9606 * possibly-pageable memory.
9607 */
9608
9609 unsigned int
9610 vm_page_info(
9611 hash_info_bucket_t *info,
9612 unsigned int count)
9613 {
9614 unsigned int i;
9615 lck_ticket_t *bucket_lock;
9616
9617 if (vm_page_bucket_count < count) {
9618 count = vm_page_bucket_count;
9619 }
9620
9621 for (i = 0; i < count; i++) {
9622 vm_page_bucket_t *bucket = &vm_page_buckets[i];
9623 unsigned int bucket_count = 0;
9624 vm_page_t m;
9625
9626 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
9627 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
9628
9629 for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
9630 m != VM_PAGE_NULL;
9631 m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
9632 bucket_count++;
9633 }
9634
9635 lck_ticket_unlock(bucket_lock);
9636
9637 /* don't touch pageable memory while holding locks */
9638 info[i].hib_count = bucket_count;
9639 }
9640
9641 return vm_page_bucket_count;
9642 }
9643 #endif /* MACH_VM_DEBUG */
9644
9645 #if VM_PAGE_BUCKETS_CHECK
9646 void
9647 vm_page_buckets_check(void)
9648 {
9649 unsigned int i;
9650 vm_page_t p;
9651 unsigned int p_hash;
9652 vm_page_bucket_t *bucket;
9653 lck_ticket_t *bucket_lock;
9654
9655 if (!vm_page_buckets_check_ready) {
9656 return;
9657 }
9658
9659 #if HIBERNATION
9660 if (hibernate_rebuild_needed ||
9661 hibernate_rebuild_hash_list) {
9662 panic("BUCKET_CHECK: hibernation in progress: "
9663 "rebuild_needed=%d rebuild_hash_list=%p\n",
9664 hibernate_rebuild_needed,
9665 hibernate_rebuild_hash_list);
9666 }
9667 #endif /* HIBERNATION */
9668
9669 #if VM_PAGE_FAKE_BUCKETS
9670 char *cp;
9671 for (cp = (char *) vm_page_fake_buckets_start;
9672 cp < (char *) vm_page_fake_buckets_end;
9673 cp++) {
9674 if (*cp != 0x5a) {
9675 panic("BUCKET_CHECK: corruption at %p in fake buckets "
9676 "[0x%llx:0x%llx]\n",
9677 cp,
9678 (uint64_t) vm_page_fake_buckets_start,
9679 (uint64_t) vm_page_fake_buckets_end);
9680 }
9681 }
9682 #endif /* VM_PAGE_FAKE_BUCKETS */
9683
9684 for (i = 0; i < vm_page_bucket_count; i++) {
9685 vm_object_t p_object;
9686
9687 bucket = &vm_page_buckets[i];
9688 if (!bucket->page_list) {
9689 continue;
9690 }
9691
9692 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
9693 lck_ticket_lock(bucket_lock, &vm_page_lck_grp_bucket);
9694 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
9695
9696 while (p != VM_PAGE_NULL) {
9697 p_object = VM_PAGE_OBJECT(p);
9698
9699 if (!p->vmp_hashed) {
9700 panic("BUCKET_CHECK: page %p (%p,0x%llx) "
9701 "hash %d in bucket %d at %p "
9702 "is not hashed\n",
9703 p, p_object, p->vmp_offset,
9704 p_hash, i, bucket);
9705 }
9706 p_hash = vm_page_hash(p_object, p->vmp_offset);
9707 if (p_hash != i) {
9708 panic("BUCKET_CHECK: corruption in bucket %d "
9709 "at %p: page %p object %p offset 0x%llx "
9710 "hash %d\n",
9711 i, bucket, p, p_object, p->vmp_offset,
9712 p_hash);
9713 }
9714 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
9715 }
9716 lck_ticket_unlock(bucket_lock);
9717 }
9718
9719 // printf("BUCKET_CHECK: checked buckets\n");
9720 }
9721 #endif /* VM_PAGE_BUCKETS_CHECK */
9722
9723 /*
9724 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
9725 * local queues if they exist... it's the only spot in the system where we add pages
9726 * to those queues... once on those queues, those pages can only move to one of the
9727 * global page queues or the free queues... they NEVER move from local q to local q.
9728 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
9729 * the global vm_page_queue_lock at this point... we still need to take the local lock
9730 * in case this operation is being run on a different CPU than the local queue's identity,
9731 * but we don't have to worry about the page moving to a global queue or becoming wired
9732 * while we're grabbing the local lock since those operations would require the global
9733 * vm_page_queue_lock to be held, and we already own it.
9734 *
9735 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
9736 * 'wired' and local are ALWAYS mutually exclusive conditions.
9737 */
9738
9739 void
9740 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
9741 {
9742 boolean_t was_pageable = TRUE;
9743 vm_object_t m_object;
9744
9745 m_object = VM_PAGE_OBJECT(mem);
9746
9747 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9748
9749 if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
9750 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9751 if (remove_from_specialq == TRUE) {
9752 vm_page_remove_from_specialq(mem);
9753 }
9754 /*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
9755 * assert(mem->vmp_specialq.next != 0);
9756 * assert(mem->vmp_specialq.prev != 0);
9757 * } else {*/
9758 if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
9759 assert(mem->vmp_specialq.next == 0);
9760 assert(mem->vmp_specialq.prev == 0);
9761 }
9762 return;
9763 }
9764
9765 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9766 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9767 assert(mem->vmp_specialq.next == 0 &&
9768 mem->vmp_specialq.prev == 0 &&
9769 mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9770 return;
9771 }
9772 if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9773 /*
9774 * might put these guys on a list for debugging purposes
9775 * if we do, we'll need to remove this assert
9776 */
9777 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9778 assert(mem->vmp_specialq.next == 0 &&
9779 mem->vmp_specialq.prev == 0);
9780 /*
9781 * Recall that vmp_on_specialq also means a request to put
9782 * it on the special Q. So we don't want to reset that bit
9783 * just because a wiring request came in. We might want to
9784 * put it on the special queue post-unwiring.
9785 *
9786 * &&
9787 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9788 */
9789 return;
9790 }
9791
9792 assert(m_object != compressor_object);
9793 assert(!is_kernel_object(m_object));
9794 assert(!vm_page_is_fictitious(mem));
9795
9796 switch (mem->vmp_q_state) {
9797 case VM_PAGE_ON_ACTIVE_LOCAL_Q:
9798 {
9799 struct vpl *lq;
9800
9801 lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
9802 VPL_LOCK(&lq->vpl_lock);
9803 vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
9804 mem->vmp_local_id = 0;
9805 lq->vpl_count--;
9806 if (m_object->internal) {
9807 lq->vpl_internal_count--;
9808 } else {
9809 lq->vpl_external_count--;
9810 }
9811 VPL_UNLOCK(&lq->vpl_lock);
9812 was_pageable = FALSE;
9813 break;
9814 }
9815 case VM_PAGE_ON_ACTIVE_Q:
9816 {
9817 vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
9818 vm_page_active_count--;
9819 break;
9820 }
9821
9822 case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
9823 {
9824 assert(m_object->internal == TRUE);
9825
9826 vm_page_inactive_count--;
9827 vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
9828 vm_page_anonymous_count--;
9829
9830 vm_purgeable_q_advance_all();
9831 vm_page_balance_inactive(3);
9832 break;
9833 }
9834
9835 case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
9836 {
9837 assert(m_object->internal == FALSE);
9838
9839 vm_page_inactive_count--;
9840 vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
9841 vm_purgeable_q_advance_all();
9842 vm_page_balance_inactive(3);
9843 break;
9844 }
9845
9846 case VM_PAGE_ON_INACTIVE_CLEANED_Q:
9847 {
9848 assert(m_object->internal == FALSE);
9849
9850 vm_page_inactive_count--;
9851 vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
9852 vm_page_cleaned_count--;
9853 vm_page_balance_inactive(3);
9854 break;
9855 }
9856
9857 case VM_PAGE_ON_THROTTLED_Q:
9858 {
9859 assert(m_object->internal == TRUE);
9860
9861 vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
9862 vm_page_throttled_count--;
9863 was_pageable = FALSE;
9864 break;
9865 }
9866
9867 case VM_PAGE_ON_SPECULATIVE_Q:
9868 {
9869 assert(m_object->internal == FALSE);
9870
9871 vm_page_remque(&mem->vmp_pageq);
9872 vm_page_speculative_count--;
9873 vm_page_balance_inactive(3);
9874 break;
9875 }
9876
9877 #if CONFIG_SECLUDED_MEMORY
9878 case VM_PAGE_ON_SECLUDED_Q:
9879 {
9880 vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
9881 vm_page_secluded_count--;
9882 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9883 if (m_object == VM_OBJECT_NULL) {
9884 vm_page_secluded_count_free--;
9885 was_pageable = FALSE;
9886 } else {
9887 assert(!m_object->internal);
9888 vm_page_secluded_count_inuse--;
9889 was_pageable = FALSE;
9890 // was_pageable = TRUE;
9891 }
9892 break;
9893 }
9894 #endif /* CONFIG_SECLUDED_MEMORY */
9895
9896 default:
9897 {
9898 /*
9899 * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
9900 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
9901		 * the caller is responsible for determining if the page is on that queue, and if so, must
9902 * either first remove it (it needs both the page queues lock and the object lock to do
9903 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
9904 *
9905 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
9906 * or any of the undefined states
9907 */
9908 panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
9909 break;
9910 }
9911 }
9912 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
9913 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
9914
9915 if (remove_from_specialq == TRUE) {
9916 vm_page_remove_from_specialq(mem);
9917 }
9918 if (was_pageable) {
9919 if (m_object->internal) {
9920 vm_page_pageable_internal_count--;
9921 } else {
9922 vm_page_pageable_external_count--;
9923 }
9924 }
9925 }
9926
9927 void
9928 vm_page_remove_internal(vm_page_t page)
9929 {
9930 vm_object_t __object = VM_PAGE_OBJECT(page);
9931 if (page == __object->memq_hint) {
9932 vm_page_t __new_hint;
9933 vm_page_queue_entry_t __qe;
9934 __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
9935 if (vm_page_queue_end(&__object->memq, __qe)) {
9936 __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
9937 if (vm_page_queue_end(&__object->memq, __qe)) {
9938 __qe = NULL;
9939 }
9940 }
9941 __new_hint = (vm_page_t)((uintptr_t) __qe);
9942 __object->memq_hint = __new_hint;
9943 }
9944 vm_page_queue_remove(&__object->memq, page, vmp_listq);
9945 #if CONFIG_SECLUDED_MEMORY
9946 if (__object->eligible_for_secluded) {
9947 vm_page_secluded.eligible_for_secluded--;
9948 }
9949 #endif /* CONFIG_SECLUDED_MEMORY */
9950 }
9951
9952 void
9953 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
9954 {
9955 vm_object_t m_object;
9956
9957 m_object = VM_PAGE_OBJECT(mem);
9958
9959 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9960 assert(!vm_page_is_fictitious(mem));
9961 assert(!mem->vmp_laundry);
9962 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9963 vm_page_check_pageable_safe(mem);
9964
9965 if (m_object->internal) {
9966 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
9967
9968 if (first == TRUE) {
9969 vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
9970 } else {
9971 vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
9972 }
9973
9974 vm_page_anonymous_count++;
9975 vm_page_pageable_internal_count++;
9976 } else {
9977 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
9978
9979 if (first == TRUE) {
9980 vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
9981 } else {
9982 vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
9983 }
9984
9985 vm_page_pageable_external_count++;
9986 }
9987 vm_page_inactive_count++;
9988 token_new_pagecount++;
9989
9990 vm_page_add_to_specialq(mem, FALSE);
9991 }
9992
9993 void
9994 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
9995 {
9996 vm_object_t m_object;
9997
9998 m_object = VM_PAGE_OBJECT(mem);
9999
10000 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
10001 assert(!vm_page_is_fictitious(mem));
10002 assert(!mem->vmp_laundry);
10003 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
10004 vm_page_check_pageable_safe(mem);
10005
10006 mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
10007 if (first == TRUE) {
10008 vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
10009 } else {
10010 vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
10011 }
10012 vm_page_active_count++;
10013
10014 if (m_object->internal) {
10015 vm_page_pageable_internal_count++;
10016 } else {
10017 vm_page_pageable_external_count++;
10018 }
10019
10020 vm_page_add_to_specialq(mem, FALSE);
10021 vm_page_balance_inactive(3);
10022 }
10023
10024 /*
10025 * Pages from special kernel objects shouldn't
10026 * be placed on pageable queues.
10027 */
10028 void
10029 vm_page_check_pageable_safe(vm_page_t page)
10030 {
10031 vm_object_t page_object;
10032
10033 page_object = VM_PAGE_OBJECT(page);
10034
10035 if (is_kernel_object(page_object)) {
10036 panic("vm_page_check_pageable_safe: trying to add page"
10037		    " from a kernel object to pageable queue");
10038 }
10039
10040 if (page_object == compressor_object) {
10041 panic("vm_page_check_pageable_safe: trying to add page"
10042		    " from compressor object (%p) to pageable queue", compressor_object);
10043 }
10044 }
10045
10046 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
10047 * wired page diagnose
10048 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
10049
10050 #include <libkern/OSKextLibPrivate.h>
10051
10052 #define KA_SIZE(namelen, subtotalscount) \
10053 (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
10054
10055 #define KA_NAME(alloc) \
10056 ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
10057
10058 #define KA_NAME_LEN(alloc) \
10059 (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
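/*
 * A vm_allocation_site allocated via kern_allocation_name_allocate() is laid
 * out as the vm_allocation_site struct itself, followed by 'subtotalscount'
 * vm_allocation_total slots, followed by the NUL-terminated name.  KA_SIZE()
 * computes that footprint, KA_NAME() points at the name (the byte just past
 * the last subtotal), and KA_NAME_LEN() recovers the name length stored in
 * the flags field.
 */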
10060
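/*
 * Derive a VM tag for the current allocation by inspecting the caller: if
 * the thread has an explicit allocation_name set, use (allocating if
 * necessary) that name's tag; otherwise walk the saved frame pointers within
 * the kernel stack bounds until a return address in built-in kext text, or
 * outside the kernel's own text range, is found and ask
 * OSKextGetAllocationSiteForCaller() for that caller's site.  Falls back to
 * VM_KERN_MEMORY_NONE when no site can be attributed.
 */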
10061 vm_tag_t
10062 vm_tag_bt(void)
10063 {
10064 uintptr_t* frameptr;
10065 uintptr_t* frameptr_next;
10066 uintptr_t retaddr;
10067 uintptr_t kstackb, kstackt;
10068 const vm_allocation_site_t * site;
10069 thread_t cthread;
10070 kern_allocation_name_t name;
10071
10072 cthread = current_thread();
10073 if (__improbable(cthread == NULL)) {
10074 return VM_KERN_MEMORY_OSFMK;
10075 }
10076
10077 if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
10078 if (!name->tag) {
10079 vm_tag_alloc(name);
10080 }
10081 return name->tag;
10082 }
10083
10084 kstackb = cthread->kernel_stack;
10085 kstackt = kstackb + kernel_stack_size;
10086
10087 /* Load stack frame pointer (EBP on x86) into frameptr */
10088 frameptr = __builtin_frame_address(0);
10089 site = NULL;
10090 while (frameptr != NULL) {
10091 /* Verify thread stack bounds */
10092 if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
10093 break;
10094 }
10095
10096 /* Next frame pointer is pointed to by the previous one */
10097 frameptr_next = (uintptr_t*) *frameptr;
10098 #if defined(HAS_APPLE_PAC)
10099 frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
10100 #endif
10101
10102 /* Pull return address from one spot above the frame pointer */
10103 retaddr = *(frameptr + 1);
10104
10105 #if defined(HAS_APPLE_PAC)
10106 retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
10107 #endif
10108
10109 if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
10110 || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
10111 site = OSKextGetAllocationSiteForCaller(retaddr);
10112 break;
10113 }
10114 frameptr = frameptr_next;
10115 }
10116
10117 if (site) {
10118 return site->tag;
10119 }
10120
10121 #if MACH_ASSERT
10122 /*
10123 * Kernel tests appear here as unrecognized call sites and would get
10124 * no memory tag. Give them a default tag to prevent panics later.
10125 */
10126 if (thread_get_test_option(test_option_vm_prevent_wire_tag_panic)) {
10127 return VM_KERN_MEMORY_OSFMK;
10128 }
10129 #endif
10130
10131 return VM_KERN_MEMORY_NONE;
10132 }
10133
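/*
 * Bitmap of dynamically allocatable tags: bit (63 - (tag & 63)) of word
 * (tag >> 6) is set while that tag is free.  Storing free tags MSB-first
 * lets vm_tag_alloc_locked() pick the lowest free tag in a word with a
 * single __builtin_clzll().  For example, tag 70 lives in word 1 as bit
 * (1ULL << 57); if that is the highest set bit, clzll() returns 6 and the
 * allocator reconstructs 70 as 6 + (1 << 6).
 */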
10134 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
10135
10136 void
10137 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
10138 {
10139 vm_tag_t tag;
10140 uint64_t avail;
10141 uint32_t idx;
10142 vm_allocation_site_t * prev;
10143
10144 if (site->tag) {
10145 return;
10146 }
10147
10148 idx = 0;
10149 while (TRUE) {
10150 avail = free_tag_bits[idx];
10151 if (avail) {
10152 tag = (vm_tag_t)__builtin_clzll(avail);
10153 avail &= ~(1ULL << (63 - tag));
10154 free_tag_bits[idx] = avail;
10155 tag += (idx << 6);
10156 break;
10157 }
10158 idx++;
10159 if (idx >= ARRAY_COUNT(free_tag_bits)) {
10160 for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
10161 prev = vm_allocation_sites[idx];
10162 if (!prev) {
10163 continue;
10164 }
10165 if (!KA_NAME_LEN(prev)) {
10166 continue;
10167 }
10168 if (!prev->tag) {
10169 continue;
10170 }
10171 if (prev->total) {
10172 continue;
10173 }
10174 if (1 != prev->refcount) {
10175 continue;
10176 }
10177
10178 assert(idx == prev->tag);
10179 tag = (vm_tag_t)idx;
10180 prev->tag = VM_KERN_MEMORY_NONE;
10181 *releasesiteP = prev;
10182 break;
10183 }
10184 if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
10185 tag = VM_KERN_MEMORY_ANY;
10186 }
10187 break;
10188 }
10189 }
10190 site->tag = tag;
10191
10192 OSAddAtomic16(1, &site->refcount);
10193
10194 if (VM_KERN_MEMORY_ANY != tag) {
10195 vm_allocation_sites[tag] = site;
10196 }
10197
10198 if (tag > vm_allocation_tag_highest) {
10199 vm_allocation_tag_highest = tag;
10200 }
10201 }
10202
10203 static void
10204 vm_tag_free_locked(vm_tag_t tag)
10205 {
10206 uint64_t avail;
10207 uint32_t idx;
10208 uint64_t bit;
10209
10210 if (VM_KERN_MEMORY_ANY == tag) {
10211 return;
10212 }
10213
10214 idx = (tag >> 6);
10215 avail = free_tag_bits[idx];
10216 tag &= 63;
10217 bit = (1ULL << (63 - tag));
10218 assert(!(avail & bit));
10219 free_tag_bits[idx] = (avail | bit);
10220 }
10221
10222 static void
10223 vm_tag_init(void)
10224 {
10225 vm_tag_t tag;
10226 for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
10227 vm_tag_free_locked(tag);
10228 }
10229
10230 for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
10231 vm_tag_free_locked(tag);
10232 }
10233 }
10234
10235 vm_tag_t
10236 vm_tag_alloc(vm_allocation_site_t * site)
10237 {
10238 vm_allocation_site_t * releasesite;
10239
10240 if (!site->tag) {
10241 releasesite = NULL;
10242 lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10243 vm_tag_alloc_locked(site, &releasesite);
10244 lck_ticket_unlock(&vm_allocation_sites_lock);
10245 if (releasesite) {
10246 kern_allocation_name_release(releasesite);
10247 }
10248 }
10249
10250 return site->tag;
10251 }
10252
10253 #if VM_BTLOG_TAGS
10254 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
10255 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
10256 #define VM_TAG_BTLOG_SIZE (16u << 10)
10257
10258 btlog_t vmtaglog_btlog;
10259 vm_tag_t vmtaglog_tag;
10260
10261 static void
10262 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
10263 {
10264 if (is_kernel_object(object)) {
10265 /* kernel object backtraces are tracked in vm entries */
10266 return;
10267 }
10268 if (delta > 0) {
10269 btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
10270 btlog_record(vmtaglog_btlog, object, 0, ref);
10271 } else if (object->wired_page_count == 0) {
10272 btlog_erase(vmtaglog_btlog, object);
10273 }
10274 }
10275
10276 #ifndef ARRAY_SIZE
10277 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
10278 #endif /* ARRAY_SIZE */
10279 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
10280 const char *vm_kern_memory_strs[] = {
10281 VM_KERN_MEMORY_ELEM(OSFMK),
10282 VM_KERN_MEMORY_ELEM(BSD),
10283 VM_KERN_MEMORY_ELEM(IOKIT),
10284 VM_KERN_MEMORY_ELEM(LIBKERN),
10285 VM_KERN_MEMORY_ELEM(OSKEXT),
10286 VM_KERN_MEMORY_ELEM(KEXT),
10287 VM_KERN_MEMORY_ELEM(IPC),
10288 VM_KERN_MEMORY_ELEM(STACK),
10289 VM_KERN_MEMORY_ELEM(CPU),
10290 VM_KERN_MEMORY_ELEM(PMAP),
10291 VM_KERN_MEMORY_ELEM(PTE),
10292 VM_KERN_MEMORY_ELEM(ZONE),
10293 VM_KERN_MEMORY_ELEM(KALLOC),
10294 VM_KERN_MEMORY_ELEM(COMPRESSOR),
10295 VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
10296 VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
10297 VM_KERN_MEMORY_ELEM(WAITQ),
10298 VM_KERN_MEMORY_ELEM(DIAG),
10299 VM_KERN_MEMORY_ELEM(LOG),
10300 VM_KERN_MEMORY_ELEM(FILE),
10301 VM_KERN_MEMORY_ELEM(MBUF),
10302 VM_KERN_MEMORY_ELEM(UBC),
10303 VM_KERN_MEMORY_ELEM(SECURITY),
10304 VM_KERN_MEMORY_ELEM(MLOCK),
10305 VM_KERN_MEMORY_ELEM(REASON),
10306 VM_KERN_MEMORY_ELEM(SKYWALK),
10307 VM_KERN_MEMORY_ELEM(LTABLE),
10308 VM_KERN_MEMORY_ELEM(HV),
10309 VM_KERN_MEMORY_ELEM(KALLOC_DATA),
10310 VM_KERN_MEMORY_ELEM(RETIRED),
10311 VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
10312 VM_KERN_MEMORY_ELEM(TRIAGE),
10313 VM_KERN_MEMORY_ELEM(RECOUNT),
10314 };
10315
10316 static vm_tag_t
10317 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
10318 {
10319 for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
10320 if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10321 return i;
10322 }
10323 }
10324
10325 if (!strncmp("dynamic", tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10326 return VM_KERN_MEMORY_FIRST_DYNAMIC;
10327 }
10328
10329 if (!strncmp("any", tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10330 return VM_KERN_MEMORY_ANY;
10331 }
10332
10333 printf("Unable to find vm tag %s for btlog\n", tagstr);
10334 return VM_KERN_MEMORY_NONE;
10335 }
10336
10337 __startup_func
10338 static void
10339 vm_btlog_init(void)
10340 {
10341 vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
10342
10343 if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
10344 vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
10345 }
10346 }
10347 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
10348 #endif /* VM_BTLOG_TAGS */
10349
10350 void
10351 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
10352 {
10353 assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
10354
10355 kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
10356 }
10357
10358 uint64_t
10359 vm_tag_get_size(vm_tag_t tag)
10360 {
10361 vm_allocation_site_t *allocation;
10362
10363 assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
10364
10365 allocation = vm_allocation_sites[tag];
10366 return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
10367 }
10368
10369 void
10370 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
10371 {
10372 uint64_t value;
10373
10374 value = os_atomic_add(&allocation->total, delta, relaxed);
10375 if (delta < 0) {
10376 assertf(value + (uint64_t)-delta > value,
10377 "tag %d, site %p", allocation->tag, allocation);
10378 }
10379
10380 #if DEBUG || DEVELOPMENT
10381 if (value > allocation->peak) {
10382 os_atomic_max(&allocation->peak, value, relaxed);
10383 }
10384 #endif /* DEBUG || DEVELOPMENT */
10385
10386 if (value == (uint64_t)delta && !allocation->tag) {
10387 vm_tag_alloc(allocation);
10388 }
10389
10390 #if VM_BTLOG_TAGS
10391 if (vmtaglog_matches(allocation->tag) && object) {
10392 vm_tag_log(object, delta, __builtin_frame_address(0));
10393 }
10394 #endif /* VM_BTLOG_TAGS */
10395 }
10396
10397 #if VM_TAG_SIZECLASSES
10398
10399 void
10400 vm_allocation_zones_init(void)
10401 {
10402 vm_offset_t addr;
10403 vm_size_t size;
10404
10405 const vm_tag_t early_tags[] = {
10406 VM_KERN_MEMORY_DIAG,
10407 VM_KERN_MEMORY_KALLOC,
10408 VM_KERN_MEMORY_KALLOC_DATA,
10409 VM_KERN_MEMORY_KALLOC_TYPE,
10410 VM_KERN_MEMORY_LIBKERN,
10411 VM_KERN_MEMORY_OSFMK,
10412 VM_KERN_MEMORY_RECOUNT,
10413 };
10414
10415 size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
10416 + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
10417
10418 kmem_alloc(kernel_map, &addr, round_page(size),
10419 KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
10420 VM_KERN_MEMORY_DIAG);
10421
10422 vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
10423 addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
10424
10425 // prepopulate early tag ranges so allocations
10426 // in vm_tag_update_zone_size() and early boot won't recurse
10427 for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
10428 vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
10429 addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
10430 }
10431 }
10432
10433 __attribute__((noinline))
10434 static vm_tag_t
10435 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
10436 {
10437 vm_allocation_zone_total_t *stats;
10438 vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
10439
10440 flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
10441 stats = kalloc_data(size, flags);
10442 if (!stats) {
10443 return VM_KERN_MEMORY_NONE;
10444 }
10445 if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
10446 kfree_data(stats, size);
10447 }
10448 return tag;
10449 }
10450
10451 vm_tag_t
10452 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
10453 {
10454 assert(VM_KERN_MEMORY_NONE != tag);
10455 assert(tag < VM_MAX_TAG_VALUE);
10456
10457 if (__probable(vm_allocation_zone_totals[tag])) {
10458 return tag;
10459 }
10460 return vm_tag_zone_stats_alloc(tag, zflags);
10461 }
10462
10463 void
10464 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
10465 {
10466 vm_allocation_zone_total_t *stats;
10467 vm_size_t value;
10468
10469 assert(VM_KERN_MEMORY_NONE != tag);
10470 assert(tag < VM_MAX_TAG_VALUE);
10471
10472 if (zidx >= VM_TAG_SIZECLASSES) {
10473 return;
10474 }
10475
10476 stats = vm_allocation_zone_totals[tag];
10477 assert(stats);
10478 stats += zidx;
10479
10480 value = os_atomic_add(&stats->vazt_total, delta, relaxed);
10481 if (delta < 0) {
10482 assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
10483 return;
10484 } else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
10485 os_atomic_max(&stats->vazt_peak, value, relaxed);
10486 }
10487 }
10488
10489 #endif /* VM_TAG_SIZECLASSES */
10490
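/*
 * Charge 'delta' bytes of a named allocation against one of its per-subtag
 * subtotal slots (claiming an unused slot on first use of that subtag) and
 * mirror the same delta into the subtag's own site->mapped counter.
 * process_account() later uses these subtotals when building the
 * mach_memory_info breakdown.
 */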
10491 void
10492 kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
10493 {
10494 kern_allocation_name_t other;
10495 struct vm_allocation_total * total;
10496 uint32_t subidx;
10497
10498 assert(VM_KERN_MEMORY_NONE != subtag);
10499 lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10500 for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
10501 total = &allocation->subtotals[subidx];
10502 if (subtag == total->tag) {
10503 break;
10504 }
10505 }
10506 if (subidx >= allocation->subtotalscount) {
10507 for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
10508 total = &allocation->subtotals[subidx];
10509 if ((VM_KERN_MEMORY_NONE == total->tag)
10510 || !total->total) {
10511 total->tag = (vm_tag_t)subtag;
10512 break;
10513 }
10514 }
10515 }
10516 assert(subidx < allocation->subtotalscount);
10517 if (subidx >= allocation->subtotalscount) {
10518 lck_ticket_unlock(&vm_allocation_sites_lock);
10519 return;
10520 }
10521 if (delta < 0) {
10522 assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
10523 }
10524 OSAddAtomic64(delta, &total->total);
10525 lck_ticket_unlock(&vm_allocation_sites_lock);
10526
10527 other = vm_allocation_sites[subtag];
10528 assert(other);
10529 if (delta < 0) {
10530 assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
10531 }
10532 OSAddAtomic64(delta, &other->mapped);
10533 }
10534
10535 const char *
10536 kern_allocation_get_name(kern_allocation_name_t allocation)
10537 {
10538 return KA_NAME(allocation);
10539 }
10540
10541 kern_allocation_name_t
10542 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
10543 {
10544 kern_allocation_name_t allocation;
10545 uint16_t namelen;
10546
10547 namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
10548
10549 allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
10550 allocation->refcount = 1;
10551 allocation->subtotalscount = subtotalscount;
10552 allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
10553 strlcpy(KA_NAME(allocation), name, namelen + 1);
10554
10555 vm_tag_alloc(allocation);
10556 return allocation;
10557 }
10558
10559 void
10560 kern_allocation_name_release(kern_allocation_name_t allocation)
10561 {
10562 assert(allocation->refcount > 0);
10563 if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
10564 kfree_data(allocation,
10565 KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
10566 }
10567 }
10568
10569 #if !VM_TAG_ACTIVE_UPDATE
10570 static void
10571 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
10572 {
10573 if (!object->wired_page_count) {
10574 return;
10575 }
10576 if (!is_kernel_object(object)) {
10577 assert(object->wire_tag < num_info);
10578 info[object->wire_tag].size += ptoa_64(object->wired_page_count);
10579 }
10580 }
10581
10582 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
10583 unsigned int num_info, vm_object_t object);
10584
10585 static void
10586 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
10587 vm_page_iterate_proc proc, purgeable_q_t queue,
10588 int group)
10589 {
10590 vm_object_t object;
10591
10592 for (object = (vm_object_t) queue_first(&queue->objq[group]);
10593 !queue_end(&queue->objq[group], (queue_entry_t) object);
10594 object = (vm_object_t) queue_next(&object->objq)) {
10595 proc(info, num_info, object);
10596 }
10597 }
10598
10599 static void
10600 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
10601 vm_page_iterate_proc proc)
10602 {
10603 vm_object_t object;
10604
10605 lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
10606 queue_iterate(&vm_objects_wired,
10607 object,
10608 vm_object_t,
10609 wired_objq)
10610 {
10611 proc(info, num_info, object);
10612 }
10613 lck_spin_unlock(&vm_objects_wired_lock);
10614 }
10615 #endif /* ! VM_TAG_ACTIVE_UPDATE */
10616
10617 static uint64_t
10618 process_account(mach_memory_info_t * info, unsigned int num_info,
10619 uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
10620 {
10621 size_t namelen;
10622 unsigned int idx, count, nextinfo;
10623 vm_allocation_site_t * site;
10624 lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10625
10626 for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
10627 site = vm_allocation_sites[idx];
10628 if (!site) {
10629 continue;
10630 }
10631 info[idx].mapped = site->mapped;
10632 info[idx].tag = site->tag;
10633 if (!iterated) {
10634 info[idx].size = site->total;
10635 #if DEBUG || DEVELOPMENT
10636 info[idx].peak = site->peak;
10637 #endif /* DEBUG || DEVELOPMENT */
10638 } else {
10639 if (!site->subtotalscount && (site->total != info[idx].size)) {
10640 printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
10641 info[idx].size = site->total;
10642 }
10643 }
10644 info[idx].flags |= VM_KERN_SITE_WIRED;
10645 if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
10646 info[idx].site = idx;
10647 info[idx].flags |= VM_KERN_SITE_TAG;
10648 if (VM_KERN_MEMORY_ZONE == idx) {
10649 info[idx].flags |= VM_KERN_SITE_HIDE;
10650 info[idx].flags &= ~VM_KERN_SITE_WIRED;
10651 info[idx].collectable_bytes = zones_collectable_bytes;
10652 }
10653 } else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
10654 info[idx].site = 0;
10655 info[idx].flags |= VM_KERN_SITE_NAMED;
10656 if (namelen > sizeof(info[idx].name)) {
10657 namelen = sizeof(info[idx].name);
10658 }
10659 strncpy(&info[idx].name[0], KA_NAME(site), namelen);
10660 } else if (VM_TAG_KMOD & site->flags) {
10661 info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
10662 info[idx].flags |= VM_KERN_SITE_KMOD;
10663 } else {
10664 info[idx].site = VM_KERNEL_UNSLIDE(site);
10665 info[idx].flags |= VM_KERN_SITE_KERNEL;
10666 }
10667 }
10668
10669 nextinfo = (vm_allocation_tag_highest + 1);
10670 count = nextinfo;
10671 if (count >= num_info) {
10672 count = num_info;
10673 }
10674
10675 for (idx = 0; idx < count; idx++) {
10676 site = vm_allocation_sites[idx];
10677 if (!site) {
10678 continue;
10679 }
10680 #if VM_TAG_SIZECLASSES
10681 vm_allocation_zone_total_t * zone;
10682 unsigned int zidx;
10683
10684 if (!redact_info
10685 && vm_allocation_zone_totals
10686 && (zone = vm_allocation_zone_totals[idx])
10687 && (nextinfo < num_info)) {
10688 for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
10689 if (!zone[zidx].vazt_peak) {
10690 continue;
10691 }
10692 info[nextinfo] = info[idx];
10693 info[nextinfo].zone = zone_index_from_tag_index(zidx);
10694 info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
10695 info[nextinfo].flags |= VM_KERN_SITE_ZONE;
10696 info[nextinfo].flags |= VM_KERN_SITE_KALLOC;
10697 info[nextinfo].size = zone[zidx].vazt_total;
10698 info[nextinfo].peak = zone[zidx].vazt_peak;
10699 info[nextinfo].mapped = 0;
10700 nextinfo++;
10701 }
10702 }
10703 #endif /* VM_TAG_SIZECLASSES */
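		/*
		 * For sites that track subtotals, move the bytes that were also
		 * charged to the generic sub-tags out of those entries and
		 * report them as this site's size instead, so the same wired
		 * memory is not counted twice.  Each transfer is capped by the
		 * sub-tag's remaining size and by what is left of this site's
		 * total; named sub-tags are left untouched.
		 */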
10704 if (site->subtotalscount) {
10705 uint64_t mapped, mapcost, take;
10706 uint32_t sub;
10707 vm_tag_t alloctag;
10708
10709 info[idx].size = site->total;
10710 mapped = info[idx].size;
10711 info[idx].mapped = mapped;
10712 mapcost = 0;
10713 for (sub = 0; sub < site->subtotalscount; sub++) {
10714 alloctag = site->subtotals[sub].tag;
10715 assert(alloctag < num_info);
10716 if (info[alloctag].name[0]) {
10717 continue;
10718 }
10719 take = site->subtotals[sub].total;
10720 if (take > info[alloctag].size) {
10721 take = info[alloctag].size;
10722 }
10723 if (take > mapped) {
10724 take = mapped;
10725 }
10726 info[alloctag].mapped -= take;
10727 info[alloctag].size -= take;
10728 mapped -= take;
10729 mapcost += take;
10730 }
10731 info[idx].size = mapcost;
10732 }
10733 }
10734 lck_ticket_unlock(&vm_allocation_sites_lock);
10735
10736 return 0;
10737 }
10738
10739 uint32_t
10740 vm_page_diagnose_estimate(void)
10741 {
10742 vm_allocation_site_t * site;
10743 uint32_t count = zone_view_count;
10744 uint32_t idx;
10745
10746 lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10747 for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
10748 site = vm_allocation_sites[idx];
10749 if (!site) {
10750 continue;
10751 }
10752 count++;
10753 #if VM_TAG_SIZECLASSES
10754 if (vm_allocation_zone_totals) {
10755 vm_allocation_zone_total_t * zone;
10756 zone = vm_allocation_zone_totals[idx];
10757 if (!zone) {
10758 continue;
10759 }
10760 for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
10761 count += (zone[zidx].vazt_peak != 0);
10762 }
10763 }
10764 #endif
10765 }
10766 lck_ticket_unlock(&vm_allocation_sites_lock);
10767
10768 /* some slop for new tags created */
10769 count += 8;
10770 count += VM_KERN_COUNTER_COUNT;
10771
10772 return count;
10773 }
10774
10775 static void
10776 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
10777 bool percpu)
10778 {
10779 zpercpu_foreach(zs, zstats) {
10780 info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
10781 }
10782 if (percpu) {
10783 info->size *= zpercpu_count();
10784 }
10785 info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
10786 }
10787
10788 static void
10789 vm_page_add_info(
10790 mach_memory_info_t *info,
10791 zone_stats_t stats,
10792 bool per_cpu,
10793 const char *parent_heap_name,
10794 const char *parent_zone_name,
10795 const char *view_name)
10796 {
10797 vm_page_diagnose_zone_stats(info, stats, per_cpu);
10798 snprintf(info->name, sizeof(info->name),
10799 "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
10800 }
10801
10802 static void
10803 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
10804 {
10805 vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
10806 z->z_name, "raw");
10807 }
10808
10809 static void
10810 vm_page_add_view(
10811 mach_memory_info_t *info,
10812 zone_stats_t stats,
10813 const char *parent_heap_name,
10814 const char *parent_zone_name,
10815 const char *view_name)
10816 {
10817 vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
10818 view_name);
10819 }
10820
10821 static uint32_t
10822 vm_page_diagnose_heap_views(
10823 mach_memory_info_t *info,
10824 kalloc_heap_t kh,
10825 const char *parent_heap_name,
10826 const char *parent_zone_name)
10827 {
10828 uint32_t i = 0;
10829
10830 while (kh) {
10831 vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
10832 parent_zone_name, kh->kh_name);
10833 kh = kh->kh_views;
10834 i++;
10835 }
10836 return i;
10837 }
10838
10839 static uint32_t
10840 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
10841 {
10842 uint32_t i = 0;
10843
10844 for (; i < KHEAP_NUM_ZONES; i++) {
10845 vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
10846 }
10847
10848 i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
10849 NULL);
10850 return i;
10851 }
10852
10853 static int
10854 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
10855 {
10856 uint32_t idx = 0;
10857 vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
10858 "", "raw");
10859 idx++;
10860
10861 for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
10862 struct kheap_info heap = kalloc_type_heap_array[i];
10863 char heap_num_tmp[MAX_ZONE_NAME] = "";
10864 const char *heap_num;
10865
10866 snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
10867 heap_num = &heap_num_tmp[0];
10868
10869 for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
10870 ktv = (kalloc_type_var_view_t) ktv->kt_next) {
10871 if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
10872 vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
10873 heap_num, ktv->kt_name);
10874 idx++;
10875 }
10876 }
10877
10878 idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
10879 KHEAP_KT_VAR->kh_name, heap_num);
10880 }
10881
10882 return idx;
10883 }
10884
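/*
 * Fill 'info' (sized by the caller, typically via vm_page_diagnose_estimate())
 * with a breakdown of wired memory: the tail of the array holds the fixed
 * VM_KERN_COUNT_* counters and, unless redacted, per-zone-view entries, while
 * the front is populated per allocation tag.  When tags are not maintained
 * live (!VM_TAG_ACTIVE_UPDATE), wired pages are counted by walking the
 * wired-object list and the kernel_map entries before process_account()
 * reconciles the totals.
 */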
10885 kern_return_t
10886 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
10887 {
10888 uint64_t wired_size;
10889 uint64_t wired_managed_size;
10890 uint64_t wired_reserved_size;
10891 boolean_t iterate;
10892 mach_memory_info_t * counts;
10893 uint32_t i;
10894
10895 bzero(info, num_info * sizeof(mach_memory_info_t));
10896
10897 if (!vm_page_wire_count_initial) {
10898 return KERN_ABORTED;
10899 }
10900
10901 wired_size = ptoa_64(vm_page_wire_count);
10902 wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
10903 #if XNU_TARGET_OS_OSX
10904 wired_size += ptoa_64(vm_lopage_free_count + vm_page_throttled_count);
10905 wired_reserved_size += ptoa_64(vm_page_throttled_count);
10906 #endif /* XNU_TARGET_OS_OSX */
10907 wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
10908
10909 wired_size += booter_size;
10910
10911 assert(num_info >= VM_KERN_COUNTER_COUNT);
10912 num_info -= VM_KERN_COUNTER_COUNT;
10913 counts = &info[num_info];
10914
10915 #define SET_COUNT(xcount, xsize, xflags) \
10916 counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
10917 counts[xcount].site = (xcount); \
10918 counts[xcount].size = (xsize); \
10919 counts[xcount].mapped = (xsize); \
10920 counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
10921
10922 SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
10923 SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
10924 SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
10925 SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
10926 SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
10927 SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
10928 SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
10929 SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
10930 SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
10931 #if CONFIG_SPTM
10932 SET_COUNT(VM_KERN_COUNT_EXCLAVES_CARVEOUT, SPTMArgs->sk_carveout_size, 0);
10933 #endif
10934
#define SET_MAP(xcount, xsize, xfree, xlargest) \
    counts[xcount].site = (xcount); \
    counts[xcount].size = (xsize); \
    counts[xcount].mapped = (xsize); \
    counts[xcount].free = (xfree); \
    counts[xcount].largest = (xlargest); \
    counts[xcount].flags = VM_KERN_SITE_COUNTER;

    vm_map_size_t map_size, map_free, map_largest;

    vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

    zone_map_sizes(&map_size, &map_free, &map_largest);
    SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

    assert(num_info >= zone_view_count);
    num_info -= zone_view_count;
    counts = &info[num_info];
    i = 0;

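    /*
     * Detailed per-heap, per-zone and per-view breakdowns are only reported
     * when the caller did not request redacted output.
     */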
    if (!redact_info) {
        if (zone_is_data_kheap(KHEAP_DATA_BUFFERS->kh_heap_id)) {
            i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
        }
        if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
            i += vm_page_diagnose_kt_heaps(counts + i);
        }
        assert(i <= zone_view_count);

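        /*
         * zone_index_foreach() visits every registered zone; zones without
         * attached views are skipped, otherwise each qualifying view (and
         * possibly the zone itself) gets a stats row below.
         */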
        zone_index_foreach(zidx) {
            zone_t z = &zone_array[zidx];
            zone_security_flags_t zsflags = zone_security_array[zidx];
            zone_view_t zv = z->z_views;

            if (zv == NULL) {
                continue;
            }

            zone_stats_t zv_stats_head = z->z_stats;
            bool has_raw_view = false;

            for (; zv; zv = zv->zv_next) {
                /*
                 * kalloc_types that allocate from the same zone are linked
                 * as views. Only print the ones that have their own stats.
                 */
                if (zv->zv_stats == zv_stats_head) {
                    continue;
                }
                has_raw_view = true;
                vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
                    z->z_percpu);
                snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
                    zone_heap_name(z), z->z_name, zv->zv_name);
                i++;
                assert(i <= zone_view_count);
            }

            /*
             * Print a raw row for zones outside the kalloc heaps, and for
             * kalloc_type zones that also have explicit views.
             */
            bool kalloc_type = zsflags.z_kalloc_type;
            if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
                (kalloc_type && has_raw_view)) {
                vm_page_diagnose_zone(counts + i, z);
                i++;
                assert(i <= zone_view_count);
            }
        }
    }

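    /*
     * When VM_TAG_ACTIVE_UPDATE is configured, per-tag sizes are kept up to
     * date as allocations happen and no scan is needed; otherwise wired
     * pages are attributed to tags by walking the kernel map below.
     */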
    iterate = !VM_TAG_ACTIVE_UPDATE;
    if (iterate) {
        enum { kMaxKernelDepth = 1 };
        vm_map_t maps[kMaxKernelDepth];
        vm_map_entry_t entries[kMaxKernelDepth];
        vm_map_t map;
        vm_map_entry_t entry;
        vm_object_offset_t offset;
        vm_page_t page;
        int stackIdx, count;

#if !VM_TAG_ACTIVE_UPDATE
        vm_page_iterate_objects(info, num_info, &vm_page_count_object);
#endif /* ! VM_TAG_ACTIVE_UPDATE */

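        /*
         * Iterative depth-first walk of kernel_map and its submaps using a
         * small explicit stack (kMaxKernelDepth).  For every entry backed by
         * the kernel object, count the wired pages it maps and charge them
         * to the entry's alias tag.
         */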
        map = kernel_map;
        stackIdx = 0;
        while (map) {
            vm_map_lock(map);
            for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
                if (entry->is_sub_map) {
                    assert(stackIdx < kMaxKernelDepth);
                    maps[stackIdx] = map;
                    entries[stackIdx] = entry;
                    stackIdx++;
                    map = VME_SUBMAP(entry);
                    entry = NULL;
                    break;
                }
                if (is_kernel_object(VME_OBJECT(entry))) {
                    count = 0;
                    vm_object_lock(VME_OBJECT(entry));
                    for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
                        page = vm_page_lookup(VME_OBJECT(entry), offset);
                        if (page && VM_PAGE_WIRED(page)) {
                            count++;
                        }
                    }
                    vm_object_unlock(VME_OBJECT(entry));

                    if (count) {
                        assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
                        assert(VME_ALIAS(entry) < num_info);
                        info[VME_ALIAS(entry)].size += ptoa_64(count);
                    }
                }
                while (map && (entry == vm_map_last_entry(map))) {
                    vm_map_unlock(map);
                    if (!stackIdx) {
                        map = NULL;
                    } else {
                        --stackIdx;
                        map = maps[stackIdx];
                        entry = entries[stackIdx];
                    }
                }
            }
        }
    }

    process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);

    return KERN_SUCCESS;
}

#if DEBUG || DEVELOPMENT

kern_return_t
vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
{
    kern_return_t ret;
    vm_size_t zsize;
    vm_map_t map;
    vm_map_entry_t entry;

    zsize = zone_element_info((void *) addr, tag);
    if (zsize) {
        *zone_size = *size = zsize;
        return KERN_SUCCESS;
    }

    *zone_size = 0;
    ret = KERN_INVALID_ADDRESS;
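    /*
     * Not a zone element: search kernel_map (descending through at most one
     * level of submap) for an entry that starts exactly at addr, and report
     * that entry's tag and size.
     */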
    for (map = kernel_map; map;) {
        vm_map_lock(map);
        if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
            break;
        }
        if (entry->is_sub_map) {
            if (map != kernel_map) {
                break;
            }
            map = VME_SUBMAP(entry);
            continue;
        }
        if (entry->vme_start != addr) {
            break;
        }
        *tag = (vm_tag_t)VME_ALIAS(entry);
        *size = (entry->vme_end - addr);
        ret = KERN_SUCCESS;
        break;
    }
    if (map != kernel_map) {
        vm_map_unlock(map);
    }
    vm_map_unlock(kernel_map);

    return ret;
}

#endif /* DEBUG || DEVELOPMENT */

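/*
 * vm_tag_get_kext:
 *
 * For a tag whose allocation site was registered by a kext, look up the
 * owning kmod id via OSKextGetKmodIDForSite() (passing name/namelen through
 * so the caller can also receive the kext's name); returns 0 when the tag
 * does not belong to a kext site.
 */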
uint32_t
vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
{
    vm_allocation_site_t * site;
    uint32_t kmodId;

    kmodId = 0;
    lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
    if ((site = vm_allocation_sites[tag])) {
        if (VM_TAG_KMOD & site->flags) {
            kmodId = OSKextGetKmodIDForSite(site, name, namelen);
        }
    }
    lck_ticket_unlock(&vm_allocation_sites_lock);

    return kmodId;
}


#if CONFIG_SECLUDED_MEMORY
/*
 * Note that there's no locking around other accesses to vm_page_secluded_target.
 * That should be OK, since these functions are the only places where it can be
 * changed after initialization. Other users (like vm_pageout) may briefly see a
 * stale value, but will eventually get the correct one. That brief mismatch is
 * acceptable because pageout and page freeing auto-adjust vm_page_secluded_count
 * to match the target over time.
 */
unsigned int vm_page_secluded_suppress_cnt = 0;
unsigned int vm_page_secluded_save_target;

LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);

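/*
 * start/stop_secluded_suppression keep a global count of tasks that have
 * asked to suppress the secluded pool: the first task to start saves
 * vm_page_secluded_target and forces it to 0, and the last task to stop
 * restores the saved target.
 */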
void
start_secluded_suppression(task_t task)
{
    if (task->task_suppressed_secluded) {
        return;
    }
    lck_spin_lock(&secluded_suppress_slock);
    if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
        task->task_suppressed_secluded = TRUE;
        vm_page_secluded_save_target = vm_page_secluded_target;
        vm_page_secluded_target = 0;
        VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
    }
    lck_spin_unlock(&secluded_suppress_slock);
}

void
stop_secluded_suppression(task_t task)
{
    lck_spin_lock(&secluded_suppress_slock);
    if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
        task->task_suppressed_secluded = FALSE;
        vm_page_secluded_target = vm_page_secluded_save_target;
        VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
    }
    lck_spin_unlock(&secluded_suppress_slock);
}

#endif /* CONFIG_SECLUDED_MEMORY */

/*
 * Move the list of retired pages on the vm_page_queue_retired to
 * their final resting place on retired_pages_object.
 */
void
vm_retire_boot_pages(void)
{
}

/*
 * This holds the reported physical address if an ECC error leads to a panic.
 * SMC will store it in PMU SRAM under the 'sECC' key.
 */
uint64_t ecc_panic_physical_address = 0;

