/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    vm/vm_page.c
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 *
 * Resident memory management module.
 */

#include <debug.h>
#include <libkern/OSAtomic.h>
#include <libkern/OSDebug.h>

#include <mach/clock_types.h>
#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/sdt.h>
#include <kern/counter.h>
#include <kern/host_statistics.h>
#include <kern/sched_prim.h>
#include <kern/policy_internal.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/ledger.h>
#include <kern/ecc.h>
#include <vm/pmap.h>
#include <vm/vm_init_xnu.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_kern_xnu.h> /* kmem_alloc() */
#include <vm/vm_compressor_pager_internal.h>
#include <kern/misc_protos.h>
#include <mach_debug/zone_info.h>
#include <vm/cpm_internal.h>
#include <pexpert/pexpert.h>
#include <pexpert/device_tree.h>
#include <san/kasan.h>
#include <os/log.h>

#include <vm/vm_protos_internal.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_compressor_internal.h>
#include <vm/vm_iokit.h>
#include <vm/vm_object_internal.h>
#if defined (__x86_64__)
#include <i386/misc_protos.h>
#endif

#if CONFIG_PHANTOM_CACHE
#include <vm/vm_phantom_cache_internal.h>
#endif

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#include <machine/pal_hibernate.h>
#endif /* HIBERNATION */

#include <sys/kdebug.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif
#if defined(__arm64__)
#include <arm/cpu_internal.h>
#endif /* defined(__arm64__) */

#if MACH_ASSERT

TUNABLE(bool, vm_check_refs_on_free, "vm_check_refs_on_free", true);
#define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))

#else /* MACH_ASSERT */

#define ASSERT_PMAP_FREE(mem) /* nothing */

#endif /* MACH_ASSERT */

extern boolean_t vm_pageout_running;
extern thread_t vm_pageout_scan_thread;
extern bool vps_dynamic_priority_enabled;

char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];

#if CONFIG_SECLUDED_MEMORY
struct vm_page_secluded_data vm_page_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */

#if DEVELOPMENT || DEBUG
extern struct memory_object_pager_ops shared_region_pager_ops;
unsigned int shared_region_pagers_resident_count = 0;
unsigned int shared_region_pagers_resident_peak = 0;
#endif /* DEVELOPMENT || DEBUG */



int PERCPU_DATA(start_color);
vm_page_t PERCPU_DATA(free_pages);
boolean_t hibernate_cleaning_in_progress = FALSE;

uint32_t vm_lopage_free_count = 0;
uint32_t vm_lopage_free_limit = 0;
uint32_t vm_lopage_lowater = 0;
boolean_t vm_lopage_refill = FALSE;
boolean_t vm_lopage_needed = FALSE;

int speculative_age_index = 0;
int speculative_steal_index = 0;
struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];

boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
                                                    * Updated and checked behind the vm_page_queues_lock. */

static void vm_page_free_prepare(vm_page_t page);
static vm_page_t vm_page_grab_fictitious_common(ppnum_t, boolean_t);

static void vm_tag_init(void);

/* for debugging purposes */
SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
    VM_PAGE_PACKED_FROM_ARRAY;
SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
    VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);

/*
 * Associated with each page of user-allocatable memory is a
 * page structure.
 */

/*
 * These variables record the values returned by vm_page_bootstrap,
 * for debugging purposes. The implementation of pmap_steal_memory
 * and pmap_startup here also uses them internally.
 */

vm_offset_t virtual_space_start;
vm_offset_t virtual_space_end;
uint32_t vm_page_pages;

/*
 * The vm_page_lookup() routine, which provides for fast
 * (virtual memory object, offset) to page lookup, employs
 * the following hash table. The vm_page_{insert,remove}
 * routines install and remove associations in the table.
 * [This table is often called the virtual-to-physical,
 * or VP, table.]
 */
typedef struct {
    vm_page_packed_t page_list;
#if MACH_PAGE_HASH_STATS
    int cur_count;  /* current count */
    int hi_count;   /* high water mark */
#endif /* MACH_PAGE_HASH_STATS */
} vm_page_bucket_t;


#define BUCKETS_PER_LOCK 16

SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;          /* Array of buckets */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_count = 0;       /* How big is array? */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_mask;              /* Mask for hash function */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_shift;             /* Shift for hash function */
SECURITY_READ_ONLY_LATE(uint32_t) vm_page_bucket_hash;                /* Basic bucket hash */
SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_lock_count = 0;  /* How big is array of locks? */

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif
#ifndef VM_TAG_SIZECLASSES
#error VM_TAG_SIZECLASSES
#endif

/* for debugging */
SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;

vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
#if VM_TAG_SIZECLASSES
static vm_allocation_zone_total_t **vm_allocation_zone_totals;
#endif /* VM_TAG_SIZECLASSES */

vm_tag_t vm_allocation_tag_highest;

#if VM_PAGE_BUCKETS_CHECK
boolean_t vm_page_buckets_check_ready = FALSE;
#if VM_PAGE_FAKE_BUCKETS
vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

#if MACH_PAGE_HASH_STATS
/* This routine is only for debugging. It is intended to be called by
 * hand by a developer using a kernel debugger. This routine prints
 * out vm_page_hash table statistics to the kernel debug console.
 */
void
hash_debug(void)
{
    int i;
    int numbuckets = 0;
    int highsum = 0;
    int maxdepth = 0;

    for (i = 0; i < vm_page_bucket_count; i++) {
        if (vm_page_buckets[i].hi_count) {
            numbuckets++;
            highsum += vm_page_buckets[i].hi_count;
            if (vm_page_buckets[i].hi_count > maxdepth) {
                maxdepth = vm_page_buckets[i].hi_count;
            }
        }
    }
    printf("Total number of buckets: %d\n", vm_page_bucket_count);
    printf("Number used buckets: %d = %d%%\n",
        numbuckets, 100 * numbuckets / vm_page_bucket_count);
    printf("Number unused buckets: %d = %d%%\n",
        vm_page_bucket_count - numbuckets,
        100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
    printf("Sum of bucket max depth: %d\n", highsum);
    printf("Average bucket depth: %d.%2d\n",
        highsum / vm_page_bucket_count,
        highsum % vm_page_bucket_count);
    printf("Maximum bucket depth: %d\n", maxdepth);
}
#endif /* MACH_PAGE_HASH_STATS */

/*
 * The virtual page size is currently implemented as a runtime
 * variable, but is constant once initialized using vm_set_page_size.
 * This initialization must be done in the machine-dependent
 * bootstrap sequence, before calling other machine-independent
 * initializations.
 *
 * All references to the virtual page size outside this
 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
 * constants.
 */
#if defined(__arm64__)
vm_size_t page_size;
vm_size_t page_mask;
int page_shift;
#else
vm_size_t page_size = PAGE_SIZE;
vm_size_t page_mask = PAGE_MASK;
int page_shift = PAGE_SHIFT;
#endif

SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
vm_page_t vm_page_array_ending_addr;

unsigned int vm_pages_count = 0;

/*
 * Resident pages that represent real memory
 * are allocated from a set of free lists,
 * one per color.
 */
unsigned int vm_colors;
unsigned int vm_color_mask;                /* mask is == (vm_colors-1) */
unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
unsigned int vm_free_magazine_refill_limit = 0;


struct vm_page_queue_free_head {
    vm_page_queue_head_t qhead;
} VM_PAGE_PACKED_ALIGNED;

struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];


unsigned int vm_page_free_wanted;
unsigned int vm_page_free_wanted_privileged;
#if CONFIG_SECLUDED_MEMORY
unsigned int vm_page_free_wanted_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int vm_page_free_count;

unsigned int vm_page_realtime_count;

/*
 * Occasionally, the virtual memory system uses
 * resident page structures that do not refer to
 * real pages, for example to leave a page with
 * important state information in the VP table.
 *
 * These page structures are allocated the way
 * most other kernel structures are.
 */
SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
vm_locks_array_t vm_page_locks;

LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);

unsigned int vm_page_local_q_soft_limit = 250;
unsigned int vm_page_local_q_hard_limit = 500;
struct vpl *__zpercpu vm_page_local_q;

/* N.B. Guard and fictitious pages must not
 * be assigned a zero phys_page value.
 */
/*
 * Fictitious pages don't have a physical address,
 * but we must initialize phys_page to something.
 * For debugging, this should be a strange value
 * that the pmap module can recognize in assertions.
 */
const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;

/*
 * Guard pages are not accessible so they don't
 * need a physical address, but we need to enter
 * one in the pmap.
 * Let's make it recognizable and make sure that
 * we don't use a real physical page with that
 * physical address.
 */
const ppnum_t vm_page_guard_addr = (ppnum_t) -2;

/*
 * Resident page structures are also chained on
 * queues that are used by the page replacement
 * system (pageout daemon). These queues are
 * defined here, but are shared by the pageout
 * module. The inactive queue is broken into
 * file-backed and anonymous pages for convenience, as the
 * pageout daemon often assigns a higher
 * importance to anonymous pages (it is less likely to pick them).
 */
vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
#if CONFIG_SECLUDED_MEMORY
vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
#endif /* CONFIG_SECLUDED_MEMORY */
vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */
vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;

queue_head_t vm_objects_wired;

vm_page_queue_head_t vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
uint32_t vm_page_donate_mode;
uint32_t vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
uint32_t vm_page_donate_count;
bool vm_page_donate_queue_ripe;


vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
uint32_t vm_page_background_target;
uint32_t vm_page_background_target_snapshot;
uint32_t vm_page_background_count;
uint64_t vm_page_background_promoted_count;

uint32_t vm_page_background_internal_count;
uint32_t vm_page_background_external_count;

uint32_t vm_page_background_mode;
uint32_t vm_page_background_exclude_external;

unsigned int vm_page_active_count;
unsigned int vm_page_inactive_count;
unsigned int vm_page_kernelcache_count;
#if CONFIG_SECLUDED_MEMORY
unsigned int vm_page_secluded_count;
unsigned int vm_page_secluded_count_free;
unsigned int vm_page_secluded_count_inuse;
unsigned int vm_page_secluded_count_over_target;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int vm_page_anonymous_count;
unsigned int vm_page_throttled_count;
unsigned int vm_page_speculative_count;

unsigned int vm_page_wire_count;
unsigned int vm_page_wire_count_on_boot = 0;
unsigned int vm_page_stolen_count = 0;
unsigned int vm_page_wire_count_initial;
unsigned int vm_page_gobble_count = 0;
unsigned int vm_page_kern_lpage_count = 0;

uint64_t booter_size; /* external so it can be found in core dumps */

#define VM_PAGE_WIRE_COUNT_WARNING 0
#define VM_PAGE_GOBBLE_COUNT_WARNING 0

unsigned int vm_page_purgeable_count = 0;       /* # of pages purgeable now */
unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
uint64_t vm_page_purged_count = 0;              /* total count of purged pages */

unsigned int vm_page_xpmapped_external_count = 0;
unsigned int vm_page_external_count = 0;
unsigned int vm_page_internal_count = 0;
unsigned int vm_page_pageable_external_count = 0;
unsigned int vm_page_pageable_internal_count = 0;

#if DEVELOPMENT || DEBUG
unsigned int vm_page_speculative_recreated = 0;
unsigned int vm_page_speculative_created = 0;
unsigned int vm_page_speculative_used = 0;
#endif

vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;

unsigned int vm_page_cleaned_count = 0;

uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
ppnum_t max_valid_low_ppnum = PPNUM_MAX;


/*
 * Several page replacement parameters are also
 * shared with this module, so that page allocation
 * (done here in vm_page_alloc) can trigger the
 * pageout daemon.
 */
unsigned int vm_page_free_target = 0;
unsigned int vm_page_free_min = 0;
unsigned int vm_page_throttle_limit = 0;
unsigned int vm_page_inactive_target = 0;
#if CONFIG_SECLUDED_MEMORY
unsigned int vm_page_secluded_target = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
unsigned int vm_page_anonymous_min = 0;
unsigned int vm_page_free_reserved = 0;


/*
 * The VM system has a couple of heuristics for deciding
 * that pages are "uninteresting" and should be placed
 * on the inactive queue as likely candidates for replacement.
 * These variables let the heuristics be controlled at run-time
 * to make experimentation easier.
 */

boolean_t vm_page_deactivate_hint = TRUE;

struct vm_page_stats_reusable vm_page_stats_reusable;

/*
 * vm_set_page_size:
 *
 * Sets the page size, perhaps based upon the memory
 * size. Must be called before any use of page-size
 * dependent functions.
 *
 * Sets page_shift and page_mask from page_size.
 */
void
vm_set_page_size(void)
{
    page_size = PAGE_SIZE;
    page_mask = PAGE_MASK;
    page_shift = PAGE_SHIFT;

    if ((page_mask & page_size) != 0) {
        panic("vm_set_page_size: page size not a power of two");
    }

    for (page_shift = 0;; page_shift++) {
        if ((1U << page_shift) == page_size) {
            break;
        }
    }
}
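
/*
 * For example: with PAGE_SIZE == 16384 the loop above leaves
 * page_shift == 14, since 1U << 14 == 16384; with 4 KB pages it
 * leaves page_shift == 12.
 */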

#if defined (__x86_64__)

#define MAX_CLUMP_SIZE 16
#define DEFAULT_CLUMP_SIZE 4

unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;

#if DEVELOPMENT || DEBUG
unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;

static inline void
vm_clump_update_stats(unsigned int c)
{
    assert(c <= vm_clump_size);
    if (c > 0 && c <= vm_clump_size) {
        vm_clump_stats[c] += c;
    }
    vm_clump_allocs += c;
}
#endif /* if DEVELOPMENT || DEBUG */

/* Called once to set up the VM clump knobs */
static void
vm_page_setup_clump( void )
{
    unsigned int override, n;

    vm_clump_size = DEFAULT_CLUMP_SIZE;
    if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
        vm_clump_size = override;
    }

    if (vm_clump_size > MAX_CLUMP_SIZE) {
        panic("vm_page_setup_clump:: clump_size is too large!");
    }
    if (vm_clump_size < 1) {
        panic("vm_page_setup_clump:: clump_size must be >= 1");
    }
    if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
        panic("vm_page_setup_clump:: clump_size must be a power of 2");
    }

    vm_clump_promote_threshold = vm_clump_size;
    vm_clump_mask = vm_clump_size - 1;
    for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
        ;
    }

#if DEVELOPMENT || DEBUG
    bzero(vm_clump_stats, sizeof(vm_clump_stats));
    vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
#endif /* if DEVELOPMENT || DEBUG */
}
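
/*
 * For example, booting with clump_size=8 yields vm_clump_size == 8,
 * vm_clump_mask == 7, vm_clump_shift == 3 and
 * vm_clump_promote_threshold == 8.
 */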

#endif /* #if defined (__x86_64__) */

#define COLOR_GROUPS_TO_STEAL 4

/* Called once during startup, once the cache geometry is known.
 */
static void
vm_page_set_colors( void )
{
    unsigned int n, override;

#if defined (__x86_64__)
    /* adjust #colors because we need to color outside the clump boundary */
    vm_cache_geometry_colors >>= vm_clump_shift;
#endif
    if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */
        n = override;
    } else if (vm_cache_geometry_colors) { /* do we know what the cache geometry is? */
        n = vm_cache_geometry_colors;
    } else {
        n = DEFAULT_COLORS; /* use default if all else fails */
    }
    if (n == 0) {
        n = 1;
    }
    if (n > MAX_COLORS) {
        n = MAX_COLORS;
    }

    /* the count must be a power of 2 */
    if ((n & (n - 1)) != 0) {
        n = DEFAULT_COLORS; /* use default if all else fails */
    }
    vm_colors = n;
    vm_color_mask = n - 1;

    vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;

#if defined (__x86_64__)
    /* adjust for reduction in colors due to clumping and multiple cores */
    if (real_ncpus) {
        vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
    }
#endif
}
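
/*
 * For example, a "colors=32" boot-arg (or a 32-color cache geometry)
 * yields vm_colors == 32, vm_color_mask == 31 and a base
 * vm_free_magazine_refill_limit of 128 pages, before the x86
 * adjustment above for clumping and core count.
 */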

/*
 * During single threaded early boot we don't initialize all pages.
 * This avoids some delay during boot. The remaining pages are
 * initialized and added to the free lists as needed or, once we are
 * multithreaded, by what becomes the pageout thread.
 */
static boolean_t fill = FALSE;
static unsigned int fillval;
uint_t vm_delayed_count = 0; /* when non-zero, indicates we may have more pages to init */
ppnum_t delay_above_pnum = PPNUM_MAX;

/*
 * For x86, the first 8 GB initializes quickly and gives us lots of lowmem + mem above it to start off with.
 * If ARM ever uses delayed page initialization, this value may need to be quite different.
 */
#define DEFAULT_DELAY_ABOVE_PHYS_GB (8)

/*
 * When we have to dip into more delayed pages due to low memory, free up
 * a large chunk to get things back to normal. This avoids contention on the
 * delayed code allocating page by page.
 */
#define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
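
/*
 * For example, with 4 KB pages VM_DELAY_PAGE_CHUNK is 262144 pages
 * (1 GB worth); with 16 KB pages it is 65536 pages.
 */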

/*
 * Get and initialize the next delayed page.
 */
static vm_page_t
vm_get_delayed_page(int grab_options)
{
    vm_page_t p;
    ppnum_t pnum;

    /*
     * Get a new page if we have one.
     */
    vm_free_page_lock();
    if (vm_delayed_count == 0) {
        vm_free_page_unlock();
        return NULL;
    }

    if (!pmap_next_page(&pnum)) {
        vm_delayed_count = 0;
        vm_free_page_unlock();
        return NULL;
    }


    assert(vm_delayed_count > 0);
    --vm_delayed_count;

#if defined(__x86_64__)
    /* x86 cluster code requires increasing phys_page in vm_pages[] */
    if (vm_pages_count > 0) {
        assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
    }
#endif
    p = &vm_pages[vm_pages_count];
    assert(p < vm_page_array_ending_addr);
    vm_page_init(p, pnum, FALSE);
    ++vm_pages_count;
    ++vm_page_pages;
    vm_free_page_unlock();

    /*
     * These pages were initially counted as wired, undo that now.
     */
    if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
    } else {
        LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
        vm_page_lockspin_queues();
    }
    --vm_page_wire_count;
    --vm_page_wire_count_initial;
    if (vm_page_wire_count_on_boot != 0) {
        --vm_page_wire_count_on_boot;
    }
    if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
        vm_page_unlock_queues();
    }


    if (fill) {
        fillPage(pnum, fillval);
    }
    return p;
}

static void vm_page_module_init_delayed(void);

/*
 * Free all remaining delayed pages to the free lists.
 */
void
vm_free_delayed_pages(void)
{
    vm_page_t p;
    vm_page_t list = NULL;
    uint_t cnt = 0;
    vm_offset_t start_free_va;
    int64_t free_size;

    while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
        if (vm_himemory_mode) {
            vm_page_release(p, FALSE);
        } else {
            p->vmp_snext = list;
            list = p;
        }
        ++cnt;
    }

    /*
     * If not in himemory mode, free the pages in reverse order,
     * so that the low memory pages end up first on the free lists (LIFO).
     */
    while (list != NULL) {
        p = list;
        list = p->vmp_snext;
        p->vmp_snext = NULL;
        vm_page_release(p, FALSE);
    }
#if DEVELOPMENT || DEBUG
    kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
#endif

    /*
     * Free up any unused full pages at the end of the vm_pages[] array
     */
    start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);

#if defined(__x86_64__)
    /*
     * Since x86 might have used large pages for vm_pages[], we can't
     * free starting in the middle of a partially used large page.
     */
    if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
        start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
    }
#endif
    if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
        free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
        if (free_size > 0) {
            ml_static_mfree(start_free_va, (vm_offset_t)free_size);
            vm_page_array_ending_addr = (void *)start_free_va;

            /*
             * Note there's no locking here, as only this thread will ever change this value.
             * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
             */
            vm_page_stolen_count -= (free_size >> PAGE_SHIFT);

#if DEVELOPMENT || DEBUG
            kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
                (long)free_size, (long)start_free_va);
#endif
        }
    }


    /*
     * now we can create the VM page array zone
     */
    vm_page_module_init_delayed();
}

/*
 * Try and free up enough delayed pages to match a contig memory allocation.
 */
static void
vm_free_delayed_pages_contig(
    uint_t npages,
    ppnum_t max_pnum,
    ppnum_t pnum_mask)
{
    vm_page_t p;
    ppnum_t pnum;
    uint_t cnt = 0;

    /*
     * Treat 0 as the absolute max page number.
     */
    if (max_pnum == 0) {
        max_pnum = PPNUM_MAX;
    }

    /*
     * Free till we get a properly aligned start page
     */
    for (;;) {
        p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
        if (p == NULL) {
            return;
        }
        pnum = VM_PAGE_GET_PHYS_PAGE(p);
        vm_page_release(p, FALSE);
        if (pnum >= max_pnum) {
            return;
        }
        if ((pnum & pnum_mask) == 0) {
            break;
        }
    }

    /*
     * Having a healthy pool of free pages will help performance. We don't
     * want to fall back to the delayed code for every page allocation.
     */
    if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
        npages += VM_DELAY_PAGE_CHUNK;
    }

    /*
     * Now free up the pages
     */
    for (cnt = 1; cnt < npages; ++cnt) {
        p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
        if (p == NULL) {
            return;
        }
        vm_page_release(p, FALSE);
    }
}

#define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
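
/*
 * For example, ROUNDUP_NEXTP2(5) == 8 and ROUNDUP_NEXTP2(8) == 8:
 * __builtin_clz(4) == __builtin_clz(7) == 29, so both evaluate to
 * 1U << 3. Note the macro is undefined for X <= 1, since
 * __builtin_clz(0) is undefined.
 */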

void
vm_page_init_local_q(unsigned int num_cpus)
{
    struct vpl *t_local_q;

    /*
     * no point in this for a uni-processor system
     */
    if (num_cpus >= 2) {
        ml_cpu_info_t cpu_info;

        /*
         * Force the allocation alignment to a cacheline,
         * because the `vpl` struct has a lock and will be taken
         * cross CPU so we want to isolate the rest of the per-CPU
         * data to avoid false sharing due to this lock being taken.
         */

        ml_cpu_get_info(&cpu_info);

        t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
            cpu_info.cache_line_size - 1);

        zpercpu_foreach(lq, t_local_q) {
            VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
            vm_page_queue_init(&lq->vpl_queue);
        }

        /* make the initialization visible to all cores */
        os_atomic_store(&vm_page_local_q, t_local_q, release);
    }
}

/*
 * vm_init_before_launchd
 *
 * This should be called right before launchd is loaded.
 */
void
vm_init_before_launchd()
{
    vm_page_lockspin_queues();
    vm_page_wire_count_on_boot = vm_page_wire_count;
    vm_page_unlock_queues();
}


/*
 * vm_page_bootstrap:
 *
 * Initializes the resident memory module.
 *
 * Allocates memory for the page cells, and
 * for the object/offset-to-page hash table headers.
 * Each page cell is initialized and placed on the free list.
 * Returns the range of available kernel virtual memory.
 */
__startup_func
void
vm_page_bootstrap(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    unsigned int i;
    unsigned int log1;
    unsigned int log2;
    unsigned int size;

    /*
     * Initialize the page queues.
     */

    lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
    lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
    lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);

    for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
        int group;

        purgeable_queues[i].token_q_head = 0;
        purgeable_queues[i].token_q_tail = 0;
        for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
            queue_init(&purgeable_queues[i].objq[group]);
        }

        purgeable_queues[i].type = i;
        purgeable_queues[i].new_pages = 0;
#if MACH_ASSERT
        purgeable_queues[i].debug_count_tokens = 0;
        purgeable_queues[i].debug_count_objects = 0;
#endif
    }
    purgeable_nonvolatile_count = 0;
    queue_init(&purgeable_nonvolatile_queue);

    for (i = 0; i < MAX_COLORS; i++) {
        vm_page_queue_init(&vm_page_queue_free[i].qhead);
    }

    vm_page_queue_init(&vm_lopage_queue_free);
    vm_page_queue_init(&vm_page_queue_active);
    vm_page_queue_init(&vm_page_queue_inactive);
#if CONFIG_SECLUDED_MEMORY
    vm_page_queue_init(&vm_page_queue_secluded);
#endif /* CONFIG_SECLUDED_MEMORY */
    vm_page_queue_init(&vm_page_queue_cleaned);
    vm_page_queue_init(&vm_page_queue_throttled);
    vm_page_queue_init(&vm_page_queue_anonymous);
    queue_init(&vm_objects_wired);

    for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
        vm_page_queue_init(&vm_page_queue_speculative[i].age_q);

        vm_page_queue_speculative[i].age_ts.tv_sec = 0;
        vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
    }

    vm_page_queue_init(&vm_page_queue_donate);
    vm_page_queue_init(&vm_page_queue_background);

    vm_page_background_count = 0;
    vm_page_background_internal_count = 0;
    vm_page_background_external_count = 0;
    vm_page_background_promoted_count = 0;

    vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);

    if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
        vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
    }

#if defined(__LP64__)
    vm_page_background_mode = VM_PAGE_BG_ENABLED;
    vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
#else
    vm_page_background_mode = VM_PAGE_BG_DISABLED;
    vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
#endif
    vm_page_background_exclude_external = 0;

    PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
    PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
    PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));

    if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
        vm_page_background_mode = VM_PAGE_BG_DISABLED;
    }

    PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
    if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
        vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
    }

    vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
    vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
    vm_page_donate_target = vm_page_donate_target_high;
    vm_page_donate_count = 0;

    vm_page_free_wanted = 0;
    vm_page_free_wanted_privileged = 0;
#if CONFIG_SECLUDED_MEMORY
    vm_page_free_wanted_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */

#if defined (__x86_64__)
    /* this must be called before vm_page_set_colors() */
    vm_page_setup_clump();
#endif

    vm_page_set_colors();

    bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;

    bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
    vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

    bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

    bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
    vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
#if CONFIG_SECLUDED_MEMORY
    vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
#endif /* CONFIG_SECLUDED_MEMORY */

    for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
        vm_allocation_sites_static[t].refcount = 2;
        vm_allocation_sites_static[t].tag = t;
        vm_allocation_sites[t] = &vm_allocation_sites_static[t];
    }
    vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
    vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
    vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];

    /*
     * Steal memory for the map and zone subsystems.
     */
    kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);

    /*
     * Allocate (and initialize) the virtual-to-physical
     * table hash buckets.
     *
     * The number of buckets should be a power of two to
     * get a good hash function. The following computation
     * chooses the first power of two that is no smaller
     * than the number of physical pages in the system.
     */

    if (vm_page_bucket_count == 0) {
        unsigned int npages = pmap_free_pages();

        vm_page_bucket_count = 1;
        while (vm_page_bucket_count < npages) {
            vm_page_bucket_count <<= 1;
        }
    }
    vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;

    vm_page_hash_mask = vm_page_bucket_count - 1;

    /*
     * Calculate object shift value for hashing algorithm:
     *   O = log2(sizeof(struct vm_object))
     *   B = log2(vm_page_bucket_count)
     * hash shifts the object left by
     *   B/2 - O
     */
    size = vm_page_bucket_count;
    for (log1 = 0; size > 1; log1++) {
        size /= 2;
    }
    size = sizeof(struct vm_object);
    for (log2 = 0; size > 1; log2++) {
        size /= 2;
    }
    vm_page_hash_shift = log1 / 2 - log2 + 1;

    vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);  /* Get (ceiling of sqrt of table size) */
    vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
    vm_page_bucket_hash |= 1;                      /* Set bit and add 1 - always must be 1 to ensure unique series */
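
    /*
     * Worked example, assuming a hypothetical 1M-bucket table
     * (vm_page_bucket_count == 1 << 20, so log1 == 20) and a
     * 256-byte struct vm_object (log2 == 8): vm_page_hash_shift
     * becomes 20 / 2 - 8 + 1 == 3, and vm_page_bucket_hash becomes
     * (1 << 10) | (1 << 5) | 1 == 0x421.
     */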

    if (vm_page_hash_mask & vm_page_bucket_count) {
        printf("vm_page_bootstrap: WARNING -- strange page hash\n");
    }

#if VM_PAGE_BUCKETS_CHECK
#if VM_PAGE_FAKE_BUCKETS
    /*
     * Allocate a decoy set of page buckets, to detect
     * any stomping there.
     */
    vm_page_fake_buckets = (vm_page_bucket_t *)
        pmap_steal_memory(vm_page_bucket_count *
        sizeof(vm_page_bucket_t), 0);
    vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
    vm_page_fake_buckets_end =
        vm_map_round_page((vm_page_fake_buckets_start +
        (vm_page_bucket_count *
        sizeof(vm_page_bucket_t))),
        PAGE_MASK);
    char *cp;
    for (cp = (char *)vm_page_fake_buckets_start;
        cp < (char *)vm_page_fake_buckets_end;
        cp++) {
        *cp = 0x5a;
    }
#endif /* VM_PAGE_FAKE_BUCKETS */
#endif /* VM_PAGE_BUCKETS_CHECK */

    kernel_debug_string_early("vm_page_buckets");
    vm_page_buckets = (vm_page_bucket_t *)
        pmap_steal_memory(vm_page_bucket_count *
        sizeof(vm_page_bucket_t), 0);

    kernel_debug_string_early("vm_page_bucket_locks");
    vm_page_bucket_locks = (lck_spin_t *)
        pmap_steal_memory(vm_page_bucket_lock_count *
        sizeof(lck_spin_t), 0);

    for (i = 0; i < vm_page_bucket_count; i++) {
        vm_page_bucket_t *bucket = &vm_page_buckets[i];

        bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
#if MACH_PAGE_HASH_STATS
        bucket->cur_count = 0;
        bucket->hi_count = 0;
#endif /* MACH_PAGE_HASH_STATS */
    }

    for (i = 0; i < vm_page_bucket_lock_count; i++) {
        lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
    }

    vm_tag_init();

#if VM_PAGE_BUCKETS_CHECK
    vm_page_buckets_check_ready = TRUE;
#endif /* VM_PAGE_BUCKETS_CHECK */

    /*
     * Machine-dependent code allocates the resident page table.
     * It uses vm_page_init to initialize the page frames.
     * The code also returns to us the virtual space available
     * to the kernel. We don't trust the pmap module
     * to get the alignment right.
     */

    kernel_debug_string_early("pmap_startup");
    pmap_startup(&virtual_space_start, &virtual_space_end);
    virtual_space_start = round_page(virtual_space_start);
    virtual_space_end = trunc_page(virtual_space_end);

    *startp = virtual_space_start;
    *endp = virtual_space_end;

    /*
     * Compute the initial "wire" count.
     * Up until now, the pages which have been set aside are not under
     * the VM system's control, so although they aren't explicitly
     * wired, they nonetheless can't be moved. At this moment,
     * all VM managed pages are "free", courtesy of pmap_startup.
     */
    assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
    vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
        vm_page_free_count - vm_lopage_free_count;
#if CONFIG_SECLUDED_MEMORY
    vm_page_wire_count -= vm_page_secluded_count;
#endif
    vm_page_wire_count_initial = vm_page_wire_count;

    /* capture this for later use */
    booter_size = ml_get_booter_memory_size();

    printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
        vm_page_free_count, vm_page_wire_count, vm_delayed_count);

    kernel_debug_string_early("vm_page_bootstrap complete");
}

#ifndef MACHINE_PAGES
/*
 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
 * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
 * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
 */
static void *
pmap_steal_memory_internal(
    vm_size_t size,
    vm_size_t alignment,
    boolean_t might_free,
    unsigned int flags,
    pmap_mapping_type_t mapping_type)
{
    kern_return_t kr;
    vm_offset_t addr;
    vm_offset_t map_addr;
    ppnum_t phys_page;
    unsigned int pmap_flags;

    /*
     * Size needs to be aligned to word size.
     */
    size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
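
    /*
     * For example, on LP64 a 13-byte request is rounded up to 16
     * bytes: (13 + 7) & ~7 == 16.
     */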

    /*
     * Alignment defaults to word size if not specified.
     */
    if (alignment == 0) {
        alignment = sizeof(void*);
    }

    /*
     * Alignment must be no greater than a page and must be a power of two.
     */
    assert(alignment <= PAGE_SIZE);
    assert((alignment & (alignment - 1)) == 0);

    /*
     * On the first call, get the initial values for virtual address space
     * and page align them.
     */
    if (virtual_space_start == virtual_space_end) {
        pmap_virtual_space(&virtual_space_start, &virtual_space_end);
        virtual_space_start = round_page(virtual_space_start);
        virtual_space_end = trunc_page(virtual_space_end);

#if defined(__x86_64__)
        /*
         * Release remaining unused section of preallocated KVA and the 4K page tables
         * that map it. This makes the VA available for large page mappings.
         */
        Idle_PTs_release(virtual_space_start, virtual_space_end);
#endif
    }

    /*
     * Allocate the virtual space for this request. On x86, we'll align to a large page
     * address if the size is big enough to back with at least 1 large page.
     */
#if defined(__x86_64__)
    if (size >= I386_LPGBYTES) {
        virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
    }
#endif
    virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
    addr = virtual_space_start;
    virtual_space_start += size;

    //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);  /* (TEST/DEBUG) */

    /*
     * Allocate and map physical pages to back the new virtual space.
     */
    map_addr = round_page(addr);
    while (map_addr < addr + size) {
#if defined(__x86_64__)
        /*
         * Back with a large page if properly aligned on x86
         */
        if ((map_addr & I386_LPGMASK) == 0 &&
            map_addr + I386_LPGBYTES <= addr + size &&
            pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
            pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
            kr = pmap_enter(kernel_pmap, map_addr, phys_page,
                VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
                VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);

            if (kr != KERN_SUCCESS) {
                panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
                    (unsigned long)map_addr, phys_page);
            }
            map_addr += I386_LPGBYTES;
            vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
            vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
            vm_page_kern_lpage_count++;
            continue;
        }
#endif

        if (!pmap_next_page_hi(&phys_page, might_free)) {
            panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
        }

#if defined(__x86_64__)
        pmap_pre_expand(kernel_pmap, map_addr);
#endif
        pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;

        kr = pmap_enter(kernel_pmap, map_addr, phys_page,
            VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
            pmap_flags, FALSE, mapping_type);

        if (kr != KERN_SUCCESS) {
            panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
                (unsigned long)map_addr, phys_page);
        }
        map_addr += PAGE_SIZE;

        /*
         * Account for newly stolen memory
         */
        vm_page_wire_count++;
        vm_page_stolen_count++;
    }

#if defined(__x86_64__)
    /*
     * The call with might_free is currently the last use of pmap_steal_memory*().
     * Notify the pmap layer to record which high pages were allocated so far.
     */
    if (might_free) {
        pmap_hi_pages_done();
    }
#endif
#if KASAN
    kasan_notify_address(round_page(addr), size);
#endif
    return (void *) addr;
}

void *
pmap_steal_memory(
    vm_size_t size,
    vm_size_t alignment)
{
    return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
}

void *
pmap_steal_freeable_memory(
    vm_size_t size)
{
    return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
}




#if CONFIG_SECLUDED_MEMORY
/* boot-args to control secluded memory */
TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
/* IOKit can use secluded memory */
TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
/* apps can use secluded memory */
TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
/* filecache can use secluded memory */
TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
uint64_t secluded_shutoff_trigger = 0;
uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
#endif /* CONFIG_SECLUDED_MEMORY */


#if defined(__arm64__)
extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
unsigned int vm_first_phys_ppnum = 0;
#endif

void vm_page_release_startup(vm_page_t mem);
void
pmap_startup(
    vm_offset_t *startp,
    vm_offset_t *endp)
{
    unsigned int i, npages;
    ppnum_t phys_page;
    uint64_t mem_sz;
    uint64_t start_ns;
    uint64_t now_ns;
    uint_t low_page_count = 0;

#if defined(__LP64__)
    /*
     * make sure we are aligned on a 64 byte boundary
     * for VM_PAGE_PACK_PTR (it clips off the low-order
     * 6 bits of the pointer)
     */
    if (virtual_space_start != virtual_space_end) {
        virtual_space_start = round_page(virtual_space_start);
    }
#endif

    /*
     * We calculate how many page frames we will have
     * and then allocate the page structures in one chunk.
     *
     * Note that the calculation here doesn't take into account
     * the memory needed to map what's being allocated, i.e. the page
     * table entries. So the actual number of pages we get will be
     * less than this. To do someday: include that in the computation.
     *
     * Also for ARM, we don't use the count of free_pages, but rather the
     * range from last page to first page (ignore holes due to retired pages).
     */
#if defined(__arm64__)
    mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
#else /* defined(__arm64__) */
    mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
#endif /* defined(__arm64__) */
    mem_sz += round_page(virtual_space_start) - virtual_space_start; /* Account for any slop */
    npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));     /* scaled to include the vm_page_ts */


    vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);

    /*
     * Check if we want to initialize pages to a known value
     */
    if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
        fill = TRUE;
    }
#if DEBUG
    /* This slows down booting the DEBUG kernel, particularly on
     * large memory systems, but is worthwhile in deterministically
     * trapping uninitialized memory usage.
     */
    if (!fill) {
        fill = TRUE;
        fillval = 0xDEB8F177;
    }
#endif
    if (fill) {
        kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
    }

#if CONFIG_SECLUDED_MEMORY
    /*
     * Figure out how much secluded memory to have before we start
     * releasing pages to the free lists.
     * The default, if specified nowhere else, is no secluded mem.
     */
    vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);

    /*
     * Allow a really large app to effectively use secluded memory until it exits.
     */
    if (vm_page_secluded_target != 0) {
        /*
         * Get an amount from boot-args, else use 1/2 of max_mem.
         * 1/2 max_mem was chosen from a Peace daemon tentpole test which
         * used munch to induce jetsam thrashing of false idle daemons on N56.
         */
        int secluded_shutoff_mb;
        if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
            sizeof(secluded_shutoff_mb))) {
            secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
        } else {
            secluded_shutoff_trigger = max_mem / 2;
        }

        /* ensure the headroom value is sensible and avoid underflows */
        assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
    }

#endif /* CONFIG_SECLUDED_MEMORY */

#if defined(__x86_64__)

    /*
     * Decide how much memory we delay freeing at boot time.
     */
    uint32_t delay_above_gb;
    if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
        delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
    }

    if (delay_above_gb == 0) {
        delay_above_pnum = PPNUM_MAX;
    } else {
        delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
    }

    /* make sure we have sane breathing room: 1G above low memory */
    if (delay_above_pnum <= max_valid_low_ppnum) {
        delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
    }

    if (delay_above_pnum < PPNUM_MAX) {
        printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
    }

#endif /* defined(__x86_64__) */

    /*
     * Initialize and release the page frames.
     */
    kernel_debug_string_early("page_frame_init");

    vm_page_array_beginning_addr = &vm_pages[0];
    vm_page_array_ending_addr = &vm_pages[npages]; /* used by ptr packing/unpacking code */
#if VM_PAGE_PACKED_FROM_ARRAY
    if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
        panic("pmap_startup(): too many pages to support vm_page packing");
    }
#endif

    vm_delayed_count = 0;

    absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
    vm_pages_count = 0;
    for (i = 0; i < npages; i++) {
        /* Did we run out of pages? */
        if (!pmap_next_page(&phys_page)) {
            break;
        }

        if (phys_page < max_valid_low_ppnum) {
            ++low_page_count;
        }

        /* Are we at high enough pages to delay the rest? */
        if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
            vm_delayed_count = pmap_free_pages();
            break;
        }

#if defined(__arm64__)
        if (i == 0) {
            vm_first_phys_ppnum = phys_page;
            patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
                (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
        }
#endif /* defined(__arm64__) */

#if defined(__x86_64__)
        /* The x86 clump freeing code requires increasing ppn's to work correctly */
        if (i > 0) {
            assert(phys_page > vm_pages[i - 1].vmp_phys_page);
        }
#endif
        ++vm_pages_count;
        vm_page_init(&vm_pages[i], phys_page, FALSE);
        if (fill) {
            fillPage(phys_page, fillval);
        }
        if (vm_himemory_mode) {
            vm_page_release_startup(&vm_pages[i]);
        }
    }
    vm_page_pages = vm_pages_count; /* used to report to user space */

    if (!vm_himemory_mode) {
        do {
            if (!VMP_ERROR_GET(&vm_pages[--i])) { /* skip retired pages */
                vm_page_release_startup(&vm_pages[i]);
            }
        } while (i != 0);
    }

    absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
    printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
    printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);

#if defined(__LP64__)
    if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
        panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
    }

    if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
        panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
    }
#endif

    VM_CHECK_MEMORYSTATUS;

    /*
     * We have to re-align virtual_space_start,
     * because pmap_steal_memory has been using it.
     */
    virtual_space_start = round_page(virtual_space_start);
    *startp = virtual_space_start;
    *endp = virtual_space_end;
}
#endif /* MACHINE_PAGES */

/*
 * Create the zone that represents the vm_pages[] array. Nothing ever allocates
 * or frees to this zone. It's just here for reporting purposes via the zprint command.
 * This needs to be done after all initially delayed pages are put on the free lists.
 */
static void
vm_page_module_init_delayed(void)
{
    (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
        ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
        uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;

        zone_set_exhaustible(z, 0, true);
        /*
         * Reflect size and usage information for vm_pages[].
         */

        z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
        z->z_elems_free = z->z_elems_avail - vm_pages_count;
        zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
            vm_pages_count * sizeof(struct vm_page);
        vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
        vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
        z->z_wired_cur += vm_page_zone_pages;
        z->z_wired_hwm = z->z_wired_cur;
        z->z_va_cur = z->z_wired_cur;
        /* since zone accounts for these, take them out of stolen */
        VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
    });
}

/*
 * Create the vm_pages zone. This is used for the vm_page structures for the pages
 * that are scavenged from other boot time usages by ml_static_mfree(). As such,
 * this needs to happen in early VM bootstrap.
 */
1657
1658 __startup_func
1659 static void
vm_page_module_init(void)1660 vm_page_module_init(void)
1661 {
1662 vm_size_t vm_page_with_ppnum_size;
1663
1664 /*
1665 * Since the pointers to elements in this zone will be packed, they
1666 * must have an appropriate size, which is not necessarily what sizeof() reports.
1667 */
1668 vm_page_with_ppnum_size =
1669 (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1670 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
1671
1672 vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1673 ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NO_TBI_TAG,
1674 ZONE_ID_ANY, ^(zone_t z) {
1675 /*
1676 * The number "10" is a small number that is larger than the number
1677 * of fictitious pages that any single caller will attempt to allocate
1678 * without blocking.
1679 *
1680 * The largest such number at the moment is kmem_alloc(),
1681 * when 2 guard pages are requested. 10 is simply a somewhat larger number,
1682 * taking into account the 50% hysteresis the zone allocator uses.
1683 *
1684 * Note: this works at all because the zone allocator
1685 * doesn't ever allocate fictitious pages.
1686 */
1687 zone_raise_reserve(z, 10);
1688 });
1689 }
1690 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
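
/*
 * A minimal sketch (compiled out): the vm_page_with_ppnum_size computation
 * above is the standard power-of-two round-up idiom. Assuming "align" is a
 * power of two, it generalizes as below; the helper name is hypothetical.
 */
#if 0 /* illustrative example only */
static inline vm_size_t
round_up_pow2_example(vm_size_t size, vm_size_t align)
{
	/* valid only when align is a power of two */
	return (size + (align - 1)) & ~(align - 1);
}
#endif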
1691
1692 /*
1693 * Routine: vm_page_create
1694 * Purpose:
1695 * After the VM system is up, machine-dependent code
1696 * may stumble across more physical memory: for example,
1697 * memory that it had been reserving for a frame buffer.
1698 * vm_page_create turns this memory into available pages.
1699 */
1700
1701 void
1702 vm_page_create(
1703 ppnum_t start,
1704 ppnum_t end)
1705 {
1706 ppnum_t phys_page;
1707 vm_page_t m;
1708
1709 for (phys_page = start;
1710 phys_page < end;
1711 phys_page++) {
1712 m = vm_page_grab_fictitious_common(phys_page, TRUE);
1713 m->vmp_fictitious = FALSE;
1714 pmap_clear_noencrypt(phys_page);
1715
1716
1717 vm_free_page_lock();
1718 vm_page_pages++;
1719 vm_free_page_unlock();
1720 vm_page_release(m, FALSE);
1721 }
1722 }
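
/*
 * A minimal usage sketch (compiled out): machine-dependent code handing a
 * reserved physical range back to the VM. The helper and its range are
 * hypothetical.
 */
#if 0 /* illustrative example only */
static void
reclaim_reserved_range_example(ppnum_t first_ppn, ppnum_t npages)
{
	/* turns each page in [first_ppn, first_ppn + npages) into an available page */
	vm_page_create(first_ppn, first_ppn + npages);
}
#endif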
1723
1724
1725 /*
1726 * vm_page_hash:
1727 *
1728 * Distributes the object/offset key pair among hash buckets.
1729 *
1730 * NOTE: The bucket count must be a power of 2
1731 */
1732 #define vm_page_hash(object, offset) (\
1733 ( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1734 & vm_page_hash_mask)
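
/*
 * A minimal sketch (compiled out) of how the lookup/insert/remove paths
 * below turn this hash into a bucket and its lock: BUCKETS_PER_LOCK maps
 * several buckets onto one spinlock so the lock table stays much smaller
 * than the bucket table. The helper name is hypothetical.
 */
#if 0 /* illustrative example only */
static inline lck_spin_t *
vm_page_bucket_lock_for_example(vm_object_t object, vm_object_offset_t offset)
{
	int hash_id = vm_page_hash(object, offset);

	return &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
}
#endif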
1735
1736
1737 /*
1738 * vm_page_insert: [ internal use only ]
1739 *
1740 * Inserts the given mem entry into the object/object-page
1741 * table and object list.
1742 *
1743 * The object must be locked.
1744 */
1745 void
1746 vm_page_insert(
1747 vm_page_t mem,
1748 vm_object_t object,
1749 vm_object_offset_t offset)
1750 {
1751 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1752 }
1753
1754 void
1755 vm_page_insert_wired(
1756 vm_page_t mem,
1757 vm_object_t object,
1758 vm_object_offset_t offset,
1759 vm_tag_t tag)
1760 {
1761 vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1762 }
1763
1764 void
1765 vm_page_insert_internal(
1766 vm_page_t mem,
1767 vm_object_t object,
1768 vm_object_offset_t offset,
1769 vm_tag_t tag,
1770 boolean_t queues_lock_held,
1771 boolean_t insert_in_hash,
1772 boolean_t batch_pmap_op,
1773 boolean_t batch_accounting,
1774 uint64_t *delayed_ledger_update)
1775 {
1776 vm_page_bucket_t *bucket;
1777 lck_spin_t *bucket_lock;
1778 int hash_id;
1779 task_t owner;
1780 int ledger_idx_volatile;
1781 int ledger_idx_nonvolatile;
1782 int ledger_idx_volatile_compressed;
1783 int ledger_idx_nonvolatile_compressed;
1784 int ledger_idx_composite;
1785 int ledger_idx_external_wired;
1786 boolean_t do_footprint;
1787
1788 #if 0
1789 /*
1790 * we may not hold the page queue lock
1791 * so this check isn't safe to make
1792 */
1793 VM_PAGE_CHECK(mem);
1794 #endif
1795
1796 assertf(page_aligned(offset), "0x%llx\n", offset);
1797
1798 assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1799
1800 vm_object_lock_assert_exclusive(object);
1801 LCK_MTX_ASSERT(&vm_page_queue_lock,
1802 queues_lock_held ? LCK_MTX_ASSERT_OWNED
1803 : LCK_MTX_ASSERT_NOTOWNED);
1804
1805 if (queues_lock_held == FALSE) {
1806 assert(!VM_PAGE_PAGEABLE(mem));
1807 }
1808
1809 if (insert_in_hash == TRUE) {
1810 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1811 if (mem->vmp_tabled || mem->vmp_object) {
1812 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1813 "already in (obj=%p,off=0x%llx)",
1814 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1815 }
1816 #endif
1817 if (object->internal && (offset >= object->vo_size)) {
1818 panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1819 mem, object, offset, object->vo_size);
1820 }
1821
1822 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1823
1824 /*
1825 * Record the object/offset pair in this page
1826 */
1827
1828 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1829 mem->vmp_offset = offset;
1830
1831 #if CONFIG_SECLUDED_MEMORY
1832 if (object->eligible_for_secluded) {
1833 vm_page_secluded.eligible_for_secluded++;
1834 }
1835 #endif /* CONFIG_SECLUDED_MEMORY */
1836
1837 /*
1838 * Insert it into the object/offset hash table
1839 */
1840 hash_id = vm_page_hash(object, offset);
1841 bucket = &vm_page_buckets[hash_id];
1842 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1843
1844 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1845
1846 mem->vmp_next_m = bucket->page_list;
1847 bucket->page_list = VM_PAGE_PACK_PTR(mem);
1848 assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1849
1850 #if MACH_PAGE_HASH_STATS
1851 if (++bucket->cur_count > bucket->hi_count) {
1852 bucket->hi_count = bucket->cur_count;
1853 }
1854 #endif /* MACH_PAGE_HASH_STATS */
1855 mem->vmp_hashed = TRUE;
1856 lck_spin_unlock(bucket_lock);
1857 }
1858
1859 {
1860 unsigned int cache_attr;
1861
1862 cache_attr = object->wimg_bits & VM_WIMG_MASK;
1863
1864 if (cache_attr != VM_WIMG_USE_DEFAULT) {
1865 PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1866 }
1867 }
1868 /*
1869 * Now link into the object's list of backed pages.
1870 */
1871 vm_page_queue_enter(&object->memq, mem, vmp_listq);
1872 object->memq_hint = mem;
1873 mem->vmp_tabled = TRUE;
1874
1875 /*
1876 * Show that the object has one more resident page.
1877 */
1878
1879 object->resident_page_count++;
1880 if (VM_PAGE_WIRED(mem)) {
1881 assert(mem->vmp_wire_count > 0);
1882 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1883 VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1884 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1885 }
1886 assert(object->resident_page_count >= object->wired_page_count);
1887
1888 #if DEVELOPMENT || DEBUG
1889 if (object->object_is_shared_cache &&
1890 object->pager != NULL &&
1891 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1892 int new, old;
1893 assert(!object->internal);
1894 new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1895 do {
1896 old = shared_region_pagers_resident_peak;
1897 } while (old < new &&
1898 !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1899 }
1900 #endif /* DEVELOPMENT || DEBUG */
1901
1902 if (batch_accounting == FALSE) {
1903 if (object->internal) {
1904 OSAddAtomic(1, &vm_page_internal_count);
1905 } else {
1906 OSAddAtomic(1, &vm_page_external_count);
1907 }
1908 }
1909
1910 /*
1911 * It wouldn't make sense to insert a "reusable" page in
1912 * an object (the page would have been marked "reusable" only
1913 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1914 * in the object at that time).
1915 * But a page could be inserted in an "all_reusable" object, if
1916 * something faults it in (a vm_read() from another task or a
1917 * "use-after-free" issue in user space, for example). It can
1918 * also happen if we're relocating a page from that object to
1919 * a different physical page during a physically-contiguous
1920 * allocation.
1921 */
1922 assert(!mem->vmp_reusable);
1923 if (object->all_reusable) {
1924 OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1925 }
1926
1927 if (object->purgable == VM_PURGABLE_DENY &&
1928 !object->vo_ledger_tag) {
1929 owner = TASK_NULL;
1930 } else {
1931 owner = VM_OBJECT_OWNER(object);
1932 vm_object_ledger_tag_ledgers(object,
1933 &ledger_idx_volatile,
1934 &ledger_idx_nonvolatile,
1935 &ledger_idx_volatile_compressed,
1936 &ledger_idx_nonvolatile_compressed,
1937 &ledger_idx_composite,
1938 &ledger_idx_external_wired,
1939 &do_footprint);
1940 }
1941 if (owner &&
1942 object->internal &&
1943 (object->purgable == VM_PURGABLE_NONVOLATILE ||
1944 object->purgable == VM_PURGABLE_DENY ||
1945 VM_PAGE_WIRED(mem))) {
1946 if (delayed_ledger_update) {
1947 *delayed_ledger_update += PAGE_SIZE;
1948 } else {
1949 /* more non-volatile bytes */
1950 ledger_credit(owner->ledger,
1951 ledger_idx_nonvolatile,
1952 PAGE_SIZE);
1953 if (do_footprint) {
1954 /* more footprint */
1955 ledger_credit(owner->ledger,
1956 task_ledgers.phys_footprint,
1957 PAGE_SIZE);
1958 } else if (ledger_idx_composite != -1) {
1959 ledger_credit(owner->ledger,
1960 ledger_idx_composite,
1961 PAGE_SIZE);
1962 }
1963 }
1964 } else if (owner &&
1965 object->internal &&
1966 (object->purgable == VM_PURGABLE_VOLATILE ||
1967 object->purgable == VM_PURGABLE_EMPTY)) {
1968 assert(!VM_PAGE_WIRED(mem));
1969 /* more volatile bytes */
1970 ledger_credit(owner->ledger,
1971 ledger_idx_volatile,
1972 PAGE_SIZE);
1973 }
1974
1975 if (object->purgable == VM_PURGABLE_VOLATILE) {
1976 if (VM_PAGE_WIRED(mem)) {
1977 OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1978 } else {
1979 OSAddAtomic(+1, &vm_page_purgeable_count);
1980 }
1981 } else if (object->purgable == VM_PURGABLE_EMPTY &&
1982 mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1983 /*
1984 * This page belongs to a purged VM object but hasn't
1985 * been purged (because it was "busy").
1986 * It's in the "throttled" queue and hence not
1987 * visible to vm_pageout_scan(). Move it to a pageable
1988 * queue, so that it can eventually be reclaimed, instead
1989 * of lingering in the "empty" object.
1990 */
1991 if (queues_lock_held == FALSE) {
1992 vm_page_lockspin_queues();
1993 }
1994 vm_page_deactivate(mem);
1995 if (queues_lock_held == FALSE) {
1996 vm_page_unlock_queues();
1997 }
1998 }
1999
2000 #if VM_OBJECT_TRACKING_OP_MODIFIED
2001 if (vm_object_tracking_btlog &&
2002 object->internal &&
2003 object->resident_page_count == 0 &&
2004 object->pager == NULL &&
2005 object->shadow != NULL &&
2006 object->shadow->vo_copy == object) {
2007 btlog_record(vm_object_tracking_btlog, object,
2008 VM_OBJECT_TRACKING_OP_MODIFIED,
2009 btref_get(__builtin_frame_address(0), 0));
2010 }
2011 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2012 }
2013
2014 /*
2015 * vm_page_replace:
2016 *
2017 * Exactly like vm_page_insert, except that we first
2018 * remove any existing page at the given offset in the object.
2019 *
2020 * The object must be locked.
2021 */
2022 void
2023 vm_page_replace(
2024 vm_page_t mem,
2025 vm_object_t object,
2026 vm_object_offset_t offset)
2027 {
2028 vm_page_bucket_t *bucket;
2029 vm_page_t found_m = VM_PAGE_NULL;
2030 lck_spin_t *bucket_lock;
2031 int hash_id;
2032
2033 #if 0
2034 /*
2035 * we don't hold the page queue lock
2036 * so this check isn't safe to make
2037 */
2038 VM_PAGE_CHECK(mem);
2039 #endif
2040 vm_object_lock_assert_exclusive(object);
2041 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2042 if (mem->vmp_tabled || mem->vmp_object) {
2043 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2044 "already in (obj=%p,off=0x%llx)",
2045 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2046 }
2047 #endif
2048 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2049
2050 assert(!VM_PAGE_PAGEABLE(mem));
2051
2052 /*
2053 * Record the object/offset pair in this page
2054 */
2055 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2056 mem->vmp_offset = offset;
2057
2058 /*
2059 * Insert it into the object/offset hash table,
2060 * replacing any page that might have been there.
2061 */
2062
2063 hash_id = vm_page_hash(object, offset);
2064 bucket = &vm_page_buckets[hash_id];
2065 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2066
2067 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2068
2069 if (bucket->page_list) {
2070 vm_page_packed_t *mp = &bucket->page_list;
2071 vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2072
2073 do {
2074 /*
2075 * compare packed object pointers
2076 */
2077 if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2078 /*
2079 * Remove old page from hash list
2080 */
2081 *mp = m->vmp_next_m;
2082 m->vmp_hashed = FALSE;
2083 m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2084
2085 found_m = m;
2086 break;
2087 }
2088 mp = &m->vmp_next_m;
2089 } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2090
2091 mem->vmp_next_m = bucket->page_list;
2092 } else {
2093 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2094 }
2095 /*
2096 * insert new page at head of hash list
2097 */
2098 bucket->page_list = VM_PAGE_PACK_PTR(mem);
2099 mem->vmp_hashed = TRUE;
2100
2101 lck_spin_unlock(bucket_lock);
2102
2103 if (found_m) {
2104 /*
2105 * there was already a page at the specified
2106 * offset for this object... remove it from
2107 * the object and free it back to the free list
2108 */
2109 vm_page_free_unlocked(found_m, FALSE);
2110 }
2111 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2112 }
2113
2114 /*
2115 * vm_page_remove: [ internal use only ]
2116 *
2117 * Removes the given mem entry from the object/offset-page
2118 * table and the object page list.
2119 *
2120 * The object must be locked.
2121 */
2122
2123 void
2124 vm_page_remove(
2125 vm_page_t mem,
2126 boolean_t remove_from_hash)
2127 {
2128 vm_page_bucket_t *bucket;
2129 vm_page_t this;
2130 lck_spin_t *bucket_lock;
2131 int hash_id;
2132 task_t owner;
2133 vm_object_t m_object;
2134 int ledger_idx_volatile;
2135 int ledger_idx_nonvolatile;
2136 int ledger_idx_volatile_compressed;
2137 int ledger_idx_nonvolatile_compressed;
2138 int ledger_idx_composite;
2139 int ledger_idx_external_wired;
2140 int do_footprint;
2141
2142 m_object = VM_PAGE_OBJECT(mem);
2143
2144 vm_object_lock_assert_exclusive(m_object);
2145 assert(mem->vmp_tabled);
2146 assert(!mem->vmp_cleaning);
2147 assert(!mem->vmp_laundry);
2148
2149 if (VM_PAGE_PAGEABLE(mem)) {
2150 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2151 }
2152 #if 0
2153 /*
2154 * we don't hold the page queue lock
2155 * so this check isn't safe to make
2156 */
2157 VM_PAGE_CHECK(mem);
2158 #endif
2159 if (remove_from_hash == TRUE) {
2160 /*
2161 * Remove from the object/offset hash table
2162 */
2163 hash_id = vm_page_hash(m_object, mem->vmp_offset);
2164 bucket = &vm_page_buckets[hash_id];
2165 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2166
2167 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2168
2169 if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2170 /* optimize for common case */
2171
2172 bucket->page_list = mem->vmp_next_m;
2173 } else {
2174 vm_page_packed_t *prev;
2175
2176 for (prev = &this->vmp_next_m;
2177 (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2178 prev = &this->vmp_next_m) {
2179 continue;
2180 }
2181 *prev = this->vmp_next_m;
2182 }
2183 #if MACH_PAGE_HASH_STATS
2184 bucket->cur_count--;
2185 #endif /* MACH_PAGE_HASH_STATS */
2186 mem->vmp_hashed = FALSE;
2187 this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2188 lck_spin_unlock(bucket_lock);
2189 }
2190 /*
2191 * Now remove from the object's list of backed pages.
2192 */
2193
2194 vm_page_remove_internal(mem);
2195
2196 /*
2197 * And show that the object has one fewer resident
2198 * page.
2199 */
2200
2201 assert(m_object->resident_page_count > 0);
2202 m_object->resident_page_count--;
2203
2204 #if DEVELOPMENT || DEBUG
2205 if (m_object->object_is_shared_cache &&
2206 m_object->pager != NULL &&
2207 m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2208 assert(!m_object->internal);
2209 OSAddAtomic(-1, &shared_region_pagers_resident_count);
2210 }
2211 #endif /* DEVELOPMENT || DEBUG */
2212
2213 if (m_object->internal) {
2214 #if DEBUG
2215 assert(vm_page_internal_count);
2216 #endif /* DEBUG */
2217
2218 OSAddAtomic(-1, &vm_page_internal_count);
2219 } else {
2220 assert(vm_page_external_count);
2221 OSAddAtomic(-1, &vm_page_external_count);
2222
2223 if (mem->vmp_xpmapped) {
2224 assert(vm_page_xpmapped_external_count);
2225 OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2226 }
2227 }
2228 if (!m_object->internal &&
2229 m_object->cached_list.next &&
2230 m_object->cached_list.prev) {
2231 if (m_object->resident_page_count == 0) {
2232 vm_object_cache_remove(m_object);
2233 }
2234 }
2235
2236 if (VM_PAGE_WIRED(mem)) {
2237 assert(mem->vmp_wire_count > 0);
2238 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2239 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2240 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2241 }
2242 assert(m_object->resident_page_count >=
2243 m_object->wired_page_count);
2244 if (mem->vmp_reusable) {
2245 assert(m_object->reusable_page_count > 0);
2246 m_object->reusable_page_count--;
2247 assert(m_object->reusable_page_count <=
2248 m_object->resident_page_count);
2249 mem->vmp_reusable = FALSE;
2250 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2251 vm_page_stats_reusable.reused_remove++;
2252 } else if (m_object->all_reusable) {
2253 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2254 vm_page_stats_reusable.reused_remove++;
2255 }
2256
2257 if (m_object->purgable == VM_PURGABLE_DENY &&
2258 !m_object->vo_ledger_tag) {
2259 owner = TASK_NULL;
2260 } else {
2261 owner = VM_OBJECT_OWNER(m_object);
2262 vm_object_ledger_tag_ledgers(m_object,
2263 &ledger_idx_volatile,
2264 &ledger_idx_nonvolatile,
2265 &ledger_idx_volatile_compressed,
2266 &ledger_idx_nonvolatile_compressed,
2267 &ledger_idx_composite,
2268 &ledger_idx_external_wired,
2269 &do_footprint);
2270 }
2271 if (owner &&
2272 m_object->internal &&
2273 (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2274 m_object->purgable == VM_PURGABLE_DENY ||
2275 VM_PAGE_WIRED(mem))) {
2276 /* less non-volatile bytes */
2277 ledger_debit(owner->ledger,
2278 ledger_idx_nonvolatile,
2279 PAGE_SIZE);
2280 if (do_footprint) {
2281 /* less footprint */
2282 ledger_debit(owner->ledger,
2283 task_ledgers.phys_footprint,
2284 PAGE_SIZE);
2285 } else if (ledger_idx_composite != -1) {
2286 ledger_debit(owner->ledger,
2287 ledger_idx_composite,
2288 PAGE_SIZE);
2289 }
2290 } else if (owner &&
2291 m_object->internal &&
2292 (m_object->purgable == VM_PURGABLE_VOLATILE ||
2293 m_object->purgable == VM_PURGABLE_EMPTY)) {
2294 assert(!VM_PAGE_WIRED(mem));
2295 /* less volatile bytes */
2296 ledger_debit(owner->ledger,
2297 ledger_idx_volatile,
2298 PAGE_SIZE);
2299 }
2300
2301 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2302 if (VM_PAGE_WIRED(mem)) {
2303 assert(vm_page_purgeable_wired_count > 0);
2304 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2305 } else {
2306 assert(vm_page_purgeable_count > 0);
2307 OSAddAtomic(-1, &vm_page_purgeable_count);
2308 }
2309 }
2310
2311 if (m_object->set_cache_attr == TRUE) {
2312 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2313 }
2314
2315 mem->vmp_tabled = FALSE;
2316 mem->vmp_object = 0;
2317 mem->vmp_offset = (vm_object_offset_t) -1;
2318 }
2319
2320
2321 /*
2322 * vm_page_lookup:
2323 *
2324 * Returns the page associated with the object/offset
2325 * pair specified; if none is found, VM_PAGE_NULL is returned.
2326 *
2327 * The object must be locked. No side effects.
2328 */
2329
2330 #define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
2331
2332 #if DEBUG_VM_PAGE_LOOKUP
2333
2334 struct {
2335 uint64_t vpl_total;
2336 uint64_t vpl_empty_obj;
2337 uint64_t vpl_bucket_NULL;
2338 uint64_t vpl_hit_hint;
2339 uint64_t vpl_hit_hint_next;
2340 uint64_t vpl_hit_hint_prev;
2341 uint64_t vpl_fast;
2342 uint64_t vpl_slow;
2343 uint64_t vpl_hit;
2344 uint64_t vpl_miss;
2345
2346 uint64_t vpl_fast_elapsed;
2347 uint64_t vpl_slow_elapsed;
2348 } vm_page_lookup_stats __attribute__((aligned(8)));
2349
2350 #endif
2351
2352 #define KDP_VM_PAGE_WALK_MAX 1000
2353
2354 vm_page_t
2355 kdp_vm_page_lookup(
2356 vm_object_t object,
2357 vm_object_offset_t offset)
2358 {
2359 vm_page_t cur_page;
2360 int num_traversed = 0;
2361
2362 if (not_in_kdp) {
2363 panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2364 }
2365
2366 vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2367 if (cur_page->vmp_offset == offset) {
2368 return cur_page;
2369 }
2370 num_traversed++;
2371
2372 if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2373 return VM_PAGE_NULL;
2374 }
2375 }
2376
2377 return VM_PAGE_NULL;
2378 }
2379
2380 vm_page_t
2381 vm_page_lookup(
2382 vm_object_t object,
2383 vm_object_offset_t offset)
2384 {
2385 vm_page_t mem;
2386 vm_page_bucket_t *bucket;
2387 vm_page_queue_entry_t qe;
2388 lck_spin_t *bucket_lock = NULL;
2389 int hash_id;
2390 #if DEBUG_VM_PAGE_LOOKUP
2391 uint64_t start, elapsed;
2392
2393 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2394 #endif
2395
2396 if (VM_KERNEL_ADDRESS(offset)) {
2397 offset = VM_KERNEL_STRIP_UPTR(offset);
2398 }
2399
2400 vm_object_lock_assert_held(object);
2401 assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2402
2403 if (object->resident_page_count == 0) {
2404 #if DEBUG_VM_PAGE_LOOKUP
2405 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2406 #endif
2407 return VM_PAGE_NULL;
2408 }
2409
2410 mem = object->memq_hint;
2411
2412 if (mem != VM_PAGE_NULL) {
2413 assert(VM_PAGE_OBJECT(mem) == object);
2414
2415 if (mem->vmp_offset == offset) {
2416 #if DEBUG_VM_PAGE_LOOKUP
2417 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2418 #endif
2419 return mem;
2420 }
2421 qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2422
2423 if (!vm_page_queue_end(&object->memq, qe)) {
2424 vm_page_t next_page;
2425
2426 next_page = (vm_page_t)((uintptr_t)qe);
2427 assert(VM_PAGE_OBJECT(next_page) == object);
2428
2429 if (next_page->vmp_offset == offset) {
2430 object->memq_hint = next_page; /* new hint */
2431 #if DEBUG_VM_PAGE_LOOKUP
2432 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2433 #endif
2434 return next_page;
2435 }
2436 }
2437 qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2438
2439 if (!vm_page_queue_end(&object->memq, qe)) {
2440 vm_page_t prev_page;
2441
2442 prev_page = (vm_page_t)((uintptr_t)qe);
2443 assert(VM_PAGE_OBJECT(prev_page) == object);
2444
2445 if (prev_page->vmp_offset == offset) {
2446 object->memq_hint = prev_page; /* new hint */
2447 #if DEBUG_VM_PAGE_LOOKUP
2448 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2449 #endif
2450 return prev_page;
2451 }
2452 }
2453 }
2454 /*
2455 * Search the hash table for this object/offset pair
2456 */
2457 hash_id = vm_page_hash(object, offset);
2458 bucket = &vm_page_buckets[hash_id];
2459
2460 /*
2461 * since we hold the object lock, we are guaranteed that no
2462 * new pages can be inserted into this object... this in turn
2463 * guarantees that the page we're looking for can't exist
2464 * if the bucket it hashes to is currently NULL even when looked
2465 * at outside the scope of the hash bucket lock... this is a
2466 * really cheap optimization to avoid taking the lock
2467 */
2468 if (!bucket->page_list) {
2469 #if DEBUG_VM_PAGE_LOOKUP
2470 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2471 #endif
2472 return VM_PAGE_NULL;
2473 }
2474
2475 #if DEBUG_VM_PAGE_LOOKUP
2476 start = mach_absolute_time();
2477 #endif
2478 if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2479 /*
2480 * on average, it's roughly 3 times faster to walk a short memq list
2481 * than to take the spin lock and go through the hash list
2482 */
2483 mem = (vm_page_t)vm_page_queue_first(&object->memq);
2484
2485 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2486 if (mem->vmp_offset == offset) {
2487 break;
2488 }
2489
2490 mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2491 }
2492 if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2493 mem = NULL;
2494 }
2495 } else {
2496 vm_page_object_t packed_object;
2497
2498 packed_object = VM_PAGE_PACK_OBJECT(object);
2499
2500 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2501
2502 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2503
2504 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2505 mem != VM_PAGE_NULL;
2506 mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2507 #if 0
2508 /*
2509 * we don't hold the page queue lock
2510 * so this check isn't safe to make
2511 */
2512 VM_PAGE_CHECK(mem);
2513 #endif
2514 if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2515 break;
2516 }
2517 }
2518 lck_spin_unlock(bucket_lock);
2519 }
2520
2521 #if DEBUG_VM_PAGE_LOOKUP
2522 elapsed = mach_absolute_time() - start;
2523
2524 if (bucket_lock) {
2525 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2526 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2527 } else {
2528 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2529 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2530 }
2531 if (mem != VM_PAGE_NULL) {
2532 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2533 } else {
2534 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2535 }
2536 #endif
2537 if (mem != VM_PAGE_NULL) {
2538 assert(VM_PAGE_OBJECT(mem) == object);
2539
2540 object->memq_hint = mem;
2541 }
2542 return mem;
2543 }
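
/*
 * A minimal usage sketch (compiled out) of the vm_page_lookup() contract:
 * the object lock must be held across the call and for as long as the
 * returned page is used; this hypothetical caller only tests for presence.
 */
#if 0 /* illustrative example only */
static bool
object_has_resident_page_example(vm_object_t obj, vm_object_offset_t off)
{
	vm_page_t m;

	vm_object_lock(obj);
	m = vm_page_lookup(obj, off);
	vm_object_unlock(obj);

	return m != VM_PAGE_NULL;
}
#endif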
2544
2545
2546 /*
2547 * vm_page_rename:
2548 *
2549 * Move the given memory entry from its
2550 * current object to the specified target object/offset.
2551 *
2552 * The object must be locked.
2553 */
2554 void
2555 vm_page_rename(
2556 vm_page_t mem,
2557 vm_object_t new_object,
2558 vm_object_offset_t new_offset)
2559 {
2560 boolean_t internal_to_external, external_to_internal;
2561 vm_tag_t tag;
2562 vm_object_t m_object;
2563
2564 m_object = VM_PAGE_OBJECT(mem);
2565
2566 assert(m_object != new_object);
2567 assert(m_object);
2568
2569 /*
2570 * Changes to mem->vmp_object require the page lock because
2571 * the pageout daemon uses that lock to get the object.
2572 */
2573 vm_page_lockspin_queues();
2574
2575 internal_to_external = FALSE;
2576 external_to_internal = FALSE;
2577
2578 if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2579 /*
2580 * it's much easier to get the vm_page_pageable_xxx accounting correct
2581 * if we first move the page to the active queue... it's going to end
2582 * up there anyway, and we don't call vm_page_rename frequently enough
2583 * for this to matter.
2584 */
2585 vm_page_queues_remove(mem, FALSE);
2586 vm_page_activate(mem);
2587 }
2588 if (VM_PAGE_PAGEABLE(mem)) {
2589 if (m_object->internal && !new_object->internal) {
2590 internal_to_external = TRUE;
2591 }
2592 if (!m_object->internal && new_object->internal) {
2593 external_to_internal = TRUE;
2594 }
2595 }
2596
2597 tag = m_object->wire_tag;
2598 vm_page_remove(mem, TRUE);
2599 vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2600
2601 if (internal_to_external) {
2602 vm_page_pageable_internal_count--;
2603 vm_page_pageable_external_count++;
2604 } else if (external_to_internal) {
2605 vm_page_pageable_external_count--;
2606 vm_page_pageable_internal_count++;
2607 }
2608
2609 vm_page_unlock_queues();
2610 }
2611
2612 /*
2613 * vm_page_init:
2614 *
2615 * Initialize the fields in a new page.
2616 * This takes a structure with random values and initializes it
2617 * so that it can be given to vm_page_release or vm_page_insert.
2618 */
2619 void
2620 vm_page_init(
2621 vm_page_t mem,
2622 ppnum_t phys_page,
2623 boolean_t lopage)
2624 {
2625 uint_t i;
2626 uintptr_t *p;
2627
2628 assert(phys_page);
2629
2630 #if DEBUG
2631 if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2632 if (!(pmap_valid_page(phys_page))) {
2633 panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2634 }
2635 }
2636 #endif /* DEBUG */
2637
2638 /*
2639 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2640 * try to use initial values which match 0. This minimizes the number of writes
2641 * needed for boot-time initialization.
2642 *
2643 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2644 */
2645 assert(VM_PAGE_NOT_ON_Q == 0);
2646 assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2647 for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2648 *p++ = 0;
2649 }
2650 mem->vmp_offset = (vm_object_offset_t)-1;
2651 mem->vmp_busy = TRUE;
2652 mem->vmp_lopage = lopage;
2653
2654 VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2655 #if 0
2656 /*
2657 * we're leaving this turned off for now... currently pages
2658 * come off the free list and are either immediately dirtied/referenced
2659 * due to zero-fill or COW faults, or are used to read or write files...
2660 * in the file I/O case, the UPL mechanism takes care of clearing
2661 * the state of the HW ref/mod bits in a somewhat fragile way.
2662 * Since we may change the way this works in the future (to toughen it up),
2663 * I'm leaving this as a reminder of where these bits could get cleared
2664 */
2665
2666 /*
2667 * make sure both the h/w referenced and modified bits are
2668 * clear at this point... we are especially dependent on
2669 * not finding a 'stale' h/w modified in a number of spots
2670 * once this page goes back into use
2671 */
2672 pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2673 #endif
2674 }
2675
2676 /*
2677 * vm_page_grab_fictitious:
2678 *
2679 * Allocate a fictitious page from the vm_page zone.
2680 * Returns VM_PAGE_NULL if there are no free pages.
2681 */
2682
2683 static vm_page_t
2684 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2685 {
2686 vm_page_t m;
2687
2688 m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2689 if (m) {
2690 vm_page_init(m, phys_addr, FALSE);
2691 m->vmp_fictitious = TRUE;
2692 }
2693 return m;
2694 }
2695
2696 vm_page_t
2697 vm_page_grab_fictitious(boolean_t canwait)
2698 {
2699 return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2700 }
2701
2702 int vm_guard_count;
2703
2704
2705 vm_page_t
2706 vm_page_grab_guard(boolean_t canwait)
2707 {
2708 vm_page_t page;
2709 page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2710 if (page) {
2711 OSAddAtomic(1, &vm_guard_count);
2712 }
2713 return page;
2714 }
2715
2716
2717 /*
2718 * vm_page_release_fictitious:
2719 *
2720 * Release a fictitious page to the zone pool
2721 */
2722 void
2723 vm_page_release_fictitious(
2724 vm_page_t m)
2725 {
2726 assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2727 assert(m->vmp_fictitious);
2728 assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2729 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2730 assert(!m->vmp_realtime);
2731
2732 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2733 OSAddAtomic(-1, &vm_guard_count);
2734 }
2735
2736 zfree(vm_page_zone, m);
2737 }
2738
2739 /*
2740 * vm_pool_low():
2741 *
2742 * Return true if it is not likely that a non-vm_privileged thread
2743 * can get memory without blocking. Advisory only, since the
2744 * situation may change under us.
2745 */
2746 bool
2747 vm_pool_low(void)
2748 {
2749 /* No locking, at worst we will fib. */
2750 return vm_page_free_count <= vm_page_free_reserved;
2751 }
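
/*
 * A minimal usage sketch (compiled out): vm_pool_low() is advisory, so a
 * caller treats it as a hint for choosing a fallback, never as a guarantee
 * that a grab will or won't block. The helper is hypothetical.
 */
#if 0 /* illustrative example only */
static vm_page_t
grab_unless_pool_low_example(void)
{
	if (vm_pool_low()) {
		/* likely to block; a non-privileged caller may prefer to defer */
		return VM_PAGE_NULL;
	}
	return vm_page_grab();
}
#endif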
2752
2753 boolean_t vm_darkwake_mode = FALSE;
2754
2755 /*
2756 * vm_update_darkwake_mode():
2757 *
2758 * Tells the VM that the system is in / out of darkwake.
2759 *
2760 * Today, the VM only lowers/raises the background queue target
2761 * so as to favor consuming more/fewer background pages when
2762 * darkwake is ON/OFF.
2763 *
2764 * We might need to do more things in the future.
2765 */
2766
2767 void
2768 vm_update_darkwake_mode(boolean_t darkwake_mode)
2769 {
2770 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2771 #pragma unused(darkwake_mode)
2772 assert(vm_darkwake_mode == FALSE);
2773 /*
2774 * Darkwake mode isn't supported on Apple Silicon macOS.
2775 */
2776 return;
2777 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2778 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2779
2780 vm_page_lockspin_queues();
2781
2782 if (vm_darkwake_mode == darkwake_mode) {
2783 /*
2784 * No change.
2785 */
2786 vm_page_unlock_queues();
2787 return;
2788 }
2789
2790 vm_darkwake_mode = darkwake_mode;
2791
2792 if (vm_darkwake_mode == TRUE) {
2793 /* save background target to restore later */
2794 vm_page_background_target_snapshot = vm_page_background_target;
2795
2796 /* target is set to 0...no protection for background pages */
2797 vm_page_background_target = 0;
2798 } else if (vm_darkwake_mode == FALSE) {
2799 if (vm_page_background_target_snapshot) {
2800 vm_page_background_target = vm_page_background_target_snapshot;
2801 }
2802 }
2803 vm_page_unlock_queues();
2804 #endif
2805 }
2806
2807 void
2808 vm_page_update_special_state(vm_page_t mem)
2809 {
2810 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2811 return;
2812 }
2813
2814 int mode = mem->vmp_on_specialq;
2815
2816 switch (mode) {
2817 case VM_PAGE_SPECIAL_Q_BG:
2818 {
2819 task_t my_task = current_task_early();
2820
2821 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2822 return;
2823 }
2824
2825 if (my_task) {
2826 if (task_get_darkwake_mode(my_task)) {
2827 return;
2828 }
2829 }
2830
2831 if (my_task) {
2832 if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2833 return;
2834 }
2835 }
2836 vm_page_lockspin_queues();
2837
2838 vm_page_background_promoted_count++;
2839
2840 vm_page_remove_from_specialq(mem);
2841 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2842
2843 vm_page_unlock_queues();
2844 break;
2845 }
2846
2847 case VM_PAGE_SPECIAL_Q_DONATE:
2848 {
2849 task_t my_task = current_task_early();
2850
2851 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2852 return;
2853 }
2854
2855 if (my_task->donates_own_pages == false) {
2856 vm_page_lockspin_queues();
2857
2858 vm_page_remove_from_specialq(mem);
2859 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2860
2861 vm_page_unlock_queues();
2862 }
2863 break;
2864 }
2865
2866 default:
2867 {
2868 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2869 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2870 break;
2871 }
2872 }
2873 }
2874
2875
2876 void
2877 vm_page_assign_special_state(vm_page_t mem, int mode)
2878 {
2879 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2880 return;
2881 }
2882
2883 switch (mode) {
2884 case VM_PAGE_SPECIAL_Q_BG:
2885 {
2886 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2887 return;
2888 }
2889
2890 task_t my_task = current_task_early();
2891
2892 if (my_task) {
2893 if (task_get_darkwake_mode(my_task)) {
2894 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2895 return;
2896 }
2897 }
2898
2899 if (my_task) {
2900 mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2901 }
2902 break;
2903 }
2904
2905 case VM_PAGE_SPECIAL_Q_DONATE:
2906 {
2907 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2908 return;
2909 }
2910 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2911 break;
2912 }
2913
2914 default:
2915 break;
2916 }
2917 }
2918
2919
2920 void
2921 vm_page_remove_from_specialq(
2922 vm_page_t mem)
2923 {
2924 vm_object_t m_object;
2925 unsigned short mode;
2926
2927 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2928
2929 mode = mem->vmp_on_specialq;
2930
2931 switch (mode) {
2932 case VM_PAGE_SPECIAL_Q_BG:
2933 {
2934 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2935 vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2936
2937 mem->vmp_specialq.next = 0;
2938 mem->vmp_specialq.prev = 0;
2939
2940 vm_page_background_count--;
2941
2942 m_object = VM_PAGE_OBJECT(mem);
2943
2944 if (m_object->internal) {
2945 vm_page_background_internal_count--;
2946 } else {
2947 vm_page_background_external_count--;
2948 }
2949 }
2950 break;
2951 }
2952
2953 case VM_PAGE_SPECIAL_Q_DONATE:
2954 {
2955 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2956 vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2957 mem->vmp_specialq.next = 0;
2958 mem->vmp_specialq.prev = 0;
2959 vm_page_donate_count--;
2960 if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2961 assert(vm_page_donate_target == vm_page_donate_target_low);
2962 vm_page_donate_target = vm_page_donate_target_high;
2963 vm_page_donate_queue_ripe = false;
2964 }
2965 }
2966
2967 break;
2968 }
2969
2970 default:
2971 {
2972 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2973 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2974 break;
2975 }
2976 }
2977 }
2978
2979
2980 void
2981 vm_page_add_to_specialq(
2982 vm_page_t mem,
2983 boolean_t first)
2984 {
2985 vm_object_t m_object;
2986
2987 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2988
2989 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2990 return;
2991 }
2992
2993 int mode = mem->vmp_on_specialq;
2994
2995 switch (mode) {
2996 case VM_PAGE_SPECIAL_Q_BG:
2997 {
2998 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2999 return;
3000 }
3001
3002 m_object = VM_PAGE_OBJECT(mem);
3003
3004 if (vm_page_background_exclude_external && !m_object->internal) {
3005 return;
3006 }
3007
3008 if (first == TRUE) {
3009 vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
3010 } else {
3011 vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
3012 }
3013 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3014
3015 vm_page_background_count++;
3016
3017 if (m_object->internal) {
3018 vm_page_background_internal_count++;
3019 } else {
3020 vm_page_background_external_count++;
3021 }
3022 break;
3023 }
3024
3025 case VM_PAGE_SPECIAL_Q_DONATE:
3026 {
3027 if (first == TRUE) {
3028 vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3029 } else {
3030 vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3031 }
3032 vm_page_donate_count++;
3033 if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3034 assert(vm_page_donate_target == vm_page_donate_target_high);
3035 vm_page_donate_target = vm_page_donate_target_low;
3036 vm_page_donate_queue_ripe = true;
3037 }
3038 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3039 break;
3040 }
3041
3042 default:
3043 break;
3044 }
3045 }
3046
3047 /*
3048 * This can be switched to FALSE to help debug drivers
3049 * that are having problems with memory > 4G.
3050 */
3051 boolean_t vm_himemory_mode = TRUE;
3052
3053 /*
3054 * this interface exists to support hardware controllers
3055 * incapable of generating DMAs with more than 32 bits
3056 * of address on platforms with physical memory > 4G...
3057 */
3058 unsigned int vm_lopages_allocated_q = 0;
3059 unsigned int vm_lopages_allocated_cpm_success = 0;
3060 unsigned int vm_lopages_allocated_cpm_failed = 0;
3061 vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3062
3063 vm_page_t
3064 vm_page_grablo(void)
3065 {
3066 vm_page_t mem;
3067
3068 if (vm_lopage_needed == FALSE) {
3069 return vm_page_grab();
3070 }
3071
3072 vm_free_page_lock_spin();
3073
3074 if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3075 vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3076 assert(vm_lopage_free_count);
3077 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3078 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3079
3080 vm_lopage_free_count--;
3081 vm_lopages_allocated_q++;
3082
3083 if (vm_lopage_free_count < vm_lopage_lowater) {
3084 vm_lopage_refill = TRUE;
3085 }
3086
3087 vm_free_page_unlock();
3088
3089 if (current_task()->donates_own_pages) {
3090 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3091 } else {
3092 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3093 }
3094 } else {
3095 vm_free_page_unlock();
3096
3097 if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3098 vm_free_page_lock_spin();
3099 vm_lopages_allocated_cpm_failed++;
3100 vm_free_page_unlock();
3101
3102 return VM_PAGE_NULL;
3103 }
3104 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3105
3106 mem->vmp_busy = TRUE;
3107
3108 vm_page_lockspin_queues();
3109
3110 mem->vmp_gobbled = FALSE;
3111 vm_page_gobble_count--;
3112 vm_page_wire_count--;
3113
3114 vm_lopages_allocated_cpm_success++;
3115 vm_page_unlock_queues();
3116 }
3117 assert(mem->vmp_busy);
3118 assert(!mem->vmp_pmapped);
3119 assert(!mem->vmp_wpmapped);
3120 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3121
3122 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3123
3124 counter_inc(&vm_page_grab_count);
3125 VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3126
3127 return mem;
3128 }
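
/*
 * A minimal usage sketch (compiled out): the intended consumer of
 * vm_page_grablo() is a driver whose device can only address the low 4GB.
 * The helper is hypothetical; when vm_lopage_needed is set, pages from the
 * lopage queue are 32-bit addressable.
 */
#if 0 /* illustrative example only */
static vm_page_t
grab_dma32_page_example(void)
{
	vm_page_t m = vm_page_grablo();

	if (m != VM_PAGE_NULL && vm_lopage_needed) {
		pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(m));

		assert(paddr <= 0xFFFFFFFFULL);
	}
	return m;
}
#endif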
3129
3130 /*
3131 * vm_page_grab:
3132 *
3133 * first try to grab a page from the per-cpu free list...
3134 * this must be done while preemption is disabled... if
3135 * a page is available, we're done...
3136 * if no page is available, grab the vm_page_queue_free_lock
3137 * and see if the current number of free pages would allow us
3138 * to grab at least 1... if not, return VM_PAGE_NULL as before...
3139 * if there are pages available, disable preemption and
3140 * recheck the state of the per-cpu free list... we could
3141 * have been preempted and moved to a different cpu, or
3142 * some other thread could have re-filled it... if still
3143 * empty, figure out how many pages we can steal from the
3144 * global free queue and move to the per-cpu queue...
3145 * return 1 of these pages when done... only wake up the
3146 * pageout_scan thread if we moved pages from the global
3147 * list... no need for the wakeup if we've satisfied the
3148 * request from the per-cpu queue.
3149 */
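
/*
 * A minimal sketch (compiled out) of the per-cpu fast path described
 * above, reduced to its core shape. Names follow the real code below;
 * this is a summary, not a drop-in replacement.
 */
#if 0 /* illustrative example only */
static vm_page_t
per_cpu_grab_fast_path_example(void)
{
	vm_page_t mem;

	disable_preemption();                /* pin to this CPU's free-page magazine */
	mem = *PERCPU_GET(free_pages);
	if (mem != VM_PAGE_NULL) {
		*PERCPU_GET(free_pages) = mem->vmp_snext;
	}
	enable_preemption();
	return mem;                          /* NULL means take the slow path */
}
#endif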
3150
3151 #if CONFIG_SECLUDED_MEMORY
3152 vm_page_t vm_page_grab_secluded(void);
3153 #endif /* CONFIG_SECLUDED_MEMORY */
3154
3155 static inline void
3156 vm_page_grab_diags(void);
3157
3158 /*
3159 * vm_page_validate_no_references:
3160 *
3161 * Make sure the physical page has no refcounts.
3162 */
3164 static inline void
3165 vm_page_validate_no_references(
3166 vm_page_t mem)
3167 {
3168 bool is_freed;
3169
3170 if (mem->vmp_fictitious) {
3171 return;
3172 }
3173
3174 pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
3175
3176 #if CONFIG_SPTM
3177 is_freed = pmap_is_page_free(paddr);
3178 #else
3179 is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
3180 #endif /* CONFIG_SPTM */
3181
3182 if (!is_freed) {
3183 /*
3184 * There is some redundancy here, but we are going to panic anyway,
3185 * and ASSERT_PMAP_FREE traces useful information. So, we keep this
3186 * behavior.
3187 */
3188 ASSERT_PMAP_FREE(mem);
3189 panic("%s: page 0x%llx is referenced", __func__, paddr);
3190 }
3191 }
3192
3193 vm_page_t
3194 vm_page_grab(void)
3195 {
3196 return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3197 }
3198
3199 #if HIBERNATION
3200 boolean_t hibernate_rebuild_needed = FALSE;
3201 #endif /* HIBERNATION */
3202
3203 static void
3204 vm_page_finalize_grabed_page(vm_page_t mem)
3205 {
3206 task_t cur_task = current_task_early();
3207 if (cur_task && cur_task != kernel_task) {
3208 /* tag:DONATE this is where the donate state of the page is decided according to what task grabs it */
3209 if (cur_task->donates_own_pages) {
3210 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3211 } else {
3212 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3213 }
3214 }
3215 }
3216
3217 vm_page_t
3218 vm_page_grab_options(
3219 int grab_options)
3220 {
3221 vm_page_t mem;
3222
3223 restart:
3224 disable_preemption();
3225
3226 if ((mem = *PERCPU_GET(free_pages))) {
3227 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3228
3229 #if HIBERNATION
3230 if (hibernate_rebuild_needed) {
3231 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3232 }
3233 #endif /* HIBERNATION */
3234
3235 vm_page_grab_diags();
3236
3237 vm_offset_t pcpu_base = current_percpu_base();
3238 counter_inc_preemption_disabled(&vm_page_grab_count);
3239 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3240 VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3241
3242 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3243 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3244 enable_preemption();
3245
3246 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3247 assert(mem->vmp_tabled == FALSE);
3248 assert(mem->vmp_object == 0);
3249 assert(!mem->vmp_laundry);
3250 assert(mem->vmp_busy);
3251 assert(!mem->vmp_pmapped);
3252 assert(!mem->vmp_wpmapped);
3253 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3254 assert(!mem->vmp_realtime);
3255
3256 vm_page_validate_no_references(mem);
3257 vm_page_finalize_grabed_page(mem);
3258 return mem;
3259 }
3260 enable_preemption();
3261
3262
3263 /*
3264 * Optionally produce warnings if the wire or gobble
3265 * counts exceed some threshold.
3266 */
3267 #if VM_PAGE_WIRE_COUNT_WARNING
3268 if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3269 printf("mk: vm_page_grab(): high wired page count of %d\n",
3270 vm_page_wire_count);
3271 }
3272 #endif
3273 #if VM_PAGE_GOBBLE_COUNT_WARNING
3274 if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3275 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3276 vm_page_gobble_count);
3277 }
3278 #endif
3279
3280 /*
3281 * If free count is low and we have delayed pages from early boot,
3282 * get one of those instead.
3283 */
3284 if (__improbable(vm_delayed_count > 0 &&
3285 vm_page_free_count <= vm_page_free_target &&
3286 (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3287 assert(!mem->vmp_realtime);
3288 // TODO: missing vm_page_finalize_grabed_page()?
3289 return mem;
3290 }
3291
3292 vm_free_page_lock_spin();
3293
3294 /*
3295 * Only let privileged threads (involved in pageout)
3296 * dip into the reserved pool.
3297 */
3298 if ((vm_page_free_count < vm_page_free_reserved) &&
3299 !(current_thread()->options & TH_OPT_VMPRIV)) {
3300 /* no page for us in the free queue... */
3301 vm_free_page_unlock();
3302 mem = VM_PAGE_NULL;
3303
3304 #if CONFIG_SECLUDED_MEMORY
3305 /* ... but can we try and grab from the secluded queue? */
3306 if (vm_page_secluded_count > 0 &&
3307 ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3308 task_can_use_secluded_mem(current_task(), TRUE))) {
3309 mem = vm_page_grab_secluded();
3310 if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3311 vm_page_secluded.grab_for_iokit++;
3312 if (mem) {
3313 vm_page_secluded.grab_for_iokit_success++;
3314 }
3315 }
3316 if (mem) {
3317 VM_CHECK_MEMORYSTATUS;
3318
3319 vm_page_grab_diags();
3320 counter_inc(&vm_page_grab_count);
3321 VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3322
3323 assert(!mem->vmp_realtime);
3324 // TODO: missing vm_page_finalize_grabed_page()?
3325 return mem;
3326 }
3327 }
3328 #else /* CONFIG_SECLUDED_MEMORY */
3329 (void) grab_options;
3330 #endif /* CONFIG_SECLUDED_MEMORY */
3331 } else {
3332 vm_page_t head;
3333 vm_page_t tail;
3334 unsigned int pages_to_steal;
3335 unsigned int color;
3336 unsigned int clump_end, sub_count;
3337
3338 /*
3339 * Replenishing our per-CPU cache of free pages might take
3340 * too long to keep holding the "free_page" lock as a spinlock,
3341 * so convert to the full mutex to prevent other threads trying
3342 * to acquire the "free_page" lock from timing out spinning on
3343 * the mutex interlock.
3344 */
3345 vm_free_page_lock_convert();
3346
3347 while (vm_page_free_count == 0) {
3348 vm_free_page_unlock();
3349 /*
3350 * must be a privileged thread to be
3351 * in this state since a non-privileged
3352 * thread would have bailed if we were
3353 * under the vm_page_free_reserved mark
3354 */
3355 VM_PAGE_WAIT();
3356 vm_free_page_lock();
3357 }
3358
3359 /*
3360 * Need to repopulate the per-CPU free list from the global free list.
3361 * Note we don't do any processing of pending retirement pages here.
3362 * That'll happen in the code above when the page comes off the per-CPU list.
3363 */
3364 disable_preemption();
3365
3366 /*
3367 * If we got preempted the cache might now have pages.
3368 */
3369 if ((mem = *PERCPU_GET(free_pages))) {
3370 vm_free_page_unlock();
3371 enable_preemption();
3372 goto restart;
3373 }
3374
3375 if (vm_page_free_count <= vm_page_free_reserved) {
3376 pages_to_steal = 1;
3377 } else {
3378 if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3379 pages_to_steal = vm_free_magazine_refill_limit;
3380 } else {
3381 pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3382 }
3383 }
3384 color = *PERCPU_GET(start_color);
3385 head = tail = NULL;
3386
3387 vm_page_free_count -= pages_to_steal;
3388 clump_end = sub_count = 0;
3389
3390 while (pages_to_steal--) {
3391 while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3392 color = (color + 1) & vm_color_mask;
3393 }
3394 #if defined(__x86_64__)
3395 vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3396 mem, clump_end);
3397 #else
3398 vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3399 mem, vmp_pageq);
3400 #endif
3401
3402 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3403
3404 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3405
3406 #if defined(__arm64__)
3407 color = (color + 1) & vm_color_mask;
3408 #else
3409
3410 #if DEVELOPMENT || DEBUG
3411
3412 sub_count++;
3413 if (clump_end) {
3414 vm_clump_update_stats(sub_count);
3415 sub_count = 0;
3416 color = (color + 1) & vm_color_mask;
3417 }
3418 #else
3419 if (clump_end) {
3420 color = (color + 1) & vm_color_mask;
3421 }
3422
3423 #endif /* if DEVELOPMENT || DEBUG */
3424
3425 #endif /* if defined(__arm64__) */
3426
3427 if (head == NULL) {
3428 head = mem;
3429 } else {
3430 tail->vmp_snext = mem;
3431 }
3432 tail = mem;
3433
3434 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3435 assert(mem->vmp_tabled == FALSE);
3436 assert(mem->vmp_object == 0);
3437 assert(!mem->vmp_laundry);
3438
3439 mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3440
3441 assert(mem->vmp_busy);
3442 assert(!mem->vmp_pmapped);
3443 assert(!mem->vmp_wpmapped);
3444 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3445 assert(!mem->vmp_realtime);
3446
3447 vm_page_validate_no_references(mem);
3448 }
3449 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3450 vm_clump_update_stats(sub_count);
3451 #endif
3452
3453 #if HIBERNATION
3454 if (hibernate_rebuild_needed) {
3455 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3456 }
3457 #endif /* HIBERNATION */
3458 vm_offset_t pcpu_base = current_percpu_base();
3459 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3460 *PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3461
3462 /*
3463 * We decremented vm_page_free_count above
3464 * so we must wake up vm_pageout_scan() if
3465 * we brought it down below vm_page_free_min.
3466 */
3467 bool wakeup_pageout_scan = false;
3468 if (vm_page_free_count < vm_page_free_min &&
3469 !vm_pageout_running) {
3470 wakeup_pageout_scan = true;
3471 }
3472 vm_free_page_unlock();
3473
3474 enable_preemption();
3475
3476 if (wakeup_pageout_scan) {
3477 thread_wakeup((event_t) &vm_page_free_wanted);
3478 }
3479 VM_CHECK_MEMORYSTATUS;
3480
3481 goto restart;
3482 }
3483
3484 /*
3485 * Decide if we should poke the pageout daemon.
3486 * We do this if the free count is less than the low
3487 * water mark. VM Pageout Scan will keep running till
3488 * the free_count > free_target (& hence above free_min).
3489 * This wakeup is to catch the possibility of the counts
3490 * dropping between VM Pageout Scan parking and this check.
3491 *
3492 * We don't have the counts locked ... if they change a little,
3493 * it doesn't really matter.
3494 */
3495 if (vm_page_free_count < vm_page_free_min) {
3496 vm_free_page_lock();
3497 if (vm_pageout_running == FALSE) {
3498 vm_free_page_unlock();
3499 thread_wakeup((event_t) &vm_page_free_wanted);
3500 } else {
3501 vm_free_page_unlock();
3502 }
3503 }
3504
3505 VM_CHECK_MEMORYSTATUS;
3506
3507 if (mem) {
3508 assert(!mem->vmp_realtime);
3509 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
3510
3511 vm_page_finalize_grabed_page(mem);
3512 }
3513 return mem;
3514 }
3515
3516 #if CONFIG_SECLUDED_MEMORY
3517 vm_page_t
3518 vm_page_grab_secluded(void)
3519 {
3520 vm_page_t mem;
3521 vm_object_t object;
3522 int refmod_state;
3523
3524 if (vm_page_secluded_count == 0) {
3525 /* no secluded pages to grab... */
3526 return VM_PAGE_NULL;
3527 }
3528
3529 /* secluded queue is protected by the VM page queue lock */
3530 vm_page_lock_queues();
3531
3532 if (vm_page_secluded_count == 0) {
3533 /* no secluded pages to grab... */
3534 vm_page_unlock_queues();
3535 return VM_PAGE_NULL;
3536 }
3537
3538 #if 00
3539 /* can we grab from the secluded queue? */
3540 if (vm_page_secluded_count > vm_page_secluded_target ||
3541 (vm_page_secluded_count > 0 &&
3542 task_can_use_secluded_mem(current_task(), TRUE))) {
3543 /* OK */
3544 } else {
3545 /* can't grab from secluded queue... */
3546 vm_page_unlock_queues();
3547 return VM_PAGE_NULL;
3548 }
3549 #endif
3550
3551 /* we can grab a page from secluded queue! */
3552 assert((vm_page_secluded_count_free +
3553 vm_page_secluded_count_inuse) ==
3554 vm_page_secluded_count);
3555 if (current_task()->task_can_use_secluded_mem) {
3556 assert(num_tasks_can_use_secluded_mem > 0);
3557 }
3558 assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3559 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3560 mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3561 assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3562 vm_page_queues_remove(mem, TRUE);
3563
3564 object = VM_PAGE_OBJECT(mem);
3565
3566 assert(!mem->vmp_fictitious);
3567 assert(!VM_PAGE_WIRED(mem));
3568 if (object == VM_OBJECT_NULL) {
3569 /* free for grab! */
3570 vm_page_unlock_queues();
3571 vm_page_secluded.grab_success_free++;
3572
3573 assert(mem->vmp_busy);
3574 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3575 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3576 assert(mem->vmp_pageq.next == 0);
3577 assert(mem->vmp_pageq.prev == 0);
3578 assert(mem->vmp_listq.next == 0);
3579 assert(mem->vmp_listq.prev == 0);
3580 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3581 assert(mem->vmp_specialq.next == 0);
3582 assert(mem->vmp_specialq.prev == 0);
3583 return mem;
3584 }
3585
3586 assert(!object->internal);
3587 // vm_page_pageable_external_count--;
3588
3589 if (!vm_object_lock_try(object)) {
3590 // printf("SECLUDED: page %p: object %p locked\n", mem, object);
3591 vm_page_secluded.grab_failure_locked++;
3592 reactivate_secluded_page:
3593 vm_page_activate(mem);
3594 vm_page_unlock_queues();
3595 return VM_PAGE_NULL;
3596 }
3597 if (mem->vmp_busy ||
3598 mem->vmp_cleaning ||
3599 mem->vmp_laundry) {
3600 /* can't steal page in this state... */
3601 vm_object_unlock(object);
3602 vm_page_secluded.grab_failure_state++;
3603 goto reactivate_secluded_page;
3604 }
3605 if (mem->vmp_realtime) {
3606 /* don't steal pages used by realtime threads... */
3607 vm_object_unlock(object);
3608 vm_page_secluded.grab_failure_realtime++;
3609 goto reactivate_secluded_page;
3610 }
3611
3612 mem->vmp_busy = TRUE;
3613 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3614 if (refmod_state & VM_MEM_REFERENCED) {
3615 mem->vmp_reference = TRUE;
3616 }
3617 if (refmod_state & VM_MEM_MODIFIED) {
3618 SET_PAGE_DIRTY(mem, FALSE);
3619 }
3620 if (mem->vmp_dirty || mem->vmp_precious) {
3621 /* can't grab a dirty page; re-activate */
3622 // printf("SECLUDED: dirty page %p\n", mem);
3623 vm_page_wakeup_done(object, mem);
3624 vm_page_secluded.grab_failure_dirty++;
3625 vm_object_unlock(object);
3626 goto reactivate_secluded_page;
3627 }
3628 if (mem->vmp_reference) {
3629 /* it's been used but we do need to grab a page... */
3630 }
3631
3632 vm_page_unlock_queues();
3633
3634
3635 /* finish what vm_page_free() would have done... */
3636 vm_page_free_prepare_object(mem, TRUE);
3637 vm_object_unlock(object);
3638 object = VM_OBJECT_NULL;
3639
3640 vm_page_validate_no_references(mem);
3641
3642 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3643 vm_page_secluded.grab_success_other++;
3644
3645 assert(mem->vmp_busy);
3646 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3647 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3648 assert(mem->vmp_pageq.next == 0);
3649 assert(mem->vmp_pageq.prev == 0);
3650 assert(mem->vmp_listq.next == 0);
3651 assert(mem->vmp_listq.prev == 0);
3652 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3653 assert(mem->vmp_specialq.next == 0);
3654 assert(mem->vmp_specialq.prev == 0);
3655
3656 return mem;
3657 }
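
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a consumer that prefers a secluded page but falls back to the
 * regular free lists when none can be stolen.
 *
 *	vm_page_t page = vm_page_grab_secluded();
 *	if (page == VM_PAGE_NULL) {
 *		page = vm_page_grab();
 *	}
 */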

uint64_t
vm_page_secluded_drain(void)
{
	vm_page_t       local_freeq;
	int             local_freed;
	uint64_t        num_reclaimed;
	unsigned int    saved_secluded_count, saved_secluded_target;

	num_reclaimed = 0;
	local_freeq = NULL;
	local_freed = 0;

	vm_page_lock_queues();

	saved_secluded_count = vm_page_secluded_count;
	saved_secluded_target = vm_page_secluded_target;
	vm_page_secluded_target = 0;
	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	while (vm_page_secluded_count) {
		vm_page_t secluded_page;

		assert((vm_page_secluded_count_free +
		    vm_page_secluded_count_inuse) ==
		    vm_page_secluded_count);
		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);

		vm_page_queues_remove(secluded_page, FALSE);
		assert(!secluded_page->vmp_fictitious);
		assert(!VM_PAGE_WIRED(secluded_page));

		if (secluded_page->vmp_object == 0) {
			/* transfer to free queue */
			assert(secluded_page->vmp_busy);
			secluded_page->vmp_snext = local_freeq;
			local_freeq = secluded_page;
			local_freed += 1;
		} else {
			/* transfer to head of active queue */
			vm_page_enqueue_active(secluded_page, FALSE);
			secluded_page = VM_PAGE_NULL;
		}
		num_reclaimed++;
	}
	vm_page_secluded_target = saved_secluded_target;
	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();

//	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);

	vm_page_unlock_queues();

	if (local_freed) {
		vm_page_free_list(local_freeq, TRUE);
		local_freeq = NULL;
		local_freed = 0;
	}

	return num_reclaimed;
}
#endif /* CONFIG_SECLUDED_MEMORY */

static inline void
vm_page_grab_diags(void)
{
#if DEVELOPMENT || DEBUG
	task_t task = current_task_early();
	if (task == NULL) {
		return;
	}

	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
#endif /* DEVELOPMENT || DEBUG */
}
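
/*
 * Note: on DEVELOPMENT/DEBUG kernels the count above is visible as the
 * per-task "pages_grabbed" ledger entry; on RELEASE kernels this helper
 * compiles down to nothing.
 */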

/*
 *	vm_page_release:
 *
 *	Return a page to the free list.
 */

void
vm_page_release(
	vm_page_t       mem,
	boolean_t       page_queues_locked)
{
	unsigned int    color;
	int             need_wakeup = 0;
	int             need_priv_wakeup = 0;
#if CONFIG_SECLUDED_MEMORY
	int             need_secluded_wakeup = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
	event_t         wakeup_event = NULL;

	if (page_queues_locked) {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	} else {
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
	}

	assert(!mem->vmp_private && !mem->vmp_fictitious);

#if MACH_ASSERT
	if (vm_check_refs_on_free) {
		vm_page_validate_no_references(mem);
	}
#endif /* MACH_ASSERT */

//	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */

	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));

	if (__improbable(mem->vmp_realtime)) {
		if (!page_queues_locked) {
			vm_page_lock_queues();
		}
		if (mem->vmp_realtime) {
			mem->vmp_realtime = false;
			vm_page_realtime_count--;
		}
		if (!page_queues_locked) {
			vm_page_unlock_queues();
		}
	}

	vm_free_page_lock_spin();

	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	assert(mem->vmp_busy);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_object == 0);
	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);

	/* Clear any specialQ hints before releasing page to the free pool */
	mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;

	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
	    vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		/*
		 * this exists to support hardware controllers
		 * incapable of generating DMAs with more than 32 bits
		 * of address on platforms with physical memory > 4G...
		 */
		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
		vm_lopage_free_count++;

		if (vm_lopage_free_count >= vm_lopage_free_limit) {
			vm_lopage_refill = FALSE;
		}

		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
		mem->vmp_lopage = TRUE;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_free_count > vm_page_free_reserved &&
	    vm_page_secluded_count < vm_page_secluded_target &&
	    num_tasks_can_use_secluded_mem == 0) {
		/*
		 * XXX FBDP TODO: also avoid refilling secluded queue
		 * when some IOKit objects are already grabbing from it...
		 */
		if (!page_queues_locked) {
			if (!vm_page_trylock_queues()) {
				/* take locks in right order */
				vm_free_page_unlock();
				vm_page_lock_queues();
				vm_free_page_lock_spin();
			}
		}
		mem->vmp_lopage = FALSE;
		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		vm_page_secluded_count_free++;
		if (!page_queues_locked) {
			vm_page_unlock_queues();
		}
		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
		if (vm_page_free_wanted_secluded > 0) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
		}
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

		color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
#else
		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
#endif
		vm_page_free_count++;
		/*
		 * Check if we should wake up someone waiting for a page.
		 * But don't bother waking them unless they can allocate.
		 *
		 * We wake up only one thread, to prevent starvation.
		 * Because the scheduling system handles wait queues FIFO,
		 * if we wake up all waiting threads, one greedy thread
		 * can starve multiple niceguy threads.  When the threads
		 * all wake up, the greedy thread runs first, grabs the page,
		 * and waits for another page.  It will be the first to run
		 * when the next page is freed.
		 *
		 * However, there is a slight danger here.
		 * The thread we wake might not use the free page.
		 * Then the other threads could wait indefinitely
		 * while the page goes unused.  To forestall this,
		 * the pageout daemon will keep making free pages
		 * as long as vm_page_free_wanted is non-zero.
		 */

		assert(vm_page_free_count > 0);
		if (vm_page_free_wanted_privileged > 0) {
			vm_page_free_wanted_privileged--;
			need_priv_wakeup = 1;
#if CONFIG_SECLUDED_MEMORY
		} else if (vm_page_free_wanted_secluded > 0 &&
		    vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted_secluded--;
			need_secluded_wakeup = 1;
#endif /* CONFIG_SECLUDED_MEMORY */
		} else if (vm_page_free_wanted > 0 &&
		    vm_page_free_count > vm_page_free_reserved) {
			vm_page_free_wanted--;
			need_wakeup = 1;
		}
	}
	vm_pageout_vminfo.vm_page_pages_freed++;

	vm_free_page_unlock();

	VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);

	if (need_priv_wakeup) {
		wakeup_event = &vm_page_free_wanted_privileged;
	}
#if CONFIG_SECLUDED_MEMORY
	else if (need_secluded_wakeup) {
		wakeup_event = &vm_page_free_wanted_secluded;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	else if (need_wakeup) {
		wakeup_event = &vm_page_free_count;
	}

	if (wakeup_event) {
		if (vps_dynamic_priority_enabled) {
			wakeup_one_with_inheritor((event_t) wakeup_event,
			    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
			    NULL);
		} else {
			thread_wakeup_one((event_t) wakeup_event);
		}
	}

	VM_CHECK_MEMORYSTATUS;
}
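
/*
 * Illustrative sketch (hypothetical, not part of this file): a page
 * obtained from vm_page_grab() and never inserted into an object is
 * still busy and off all queues, which is exactly the state the
 * asserts above demand, so it can be returned directly:
 *
 *	vm_page_release(page, FALSE);	// page queues lock not held
 */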

/*
 * This version of vm_page_release() is used only at startup
 * when we are single-threaded and pages are being released
 * for the first time.  Hence, no locking is needed and the
 * usual sanity checks are skipped.
 * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
 */
void
vm_page_release_startup(
	vm_page_t       mem)
{
	vm_page_queue_t queue_free;

	if (vm_lopage_free_count < vm_lopage_free_limit &&
	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
		mem->vmp_lopage = TRUE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
		vm_lopage_free_count++;
		queue_free = &vm_lopage_queue_free;
#if CONFIG_SECLUDED_MEMORY
	} else if (vm_page_secluded_count < vm_page_secluded_target) {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
		vm_page_secluded_count++;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		vm_page_secluded_count_free++;
		queue_free = &vm_page_queue_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
		vm_page_free_count++;
		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
	}
	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
#if defined(__x86_64__)
		vm_page_queue_enter_clump(queue_free, mem);
#else
		vm_page_queue_enter(queue_free, mem, vmp_pageq);
#endif
	} else {
		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
	}
}

/*
 *	vm_page_wait:
 *
 *	Wait for a page to become available.
 *	If there are plenty of free pages, then we don't sleep.
 *
 *	Returns:
 *		TRUE:  There may be another page, try again
 *		FALSE: We were interrupted out of our wait, don't try again
 */

boolean_t
vm_page_wait(
	int     interruptible)
{
	/*
	 * We can't use vm_page_free_reserved to make this
	 * determination.  Consider: some thread might
	 * need to allocate two pages.  The first allocation
	 * succeeds, the second fails.  After the first page is freed,
	 * a call to vm_page_wait must really block.
	 */
	kern_return_t   wait_result;
	int             need_wakeup = 0;
	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
	event_t         wait_event = NULL;

	vm_free_page_lock_spin();

	if (is_privileged && vm_page_free_count) {
		vm_free_page_unlock();
		return TRUE;
	}

	if (vm_page_free_count >= vm_page_free_target) {
		vm_free_page_unlock();
		return TRUE;
	}

	if (is_privileged) {
		if (vm_page_free_wanted_privileged++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_wanted_privileged;
#if CONFIG_SECLUDED_MEMORY
	} else if (secluded_for_apps &&
	    task_can_use_secluded_mem(current_task(), FALSE)) {
#if 00
		/* XXX FBDP: need pageq lock for this... */
		/* XXX FBDP: might wait even if pages available, */
		/* XXX FBDP: hopefully not for too long... */
		if (vm_page_secluded_count > 0) {
			vm_free_page_unlock();
			return TRUE;
		}
#endif
		if (vm_page_free_wanted_secluded++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_wanted_secluded;
#endif /* CONFIG_SECLUDED_MEMORY */
	} else {
		if (vm_page_free_wanted++ == 0) {
			need_wakeup = 1;
		}
		wait_event = (event_t)&vm_page_free_count;
	}

	/*
	 * We don't do a vm_pageout_scan wakeup if we already have
	 * some waiters because vm_pageout_scan checks for waiters
	 * before it returns and does so behind the vm_page_queue_free_lock,
	 * which we own when we bump the waiter counts.
	 */

	if (vps_dynamic_priority_enabled) {
		/*
		 * We are waking up vm_pageout_scan here.  If it needs
		 * the vm_page_queue_free_lock before we unlock it
		 * we'll end up just blocking and incur an extra
		 * context switch.  Could be a perf. issue.
		 */

		if (need_wakeup) {
			thread_wakeup((event_t)&vm_page_free_wanted);
		}

		/*
		 * LD: This event is going to get recorded every time because
		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
		 * We just block in that routine.
		 */
		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
		    vm_page_free_wanted_privileged,
		    vm_page_free_wanted,
#if CONFIG_SECLUDED_MEMORY
		    vm_page_free_wanted_secluded,
#else /* CONFIG_SECLUDED_MEMORY */
		    0,
#endif /* CONFIG_SECLUDED_MEMORY */
		    0);
		wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
		    LCK_SLEEP_UNLOCK,
		    wait_event,
		    vm_pageout_scan_thread,
		    interruptible,
		    0);
	} else {
		wait_result = assert_wait(wait_event, interruptible);

		vm_free_page_unlock();

		if (need_wakeup) {
			thread_wakeup((event_t)&vm_page_free_wanted);
		}

		if (wait_result == THREAD_WAITING) {
			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
			    vm_page_free_wanted_privileged,
			    vm_page_free_wanted,
#if CONFIG_SECLUDED_MEMORY
			    vm_page_free_wanted_secluded,
#else /* CONFIG_SECLUDED_MEMORY */
			    0,
#endif /* CONFIG_SECLUDED_MEMORY */
			    0);
			wait_result = thread_block(THREAD_CONTINUE_NULL);
			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
			    DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
		}
	}

	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
}
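
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * the canonical allocate-or-wait loop built on top of vm_page_wait().
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(THREAD_INTERRUPTIBLE)) {
 *			return KERN_ABORTED;	// wait was interrupted
 *		}
 *	}
 */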

/*
 *	vm_page_alloc:
 *
 *	Allocate and return a memory cell associated
 *	with this VM object/offset pair.
 *
 *	Object must be locked.
 */

vm_page_t
vm_page_alloc(
	vm_object_t             object,
	vm_object_offset_t      offset)
{
	vm_page_t       mem;
	int             grab_options;

	vm_object_lock_assert_exclusive(object);
	grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
	if (object->can_grab_secluded) {
		grab_options |= VM_PAGE_GRAB_SECLUDED;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	mem = vm_page_grab_options(grab_options);
	if (mem == VM_PAGE_NULL) {
		return VM_PAGE_NULL;
	}

	vm_page_insert(mem, object, offset);

	return mem;
}
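
/*
 * Illustrative sketch (hypothetical, not part of this file): the object
 * lock must be held exclusively across the allocation and for as long
 * as the new busy page is being set up.
 *
 *	vm_object_lock(object);
 *	mem = vm_page_alloc(object, offset);
 *	if (mem != VM_PAGE_NULL) {
 *		... initialize the busy page ...
 *		vm_page_wakeup_done(object, mem);
 *	}
 *	vm_object_unlock(object);
 */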

/*
 *	vm_page_free_prepare:
 *
 *	Removes page from any queue it may be on
 *	and disassociates it from its VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
static void
vm_page_free_prepare(
	vm_page_t       mem)
{
#if CONFIG_SPTM
	/**
	 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
	 *            created and destroyed. In order to catch potential cases where this
	 *            does not happen, add an appropriate assert here. This code should be
	 *            executed on every frame that is about to be released to the VM.
	 */
	const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
	__unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);

	assert(frame_type == XNU_DEFAULT);
#endif /* CONFIG_SPTM */

	vm_page_free_prepare_queues(mem);
	vm_page_free_prepare_object(mem, TRUE);
}

void
vm_page_free_prepare_queues(
	vm_page_t       mem)
{
	vm_object_t     m_object;

	VM_PAGE_CHECK(mem);

	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
	assert(!mem->vmp_cleaning);
	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (m_object) {
		vm_object_lock_assert_exclusive(m_object);
	}
	if (mem->vmp_laundry) {
		/*
		 * We may have to free a page while it's being laundered
		 * if we lost its pager (due to a forced unmount, for example).
		 * We need to call vm_pageout_steal_laundry() before removing
		 * the page from its VM object, so that we can remove it
		 * from its pageout queue and adjust the laundry accounting.
		 */
		vm_pageout_steal_laundry(mem, TRUE);
	}

	vm_page_queues_remove(mem, TRUE);

	if (__improbable(mem->vmp_realtime)) {
		mem->vmp_realtime = false;
		vm_page_realtime_count--;
	}

	if (VM_PAGE_WIRED(mem)) {
		assert(mem->vmp_wire_count > 0);

		if (m_object) {
			task_t          owner;
			int             ledger_idx_volatile;
			int             ledger_idx_nonvolatile;
			int             ledger_idx_volatile_compressed;
			int             ledger_idx_nonvolatile_compressed;
			int             ledger_idx_composite;
			int             ledger_idx_external_wired;
			boolean_t       do_footprint;

			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);

			assert(m_object->resident_page_count >=
			    m_object->wired_page_count);

			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
				OSAddAtomic(+1, &vm_page_purgeable_count);
				assert(vm_page_purgeable_wired_count > 0);
				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
			}
			if (m_object->internal &&
			    m_object->vo_owner != TASK_NULL &&
			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
			    m_object->purgable == VM_PURGABLE_EMPTY)) {
				owner = VM_OBJECT_OWNER(m_object);
				vm_object_ledger_tag_ledgers(
					m_object,
					&ledger_idx_volatile,
					&ledger_idx_nonvolatile,
					&ledger_idx_volatile_compressed,
					&ledger_idx_nonvolatile_compressed,
					&ledger_idx_composite,
					&ledger_idx_external_wired,
					&do_footprint);
				/*
				 * While wired, this page was accounted
				 * as "non-volatile" but it should now
				 * be accounted as "volatile".
				 */
				/* one less "non-volatile"... */
				ledger_debit(owner->ledger,
				    ledger_idx_nonvolatile,
				    PAGE_SIZE);
				if (do_footprint) {
					/* ... and "phys_footprint" */
					ledger_debit(owner->ledger,
					    task_ledgers.phys_footprint,
					    PAGE_SIZE);
				} else if (ledger_idx_composite != -1) {
					ledger_debit(owner->ledger,
					    ledger_idx_composite,
					    PAGE_SIZE);
				}
				/* one more "volatile" */
				ledger_credit(owner->ledger,
				    ledger_idx_volatile,
				    PAGE_SIZE);
			}
		}
		if (!mem->vmp_private && !mem->vmp_fictitious) {
			vm_page_wire_count--;
		}

		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
		mem->vmp_wire_count = 0;
		assert(!mem->vmp_gobbled);
	} else if (mem->vmp_gobbled) {
		if (!mem->vmp_private && !mem->vmp_fictitious) {
			vm_page_wire_count--;
		}
		vm_page_gobble_count--;
	}
}

void
vm_page_free_prepare_object(
	vm_page_t       mem,
	boolean_t       remove_from_hash)
{
	assert(!mem->vmp_realtime);
	if (mem->vmp_tabled) {
		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
	}
	vm_page_wakeup(VM_OBJECT_NULL, mem);            /* clears wanted */

	if (mem->vmp_private) {
		mem->vmp_private = FALSE;
		mem->vmp_fictitious = TRUE;
		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
	}
	if (!mem->vmp_fictitious) {
		assert(mem->vmp_pageq.next == 0);
		assert(mem->vmp_pageq.prev == 0);
		assert(mem->vmp_listq.next == 0);
		assert(mem->vmp_listq.prev == 0);
		assert(mem->vmp_specialq.next == 0);
		assert(mem->vmp_specialq.prev == 0);
		assert(mem->vmp_next_m == 0);

#if MACH_ASSERT
		if (vm_check_refs_on_free) {
			vm_page_validate_no_references(mem);
		}
#endif /* MACH_ASSERT */

		vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
	}
}
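
/*
 * Note on the vmp_private case above: a "private" page fronts physical
 * memory the VM does not manage, so freeing it only reclaims the
 * vm_page structure.  The page is downgraded to a fictitious page (its
 * physical address replaced with the fictitious sentinel) so that the
 * release path frees just the structure, not the frame.
 */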

/*
 *	vm_page_free:
 *
 *	Returns the given page to the free list,
 *	disassociating it from any VM object.
 *
 *	Object and page queues must be locked prior to entry.
 */
void
vm_page_free(
	vm_page_t       mem)
{
	vm_page_free_prepare(mem);

	if (mem->vmp_fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		vm_page_release(mem, TRUE);     /* page queues are locked */
	}
}

void
vm_page_free_unlocked(
	vm_page_t       mem,
	boolean_t       remove_from_hash)
{
	vm_page_lockspin_queues();
	vm_page_free_prepare_queues(mem);
	vm_page_unlock_queues();

	vm_page_free_prepare_object(mem, remove_from_hash);

	if (mem->vmp_fictitious) {
		vm_page_release_fictitious(mem);
	} else {
		vm_page_release(mem, FALSE);    /* page queues are not locked */
	}
}

/*
 * Free a list of pages.  The list can be up to several hundred pages,
 * as blocked up by vm_pageout_scan().
 * The big win is not having to take the free list lock once
 * per page.
 *
 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
 */
void
vm_page_free_list(
	vm_page_t       freeq,
	boolean_t       prepare_object)
{
	vm_page_t       mem;
	vm_page_t       nxt;
	vm_page_t       local_freeq;
	int             pg_count;

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);

	while (freeq) {
		pg_count = 0;
		local_freeq = VM_PAGE_NULL;
		mem = freeq;

		/*
		 * break up the processing into smaller chunks so
		 * that we can 'pipeline' the pages onto the
		 * free list w/o introducing too much
		 * contention on the global free queue lock
		 */
		while (mem && pg_count < 64) {
			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
			assert(mem->vmp_specialq.next == 0 &&
			    mem->vmp_specialq.prev == 0);
			/*
			 * &&
			 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
			 */
			nxt = mem->vmp_snext;
			mem->vmp_snext = NULL;
			assert(mem->vmp_pageq.prev == 0);

#if MACH_ASSERT
			if (vm_check_refs_on_free) {
				if (!mem->vmp_fictitious && !mem->vmp_private) {
					vm_page_validate_no_references(mem);
				}
			}
#endif /* MACH_ASSERT */

			if (__improbable(mem->vmp_realtime)) {
				vm_page_lock_queues();
				if (mem->vmp_realtime) {
					mem->vmp_realtime = false;
					vm_page_realtime_count--;
				}
				vm_page_unlock_queues();
			}

			if (prepare_object == TRUE) {
				vm_page_free_prepare_object(mem, TRUE);
			}

			if (!mem->vmp_fictitious) {
				assert(mem->vmp_busy);

				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
				    vm_lopage_free_count < vm_lopage_free_limit &&
				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
					vm_page_release(mem, FALSE); /* page queues are not locked */
#if CONFIG_SECLUDED_MEMORY
				} else if (vm_page_secluded_count < vm_page_secluded_target &&
				    num_tasks_can_use_secluded_mem == 0) {
					vm_page_release(mem,
					    FALSE); /* page queues are not locked */
#endif /* CONFIG_SECLUDED_MEMORY */
				} else {
					/*
					 * IMPORTANT: we can't set the page "free" here
					 * because that would make the page eligible for
					 * a physically-contiguous allocation (see
					 * vm_page_find_contiguous()) right away (we don't
					 * hold the vm_page_queue_free lock).  That would
					 * cause trouble because the page is not actually
					 * in the free queue yet...
					 */
					mem->vmp_snext = local_freeq;
					local_freeq = mem;
					pg_count++;

					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
				}
			} else {
				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
				vm_page_release_fictitious(mem);
			}
			mem = nxt;
		}
		freeq = mem;

		if ((mem = local_freeq)) {
			unsigned int    avail_free_count;
			unsigned int    need_wakeup = 0;
			unsigned int    need_priv_wakeup = 0;
#if CONFIG_SECLUDED_MEMORY
			unsigned int    need_wakeup_secluded = 0;
#endif /* CONFIG_SECLUDED_MEMORY */
			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;

			vm_free_page_lock_spin();

			while (mem) {
				int     color;

				nxt = mem->vmp_snext;

				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
				assert(mem->vmp_busy);
				assert(!mem->vmp_realtime);
				mem->vmp_lopage = FALSE;
				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

				color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
#else
				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
				    mem, vmp_pageq);
#endif
				mem = nxt;
			}
			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
			vm_page_free_count += pg_count;
			avail_free_count = vm_page_free_count;

			VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);

			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
				if (avail_free_count < vm_page_free_wanted_privileged) {
					need_priv_wakeup = avail_free_count;
					vm_page_free_wanted_privileged -= avail_free_count;
					avail_free_count = 0;
				} else {
					need_priv_wakeup = vm_page_free_wanted_privileged;
					avail_free_count -= vm_page_free_wanted_privileged;
					vm_page_free_wanted_privileged = 0;
				}
			}
#if CONFIG_SECLUDED_MEMORY
			if (vm_page_free_wanted_secluded > 0 &&
			    avail_free_count > vm_page_free_reserved) {
				unsigned int available_pages;
				available_pages = (avail_free_count -
				    vm_page_free_reserved);
				if (available_pages <
				    vm_page_free_wanted_secluded) {
					need_wakeup_secluded = available_pages;
					vm_page_free_wanted_secluded -=
					    available_pages;
					avail_free_count -= available_pages;
				} else {
					need_wakeup_secluded =
					    vm_page_free_wanted_secluded;
					avail_free_count -=
					    vm_page_free_wanted_secluded;
					vm_page_free_wanted_secluded = 0;
				}
			}
#endif /* CONFIG_SECLUDED_MEMORY */
			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
				unsigned int  available_pages;

				available_pages = avail_free_count - vm_page_free_reserved;

				if (available_pages >= vm_page_free_wanted) {
					need_wakeup = vm_page_free_wanted;
					vm_page_free_wanted = 0;
				} else {
					need_wakeup = available_pages;
					vm_page_free_wanted -= available_pages;
				}
			}
			vm_free_page_unlock();

			priv_wakeup_event = NULL;
			secluded_wakeup_event = NULL;
			normal_wakeup_event = NULL;

			priv_wakeup_all = FALSE;
			secluded_wakeup_all = FALSE;
			normal_wakeup_all = FALSE;

			if (need_priv_wakeup != 0) {
				/*
				 * There shouldn't be that many VM-privileged threads,
				 * so let's wake them all up, even if we don't quite
				 * have enough pages to satisfy them all.
				 */
				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
				priv_wakeup_all = TRUE;
			}
#if CONFIG_SECLUDED_MEMORY
			if (need_wakeup_secluded != 0 &&
			    vm_page_free_wanted_secluded == 0) {
				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
				secluded_wakeup_all = TRUE;
				need_wakeup_secluded = 0;
			} else {
				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
			}
#endif /* CONFIG_SECLUDED_MEMORY */
			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
				/*
				 * We don't expect to have any more waiters
				 * after this, so let's wake them all up at
				 * once.
				 */
				normal_wakeup_event = (event_t) &vm_page_free_count;
				normal_wakeup_all = TRUE;
				need_wakeup = 0;
			} else {
				normal_wakeup_event = (event_t) &vm_page_free_count;
			}

			if (priv_wakeup_event ||
#if CONFIG_SECLUDED_MEMORY
			    secluded_wakeup_event ||
#endif /* CONFIG_SECLUDED_MEMORY */
			    normal_wakeup_event) {
				if (vps_dynamic_priority_enabled) {
					if (priv_wakeup_all == TRUE) {
						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
					}

#if CONFIG_SECLUDED_MEMORY
					if (secluded_wakeup_all == TRUE) {
						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
					}

					while (need_wakeup_secluded-- != 0) {
						/*
						 * Wake up one waiter per page we just released.
						 */
						wakeup_one_with_inheritor(secluded_wakeup_event,
						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
					}
#endif /* CONFIG_SECLUDED_MEMORY */

					if (normal_wakeup_all == TRUE) {
						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
					}

					while (need_wakeup-- != 0) {
						/*
						 * Wake up one waiter per page we just released.
						 */
						wakeup_one_with_inheritor(normal_wakeup_event,
						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
						    NULL);
					}
				} else {
					/*
					 * Non-priority-aware wakeups.
					 */

					if (priv_wakeup_all == TRUE) {
						thread_wakeup(priv_wakeup_event);
					}

#if CONFIG_SECLUDED_MEMORY
					if (secluded_wakeup_all == TRUE) {
						thread_wakeup(secluded_wakeup_event);
					}

					while (need_wakeup_secluded-- != 0) {
						/*
						 * Wake up one waiter per page we just released.
						 */
						thread_wakeup_one(secluded_wakeup_event);
					}
#endif /* CONFIG_SECLUDED_MEMORY */

					if (normal_wakeup_all == TRUE) {
						thread_wakeup(normal_wakeup_event);
					}

					while (need_wakeup-- != 0) {
						/*
						 * Wake up one waiter per page we just released.
						 */
						thread_wakeup_one(normal_wakeup_event);
					}
				}
			}

			VM_CHECK_MEMORYSTATUS;
		}
	}
}
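
/*
 * Illustrative sketch (hypothetical, not part of this file): callers
 * such as vm_page_secluded_drain() above chain pages through vmp_snext
 * and hand the list off in a single call, paying for the free-list
 * lock once per chunk instead of once per page.
 *
 *	vm_page_t local_freeq = VM_PAGE_NULL;
 *
 *	// for each page to free:
 *	page->vmp_snext = local_freeq;
 *	local_freeq = page;
 *	...
 *	vm_page_free_list(local_freeq, TRUE);
 */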

/*
 *	vm_page_wire:
 *
 *	Mark this page as wired down by yet
 *	another map, removing it from paging queues
 *	as necessary.
 *
 *	The page's object and the page queues must be locked.
 */

void
vm_page_wire(
	vm_page_t       mem,
	vm_tag_t        tag,
	boolean_t       check_memorystatus)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

//	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);
	if (m_object) {
		vm_object_lock_assert_exclusive(m_object);
	} else {
		/*
		 * In theory, the page should be in an object before it
		 * gets wired, since we need to hold the object lock
		 * to update some fields in the page structure.
		 * However, some code (i386 pmap, for example) might want
		 * to wire a page before it gets inserted into an object.
		 * That's somewhat OK, as long as nobody else can get to
		 * that page and update it at the same time.
		 */
	}
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (!VM_PAGE_WIRED(mem)) {
		if (mem->vmp_laundry) {
			vm_pageout_steal_laundry(mem, TRUE);
		}

		vm_page_queues_remove(mem, TRUE);

		assert(mem->vmp_wire_count == 0);
		mem->vmp_q_state = VM_PAGE_IS_WIRED;

#if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
		if (mem->vmp_unmodified_ro == true) {
			/* Object and PageQ locks are held */
			mem->vmp_unmodified_ro = false;
			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
			vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
		}
#endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */

		if (m_object) {
			task_t          owner;
			int             ledger_idx_volatile;
			int             ledger_idx_nonvolatile;
			int             ledger_idx_volatile_compressed;
			int             ledger_idx_nonvolatile_compressed;
			int             ledger_idx_composite;
			int             ledger_idx_external_wired;
			boolean_t       do_footprint;

			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);

			assert(m_object->resident_page_count >=
			    m_object->wired_page_count);
			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
				assert(vm_page_purgeable_count > 0);
				OSAddAtomic(-1, &vm_page_purgeable_count);
				OSAddAtomic(1, &vm_page_purgeable_wired_count);
			}
			if (m_object->internal &&
			    m_object->vo_owner != TASK_NULL &&
			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
			    m_object->purgable == VM_PURGABLE_EMPTY)) {
				owner = VM_OBJECT_OWNER(m_object);
				vm_object_ledger_tag_ledgers(
					m_object,
					&ledger_idx_volatile,
					&ledger_idx_nonvolatile,
					&ledger_idx_volatile_compressed,
					&ledger_idx_nonvolatile_compressed,
					&ledger_idx_composite,
					&ledger_idx_external_wired,
					&do_footprint);
				/* less volatile bytes */
				ledger_debit(owner->ledger,
				    ledger_idx_volatile,
				    PAGE_SIZE);
				/* more not-quite-volatile bytes */
				ledger_credit(owner->ledger,
				    ledger_idx_nonvolatile,
				    PAGE_SIZE);
				if (do_footprint) {
					/* more footprint */
					ledger_credit(owner->ledger,
					    task_ledgers.phys_footprint,
					    PAGE_SIZE);
				} else if (ledger_idx_composite != -1) {
					ledger_credit(owner->ledger,
					    ledger_idx_composite,
					    PAGE_SIZE);
				}
			}

			if (m_object->all_reusable) {
				/*
				 * Wired pages are not counted as "re-usable"
				 * in "all_reusable" VM objects, so nothing
				 * to do here.
				 */
			} else if (mem->vmp_reusable) {
				/*
				 * This page is not "re-usable" when it's
				 * wired, so adjust its state and the
				 * accounting.
				 */
				vm_page_lockconvert_queues();
				vm_object_reuse_pages(m_object,
				    mem->vmp_offset,
				    mem->vmp_offset + PAGE_SIZE_64,
				    FALSE);
			}
		}
		assert(!mem->vmp_reusable);

		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
			vm_page_wire_count++;
		}
		if (mem->vmp_gobbled) {
			vm_page_gobble_count--;
		}
		mem->vmp_gobbled = FALSE;

		if (check_memorystatus == TRUE) {
			VM_CHECK_MEMORYSTATUS;
		}
	}
	assert(!mem->vmp_gobbled);
	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
	mem->vmp_wire_count++;
	if (__improbable(mem->vmp_wire_count == 0)) {
		panic("vm_page_wire(%p): wire_count overflow", mem);
	}
	VM_PAGE_CHECK(mem);
}

/*
 *	vm_page_unwire:
 *
 *	Release one wiring of this page, potentially
 *	enabling it to be paged again.
 *
 *	The page's object and the page queues must be locked.
 */
void
vm_page_unwire(
	vm_page_t       mem,
	boolean_t       queueit)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(mem);

//	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */

	VM_PAGE_CHECK(mem);
	assert(VM_PAGE_WIRED(mem));
	assert(mem->vmp_wire_count > 0);
	assert(!mem->vmp_gobbled);
	assert(m_object != VM_OBJECT_NULL);
	vm_object_lock_assert_exclusive(m_object);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	if (--mem->vmp_wire_count == 0) {
		task_t          owner;
		int             ledger_idx_volatile;
		int             ledger_idx_nonvolatile;
		int             ledger_idx_volatile_compressed;
		int             ledger_idx_nonvolatile_compressed;
		int             ledger_idx_composite;
		int             ledger_idx_external_wired;
		boolean_t       do_footprint;

		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
		if (!mem->vmp_private && !mem->vmp_fictitious) {
			vm_page_wire_count--;
		}

		assert(m_object->resident_page_count >=
		    m_object->wired_page_count);
		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
			OSAddAtomic(+1, &vm_page_purgeable_count);
			assert(vm_page_purgeable_wired_count > 0);
			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
		}
		if (m_object->internal &&
		    m_object->vo_owner != TASK_NULL &&
		    (m_object->purgable == VM_PURGABLE_VOLATILE ||
		    m_object->purgable == VM_PURGABLE_EMPTY)) {
			owner = VM_OBJECT_OWNER(m_object);
			vm_object_ledger_tag_ledgers(
				m_object,
				&ledger_idx_volatile,
				&ledger_idx_nonvolatile,
				&ledger_idx_volatile_compressed,
				&ledger_idx_nonvolatile_compressed,
				&ledger_idx_composite,
				&ledger_idx_external_wired,
				&do_footprint);
			/* more volatile bytes */
			ledger_credit(owner->ledger,
			    ledger_idx_volatile,
			    PAGE_SIZE);
			/* less not-quite-volatile bytes */
			ledger_debit(owner->ledger,
			    ledger_idx_nonvolatile,
			    PAGE_SIZE);
			if (do_footprint) {
				/* less footprint */
				ledger_debit(owner->ledger,
				    task_ledgers.phys_footprint,
				    PAGE_SIZE);
			} else if (ledger_idx_composite != -1) {
				ledger_debit(owner->ledger,
				    ledger_idx_composite,
				    PAGE_SIZE);
			}
		}
		assert(!is_kernel_object(m_object));
		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);

		if (queueit == TRUE) {
			if (m_object->purgable == VM_PURGABLE_EMPTY) {
				vm_page_deactivate(mem);
			} else {
				vm_page_activate(mem);
			}
		}

		VM_CHECK_MEMORYSTATUS;
	}
	VM_PAGE_CHECK(mem);
}
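
/*
 * Illustrative sketch (hypothetical, not part of this file): wiring is
 * a counted operation, so every vm_page_wire() must be balanced by one
 * vm_page_unwire(), with the object and page queues locked.
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(m, VM_KERN_MEMORY_NONE, TRUE);
 *	...
 *	vm_page_unwire(m, TRUE);	// requeues page when count drops to 0
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */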

/*
 *	vm_page_deactivate:
 *
 *	Returns the given page to the inactive list,
 *	indicating that no physical maps have access
 *	to this page.  [Used by the physical mapping system.]
 *
 *	The page queues must be locked.
 */
void
vm_page_deactivate(
	vm_page_t       m)
{
	vm_page_deactivate_internal(m, TRUE);
}

void
vm_page_deactivate_internal(
	vm_page_t       m,
	boolean_t       clear_hw_reference)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(m);

	VM_PAGE_CHECK(m);
	assert(!is_kernel_object(m_object));
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

//	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	/*
	 * This page is no longer very interesting.  If it was
	 * interesting (active or inactive/referenced), then we
	 * clear the reference bit and (re)enter it in the
	 * inactive queue.  Note wired pages should not have
	 * their reference bit cleared.
	 */
	assert( !(m->vmp_absent && !m->vmp_unusual));

	if (m->vmp_gobbled) {   /* can this happen? */
		assert( !VM_PAGE_WIRED(m));

		if (!m->vmp_private && !m->vmp_fictitious) {
			vm_page_wire_count--;
		}
		vm_page_gobble_count--;
		m->vmp_gobbled = FALSE;
	}
	/*
	 * If this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed.
	 */
	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
	    VM_PAGE_WIRED(m)) {
		return;
	}
	if (!m->vmp_absent && clear_hw_reference == TRUE) {
		vm_page_lockconvert_queues();
		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
	}

	m->vmp_reference = FALSE;
	m->vmp_no_cache = FALSE;

	if (!VM_PAGE_INACTIVE(m)) {
		vm_page_queues_remove(m, FALSE);

		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    m->vmp_dirty && m_object->internal &&
		    (m_object->purgable == VM_PURGABLE_DENY ||
		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_check_pageable_safe(m);
			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;
		} else {
			if (m_object->named && m_object->ref_count == 1) {
				vm_page_speculate(m, FALSE);
#if DEVELOPMENT || DEBUG
				vm_page_speculative_recreated++;
#endif
			} else {
				vm_page_enqueue_inactive(m, FALSE);
			}
		}
	}
}

/*
 * vm_page_enqueue_cleaned
 *
 * Put the page on the cleaned queue, mark it cleaned, etc.
 * Being on the cleaned queue (i.e. having a q_state of
 * VM_PAGE_ON_INACTIVE_CLEANED_Q) does ** NOT ** guarantee that
 * the page is clean!
 *
 * Call with the queues lock held.
 */
void
vm_page_enqueue_cleaned(vm_page_t m)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(m);

	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->vmp_absent && !m->vmp_unusual));

	if (VM_PAGE_WIRED(m)) {
		return;
	}

	if (m->vmp_gobbled) {
		if (!m->vmp_private && !m->vmp_fictitious) {
			vm_page_wire_count--;
		}
		vm_page_gobble_count--;
		m->vmp_gobbled = FALSE;
	}
	/*
	 * If this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed.
	 */
	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
		return;
	}
	vm_page_queues_remove(m, FALSE);

	vm_page_check_pageable_safe(m);
	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
	vm_page_cleaned_count++;

	vm_page_inactive_count++;
	if (m_object->internal) {
		vm_page_pageable_internal_count++;
	} else {
		vm_page_pageable_external_count++;
	}
	vm_page_add_to_specialq(m, TRUE);
	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
}

/*
 *	vm_page_activate:
 *
 *	Put the specified page on the active list (if appropriate).
 *
 *	The page queues must be locked.
 */

void
vm_page_activate(
	vm_page_t       m)
{
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(m);

	VM_PAGE_CHECK(m);
#ifdef  FIXME_4778297
	assert(!is_kernel_object(m_object));
#endif
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->vmp_absent && !m->vmp_unusual));

	if (m->vmp_gobbled) {
		assert( !VM_PAGE_WIRED(m));
		if (!m->vmp_private && !m->vmp_fictitious) {
			vm_page_wire_count--;
		}
		vm_page_gobble_count--;
		m->vmp_gobbled = FALSE;
	}
	/*
	 * If this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed.
	 */
	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
		return;
	}

#if DEBUG
	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
		panic("vm_page_activate: already active");
	}
#endif

	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
	}

	/*
	 * A freshly activated page should be promoted in the donation queue.
	 * So we remove it here while preserving its hint and we will enqueue
	 * it again in vm_page_enqueue_active.
	 */
	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));

	if (!VM_PAGE_WIRED(m)) {
		vm_page_check_pageable_safe(m);
		if (!VM_DYNAMIC_PAGING_ENABLED() &&
		    m->vmp_dirty && m_object->internal &&
		    (m_object->purgable == VM_PURGABLE_DENY ||
		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
			vm_page_throttled_count++;
		} else {
#if CONFIG_SECLUDED_MEMORY
			if (secluded_for_filecache &&
			    vm_page_secluded_target != 0 &&
			    num_tasks_can_use_secluded_mem == 0 &&
			    m_object->eligible_for_secluded &&
			    !m->vmp_realtime) {
				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
				vm_page_secluded_count++;
				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
				vm_page_secluded_count_inuse++;
				assert(!m_object->internal);
//				vm_page_pageable_external_count++;
			} else
#endif /* CONFIG_SECLUDED_MEMORY */
			vm_page_enqueue_active(m, FALSE);
		}
		m->vmp_reference = TRUE;
		m->vmp_no_cache = FALSE;
	}
	VM_PAGE_CHECK(m);
}

/*
 *	vm_page_speculate:
 *
 *	Put the specified page on the speculative list (if appropriate).
 *
 *	The page queues must be locked.
 */
void
vm_page_speculate(
	vm_page_t       m,
	boolean_t       new)
{
	struct vm_speculative_age_q     *aq;
	vm_object_t     m_object;

	m_object = VM_PAGE_OBJECT(m);

	VM_PAGE_CHECK(m);
	vm_page_check_pageable_safe(m);

	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert( !(m->vmp_absent && !m->vmp_unusual));
	assert(m_object->internal == FALSE);

	/*
	 * If this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed.
	 */
	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
		return;
	}

	vm_page_queues_remove(m, FALSE);

	if (!VM_PAGE_WIRED(m)) {
		mach_timespec_t ts;
		clock_sec_t     sec;
		clock_nsec_t    nsec;

		clock_get_system_nanotime(&sec, &nsec);
		ts.tv_sec = (unsigned int) sec;
		ts.tv_nsec = nsec;

		if (vm_page_speculative_count == 0) {
			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;

			aq = &vm_page_queue_speculative[speculative_age_index];

			/*
			 * set the timer to begin a new group
			 */
			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;

			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
		} else {
			aq = &vm_page_queue_speculative[speculative_age_index];

			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
				speculative_age_index++;

				if (speculative_age_index > vm_page_max_speculative_age_q) {
					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
				}
				if (speculative_age_index == speculative_steal_index) {
					speculative_steal_index = speculative_age_index + 1;

					if (speculative_steal_index > vm_page_max_speculative_age_q) {
						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
					}
				}
				aq = &vm_page_queue_speculative[speculative_age_index];

				if (!vm_page_queue_empty(&aq->age_q)) {
					vm_page_speculate_ageit(aq);
				}

				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;

				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
			}
		}
		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
		vm_page_speculative_count++;
		vm_page_pageable_external_count++;

		if (new == TRUE) {
			vm_object_lock_assert_exclusive(m_object);

			m_object->pages_created++;
#if DEVELOPMENT || DEBUG
			vm_page_speculative_created++;
#endif
		}
	}
	VM_PAGE_CHECK(m);
}
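
/*
 * Worked example of the aging-bin timer arithmetic above (value is
 * illustrative): with vm_page_speculative_q_age_ms == 500, a new bin
 * expires at "now" plus 500 / 1000 == 0 seconds plus
 * (500 % 1000) * 1000 * NSEC_PER_USEC == 500,000,000 nanoseconds,
 * i.e. half a second after the first page enters it.
 */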

/*
 * move pages from the specified aging bin to
 * the speculative bin that pageout_scan claims from
 *
 * The page queues must be locked.
 */
void
vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
{
	struct vm_speculative_age_q     *sq;
	vm_page_t       t;

	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];

	if (vm_page_queue_empty(&sq->age_q)) {
		sq->age_q.next = aq->age_q.next;
		sq->age_q.prev = aq->age_q.prev;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
	} else {
		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
		t->vmp_pageq.next = aq->age_q.next;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
		t->vmp_pageq.prev = sq->age_q.prev;

		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);

		sq->age_q.prev = aq->age_q.prev;
	}
	vm_page_queue_init(&aq->age_q);
}
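
/*
 * Note: the splice above is O(1) -- it relinks only a constant number
 * of boundary pointers (the heads and tails of the two queues) rather
 * than walking the pages, then reinitializes the emptied aging bin.
 */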

void
vm_page_lru(
	vm_page_t       m)
{
	VM_PAGE_CHECK(m);
	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
		/*
		 * we don't need to do all the other work that
		 * vm_page_queues_remove and vm_page_enqueue_inactive
		 * bring along for the ride
		 */
		assert(!m->vmp_laundry);
		assert(!m->vmp_private);

		m->vmp_no_cache = FALSE;

		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);

		return;
	}
	/*
	 * If this page is currently on the pageout queue, we can't do the
	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
	 * and we can't remove it manually since we would need the object lock
	 * (which is not required here) to decrement the activity_in_progress
	 * reference which is held on the object while the page is in the pageout queue...
	 * just let the normal laundry processing proceed.
	 */
	if (m->vmp_laundry || m->vmp_private ||
	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
	    VM_PAGE_WIRED(m)) {
		return;
	}

	m->vmp_no_cache = FALSE;

	vm_page_queues_remove(m, FALSE);

	vm_page_enqueue_inactive(m, FALSE);
}
5374
5375
5376 void
vm_page_reactivate_all_throttled(void)5377 vm_page_reactivate_all_throttled(void)
5378 {
5379 vm_page_t first_throttled, last_throttled;
5380 vm_page_t first_active;
5381 vm_page_t m;
5382 int extra_active_count;
5383 int extra_internal_count, extra_external_count;
5384 vm_object_t m_object;
5385
5386 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5387 return;
5388 }
5389
5390 extra_active_count = 0;
5391 extra_internal_count = 0;
5392 extra_external_count = 0;
5393 vm_page_lock_queues();
5394 if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5395 /*
5396 * Switch "throttled" pages to "active".
5397 */
5398 vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5399 VM_PAGE_CHECK(m);
5400 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5401
5402 m_object = VM_PAGE_OBJECT(m);
5403
5404 extra_active_count++;
5405 if (m_object->internal) {
5406 extra_internal_count++;
5407 } else {
5408 extra_external_count++;
5409 }
5410
5411 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5412 VM_PAGE_CHECK(m);
5413 vm_page_add_to_specialq(m, FALSE);
5414 }
5415
5416 /*
5417 * Transfer the entire throttled queue to a regular LRU page queues.
5418 * We insert it at the head of the active queue, so that these pages
5419 * get re-evaluated by the LRU algorithm first, since they've been
5420 * completely out of it until now.
5421 */
5422 first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5423 last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5424 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5425 if (vm_page_queue_empty(&vm_page_queue_active)) {
5426 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5427 } else {
5428 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5429 }
5430 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5431 first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5432 last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5433
5434 #if DEBUG
5435 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5436 #endif
5437 vm_page_queue_init(&vm_page_queue_throttled);
5438 /*
5439 * Adjust the global page counts.
5440 */
5441 vm_page_active_count += extra_active_count;
5442 vm_page_pageable_internal_count += extra_internal_count;
5443 vm_page_pageable_external_count += extra_external_count;
5444 vm_page_throttled_count = 0;
5445 }
5446 assert(vm_page_throttled_count == 0);
5447 assert(vm_page_queue_empty(&vm_page_queue_throttled));
5448 vm_page_unlock_queues();
5449 }
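/*
 * The head-splice performed above (and again in vm_page_reactivate_local()
 * below) follows one fixed pattern for packed-pointer page queues. A minimal
 * sketch of that pattern, kept out of the build and using names local to
 * this example:
 */
#if 0
static void
vm_page_queue_splice_head_sketch(vm_page_queue_head_t *dst, vm_page_queue_head_t *src)
{
	vm_page_t first_src, last_src, first_dst;

	if (vm_page_queue_empty(src)) {
		return;
	}
	first_src = (vm_page_t) vm_page_queue_first(src);
	last_src  = (vm_page_t) vm_page_queue_last(src);
	first_dst = (vm_page_t) vm_page_queue_first(dst);

	/* link the tail of "src" to the old head of "dst" */
	if (vm_page_queue_empty(dst)) {
		dst->prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_src);
	} else {
		first_dst->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_src);
	}
	/* "src" becomes the new head of "dst" */
	dst->next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_src);
	first_src->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(dst);
	last_src->vmp_pageq.next  = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_dst);

	vm_page_queue_init(src);
}
#endif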
5450
5451
5452 /*
5453 * move pages from the indicated local queue to the global active queue
5454 * it's ok to fail if we're below the hard limit and force == FALSE
5455 * the nolocks == TRUE case is to allow this function to be run on
5456 * the hibernate path
5457 */
5458
5459 void
5460 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5461 {
5462 struct vpl *lq;
5463 vm_page_t first_local, last_local;
5464 vm_page_t first_active;
5465 vm_page_t m;
5466 uint32_t count = 0;
5467
5468 if (vm_page_local_q == NULL) {
5469 return;
5470 }
5471
5472 lq = zpercpu_get_cpu(vm_page_local_q, lid);
5473
5474 if (nolocks == FALSE) {
5475 if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5476 if (!vm_page_trylockspin_queues()) {
5477 return;
5478 }
5479 } else {
5480 vm_page_lockspin_queues();
5481 }
5482
5483 VPL_LOCK(&lq->vpl_lock);
5484 }
5485 if (lq->vpl_count) {
5486 /*
5487 * Switch "local" pages to "active".
5488 */
5489 assert(!vm_page_queue_empty(&lq->vpl_queue));
5490
5491 vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5492 VM_PAGE_CHECK(m);
5493 vm_page_check_pageable_safe(m);
5494 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5495 assert(!m->vmp_fictitious);
5496
5497 if (m->vmp_local_id != lid) {
5498 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5499 }
5500
5501 m->vmp_local_id = 0;
5502 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5503 VM_PAGE_CHECK(m);
5504 vm_page_add_to_specialq(m, FALSE);
5505 count++;
5506 }
5507 if (count != lq->vpl_count) {
5508 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5509 }
5510
5511 /*
5512 * Transfer the entire local queue to the regular LRU page queues.
5513 */
5514 first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5515 last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5516 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5517
5518 if (vm_page_queue_empty(&vm_page_queue_active)) {
5519 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5520 } else {
5521 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5522 }
5523 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5524 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5525 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5526
5527 vm_page_queue_init(&lq->vpl_queue);
5528 /*
5529 * Adjust the global page counts.
5530 */
5531 vm_page_active_count += lq->vpl_count;
5532 vm_page_pageable_internal_count += lq->vpl_internal_count;
5533 vm_page_pageable_external_count += lq->vpl_external_count;
5534 lq->vpl_count = 0;
5535 lq->vpl_internal_count = 0;
5536 lq->vpl_external_count = 0;
5537 }
5538 assert(vm_page_queue_empty(&lq->vpl_queue));
5539
5540 if (nolocks == FALSE) {
5541 VPL_UNLOCK(&lq->vpl_lock);
5542
5543 vm_page_balance_inactive(count / 4);
5544 vm_page_unlock_queues();
5545 }
5546 }
5547
5548 /*
5549 * vm_page_part_zero_fill:
5550 *
5551 * Zero-fill a part of the page.
5552 */
5553 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5554 void
5555 vm_page_part_zero_fill(
5556 vm_page_t m,
5557 vm_offset_t m_pa,
5558 vm_size_t len)
5559 {
5560 #if 0
5561 /*
5562 * we don't hold the page queue lock
5563 * so this check isn't safe to make
5564 */
5565 VM_PAGE_CHECK(m);
5566 #endif
5567
5568 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5569 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5570 #else
5571 vm_page_t tmp;
5572 while (1) {
5573 tmp = vm_page_grab();
5574 if (tmp == VM_PAGE_NULL) {
5575 vm_page_wait(THREAD_UNINT);
5576 continue;
5577 }
5578 break;
5579 }
5580 vm_page_zero_fill(tmp);
5581 if (m_pa != 0) {
5582 vm_page_part_copy(m, 0, tmp, 0, m_pa);
5583 }
5584 if ((m_pa + len) < PAGE_SIZE) {
5585 vm_page_part_copy(m, m_pa + len, tmp,
5586 m_pa + len, PAGE_SIZE - (m_pa + len));
5587 }
5588 vm_page_copy(tmp, m);
5589 VM_PAGE_FREE(tmp);
5590 #endif
5591 }
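/*
 * Illustrative usage, not compiled: zero the tail of a page after a short
 * read, e.g. when end-of-file lands mid-page. "m" and "valid_bytes" are
 * hypothetical locals for this example.
 */
#if 0
	vm_size_t valid_bytes = 512;    /* bytes actually filled in page m */

	vm_page_part_zero_fill(m, valid_bytes, PAGE_SIZE - valid_bytes);
#endif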
5592
5593 /*
5594 * vm_page_zero_fill:
5595 *
5596 * Zero-fill the specified page.
5597 */
5598 void
5599 vm_page_zero_fill(
5600 vm_page_t m)
5601 {
5602 #if 0
5603 /*
5604 * we don't hold the page queue lock
5605 * so this check isn't safe to make
5606 */
5607 VM_PAGE_CHECK(m);
5608 #endif
5609
5610 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
5611 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5612 }
5613
5614 /*
5615 * vm_page_part_copy:
5616 *
5617 * copy part of one page to another
5618 */
5619
5620 void
5621 vm_page_part_copy(
5622 vm_page_t src_m,
5623 vm_offset_t src_pa,
5624 vm_page_t dst_m,
5625 vm_offset_t dst_pa,
5626 vm_size_t len)
5627 {
5628 #if 0
5629 /*
5630 * we don't hold the page queue lock
5631 * so this check isn't safe to make
5632 */
5633 VM_PAGE_CHECK(src_m);
5634 VM_PAGE_CHECK(dst_m);
5635 #endif
5636 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5637 VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5638 }
5639
5640 /*
5641 * vm_page_copy:
5642 *
5643 * Copy one page to another
5644 */
5645
5646 int vm_page_copy_cs_validations = 0;
5647 int vm_page_copy_cs_tainted = 0;
5648
5649 void
5650 vm_page_copy(
5651 vm_page_t src_m,
5652 vm_page_t dest_m)
5653 {
5654 vm_object_t src_m_object;
5655
5656 src_m_object = VM_PAGE_OBJECT(src_m);
5657
5658 #if 0
5659 /*
5660 * we don't hold the page queue lock
5661 * so this check isn't safe to make
5662 */
5663 VM_PAGE_CHECK(src_m);
5664 VM_PAGE_CHECK(dest_m);
5665 #endif
5666 vm_object_lock_assert_held(src_m_object);
5667
5668 if (src_m_object != VM_OBJECT_NULL &&
5669 src_m_object->code_signed) {
5670 /*
5671 * We're copying a page from a code-signed object.
5672 * Whoever ends up mapping the copy page might care about
5673 * the original page's integrity, so let's validate the
5674 * source page now.
5675 */
5676 vm_page_copy_cs_validations++;
5677 vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5678 #if DEVELOPMENT || DEBUG
5679 DTRACE_VM4(codesigned_copy,
5680 vm_object_t, src_m_object,
5681 vm_object_offset_t, src_m->vmp_offset,
5682 int, src_m->vmp_cs_validated,
5683 int, src_m->vmp_cs_tainted);
5684 #endif /* DEVELOPMENT || DEBUG */
5685 }
5686
5687 /*
5688 * Propagate the cs_tainted bit to the copy page. Do not propagate
5689 * the cs_validated bit.
5690 */
5691 dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5692 dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5693 if (dest_m->vmp_cs_tainted) {
5694 vm_page_copy_cs_tainted++;
5695 }
5696 dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5697 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5698 }
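/*
 * Illustrative usage, not compiled: a copy-on-write style duplication. The
 * source page's object must stay locked across the call so that the
 * code-signing validation above sees a stable page. Variable names are
 * hypothetical.
 */
#if 0
	vm_object_lock(src_object);
	vm_page_copy(src_page, dst_page);   /* validates src if code-signed */
	vm_object_unlock(src_object);
#endif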
5699
5700 #if MACH_ASSERT
5701 static void
5702 _vm_page_print(
5703 vm_page_t p)
5704 {
5705 printf("vm_page %p: \n", p);
5706 printf(" pageq: next=%p prev=%p\n",
5707 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5708 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5709 printf(" listq: next=%p prev=%p\n",
5710 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5711 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5712 printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5713 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5714 printf(" wire_count=%u\n", p->vmp_wire_count);
5715 printf(" q_state=%u\n", p->vmp_q_state);
5716
5717 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
5718 (p->vmp_laundry ? "" : "!"),
5719 (p->vmp_reference ? "" : "!"),
5720 (p->vmp_gobbled ? "" : "!"),
5721 (p->vmp_private ? "" : "!"));
5722 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5723 (p->vmp_busy ? "" : "!"),
5724 (p->vmp_wanted ? "" : "!"),
5725 (p->vmp_tabled ? "" : "!"),
5726 (p->vmp_fictitious ? "" : "!"),
5727 (p->vmp_pmapped ? "" : "!"),
5728 (p->vmp_wpmapped ? "" : "!"));
5729 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5730 (p->vmp_free_when_done ? "" : "!"),
5731 (p->vmp_absent ? "" : "!"),
5732 (VMP_ERROR_GET(p) ? "" : "!"),
5733 (p->vmp_dirty ? "" : "!"),
5734 (p->vmp_cleaning ? "" : "!"),
5735 (p->vmp_precious ? "" : "!"),
5736 (p->vmp_clustered ? "" : "!"));
5737 printf(" %soverwriting, %srestart, %sunusual\n",
5738 (p->vmp_overwriting ? "" : "!"),
5739 (p->vmp_restart ? "" : "!"),
5740 (p->vmp_unusual ? "" : "!"));
5741 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5742 p->vmp_cs_validated,
5743 p->vmp_cs_tainted,
5744 p->vmp_cs_nx,
5745 (p->vmp_no_cache ? "" : "!"));
5746
5747 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5748 }
5749
5750 /*
5751 * Check that the list of pages is ordered by
5752 * ascending physical address and has no holes.
5753 */
5754 static int
5755 vm_page_verify_contiguous(
5756 vm_page_t pages,
5757 unsigned int npages)
5758 {
5759 vm_page_t m;
5760 unsigned int page_count;
5761 vm_offset_t prev_addr;
5762
5763 prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5764 page_count = 1;
5765 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5766 if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5767 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5768 m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5769 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5770 panic("vm_page_verify_contiguous: not contiguous!");
5771 }
5772 prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5773 ++page_count;
5774 }
5775 if (page_count != npages) {
5776 printf("pages %p actual count 0x%x but requested 0x%x\n",
5777 pages, page_count, npages);
5778 panic("vm_page_verify_contiguous: count error");
5779 }
5780 return 1;
5781 }
5782
5783
5784 /*
5785 * Check the free lists for proper length etc.
5786 */
5787 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5788 static unsigned int
5789 vm_page_verify_free_list(
5790 vm_page_queue_head_t *vm_page_queue,
5791 unsigned int color,
5792 vm_page_t look_for_page,
5793 boolean_t expect_page)
5794 {
5795 unsigned int npages;
5796 vm_page_t m;
5797 vm_page_t prev_m;
5798 boolean_t found_page;
5799
5800 if (!vm_page_verify_this_free_list_enabled) {
5801 return 0;
5802 }
5803
5804 found_page = FALSE;
5805 npages = 0;
5806 prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5807
5808 vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5809 if (m == look_for_page) {
5810 found_page = TRUE;
5811 }
5812 if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5813 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5814 color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5815 }
5816 if (!m->vmp_busy) {
5817 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5818 color, npages, m);
5819 }
5820 if (color != (unsigned int) -1) {
5821 if (VM_PAGE_GET_COLOR(m) != color) {
5822 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5823 color, npages, m, VM_PAGE_GET_COLOR(m), color);
5824 }
5825 if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5826 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5827 color, npages, m, m->vmp_q_state);
5828 }
5829 } else {
5830 if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5831 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5832 npages, m, m->vmp_q_state);
5833 }
5834 }
5835 ++npages;
5836 prev_m = m;
5837 }
5838 if (look_for_page != VM_PAGE_NULL) {
5839 unsigned int other_color;
5840
5841 if (expect_page && !found_page) {
5842 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5843 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5844 _vm_page_print(look_for_page);
5845 for (other_color = 0;
5846 other_color < vm_colors;
5847 other_color++) {
5848 if (other_color == color) {
5849 continue;
5850 }
5851 vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5852 other_color, look_for_page, FALSE);
5853 }
5854 if (color == (unsigned int) -1) {
5855 vm_page_verify_free_list(&vm_lopage_queue_free,
5856 (unsigned int) -1, look_for_page, FALSE);
5857 }
5858 panic("vm_page_verify_free_list(color=%u)", color);
5859 }
5860 if (!expect_page && found_page) {
5861 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5862 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5863 }
5864 }
5865 return npages;
5866 }
5867
5868 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5869 static void
5870 vm_page_verify_free_lists(void)
5871 {
5872 unsigned int color, npages, nlopages;
5873 boolean_t toggle = TRUE;
5874
5875 if (!vm_page_verify_all_free_lists_enabled) {
5876 return;
5877 }
5878
5879 npages = 0;
5880
5881 vm_free_page_lock();
5882
5883 if (vm_page_verify_this_free_list_enabled == TRUE) {
5884 /*
5885 * This variable has been set globally for extra checking of
5886 * each free list Q. Since we didn't set it, we don't own it
5887 * and we shouldn't toggle it.
5888 */
5889 toggle = FALSE;
5890 }
5891
5892 if (toggle == TRUE) {
5893 vm_page_verify_this_free_list_enabled = TRUE;
5894 }
5895
5896 for (color = 0; color < vm_colors; color++) {
5897 npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5898 color, VM_PAGE_NULL, FALSE);
5899 }
5900 nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5901 (unsigned int) -1,
5902 VM_PAGE_NULL, FALSE);
5903 if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5904 panic("vm_page_verify_free_lists: "
5905 "npages %u free_count %d nlopages %u lo_free_count %u",
5906 npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5907 }
5908
5909 if (toggle == TRUE) {
5910 vm_page_verify_this_free_list_enabled = FALSE;
5911 }
5912
5913 vm_free_page_unlock();
5914 }
5915
5916 #endif /* MACH_ASSERT */
5917
5918 /*
5919 * wrapper for pmap_enter()
5920 */
5921 kern_return_t
5922 pmap_enter_check(
5923 pmap_t pmap,
5924 vm_map_address_t virtual_address,
5925 vm_page_t page,
5926 vm_prot_t protection,
5927 vm_prot_t fault_type,
5928 unsigned int flags,
5929 boolean_t wired)
5930 {
5931 int options = 0;
5932 vm_object_t obj;
5933
5934 if (VMP_ERROR_GET(page)) {
5935 return KERN_MEMORY_FAILURE;
5936 }
5937 obj = VM_PAGE_OBJECT(page);
5938 if (obj->internal) {
5939 options |= PMAP_OPTIONS_INTERNAL;
5940 }
5941 if (page->vmp_reusable || obj->all_reusable) {
5942 options |= PMAP_OPTIONS_REUSABLE;
5943 }
5944 return pmap_enter_options(pmap,
5945 virtual_address,
5946 VM_PAGE_GET_PHYS_PAGE(page),
5947 protection,
5948 fault_type,
5949 flags,
5950 wired,
5951 options,
5952 NULL,
5953 PMAP_MAPPING_TYPE_INFER);
5954 }
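/*
 * Illustrative usage, not compiled: enter a wired, writable kernel mapping
 * for a page, mirroring the compressor-page case in vm_page_relocate()
 * below. "page" is a hypothetical local.
 */
#if 0
	kern_return_t kr;

	kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)page->vmp_offset, page,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
	assert(kr == KERN_SUCCESS);  /* pmap_enter_check rejects pages with vmp_error set */
#endif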
5955
5956
5957 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5958
5959 /*
5960 * CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
5961 */
5962
5963 /*
5964 * Helper function used to determine if a page can be relocated
5965 * A page is relocatable if it is in a stable non-transient state
5966 */
5967 static inline boolean_t
5968 vm_page_is_relocatable(vm_page_t m)
5969 {
5970
5971 if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
5972 m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5973 /*
5974 * Page is in a transient state
5975 * or a state we don't want to deal with.
5976 */
5977 return FALSE;
5978 } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5979 (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5980 (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5981 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5982 /*
5983 * Page needs to be on one of our queues (other than the pageout or special
5984 * free queues) or it needs to belong to the compressor pool (which is now
5985 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
5986 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
5987 * locks we hold at this point...
5988 */
5989 return FALSE;
5990 } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
5991 (!m->vmp_tabled || m->vmp_busy)) {
5992 /*
5993 * pages on the free list are always 'busy'
5994 * so we couldn't test for 'busy' in the check
5995 * for the transient states... pages that are
5996 * 'free' are never 'tabled', so we also couldn't
5997 * test for 'tabled'. So we check here to make
5998 * sure that a non-free page is not busy and is
5999 * tabled on an object...
6000 */
6001 return FALSE;
6002 }
6003 return TRUE;
6004 }
6005
6006 /*
6007 * Free up the given page by possibly relocating its contents to a new page.
6008 * If the page is on an object, the object lock must be held.
6009 */
6010 static kern_return_t
6011 vm_page_relocate(vm_page_t m1, int *compressed_pages)
6012 {
6013 int refmod = 0;
6014 vm_object_t object = VM_PAGE_OBJECT(m1);
6015 kern_return_t kr;
6016
6017 if (object == VM_OBJECT_NULL) {
6018 return KERN_FAILURE;
6019 }
6020
6021 vm_object_lock_assert_held(object);
6022
6023 if (VM_PAGE_WIRED(m1) ||
6024 m1->vmp_gobbled ||
6025 m1->vmp_laundry ||
6026 m1->vmp_wanted ||
6027 m1->vmp_cleaning ||
6028 m1->vmp_overwriting ||
6029 m1->vmp_free_when_done ||
6030 m1->vmp_busy ||
6031 m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6032 return KERN_FAILURE;
6033 }
6034
6035 boolean_t disconnected = FALSE;
6036 boolean_t reusable = FALSE;
6037
6038 /*
6039 * Pages from reusable objects can be reclaimed directly.
6040 */
6041 if ((m1->vmp_reusable || object->all_reusable) &&
6042 m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6043 !m1->vmp_reference) {
6044 /*
6045 * reusable page...
6046 */
6047
6048 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6049 disconnected = TRUE;
6050 if (refmod == 0) {
6051 /*
6052 * ... not reused: can steal without relocating contents.
6053 */
6054 reusable = TRUE;
6055 }
6056 }
6057
6058 if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6059 vm_object_offset_t offset;
6060
6061 /* page is not reusable, we need to allocate a new page
6062 * and move its contents there.
6063 */
6064 vm_page_t m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6065
6066 if (m2 == VM_PAGE_NULL) {
6067 return KERN_RESOURCE_SHORTAGE;
6068 }
6069
6070 if (!disconnected) {
6071 if (m1->vmp_pmapped) {
6072 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6073 } else {
6074 refmod = 0;
6075 }
6076 }
6077
6078 /* copy the page's contents */
6079 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6080
6081 /* copy the page's state */
6082 assert(!VM_PAGE_WIRED(m1));
6083 assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6084 assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6085 assert(!m1->vmp_laundry);
6086 m2->vmp_reference = m1->vmp_reference;
6087 assert(!m1->vmp_gobbled);
6088 assert(!m1->vmp_private);
6089 m2->vmp_no_cache = m1->vmp_no_cache;
6090 m2->vmp_xpmapped = 0;
6091 assert(!m1->vmp_busy);
6092 assert(!m1->vmp_wanted);
6093 assert(!m1->vmp_fictitious);
6094 m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6095 m2->vmp_wpmapped = m1->vmp_wpmapped;
6096 assert(!m1->vmp_free_when_done);
6097 m2->vmp_absent = m1->vmp_absent;
6098 m2->vmp_error = VMP_ERROR_GET(m1);
6099 m2->vmp_dirty = m1->vmp_dirty;
6100 assert(!m1->vmp_cleaning);
6101 m2->vmp_precious = m1->vmp_precious;
6102 m2->vmp_clustered = m1->vmp_clustered;
6103 assert(!m1->vmp_overwriting);
6104 m2->vmp_restart = m1->vmp_restart;
6105 m2->vmp_unusual = m1->vmp_unusual;
6106 m2->vmp_cs_validated = m1->vmp_cs_validated;
6107 m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6108 m2->vmp_cs_nx = m1->vmp_cs_nx;
6109
6110 m2->vmp_realtime = m1->vmp_realtime;
6111 m1->vmp_realtime = false;
6112
6113 /*
6114 * If m1 had really been reusable,
6115 * we would have just stolen it, so
6116 * let's not propagate its "reusable"
6117 * bit and assert that m2 is not
6118 * marked as "reusable".
6119 */
6120 // m2->vmp_reusable = m1->vmp_reusable;
6121 assert(!m2->vmp_reusable);
6122
6123 // assert(!m1->vmp_lopage);
6124
6125 if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6126 m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6127 /*
6128 * We just grabbed m2 up above and so it isn't
6129 * going to be on any special Q as yet and so
6130 * we don't need to 'remove' it from the special
6131 * queues. Just resetting the state should be enough.
6132 */
6133 m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6134 }
6135
6136 /*
6137 * page may need to be flushed if
6138 * it is marshalled into a UPL
6139 * that is going to be used by a device
6140 * that doesn't support coherency
6141 */
6142 m2->vmp_written_by_kernel = TRUE;
6143
6144 /*
6145 * make sure we clear the ref/mod state
6146 * from the pmap layer... else we risk
6147 * inheriting state from the last time
6148 * this page was used...
6149 */
6150 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6151 VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6152
6153 if (refmod & VM_MEM_REFERENCED) {
6154 m2->vmp_reference = TRUE;
6155 }
6156 if (refmod & VM_MEM_MODIFIED) {
6157 SET_PAGE_DIRTY(m2, TRUE);
6158 }
6159 offset = m1->vmp_offset;
6160
6161 /*
6162 * completely cleans up the state
6163 * of the page so that it is ready
6164 * to be put onto the free list, or
6165 * for this purpose it looks like it
6166 * just came off of the free list
6167 */
6168 vm_page_free_prepare(m1);
6169
6170 /*
6171 * now put the substitute page on the object
6172 */
6173 vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6174 TRUE, FALSE, FALSE, NULL);
6175
6176 if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6177 m2->vmp_pmapped = TRUE;
6178 m2->vmp_wpmapped = TRUE;
6179
6180 kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6181 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6182
6183 assert(kr == KERN_SUCCESS);
6184
6185 if (compressed_pages) {
6186 ++*compressed_pages;
6187 }
6188 } else {
6189 /* relocated page was not used by the compressor
6190 * put it on either the active or inactive lists */
6191 if (m2->vmp_reference) {
6192 vm_page_activate(m2);
6193 } else {
6194 vm_page_deactivate(m2);
6195 }
6196 }
6197
6198 /* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6199 vm_page_wakeup_done(object, m2);
6200
6201 return KERN_SUCCESS;
6202 } else {
6203 assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6204
6205 /*
6206 * completely cleans up the state
6207 * of the page so that it is ready
6208 * to be put onto the free list, or
6209 * for this purpose it looks like it
6210 * just came off of the free list
6211 */
6212 vm_page_free_prepare(m1);
6213
6214 /* we're done here */
6215 return KERN_SUCCESS;
6216 }
6217
6218 return KERN_FAILURE;
6219 }
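/*
 * Illustrative caller sketch, not compiled: vm_page_find_contiguous() below
 * is the real consumer. The object lock is taken with a 'try' to avoid an
 * inversion against vm_pageout_scan; on failure the current run is aborted
 * rather than blocking. Names are local to this example.
 */
#if 0
	vm_object_t obj = VM_PAGE_OBJECT(m1);
	kern_return_t kr;

	if (obj != VM_OBJECT_NULL && vm_object_lock_try(obj)) {
		kr = vm_page_relocate(m1, &compressed_pages);
		vm_object_unlock(obj);
	} else {
		kr = KERN_FAILURE;   /* abort this run and rescan later */
	}
#endif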
6220
6221 /*
6222 * CONTIGUOUS PAGE ALLOCATION
6223 *
6224 * Find a region large enough to contain at least n pages
6225 * of contiguous physical memory.
6226 *
6227 * This is done by traversing the vm_page_t array in a linear fashion
6228 * we assume that the vm_page_t array has the available physical pages in an
6229 * ordered, ascending list... this is currently true of all our implementations
6230 * and must remain so... there can be 'holes' in the array... we also can
6231 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6232 * which used to happen via 'vm_page_convert'... that function was no longer
6233 * being called and was removed...
6234 *
6235 * The basic flow consists of stabilizing some of the interesting state of
6236 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6237 * sweep at the beginning of the array looking for pages that meet our criteria
6238 * for a 'stealable' page... currently we are pretty conservative... if the page
6239 * meets this criteria and is physically contiguous to the previous page in the 'run'
6240 * we keep developing it. If we hit a page that doesn't fit, we reset our state
6241 * and start to develop a new run... if at this point we've already considered
6242 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
6243 * and mutex_pause (which will yield the processor), to keep the latency low w/r
6244 * to other threads trying to acquire free pages (or move pages from q to q),
6245 * and then continue from the spot we left off... we only make 1 pass through the
6246 * array. Once we have a 'run' that is long enough, we'll go into the loop
6247 * which steals the pages from the queues they're currently on... pages on the free
6248 * queue can be stolen directly... pages that are on any of the other queues
6249 * must be removed from the object they are tabled on... this requires taking the
6250 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
6251 * or if the state of the page behind the vm_object lock is no longer viable, we'll
6252 * dump the pages we've currently stolen back to the free list, and pick up our
6253 * scan from the point where we aborted the 'current' run.
6254 *
6255 *
6256 * Requirements:
6257 * - neither vm_page_queue nor vm_free_list lock can be held on entry
6258 *
6259 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
6260 *
6261 * Algorithm:
6262 */
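/*
 * In outline (illustrative only), the scan described above reduces to:
 *
 *	for (page_idx = last_idx; npages < contig_pages; page_idx++) {
 *		m = &vm_pages[page_idx];
 *		if (m isn't stealable or isn't contiguous with the run)
 *			RESET_STATE_OF_RUN();
 *		else
 *			npages++;
 *		if (considered++ > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1)
 *			drop both locks, mutex_pause(), retake, reset the run;
 *	}
 */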
6263
6264 #define MAX_CONSIDERED_BEFORE_YIELD 1000
6265
6266
6267 #define RESET_STATE_OF_RUN() \
6268 MACRO_BEGIN \
6269 prevcontaddr = -2; \
6270 start_pnum = -1; \
6271 free_considered = 0; \
6272 substitute_needed = 0; \
6273 npages = 0; \
6274 MACRO_END
6275
6276 /*
6277 * Can we steal in-use (i.e. not free) pages when searching for
6278 * physically-contiguous pages ?
6279 */
6280 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
6281
6282 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
6283 #if DEBUG
6284 int vm_page_find_contig_debug = 0;
6285 #endif
6286
6287 static vm_page_t
6288 vm_page_find_contiguous(
6289 unsigned int contig_pages,
6290 ppnum_t max_pnum,
6291 ppnum_t pnum_mask,
6292 boolean_t wire,
6293 int flags)
6294 {
6295 vm_page_t m = NULL;
6296 ppnum_t prevcontaddr = 0;
6297 ppnum_t start_pnum = 0;
6298 unsigned int npages = 0, considered = 0, scanned = 0;
6299 unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
6300 unsigned int idx_last_contig_page_found = 0;
6301 int free_considered = 0, free_available = 0;
6302 int substitute_needed = 0;
6303 int zone_gc_called = 0;
6304 boolean_t wrapped;
6305 kern_return_t kr;
6306 #if DEBUG
6307 clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
6308 clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
6309 #endif
6310
6311 int yielded = 0;
6312 int dumped_run = 0;
6313 int stolen_pages = 0;
6314 int compressed_pages = 0;
6315
6316
6317 if (contig_pages == 0) {
6318 return VM_PAGE_NULL;
6319 }
6320
6321 full_scan_again:
6322
6323 #if MACH_ASSERT
6324 vm_page_verify_free_lists();
6325 #endif
6326 #if DEBUG
6327 clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
6328 #endif
6329 PAGE_REPLACEMENT_ALLOWED(TRUE);
6330
6331 /*
6332 * If there are still delayed pages, try to free up some that match.
6333 */
6334 if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
6335 vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
6336 }
6337
6338 vm_page_lock_queues();
6339 vm_free_page_lock();
6340
6341 RESET_STATE_OF_RUN();
6342
6343 scanned = 0;
6344 considered = 0;
6345 free_available = vm_page_free_count - vm_page_free_reserved;
6346
6347 wrapped = FALSE;
6348
6349 if (flags & KMA_LOMEM) {
6350 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
6351 } else {
6352 idx_last_contig_page_found = vm_page_find_contiguous_last_idx;
6353 }
6354
6355 orig_last_idx = idx_last_contig_page_found;
6356 last_idx = orig_last_idx;
6357
6358 for (page_idx = last_idx, start_idx = last_idx;
6359 npages < contig_pages && page_idx < vm_pages_count;
6360 page_idx++) {
6361 retry:
6362 if (wrapped &&
6363 npages == 0 &&
6364 page_idx >= orig_last_idx) {
6365 /*
6366 * We're back where we started and we haven't
6367 * found any suitable contiguous range. Let's
6368 * give up.
6369 */
6370 break;
6371 }
6372 scanned++;
6373 m = &vm_pages[page_idx];
6374
6375 assert(!m->vmp_fictitious);
6376 assert(!m->vmp_private);
6377
6378 if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
6379 /* no more low pages... */
6380 break;
6381 }
6382 if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
6383 /*
6384 * not aligned
6385 */
6386 RESET_STATE_OF_RUN();
6387 } else if (!vm_page_is_relocatable(m)) {
6388 /*
6389 * page is not relocatable */
6390 RESET_STATE_OF_RUN();
6391 } else {
6392 if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6393 if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6394 RESET_STATE_OF_RUN();
6395 goto did_consider;
6396 } else {
6397 npages = 1;
6398 start_idx = page_idx;
6399 start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6400 }
6401 } else {
6402 npages++;
6403 }
6404 prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6405
6406 VM_PAGE_CHECK(m);
6407 if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6408 free_considered++;
6409 } else {
6410 /*
6411 * This page is not free.
6412 * If we can't steal used pages,
6413 * we have to give up this run
6414 * and keep looking.
6415 * Otherwise, we might need to
6416 * move the contents of this page
6417 * into a substitute page.
6418 */
6419 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6420 if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6421 substitute_needed++;
6422 }
6423 #else
6424 RESET_STATE_OF_RUN();
6425 #endif
6426 }
6427
6428 if ((free_considered + substitute_needed) > free_available) {
6429 /*
6430 * if we let this run continue
6431 * we will end up dropping the vm_page_free_count
6432 * below the reserve limit... we need to abort
6433 * this run, but we can at least re-consider this
6434 * page... thus the jump back to 'retry'
6435 */
6436 RESET_STATE_OF_RUN();
6437
6438 if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6439 considered++;
6440 goto retry;
6441 }
6442 /*
6443 * free_available == 0
6444 * so can't consider any free pages... if
6445 * we went to retry in this case, we'd
6446 * get stuck looking at the same page
6447 * w/o making any forward progress
6448 * we also want to take this path if we've already
6449 * reached our limit that controls the lock latency
6450 */
6451 }
6452 }
6453 did_consider:
6454 if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6455 PAGE_REPLACEMENT_ALLOWED(FALSE);
6456
6457 vm_free_page_unlock();
6458 vm_page_unlock_queues();
6459
6460 mutex_pause(0);
6461
6462 PAGE_REPLACEMENT_ALLOWED(TRUE);
6463
6464 vm_page_lock_queues();
6465 vm_free_page_lock();
6466
6467 RESET_STATE_OF_RUN();
6468 /*
6469 * reset our free page limit since we
6470 * dropped the lock protecting the vm_page_free_queue
6471 */
6472 free_available = vm_page_free_count - vm_page_free_reserved;
6473 considered = 0;
6474
6475 yielded++;
6476
6477 goto retry;
6478 }
6479 considered++;
6480 } /* main for-loop end */
6481
6482 m = VM_PAGE_NULL;
6483
6484 if (npages != contig_pages) {
6485 if (!wrapped) {
6486 /*
6487 * We didn't find a contiguous range but we didn't
6488 * start from the very first page.
6489 * Start again from the very first page.
6490 */
6491 RESET_STATE_OF_RUN();
6492 if (flags & KMA_LOMEM) {
6493 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
6494 } else {
6495 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6496 }
6497 last_idx = 0;
6498 page_idx = last_idx;
6499 wrapped = TRUE;
6500 goto retry;
6501 }
6502 vm_free_page_unlock();
6503 } else {
6504 vm_page_t m1;
6505 unsigned int cur_idx;
6506 unsigned int tmp_start_idx;
6507 vm_object_t locked_object = VM_OBJECT_NULL;
6508 boolean_t abort_run = FALSE;
6509
6510 assert(page_idx - start_idx == contig_pages);
6511
6512 tmp_start_idx = start_idx;
6513
6514 /*
6515 * first pass through to pull the free pages
6516 * off of the free queue so that in case we
6517 * need substitute pages, we won't grab any
6518 * of the free pages in the run... we'll clear
6519 * the 'free' bit in the 2nd pass, and even in
6520 * an abort_run case, we'll collect all of the
6521 * free pages in this run and return them to the free list
6522 */
6523 while (start_idx < page_idx) {
6524 m1 = &vm_pages[start_idx++];
6525
6526 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6527 assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6528 #endif
6529
6530 if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6531 unsigned int color;
6532
6533 color = VM_PAGE_GET_COLOR(m1);
6534 #if MACH_ASSERT
6535 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6536 #endif
6537 vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6538
6539 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6540 #if MACH_ASSERT
6541 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6542 #endif
6543 /*
6544 * Clear the "free" bit so that this page
6545 * does not get considered for another
6546 * concurrent physically-contiguous allocation.
6547 */
6548 m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6549 assert(m1->vmp_busy);
6550
6551 vm_page_free_count--;
6552 }
6553 }
6554 if (flags & KMA_LOMEM) {
6555 vm_page_lomem_find_contiguous_last_idx = page_idx;
6556 } else {
6557 vm_page_find_contiguous_last_idx = page_idx;
6558 }
6559
6560 /*
6561 * we can drop the free queue lock at this point since
6562 * we've pulled any 'free' candidates off of the list
6563 * we need it dropped so that we can do a vm_page_grab
6564 * when substituting for pmapped/dirty pages
6565 */
6566 vm_free_page_unlock();
6567
6568 start_idx = tmp_start_idx;
6569 cur_idx = page_idx - 1;
6570
6571 while (start_idx++ < page_idx) {
6572 /*
6573 * must go through the list from back to front
6574 * so that the page list is created in the
6575 * correct order - low -> high phys addresses
6576 */
6577 m1 = &vm_pages[cur_idx--];
6578
6579 if (m1->vmp_object == 0) {
6580 /*
6581 * page has already been removed from
6582 * the free list in the 1st pass
6583 */
6584 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6585 assert(m1->vmp_offset == (vm_object_offset_t) -1);
6586 assert(m1->vmp_busy);
6587 assert(!m1->vmp_wanted);
6588 assert(!m1->vmp_laundry);
6589 } else {
6590 /*
6591 * try to relocate/steal the page
6592 */
6593 if (abort_run == TRUE) {
6594 continue;
6595 }
6596
6597 assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6598
6599 vm_object_t object = VM_PAGE_OBJECT(m1);
6600
6601 if (object != locked_object) {
6602 if (locked_object) {
6603 vm_object_unlock(locked_object);
6604 locked_object = VM_OBJECT_NULL;
6605 }
6606 if (vm_object_lock_try(object)) {
6607 locked_object = object;
6608 } else {
6609 /* object must be locked to relocate its pages */
6610 tmp_start_idx = cur_idx;
6611 abort_run = TRUE;
6612 continue;
6613 }
6614 }
6615
6616 kr = vm_page_relocate(m1, &compressed_pages);
6617 if (kr != KERN_SUCCESS) {
6618 if (locked_object) {
6619 vm_object_unlock(locked_object);
6620 locked_object = VM_OBJECT_NULL;
6621 }
6622 tmp_start_idx = cur_idx;
6623 abort_run = TRUE;
6624 continue;
6625 }
6626
6627 stolen_pages++;
6628 }
6629
6630 /* m1 is ours at this point ... */
6631
6632 if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6633 /*
6634 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6635 * change it for pages marked as used-by-compressor.
6636 */
6637 vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6638 }
6639 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6640 m1->vmp_snext = m;
6641 m = m1;
6642 }
6643
6644 if (locked_object) {
6645 vm_object_unlock(locked_object);
6646 locked_object = VM_OBJECT_NULL;
6647 }
6648
6649 if (abort_run == TRUE) {
6650 /*
6651 * want the index of the last
6652 * page in this run that was
6653 * successfully 'stolen', so back
6654 * it up 1 for the auto-decrement on use
6655 * and 1 more to bump back over this page
6656 */
6657 page_idx = tmp_start_idx + 2;
6658 if (page_idx >= vm_pages_count) {
6659 if (wrapped) {
6660 if (m != VM_PAGE_NULL) {
6661 vm_page_unlock_queues();
6662 vm_page_free_list(m, FALSE);
6663 vm_page_lock_queues();
6664 m = VM_PAGE_NULL;
6665 }
6666 dumped_run++;
6667 goto done_scanning;
6668 }
6669 page_idx = last_idx = 0;
6670 wrapped = TRUE;
6671 }
6672 abort_run = FALSE;
6673
6674 /*
6675 * We didn't find a contiguous range but we didn't
6676 * start from the very first page.
6677 * Start again from the very first page.
6678 */
6679 RESET_STATE_OF_RUN();
6680
6681 if (flags & KMA_LOMEM) {
6682 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
6683 } else {
6684 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6685 }
6686
6687 last_idx = page_idx;
6688
6689 if (m != VM_PAGE_NULL) {
6690 vm_page_unlock_queues();
6691 vm_page_free_list(m, FALSE);
6692 vm_page_lock_queues();
6693 m = VM_PAGE_NULL;
6694 }
6695 dumped_run++;
6696
6697 vm_free_page_lock();
6698 /*
6699 * reset our free page limit since we
6700 * dropped the lock protecting the vm_page_free_queue
6701 */
6702 free_available = vm_page_free_count - vm_page_free_reserved;
6703 goto retry;
6704 }
6705
6706 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6707 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6708 assert(m1->vmp_wire_count == 0);
6709
6710 if (wire == TRUE) {
6711 m1->vmp_wire_count++;
6712 m1->vmp_q_state = VM_PAGE_IS_WIRED;
6713 } else {
6714 m1->vmp_gobbled = TRUE;
6715 }
6716 }
6717 if (wire == FALSE) {
6718 vm_page_gobble_count += npages;
6719 }
6720
6721 /*
6722 * gobbled pages are also counted as wired pages
6723 */
6724 vm_page_wire_count += npages;
6725
6726 assert(vm_page_verify_contiguous(m, npages));
6727 }
6728 done_scanning:
6729 PAGE_REPLACEMENT_ALLOWED(FALSE);
6730
6731 vm_page_unlock_queues();
6732
6733 #if DEBUG
6734 clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6735
6736 tv_end_sec -= tv_start_sec;
6737 if (tv_end_usec < tv_start_usec) {
6738 tv_end_sec--;
6739 tv_end_usec += 1000000;
6740 }
6741 tv_end_usec -= tv_start_usec;
6742 if (tv_end_usec >= 1000000) {
6743 tv_end_sec++;
6744 tv_end_usec -= 1000000;
6745 }
6746 if (vm_page_find_contig_debug) {
6747 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
6748 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6749 (long)tv_end_sec, tv_end_usec, orig_last_idx,
6750 scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6751 }
6752
6753 #endif
6754 #if MACH_ASSERT
6755 vm_page_verify_free_lists();
6756 #endif
6757 if (m == NULL && zone_gc_called < 2) {
6758 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6759 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6760 scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6761
6762 if (consider_buffer_cache_collect != NULL) {
6763 (void)(*consider_buffer_cache_collect)(1);
6764 }
6765
6766 zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6767
6768 zone_gc_called++;
6769
6770 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6771 goto full_scan_again;
6772 }
6773
6774 return m;
6775 }
6776
6777 /*
6778 * Allocate a list of contiguous, wired pages.
6779 */
6780 kern_return_t
6781 cpm_allocate(
6782 vm_size_t size,
6783 vm_page_t *list,
6784 ppnum_t max_pnum,
6785 ppnum_t pnum_mask,
6786 boolean_t wire,
6787 int flags)
6788 {
6789 vm_page_t pages;
6790 unsigned int npages;
6791
6792 if (size % PAGE_SIZE != 0) {
6793 return KERN_INVALID_ARGUMENT;
6794 }
6795
6796 npages = (unsigned int) (size / PAGE_SIZE);
6797 if (npages != size / PAGE_SIZE) {
6798 /* 32-bit overflow */
6799 return KERN_INVALID_ARGUMENT;
6800 }
6801
6802 /*
6803 * Obtain a pointer to a subset of the free
6804 * list large enough to satisfy the request;
6805 * the region will be physically contiguous.
6806 */
6807 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6808
6809 if (pages == VM_PAGE_NULL) {
6810 return KERN_NO_SPACE;
6811 }
6812 /*
6813 * determine need for wakeups
6814 */
6815 if (vm_page_free_count < vm_page_free_min) {
6816 vm_free_page_lock();
6817 if (vm_pageout_running == FALSE) {
6818 vm_free_page_unlock();
6819 thread_wakeup((event_t) &vm_page_free_wanted);
6820 } else {
6821 vm_free_page_unlock();
6822 }
6823 }
6824
6825 VM_CHECK_MEMORYSTATUS;
6826
6827 /*
6828 * The CPM pages should now be available and
6829 * ordered by ascending physical address.
6830 */
6831 assert(vm_page_verify_contiguous(pages, npages));
6832
6833 if (flags & KMA_ZERO) {
6834 for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6835 vm_page_zero_fill(m);
6836 }
6837 }
6838
6839 *list = pages;
6840 return KERN_SUCCESS;
6841 }
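/*
 * Illustrative usage, not compiled: a driver-style request for 1MB of
 * physically contiguous, wired, zero-filled memory below 4GB. The mask and
 * limit values here are assumptions for this example.
 */
#if 0
	vm_page_t pages;
	kern_return_t kr;

	kr = cpm_allocate(1024 * 1024,          /* size, page-aligned */
	    &pages,
	    atop(0xFFFFFFFFULL),                /* max_pnum: stay below 4GB */
	    0,                                  /* pnum_mask: no alignment demand */
	    TRUE,                               /* wire the pages */
	    KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		/* KERN_NO_SPACE: no contiguous run could be assembled */
	}
#endif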
6842
6843
6844 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6845
6846 /*
6847 * when working on a 'run' of pages, it is necessary to hold
6848 * the vm_page_queue_lock (a hot global lock) for certain operations
6849 * on the page... however, the majority of the work can be done
6850 * while merely holding the object lock... in fact there are certain
6851 * collections of pages that don't require any work brokered by the
6852 * vm_page_queue_lock... to mitigate the time spent behind the global
6853 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6854 * while doing all of the work that doesn't require the vm_page_queue_lock...
6855 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6856 * necessary work for each page... we will grab the busy bit on the page
6857 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6858 * if it can't immediately take the vm_page_queue_lock in order to compete
6859 * for the locks in the same order that vm_pageout_scan takes them.
6860 * the operation names are modeled after the names of the routines that
6861 * need to be called in order to make the changes very obvious in the
6862 * original loop
6863 */
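/*
 * Illustrative sketch of the two-pass pattern, not compiled: pass 1 runs
 * under the object lock only and batches per-page operations; pass 2 is
 * vm_page_do_delayed_work(), which takes the vm_page_queue_lock once per
 * batch. The dw_* setup here is simplified and the loop condition is
 * pseudocode.
 */
#if 0
	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work *dwp = &dw_array[0];
	int dw_count = 0;

	for (each interesting page m in object) {       /* object lock held */
		dwp->dw_m = m;
		dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
		dwp++;
		if (++dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
			vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
			    &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
		    &dw_array[0], dw_count);
	}
#endif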
6864
6865 void
6866 vm_page_do_delayed_work(
6867 vm_object_t object,
6868 vm_tag_t tag,
6869 struct vm_page_delayed_work *dwp,
6870 int dw_count)
6871 {
6872 int j;
6873 vm_page_t m;
6874 vm_page_t local_free_q = VM_PAGE_NULL;
6875
6876 /*
6877 * pageout_scan takes the vm_page_lock_queues first
6878 * then tries for the object lock... to avoid what
6879 * is effectively a lock inversion, we'll go to the
6880 * trouble of taking them in that same order... otherwise
6881 * if this object contains the majority of the pages resident
6882 * in the UBC (or a small set of large objects actively being
6883 * worked on contains the majority of the pages), we could
6884 * cause the pageout_scan thread to 'starve' in its attempt
6885 * to find pages to move to the free queue, since it has to
6886 * successfully acquire the object lock of any candidate page
6887 * before it can steal/clean it.
6888 */
6889 if (!vm_page_trylock_queues()) {
6890 vm_object_unlock(object);
6891
6892 /*
6893 * "Turnstile enabled vm_pageout_scan" can be runnable
6894 * for a very long time without getting on a core.
6895 * If this is a higher priority thread it could be
6896 * waiting here for a very long time respecting the fact
6897 * that pageout_scan would like its object after VPS does
6898 * a mutex_pause(0).
6899 * So we cap the number of yields in the vm_object_lock_avoid()
6900 * case to a single mutex_pause(0) which will give vm_pageout_scan
6901 * 10us to run and grab the object if needed.
6902 */
6903 vm_page_lock_queues();
6904
6905 for (j = 0;; j++) {
6906 if ((!vm_object_lock_avoid(object) ||
6907 (vps_dynamic_priority_enabled && (j > 0))) &&
6908 _vm_object_lock_try(object)) {
6909 break;
6910 }
6911 vm_page_unlock_queues();
6912 mutex_pause(j);
6913 vm_page_lock_queues();
6914 }
6915 }
6916 for (j = 0; j < dw_count; j++, dwp++) {
6917 m = dwp->dw_m;
6918
6919 if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6920 vm_pageout_throttle_up(m);
6921 }
6922 #if CONFIG_PHANTOM_CACHE
6923 if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6924 vm_phantom_cache_update(m);
6925 }
6926 #endif
6927 if (dwp->dw_mask & DW_vm_page_wire) {
6928 vm_page_wire(m, tag, FALSE);
6929 } else if (dwp->dw_mask & DW_vm_page_unwire) {
6930 boolean_t queueit;
6931
6932 queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6933
6934 vm_page_unwire(m, queueit);
6935 }
6936 if (dwp->dw_mask & DW_vm_page_free) {
6937 vm_page_free_prepare_queues(m);
6938
6939 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6940 /*
6941 * Add this page to our list of reclaimed pages,
6942 * to be freed later.
6943 */
6944 m->vmp_snext = local_free_q;
6945 local_free_q = m;
6946 } else {
6947 if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6948 vm_page_deactivate_internal(m, FALSE);
6949 } else if (dwp->dw_mask & DW_vm_page_activate) {
6950 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6951 vm_page_activate(m);
6952 }
6953 } else if (dwp->dw_mask & DW_vm_page_speculate) {
6954 vm_page_speculate(m, TRUE);
6955 } else if (dwp->dw_mask & DW_enqueue_cleaned) {
6956 /*
6957 * if we didn't hold the object lock and did this,
6958 * we might disconnect the page, then someone might
6959 * soft fault it back in, then we would put it on the
6960 * cleaned queue, and so we would have a referenced (maybe even dirty)
6961 * page on that queue, which we don't want
6962 */
6963 int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6964
6965 if ((refmod_state & VM_MEM_REFERENCED)) {
6966 /*
6967 * this page has been touched since it got cleaned; let's activate it
6968 * if it hasn't already been
6969 */
6970 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6971 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6972
6973 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6974 vm_page_activate(m);
6975 }
6976 } else {
6977 m->vmp_reference = FALSE;
6978 vm_page_enqueue_cleaned(m);
6979 }
6980 } else if (dwp->dw_mask & DW_vm_page_lru) {
6981 vm_page_lru(m);
6982 } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6983 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6984 vm_page_queues_remove(m, TRUE);
6985 }
6986 }
6987 if (dwp->dw_mask & DW_set_reference) {
6988 m->vmp_reference = TRUE;
6989 } else if (dwp->dw_mask & DW_clear_reference) {
6990 m->vmp_reference = FALSE;
6991 }
6992
6993 if (dwp->dw_mask & DW_move_page) {
6994 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6995 vm_page_queues_remove(m, FALSE);
6996
6997 assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
6998
6999 vm_page_enqueue_inactive(m, FALSE);
7000 }
7001 }
7002 if (dwp->dw_mask & DW_clear_busy) {
7003 m->vmp_busy = FALSE;
7004 }
7005
7006 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7007 vm_page_wakeup(object, m);
7008 }
7009 }
7010 }
7011 vm_page_unlock_queues();
7012
7013 if (local_free_q) {
7014 vm_page_free_list(local_free_q, TRUE);
7015 }
7016
7017 VM_CHECK_MEMORYSTATUS;
7018 }
7019
7020 __abortlike
7021 static void
7022 __vm_page_alloc_list_failed_panic(
7023 vm_size_t page_count,
7024 kma_flags_t flags,
7025 kern_return_t kr)
7026 {
7027 panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7028 (size_t)page_count, flags, kr);
7029 }
7030
7031 kern_return_t
7032 vm_page_alloc_list(
7033 vm_size_t page_count,
7034 kma_flags_t flags,
7035 vm_page_t *list)
7036 {
7037 vm_page_t page_list = VM_PAGE_NULL;
7038 vm_page_t mem;
7039 kern_return_t kr = KERN_SUCCESS;
7040 int page_grab_count = 0;
7041 #if DEVELOPMENT || DEBUG
7042 task_t task;
7043 #endif /* DEVELOPMENT || DEBUG */
7044
7045 for (vm_size_t i = 0; i < page_count; i++) {
7046 for (;;) {
7047 if (flags & KMA_LOMEM) {
7048 mem = vm_page_grablo();
7049 } else {
7050 uint_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7051 mem = vm_page_grab_options(options);
7052 }
7053
7054 if (mem != VM_PAGE_NULL) {
7055 break;
7056 }
7057
7058 if (flags & KMA_NOPAGEWAIT) {
7059 kr = KERN_RESOURCE_SHORTAGE;
7060 goto out;
7061 }
7062 if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
7063 kr = KERN_RESOURCE_SHORTAGE;
7064 goto out;
7065 }
7066
7067 /* VM privileged threads should have waited in vm_page_grab() and not get here. */
7068 assert(!(current_thread()->options & TH_OPT_VMPRIV));
7069
7070 if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7071 uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7072 if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7073 kr = KERN_RESOURCE_SHORTAGE;
7074 goto out;
7075 }
7076 }
7077 VM_PAGE_WAIT();
7078 }
7079
7080 page_grab_count++;
7081 mem->vmp_snext = page_list;
7082 page_list = mem;
7083 }
7084
7085 if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7086 for (mem = page_list; mem; mem = mem->vmp_snext) {
7087 vm_page_zero_fill(mem);
7088 }
7089 }
7090
7091 out:
7092 #if DEBUG || DEVELOPMENT
7093 task = current_task_early();
7094 if (task != NULL) {
7095 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
7096 }
7097 #endif
7098
7099 if (kr == KERN_SUCCESS) {
7100 *list = page_list;
7101 } else if (flags & KMA_NOFAIL) {
7102 __vm_page_alloc_list_failed_panic(page_count, flags, kr);
7103 } else {
7104 vm_page_free_list(page_list, FALSE);
7105 }
7106
7107 return kr;
7108 }
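/*
 * Illustrative usage, not compiled: grab four zero-filled pages without
 * waiting, walk the returned chain, then free it.
 */
#if 0
	vm_page_t list, mem;
	kern_return_t kr;

	kr = vm_page_alloc_list(4, KMA_ZERO | KMA_NOPAGEWAIT, &list);
	if (kr == KERN_SUCCESS) {
		for (mem = list; mem != VM_PAGE_NULL; mem = mem->vmp_snext) {
			/* ... use VM_PAGE_GET_PHYS_PAGE(mem) ... */
		}
		vm_page_free_list(list, FALSE);
	}
#endif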
7109
7110 void
7111 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7112 {
7113 page->vmp_offset = offset;
7114 }
7115
7116 vm_page_t
7117 vm_page_get_next(vm_page_t page)
7118 {
7119 return page->vmp_snext;
7120 }
7121
7122 vm_object_offset_t
7123 vm_page_get_offset(vm_page_t page)
7124 {
7125 return page->vmp_offset;
7126 }
7127
7128 ppnum_t
7129 vm_page_get_phys_page(vm_page_t page)
7130 {
7131 return VM_PAGE_GET_PHYS_PAGE(page);
7132 }
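/*
 * Illustrative usage, not compiled: these accessors let external callers
 * walk a page list (e.g. one returned by cpm_allocate() above) without
 * knowing the vm_page layout. "pages" is a hypothetical list head.
 */
#if 0
	for (vm_page_t p = pages; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
		ppnum_t pnum = vm_page_get_phys_page(p);
		/* ... program DMA with ptoa_64(pnum) ... */
	}
#endif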
7133
7134
7135 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7136
7137 #if HIBERNATION
7138
7139 static vm_page_t hibernate_gobble_queue;
7140
7141 static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
7142 static int hibernate_flush_dirty_pages(int);
7143 static int hibernate_flush_queue(vm_page_queue_head_t *, int);
7144
7145 void hibernate_flush_wait(void);
7146 void hibernate_mark_in_progress(void);
7147 void hibernate_clear_in_progress(void);
7148
7149 void hibernate_free_range(int, int);
7150 void hibernate_hash_insert_page(vm_page_t);
7151 uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
7152 uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7153 ppnum_t hibernate_lookup_paddr(unsigned int);
7154
7155 struct hibernate_statistics {
7156 int hibernate_considered;
7157 int hibernate_reentered_on_q;
7158 int hibernate_found_dirty;
7159 int hibernate_skipped_cleaning;
7160 int hibernate_skipped_transient;
7161 int hibernate_skipped_precious;
7162 int hibernate_skipped_external;
7163 int hibernate_queue_nolock;
7164 int hibernate_queue_paused;
7165 int hibernate_throttled;
7166 int hibernate_throttle_timeout;
7167 int hibernate_drained;
7168 int hibernate_drain_timeout;
7169 int cd_lock_failed;
7170 int cd_found_precious;
7171 int cd_found_wired;
7172 int cd_found_busy;
7173 int cd_found_unusual;
7174 int cd_found_cleaning;
7175 int cd_found_laundry;
7176 int cd_found_dirty;
7177 int cd_found_xpmapped;
7178 int cd_skipped_xpmapped;
7179 int cd_local_free;
7180 int cd_total_free;
7181 int cd_vm_page_wire_count;
7182 int cd_vm_struct_pages_unneeded;
7183 int cd_pages;
7184 int cd_discarded;
7185 int cd_count_wire;
7186 } hibernate_stats;
7187
7188 #if CONFIG_SPTM
7189 /**
7190 * On SPTM-based systems don't save any executable pages into the hibernation
7191 * image. The SPTM has stronger guarantees around not allowing write access to
7192 * the executable pages than on older systems, which prevents XNU from being
7193 * able to restore any pages mapped as executable.
7194 */
7195 #define HIBERNATE_XPMAPPED_LIMIT 0ULL
7196 #else /* CONFIG_SPTM */
7197 /*
7198 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7199 * so that we don't overrun the estimated image size, which would
7200 * result in a hibernation failure.
7201 *
7202 * We use a size value instead of pages because we don't want to take up more space
7203 * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7204 * to have that additional space available.
7205 *
7206 * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7207 * xpmapped size.
7208 */
7209 #define HIBERNATE_XPMAPPED_LIMIT ((160 * 1024 * 1024ULL) / PAGE_SIZE)
7210 #endif /* CONFIG_SPTM */
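/*
 * For reference, the arithmetic: 160MB is 40960 pages at a 4K page size,
 * close to the historical 40000-page X86 limit, and 10240 pages at 16K,
 * so the on-disk budget stays the same either way.
 */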
7211
7212 static int
7213 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7214 {
7215 wait_result_t wait_result;
7216
7217 vm_page_lock_queues();
7218
7219 while (!vm_page_queue_empty(&q->pgo_pending)) {
7220 q->pgo_draining = TRUE;
7221
7222 assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7223
7224 vm_page_unlock_queues();
7225
7226 wait_result = thread_block(THREAD_CONTINUE_NULL);
7227
7228 if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7229 hibernate_stats.hibernate_drain_timeout++;
7230
7231 if (q == &vm_pageout_queue_external) {
7232 return 0;
7233 }
7234
7235 return 1;
7236 }
7237 vm_page_lock_queues();
7238
7239 hibernate_stats.hibernate_drained++;
7240 }
7241 vm_page_unlock_queues();
7242
7243 return 0;
7244 }
7245
7246
7247 boolean_t hibernate_skip_external = FALSE;
7248
7249 static int
7250 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7251 {
7252 vm_page_t m;
7253 vm_object_t l_object = NULL;
7254 vm_object_t m_object = NULL;
7255 int refmod_state = 0;
7256 int try_failed_count = 0;
7257 int retval = 0;
7258 int current_run = 0;
7259 struct vm_pageout_queue *iq;
7260 struct vm_pageout_queue *eq;
7261 struct vm_pageout_queue *tq;
7262
7263 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7264 VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7265
7266 iq = &vm_pageout_queue_internal;
7267 eq = &vm_pageout_queue_external;
7268
7269 vm_page_lock_queues();
7270
7271 while (qcount && !vm_page_queue_empty(q)) {
7272 if (current_run++ == 1000) {
7273 if (hibernate_should_abort()) {
7274 retval = 1;
7275 break;
7276 }
7277 current_run = 0;
7278 }
7279
7280 m = (vm_page_t) vm_page_queue_first(q);
7281 m_object = VM_PAGE_OBJECT(m);
7282
7283 /*
7284 * check to see if we currently are working
7285 * with the same object... if so, we've
7286 * already got the lock
7287 */
7288 if (m_object != l_object) {
7289 /*
7290 * the object associated with candidate page is
7291 * different from the one we were just working
7292 * with... dump the lock if we still own it
7293 */
7294 if (l_object != NULL) {
7295 vm_object_unlock(l_object);
7296 l_object = NULL;
7297 }
			/*
			 * Try to lock the object; since we've already got the
			 * page queues lock, we can only 'try' for this one.
			 * If the 'try' fails, we need to do a mutex_pause
			 * to allow the owner of the object lock a chance to
			 * run...
			 */
			if (!vm_object_lock_try_scan(m_object)) {
				if (try_failed_count > 20) {
					hibernate_stats.hibernate_queue_nolock++;

					goto reenter_pg_on_q;
				}

				vm_page_unlock_queues();
				mutex_pause(try_failed_count++);
				vm_page_lock_queues();

				hibernate_stats.hibernate_queue_paused++;
				continue;
			} else {
				l_object = m_object;
			}
		}
		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			if (m->vmp_cleaning) {
				hibernate_stats.hibernate_skipped_cleaning++;
			} else {
				hibernate_stats.hibernate_skipped_transient++;
			}

			goto reenter_pg_on_q;
		}
		if (m_object->vo_copy == VM_OBJECT_NULL) {
			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
				/*
				 * let the normal hibernate image path
				 * deal with these
				 */
				goto reenter_pg_on_q;
			}
		}
		if (!m->vmp_dirty && m->vmp_pmapped) {
			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

			if ((refmod_state & VM_MEM_MODIFIED)) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		} else {
			refmod_state = 0;
		}

		if (!m->vmp_dirty) {
			/*
			 * page is not to be cleaned
			 * put it back on the head of its queue
			 */
			if (m->vmp_precious) {
				hibernate_stats.hibernate_skipped_precious++;
			}

			goto reenter_pg_on_q;
		}

		if (hibernate_skip_external == TRUE && !m_object->internal) {
			hibernate_stats.hibernate_skipped_external++;

			goto reenter_pg_on_q;
		}
		tq = NULL;

		if (m_object->internal) {
			if (VM_PAGE_Q_THROTTLED(iq)) {
				tq = iq;
			}
		} else if (VM_PAGE_Q_THROTTLED(eq)) {
			tq = eq;
		}

		if (tq != NULL) {
			wait_result_t wait_result;
			int wait_count = 5;

			if (l_object != NULL) {
				vm_object_unlock(l_object);
				l_object = NULL;
			}

			while (retval == 0) {
				tq->pgo_throttled = TRUE;

				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);

				vm_page_unlock_queues();

				wait_result = thread_block(THREAD_CONTINUE_NULL);

				vm_page_lock_queues();

				if (wait_result != THREAD_TIMED_OUT) {
					break;
				}
				if (!VM_PAGE_Q_THROTTLED(tq)) {
					break;
				}

				if (hibernate_should_abort()) {
					retval = 1;
				}

				if (--wait_count == 0) {
					hibernate_stats.hibernate_throttle_timeout++;

					if (tq == eq) {
						hibernate_skip_external = TRUE;
						break;
					}
					retval = 1;
				}
			}
			if (retval) {
				break;
			}

			hibernate_stats.hibernate_throttled++;

			continue;
		}
		/*
		 * we've already factored out pages in the laundry which
		 * means this page can't be on the pageout queue so it's
		 * safe to do the vm_page_queues_remove
		 */
		vm_page_queues_remove(m, TRUE);

		if (m_object->internal == TRUE) {
			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
		}

		vm_pageout_cluster(m);

		hibernate_stats.hibernate_found_dirty++;

		goto next_pg;

reenter_pg_on_q:
		vm_page_queue_remove(q, m, vmp_pageq);
		vm_page_queue_enter(q, m, vmp_pageq);

		hibernate_stats.hibernate_reentered_on_q++;
next_pg:
		hibernate_stats.hibernate_considered++;

		qcount--;
		try_failed_count = 0;
	}
	if (l_object != NULL) {
		vm_object_unlock(l_object);
		l_object = NULL;
	}

	vm_page_unlock_queues();

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);

	return retval;
}


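/*
 * Flush dirty pages from all of the pageable queues (speculative, inactive,
 * anonymous, cleaned, then active) ahead of hibernation, draining the
 * internal pageout queue between phases. On pass 1 the active-queue flush is
 * bracketed with compressor warmup recording. Returns 0 on success, 1 if any
 * flush or drain failed.
 */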
static int
hibernate_flush_dirty_pages(int pass)
{
	struct vm_speculative_age_q *aq;
	uint32_t i;

	if (vm_page_local_q) {
		zpercpu_foreach_cpu(lid) {
			vm_page_reactivate_local(lid, TRUE, FALSE);
		}
	}

	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
		int qcount;
		vm_page_t m;

		aq = &vm_page_queue_speculative[i];

		if (vm_page_queue_empty(&aq->age_q)) {
			continue;
		}
		qcount = 0;

		vm_page_lockspin_queues();

		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
			qcount++;
		}
		vm_page_unlock_queues();

		if (qcount) {
			if (hibernate_flush_queue(&aq->age_q, qcount)) {
				return 1;
			}
		}
	}
	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
		return 1;
	}
	/* XXX FBDP TODO: flush secluded queue */
	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
		return 1;
	}
	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
		return 1;
	}
	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
		return 1;
	}

	if (pass == 1) {
		vm_compressor_record_warmup_start();
	}

	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
		if (pass == 1) {
			vm_compressor_record_warmup_end();
		}
		return 1;
	}
	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
		if (pass == 1) {
			vm_compressor_record_warmup_end();
		}
		return 1;
	}
	if (pass == 1) {
		vm_compressor_record_warmup_end();
	}

	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
		return 1;
	}

	return 0;
}


void
hibernate_reset_stats()
{
	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
}

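/*
 * Top-level memory flush for hibernation: push dirty pages to the pageout
 * subsystem, flush the compressor itself, then prod the buffer cache and the
 * zone garbage collector to give back wired pages. Returns 0 on success,
 * non-zero if the flush had to be abandoned.
 */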
int
hibernate_flush_memory()
{
	int retval;

	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);

	hibernate_cleaning_in_progress = TRUE;
	hibernate_skip_external = FALSE;

	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);

		vm_compressor_flush();

		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);

		if (consider_buffer_cache_collect != NULL) {
			unsigned int orig_wire_count;

			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
			orig_wire_count = vm_page_wire_count;

			(void)(*consider_buffer_cache_collect)(1);
			zone_gc(ZONE_GC_DRAIN);

			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);

			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
		}
	}
	hibernate_cleaning_in_progress = FALSE;

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);

	if (retval) {
		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
	}


	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
	    hibernate_stats.hibernate_considered,
	    hibernate_stats.hibernate_reentered_on_q,
	    hibernate_stats.hibernate_found_dirty);
	HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
	    hibernate_stats.hibernate_skipped_cleaning,
	    hibernate_stats.hibernate_skipped_transient,
	    hibernate_stats.hibernate_skipped_precious,
	    hibernate_stats.hibernate_skipped_external,
	    hibernate_stats.hibernate_queue_nolock);
	HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
	    hibernate_stats.hibernate_queue_paused,
	    hibernate_stats.hibernate_throttled,
	    hibernate_stats.hibernate_throttle_timeout,
	    hibernate_stats.hibernate_drained,
	    hibernate_stats.hibernate_drain_timeout);

	return retval;
}


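/*
 * Clear every bank bitmap in the given page list and set the out-of-bounds
 * bits past each bank's last page, so pages that don't exist are treated as
 * not needing to be saved (bits zero => page needs saving).
 */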
static void
hibernate_page_list_zero(hibernate_page_list_t *list)
{
	uint32_t bank;
	hibernate_bitmap_t *bitmap;

	bitmap = &list->bank_bitmap[0];
	for (bank = 0; bank < list->bank_count; bank++) {
		uint32_t last_bit;

		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
		// set out-of-bound bits at end of bitmap.
		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
		if (last_bit) {
			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
		}

		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
	}
}

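/*
 * Return any pages previously "gobbled" (grabbed and held on
 * hibernate_gobble_queue) back to the free list, logging how many were
 * released.
 */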
void
hibernate_free_gobble_pages(void)
{
	vm_page_t m, next;
	uint32_t count = 0;

	m = (vm_page_t) hibernate_gobble_queue;
	while (m) {
		next = m->vmp_snext;
		vm_page_free(m);
		count++;
		m = next;
	}
	hibernate_gobble_queue = VM_PAGE_NULL;

	if (count) {
		HIBLOG("Freed %d pages\n", count);
	}
}

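/*
 * Decide whether a page can simply be discarded (left out of the image and
 * tossed on wakeup) rather than written out: the page must be unwired, not
 * busy/precious/unusual, not being cleaned or laundered, and either clean or
 * belonging to a volatile/empty purgeable object. In the non-preflight case
 * the per-reason cd_* statistics are updated as a side effect.
 */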
static boolean_t
hibernate_consider_discard(vm_page_t m, boolean_t preflight)
{
	vm_object_t object = NULL;
	int refmod_state;
	boolean_t discard = FALSE;

	do {
		if (m->vmp_private) {
			panic("hibernate_consider_discard: private");
		}

		object = VM_PAGE_OBJECT(m);

		if (!vm_object_lock_try(object)) {
			object = NULL;
			if (!preflight) {
				hibernate_stats.cd_lock_failed++;
			}
			break;
		}
		if (VM_PAGE_WIRED(m)) {
			if (!preflight) {
				hibernate_stats.cd_found_wired++;
			}
			break;
		}
		if (m->vmp_precious) {
			if (!preflight) {
				hibernate_stats.cd_found_precious++;
			}
			break;
		}
		if (m->vmp_busy || !object->alive) {
			/*
			 * Somebody is playing with this page.
			 */
			if (!preflight) {
				hibernate_stats.cd_found_busy++;
			}
			break;
		}
		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
			/*
			 * If it's unusual in any way, ignore it
			 */
			if (!preflight) {
				hibernate_stats.cd_found_unusual++;
			}
			break;
		}
		if (m->vmp_cleaning) {
			if (!preflight) {
				hibernate_stats.cd_found_cleaning++;
			}
			break;
		}
		if (m->vmp_laundry) {
			if (!preflight) {
				hibernate_stats.cd_found_laundry++;
			}
			break;
		}
		if (!m->vmp_dirty) {
			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));

			if (refmod_state & VM_MEM_REFERENCED) {
				m->vmp_reference = TRUE;
			}
			if (refmod_state & VM_MEM_MODIFIED) {
				SET_PAGE_DIRTY(m, FALSE);
			}
		}

		/*
		 * If it's clean or purgeable we can discard the page on wakeup.
		 */
		discard = (!m->vmp_dirty)
		    || (VM_PURGABLE_VOLATILE == object->purgable)
		    || (VM_PURGABLE_EMPTY == object->purgable);


		if (discard == FALSE) {
			if (!preflight) {
				hibernate_stats.cd_found_dirty++;
			}
		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
				if (!preflight) {
					hibernate_stats.cd_found_xpmapped++;
				}
				discard = FALSE;
			} else {
				if (!preflight) {
					hibernate_stats.cd_skipped_xpmapped++;
				}
			}
		}
	} while (FALSE);

	if (object) {
		vm_object_unlock(object);
	}

	return discard;
}


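/*
 * Actually discard a page that hibernate_consider_discard() approved:
 * disconnect it from the pmap, transition a volatile purgeable owner to the
 * EMPTY state (updating vm_page_purgeable_count), and free the page.
 */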
static void
hibernate_discard_page(vm_page_t m)
{
	vm_object_t m_object;

	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
		/*
		 * If it's unusual in any way, ignore
		 */
		return;
	}

	m_object = VM_PAGE_OBJECT(m);

#if MACH_ASSERT || DEBUG
	if (!vm_object_lock_try(m_object)) {
		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
	}
#else
	/* No need to lock page queue for token delete, hibernate_vm_unlock()
	 * makes sure these locks are uncontended before sleep */
#endif /* MACH_ASSERT || DEBUG */

	if (m->vmp_pmapped == TRUE) {
		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
	}

	if (m->vmp_laundry) {
		panic("hibernate_discard_page(%p) laundry", m);
	}
	if (m->vmp_private) {
		panic("hibernate_discard_page(%p) private", m);
	}
	if (m->vmp_fictitious) {
		panic("hibernate_discard_page(%p) fictitious", m);
	}

	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
		/* object should be on a queue */
		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
		assert(old_queue);
		if (m_object->purgeable_when_ripe) {
			vm_purgeable_token_delete_first(old_queue);
		}
		vm_object_lock_assert_exclusive(m_object);
		VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);

		/*
		 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
		 * accounted in the "volatile" ledger, so no change here.
		 * We have to update vm_page_purgeable_count, though, since we're
		 * effectively purging this object.
		 */
		unsigned int delta;
		assert(m_object->resident_page_count >= m_object->wired_page_count);
		delta = (m_object->resident_page_count - m_object->wired_page_count);
		assert(vm_page_purgeable_count >= delta);
		assert(delta > 0);
		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
	}

	vm_page_free(m);

#if MACH_ASSERT || DEBUG
	vm_object_unlock(m_object);
#endif /* MACH_ASSERT || DEBUG */
}

/*
 * Grab locks for hibernate_page_list_setall()
 */
void
hibernate_vm_lock_queues(void)
{
	vm_object_lock(compressor_object);
	vm_page_lock_queues();
	vm_free_page_lock();
	lck_mtx_lock(&vm_purgeable_queue_lock);

	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			VPL_LOCK(&lq->vpl_lock);
		}
	}
}

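/*
 * Release the locks taken by hibernate_vm_lock_queues(), in reverse order.
 */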
void
hibernate_vm_unlock_queues(void)
{
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			VPL_UNLOCK(&lq->vpl_lock);
		}
	}
	lck_mtx_unlock(&vm_purgeable_queue_lock);
	vm_free_page_unlock();
	vm_page_unlock_queues();
	vm_object_unlock(compressor_object);
}

#if CONFIG_SPTM
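/*
 * Decide whether a page must be forced onto the wired page list: true for
 * SPTM frame types XNU_USER_JIT and XNU_USER_DEBUG, and for XNU_USER_EXEC
 * frames backed by internal objects. Presumably this is because the SPTM
 * won't let XNU rewrite such pages through the normal pageable restore path
 * (see HIBERNATE_XPMAPPED_LIMIT above).
 */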
static bool
hibernate_sptm_should_force_page_to_wired_pagelist(vm_page_t vmp)
{
	const sptm_paddr_t paddr = ptoa_64(VM_PAGE_GET_PHYS_PAGE(vmp));
	const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
	const vm_object_t vmp_objp = VM_PAGE_OBJECT(vmp);

	return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG ||
	       (frame_type == XNU_USER_EXEC && vmp_objp->internal == TRUE);
}
#endif

/*
 * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
 * pages known to VM to not need saving are subtracted.
 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
 */

void
hibernate_page_list_setall(hibernate_page_list_t * page_list,
    hibernate_page_list_t * page_list_wired,
    hibernate_page_list_t * page_list_pal,
    boolean_t preflight,
    boolean_t will_discard,
    uint32_t * pagesOut)
{
	uint64_t start, end, nsec;
	vm_page_t m;
	vm_page_t next;
	uint32_t pages = page_list->page_count;
	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
	uint32_t count_wire = pages;
	uint32_t count_discard_active = 0;
	uint32_t count_discard_inactive = 0;
	uint32_t count_retired = 0;
	uint32_t count_discard_cleaned = 0;
	uint32_t count_discard_purgeable = 0;
	uint32_t count_discard_speculative = 0;
	uint32_t count_discard_vm_struct_pages = 0;
	uint32_t i;
	uint32_t bank;
	hibernate_bitmap_t * bitmap;
	hibernate_bitmap_t * bitmap_wired;
	boolean_t discard_all;
	boolean_t discard = FALSE;

	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);

	if (preflight) {
		page_list = NULL;
		page_list_wired = NULL;
		page_list_pal = NULL;
		discard_all = FALSE;
	} else {
		discard_all = will_discard;
	}

#if MACH_ASSERT || DEBUG
	if (!preflight) {
		assert(hibernate_vm_locks_are_safe());
		vm_page_lock_queues();
		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				VPL_LOCK(&lq->vpl_lock);
			}
		}
	}
#endif /* MACH_ASSERT || DEBUG */


	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);

	clock_get_uptime(&start);

	if (!preflight) {
		hibernate_page_list_zero(page_list);
		hibernate_page_list_zero(page_list_wired);
		hibernate_page_list_zero(page_list_pal);

		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
		hibernate_stats.cd_pages = pages;
	}

	if (vm_page_local_q) {
		zpercpu_foreach_cpu(lid) {
			vm_page_reactivate_local(lid, TRUE, !preflight);
		}
	}

	if (preflight) {
		vm_object_lock(compressor_object);
		vm_page_lock_queues();
		vm_free_page_lock();
	}

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);

	hibernation_vmqueues_inspection = TRUE;

	m = (vm_page_t) hibernate_gobble_queue;
	while (m) {
		pages--;
		count_wire--;
		if (!preflight) {
			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
		}
		m = m->vmp_snext;
	}

	if (!preflight) {
		percpu_foreach(free_pages_head, free_pages) {
			for (m = *free_pages_head; m; m = m->vmp_snext) {
				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);

				pages--;
				count_wire--;
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

				hibernate_stats.cd_local_free++;
				hibernate_stats.cd_total_free++;
			}
		}
	}

	for (i = 0; i < vm_colors; i++) {
		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);

			pages--;
			count_wire--;
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

				hibernate_stats.cd_total_free++;
			}
		}
	}

	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);

		pages--;
		count_wire--;
		if (!preflight) {
			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));

			hibernate_stats.cd_total_free++;
		}
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);

		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		discard = FALSE;
		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
		    && hibernate_consider_discard(m, preflight)) {
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			count_discard_inactive++;
			discard = discard_all;
		} else {
			count_throttled++;
		}
		count_wire--;
		if (!preflight) {
			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
		}

		if (discard) {
			hibernate_discard_page(m);
		}
		m = next;
	}

	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
		bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
#if CONFIG_SPTM
		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
#endif
		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		discard = FALSE;
		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
		    hibernate_consider_discard(m, preflight)) {
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_inactive++;
			}
			discard = discard_all;
		} else {
			/*
			 * If the page must be force-added to the wired page list, prevent it from appearing
			 * in the unwired page list.
			 */
			if (force_to_wired_list) {
				if (!preflight) {
					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				}
			} else {
				count_anonymous++;
			}
		}
		/*
		 * If the page is NOT being forced into the wired page list, remove it from the
		 * wired page list here.
		 */
		if (!force_to_wired_list) {
			count_wire--;
			if (!preflight) {
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
		}
		if (discard) {
			hibernate_discard_page(m);
		}
		m = next;
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		discard = FALSE;
		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
		    hibernate_consider_discard(m, preflight)) {
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_cleaned++;
			}
			discard = discard_all;
		} else {
			count_cleaned++;
		}
		count_wire--;
		if (!preflight) {
			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
		}
		if (discard) {
			hibernate_discard_page(m);
		}
		m = next;
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
		bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
#if CONFIG_SPTM
		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
#endif
		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		discard = FALSE;
		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
		    hibernate_consider_discard(m, preflight)) {
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_active++;
			}
			discard = discard_all;
		} else {
			/*
			 * If the page must be force-added to the wired page list, prevent it from appearing
			 * in the unwired page list.
			 */
			if (force_to_wired_list) {
				if (!preflight) {
					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				}
			} else {
				count_active++;
			}
		}
		/*
		 * If the page is NOT being forced into the wired page list, remove it from the
		 * wired page list here.
		 */
		if (!force_to_wired_list) {
			count_wire--;
			if (!preflight) {
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
		}
		if (discard) {
			hibernate_discard_page(m);
		}
		m = next;
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
		bool force_to_wired_list = false; /* Default to NOT forcing page into the wired page list */
#if CONFIG_SPTM
		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
#endif
		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		discard = FALSE;
		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
		    hibernate_consider_discard(m, preflight)) {
			if (!preflight) {
				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_inactive++;
			}
			discard = discard_all;
		} else {
			/*
			 * If the page must be force-added to the wired page list, prevent it from appearing
			 * in the unwired page list.
			 */
			if (force_to_wired_list) {
				if (!preflight) {
					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				}
			} else {
				count_inactive++;
			}
		}
		/*
		 * If the page is NOT being forced into the wired page list, remove it from the
		 * wired page list here.
		 */
		if (!force_to_wired_list) {
			count_wire--;
			if (!preflight) {
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
		}
		if (discard) {
			hibernate_discard_page(m);
		}
		m = next;
	}
	/* XXX FBDP TODO: secluded queue */

	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);

			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
			discard = FALSE;
			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
			    hibernate_consider_discard(m, preflight)) {
				if (!preflight) {
					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
				}
				count_discard_speculative++;
				discard = discard_all;
			} else {
				count_speculative++;
			}
			count_wire--;
			if (!preflight) {
				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
			}
			if (discard) {
				hibernate_discard_page(m);
			}
			m = next;
		}
	}

	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);

		count_compressor++;
		count_wire--;
		if (!preflight) {
			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
		}
	}


	if (preflight == FALSE && discard_all == TRUE) {
		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);

		HIBLOG("hibernate_teardown started\n");
		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);

		pages -= count_discard_vm_struct_pages;
		count_wire -= count_discard_vm_struct_pages;

		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;

		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
	}

	if (!preflight) {
		// pull wired from hibernate_bitmap
		bitmap = &page_list->bank_bitmap[0];
		bitmap_wired = &page_list_wired->bank_bitmap[0];
		for (bank = 0; bank < page_list->bank_count; bank++) {
			for (i = 0; i < bitmap->bitmapwords; i++) {
				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
			}
			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
		}
	}

	// machine dependent adjustments
	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);

	if (!preflight) {
		hibernate_stats.cd_count_wire = count_wire;
		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
	}

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);

	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
	    discard_all ? "did" : "could",
	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);

	if (hibernate_stats.cd_skipped_xpmapped) {
		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
	}

	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;

	if (preflight && will_discard) {
		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
		/*
		 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation
		 * image even if they are clean, so we need to size the hibernation image accordingly.
		 *
		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped', which solely tracks
		 * clean xpmapped pages.
		 *
		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase.
		 */
		*pagesOut += HIBERNATE_XPMAPPED_LIMIT;
	}

	hibernation_vmqueues_inspection = FALSE;

#if MACH_ASSERT || DEBUG
	if (!preflight) {
		if (vm_page_local_q) {
			zpercpu_foreach(lq, vm_page_local_q) {
				VPL_UNLOCK(&lq->vpl_lock);
			}
		}
		vm_page_unlock_queues();
	}
#endif /* MACH_ASSERT || DEBUG */

	if (preflight) {
		vm_free_page_unlock();
		vm_page_unlock_queues();
		vm_object_unlock(compressor_object);
	}

	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
}

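/*
 * Second phase of the discard path: walk the anonymous, speculative,
 * inactive, active, and cleaned queues and free every page whose bit was set
 * in 'page_list' by hibernate_page_list_setall(), tallying the discards by
 * queue.
 */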
void
hibernate_page_list_discard(hibernate_page_list_t * page_list)
{
	uint64_t start, end, nsec;
	vm_page_t m;
	vm_page_t next;
	uint32_t i;
	uint32_t count_discard_active = 0;
	uint32_t count_discard_inactive = 0;
	uint32_t count_discard_purgeable = 0;
	uint32_t count_discard_cleaned = 0;
	uint32_t count_discard_speculative = 0;


#if MACH_ASSERT || DEBUG
	vm_page_lock_queues();
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			VPL_LOCK(&lq->vpl_lock);
		}
	}
#endif /* MACH_ASSERT || DEBUG */

	clock_get_uptime(&start);

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);

		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_inactive++;
			}
			hibernate_discard_page(m);
		}
		m = next;
	}

	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);

			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
				count_discard_speculative++;
				hibernate_discard_page(m);
			}
			m = next;
		}
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);

		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_inactive++;
			}
			hibernate_discard_page(m);
		}
		m = next;
	}
	/* XXX FBDP TODO: secluded queue */

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);

		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_active++;
			}
			hibernate_discard_page(m);
		}
		m = next;
	}

	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);

		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
			if (m->vmp_dirty) {
				count_discard_purgeable++;
			} else {
				count_discard_cleaned++;
			}
			hibernate_discard_page(m);
		}
		m = next;
	}

#if MACH_ASSERT || DEBUG
	if (vm_page_local_q) {
		zpercpu_foreach(lq, vm_page_local_q) {
			VPL_UNLOCK(&lq->vpl_lock);
		}
	}
	vm_page_unlock_queues();
#endif /* MACH_ASSERT || DEBUG */

	clock_get_uptime(&end);
	absolutetime_to_nanoseconds(end - start, &nsec);
	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
	    nsec / 1000000ULL,
	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
}

boolean_t hibernate_paddr_map_inited = FALSE;
unsigned int hibernate_teardown_last_valid_compact_indx = -1;
vm_page_t hibernate_rebuild_hash_list = NULL;

unsigned int hibernate_teardown_found_tabled_pages = 0;
unsigned int hibernate_teardown_found_created_pages = 0;
unsigned int hibernate_teardown_found_free_pages = 0;
unsigned int hibernate_teardown_vm_page_free_count;


struct ppnum_mapping {
	struct ppnum_mapping *ppnm_next;
	ppnum_t ppnm_base_paddr;
	unsigned int ppnm_sindx;
	unsigned int ppnm_eindx;
};

struct ppnum_mapping *ppnm_head;
struct ppnum_mapping *ppnm_last_found = NULL;


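/*
 * Build the ppnum_mapping list on first use: one entry per physically
 * contiguous run of vm_pages[], recording the base physical page and the
 * index range it covers, so a vm_pages[] index can later be translated back
 * to a physical page number.
 */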
void
hibernate_create_paddr_map(void)
{
	unsigned int i;
	ppnum_t next_ppnum_in_run = 0;
	struct ppnum_mapping *ppnm = NULL;

	if (hibernate_paddr_map_inited == FALSE) {
		for (i = 0; i < vm_pages_count; i++) {
			if (ppnm) {
				ppnm->ppnm_eindx = i;
			}

			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
				ppnm = zalloc_permanent_type(struct ppnum_mapping);

				ppnm->ppnm_next = ppnm_head;
				ppnm_head = ppnm;

				ppnm->ppnm_sindx = i;
				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
			}
			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
		}
		ppnm->ppnm_eindx = vm_pages_count;

		hibernate_paddr_map_inited = TRUE;
	}
}

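/*
 * Translate a vm_pages[] index to its physical page number using the
 * ppnum_mapping runs, with a one-entry cache of the last run that matched.
 * Panics if the index is not covered by any run.
 */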
ppnum_t
hibernate_lookup_paddr(unsigned int indx)
{
	struct ppnum_mapping *ppnm = NULL;

	ppnm = ppnm_last_found;

	if (ppnm) {
		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
			goto done;
		}
	}
	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
			ppnm_last_found = ppnm;
			break;
		}
	}
	if (ppnm == NULL) {
		panic("hibernate_lookup_paddr of %d failed", indx);
	}
done:
	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
}


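/*
 * Mark every fully contained page of the kernel virtual range [saddr, eaddr)
 * as not needing to be saved, in both the pageable and wired page lists.
 * Returns the number of pages so marked.
 */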
uint32_t
hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
	addr64_t saddr_aligned;
	addr64_t eaddr_aligned;
	addr64_t addr;
	ppnum_t paddr;
	unsigned int mark_as_unneeded_pages = 0;

	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
	eaddr_aligned = eaddr & ~PAGE_MASK_64;

	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
		paddr = pmap_find_phys(kernel_pmap, addr);

		assert(paddr);

		hibernate_page_bitset(page_list, TRUE, paddr);
		hibernate_page_bitset(page_list_wired, TRUE, paddr);

		mark_as_unneeded_pages++;
	}
	return mark_as_unneeded_pages;
}


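/*
 * Re-insert a page at the head of its vm_page_buckets[] hash chain. Used
 * while rebuilding VM structures after wake, when the buckets have been
 * zeroed.
 */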
void
hibernate_hash_insert_page(vm_page_t mem)
{
	vm_page_bucket_t *bucket;
	int hash_id;
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(mem);

	assert(mem->vmp_hashed);
	assert(m_object);
	assert(mem->vmp_offset != (vm_object_offset_t) -1);

	/*
	 * Insert it into the object/offset hash table
	 */
	hash_id = vm_page_hash(m_object, mem->vmp_offset);
	bucket = &vm_page_buckets[hash_id];

	mem->vmp_next_m = bucket->page_list;
	bucket->page_list = VM_PAGE_PACK_PTR(mem);
}


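/*
 * Reinitialize vm_pages[sindx..eindx) as free pages: look up each entry's
 * physical page, reset the vm_page_t, and enter it on the appropriate
 * free-queue color, bumping vm_page_free_count.
 */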
void
hibernate_free_range(int sindx, int eindx)
{
	vm_page_t mem;
	unsigned int color;

	while (sindx < eindx) {
		mem = &vm_pages[sindx];

		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);

		mem->vmp_lopage = FALSE;
		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

		color = VM_PAGE_GET_COLOR(mem);
#if defined(__x86_64__)
		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
#else
		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
#endif
		vm_page_free_count++;

		sindx++;
	}
}

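/*
 * Undo hibernate_teardown_vm_structs() after wake: move each compacted
 * vm_page_t back to its original slot, re-hash pages, reconstruct the
 * intervening ranges as free pages, and restore the fictitious pages that
 * were stashed on hibernate_rebuild_hash_list.
 */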
void
hibernate_rebuild_vm_structs(void)
{
	int i, cindx, sindx, eindx;
	vm_page_t mem, tmem, mem_next;
	AbsoluteTime startTime, endTime;
	uint64_t nsec;

	if (hibernate_rebuild_needed == FALSE) {
		return;
	}

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
	HIBLOG("hibernate_rebuild started\n");

	clock_get_uptime(&startTime);

	pal_hib_rebuild_pmap_structs();

	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
	eindx = vm_pages_count;

	/*
	 * Mark all the vm_pages[] that have not been initialized yet as being
	 * transient. This is needed to ensure that the buddy page search is correct.
	 * Without this, random data in these vm_pages[] can trip up the buddy search.
	 */
	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
	}

	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
		mem = &vm_pages[cindx];
		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
		/*
		 * hibernate_teardown_vm_structs leaves the location where
		 * this vm_page_t must be located in "next".
		 */
		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);

		sindx = (int)(tmem - &vm_pages[0]);

		if (mem != tmem) {
			/*
			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
			 * so move it back to its real location
			 */
			*tmem = *mem;
			mem = tmem;
		}
		if (mem->vmp_hashed) {
			hibernate_hash_insert_page(mem);
		}
		/*
		 * the 'hole' between this vm_page_t and the previous
		 * vm_page_t we moved needs to be initialized as
		 * a range of free vm_page_t's
		 */
		hibernate_free_range(sindx + 1, eindx);

		eindx = sindx;
	}
	if (sindx) {
		hibernate_free_range(0, sindx);
	}

	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);

	/*
	 * process the list of vm_page_t's that were entered in the hash,
	 * but were not located in the vm_pages array... these are
	 * vm_page_t's that were created on the fly (i.e. fictitious)
	 */
	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));

		mem->vmp_next_m = 0;
		hibernate_hash_insert_page(mem);
	}
	hibernate_rebuild_hash_list = NULL;

	clock_get_uptime(&endTime);
	SUB_ABSOLUTETIME(&endTime, &startTime);
	absolutetime_to_nanoseconds(endTime, &nsec);

	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);

	hibernate_rebuild_needed = FALSE;

	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
}

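/*
 * Shrink the VM's own footprint in the hibernation image: unhash pages that
 * live outside vm_pages[], compact the in-use vm_page_t entries toward the
 * front of vm_pages[] (recording enough state for the rebuild), and mark the
 * page buckets, the now-unused tail of vm_pages[], and any pmap scratch
 * ranges as not needing to be saved. Returns the number of pages marked
 * unneeded.
 */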
uint32_t
hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
{
	unsigned int i;
	unsigned int compact_target_indx;
	vm_page_t mem, mem_next;
	vm_page_bucket_t *bucket;
	unsigned int mark_as_unneeded_pages = 0;
	unsigned int unneeded_vm_page_bucket_pages = 0;
	unsigned int unneeded_vm_pages_pages = 0;
	unsigned int unneeded_pmap_pages = 0;
	addr64_t start_of_unneeded = 0;
	addr64_t end_of_unneeded = 0;


	if (hibernate_should_abort()) {
		return 0;
	}

	hibernate_rebuild_needed = TRUE;

	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
	    vm_page_cleaned_count, compressor_object->resident_page_count);

	for (i = 0; i < vm_page_bucket_count; i++) {
		bucket = &vm_page_buckets[i];

		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
			assert(mem->vmp_hashed);

			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));

			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
				hibernate_rebuild_hash_list = mem;
			}
		}
	}
	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;

	hibernate_teardown_vm_page_free_count = vm_page_free_count;

	compact_target_indx = 0;

	for (i = 0; i < vm_pages_count; i++) {
		mem = &vm_pages[i];

		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
			unsigned int color;

			assert(mem->vmp_busy);
			assert(!mem->vmp_lopage);

			color = VM_PAGE_GET_COLOR(mem);

			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);

			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);

			vm_page_free_count--;

			hibernate_teardown_found_free_pages++;

			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
				compact_target_indx = i;
			}
		} else {
			/*
			 * record this vm_page_t's original location
			 * we need this even if it doesn't get moved
			 * as an indicator to the rebuild function that
			 * we don't have to move it
			 */
			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);

			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
				/*
				 * we've got a hole to fill, so
				 * move this vm_page_t to its new home
				 */
				vm_pages[compact_target_indx] = *mem;
				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;

				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
				compact_target_indx++;
			} else {
				hibernate_teardown_last_valid_compact_indx = i;
			}
		}
	}
	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
	mark_as_unneeded_pages += unneeded_vm_pages_pages;

	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);

	if (start_of_unneeded) {
		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
		mark_as_unneeded_pages += unneeded_pmap_pages;
	}
	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);

	return mark_as_unneeded_pages;
}


#endif /* HIBERNATION */

/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */

#include <mach_vm_debug.h>
#if MACH_VM_DEBUG

#include <mach_debug/hash_info.h>
#include <vm/vm_debug_internal.h>

/*
 * Routine:	vm_page_info
 * Purpose:
 *	Return information about the global VP table.
 *	Fills the buffer with as much information as possible
 *	and returns the desired size of the buffer.
 * Conditions:
 *	Nothing locked. The caller should provide
 *	possibly-pageable memory.
 */

unsigned int
vm_page_info(
	hash_info_bucket_t *info,
	unsigned int count)
{
	unsigned int i;
	lck_spin_t *bucket_lock;

	if (vm_page_bucket_count < count) {
		count = vm_page_bucket_count;
	}

	for (i = 0; i < count; i++) {
		vm_page_bucket_t *bucket = &vm_page_buckets[i];
		unsigned int bucket_count = 0;
		vm_page_t m;

		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);

		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
		    m != VM_PAGE_NULL;
		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
			bucket_count++;
		}

		lck_spin_unlock(bucket_lock);

		/* don't touch pageable memory while holding locks */
		info[i].hib_count = bucket_count;
	}

	return vm_page_bucket_count;
}
#endif /* MACH_VM_DEBUG */

#if VM_PAGE_BUCKETS_CHECK
void
vm_page_buckets_check(void)
{
	unsigned int i;
	vm_page_t p;
	unsigned int p_hash;
	vm_page_bucket_t *bucket;
	lck_spin_t *bucket_lock;

	if (!vm_page_buckets_check_ready) {
		return;
	}

#if HIBERNATION
	if (hibernate_rebuild_needed ||
	    hibernate_rebuild_hash_list) {
		panic("BUCKET_CHECK: hibernation in progress: "
		    "rebuild_needed=%d rebuild_hash_list=%p\n",
		    hibernate_rebuild_needed,
		    hibernate_rebuild_hash_list);
	}
#endif /* HIBERNATION */

#if VM_PAGE_FAKE_BUCKETS
	char *cp;
	for (cp = (char *) vm_page_fake_buckets_start;
	    cp < (char *) vm_page_fake_buckets_end;
	    cp++) {
		if (*cp != 0x5a) {
			panic("BUCKET_CHECK: corruption at %p in fake buckets "
			    "[0x%llx:0x%llx]\n",
			    cp,
			    (uint64_t) vm_page_fake_buckets_start,
			    (uint64_t) vm_page_fake_buckets_end);
		}
	}
#endif /* VM_PAGE_FAKE_BUCKETS */

	for (i = 0; i < vm_page_bucket_count; i++) {
		vm_object_t p_object;

		bucket = &vm_page_buckets[i];
		if (!bucket->page_list) {
			continue;
		}

		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));

		while (p != VM_PAGE_NULL) {
			p_object = VM_PAGE_OBJECT(p);

			if (!p->vmp_hashed) {
				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
				    "hash %d in bucket %d at %p "
				    "is not hashed\n",
				    p, p_object, p->vmp_offset,
				    p_hash, i, bucket);
			}
			p_hash = vm_page_hash(p_object, p->vmp_offset);
			if (p_hash != i) {
				panic("BUCKET_CHECK: corruption in bucket %d "
				    "at %p: page %p object %p offset 0x%llx "
				    "hash %d\n",
				    i, bucket, p, p_object, p->vmp_offset,
				    p_hash);
			}
			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
		}
		lck_spin_unlock(bucket_lock);
	}

	// printf("BUCKET_CHECK: checked buckets\n");
}
#endif /* VM_PAGE_BUCKETS_CHECK */

/*
 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
 * local queues if they exist... it's the only spot in the system where we add pages
 * to those queues... once on those queues, those pages can only move to one of the
 * global page queues or the free queues... they NEVER move from local q to local q.
 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
 * the global vm_page_queue_lock at this point... we still need to take the local lock
 * in case this operation is being run on a different CPU than the local queue's identity,
 * but we don't have to worry about the page moving to a global queue or becoming wired
 * while we're grabbing the local lock since those operations would require the global
 * vm_page_queue_lock to be held, and we already own it.
 *
 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
 * 'wired' and local are ALWAYS mutually exclusive conditions.
 */
8986
8987 void
vm_page_queues_remove(vm_page_t mem,boolean_t remove_from_specialq)8988 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8989 {
8990 boolean_t was_pageable = TRUE;
8991 vm_object_t m_object;
8992
8993 m_object = VM_PAGE_OBJECT(mem);
8994
8995 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8996
8997 if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8998 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8999 if (remove_from_specialq == TRUE) {
9000 vm_page_remove_from_specialq(mem);
9001 }
9002 /*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
9003 * assert(mem->vmp_specialq.next != 0);
9004 * assert(mem->vmp_specialq.prev != 0);
9005 * } else {*/
9006 if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
9007 assert(mem->vmp_specialq.next == 0);
9008 assert(mem->vmp_specialq.prev == 0);
9009 }
9010 return;
9011 }
9012
9013 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9014 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9015 assert(mem->vmp_specialq.next == 0 &&
9016 mem->vmp_specialq.prev == 0 &&
9017 mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9018 return;
9019 }
9020 if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9021 /*
9022 * might put these guys on a list for debugging purposes
9023 * if we do, we'll need to remove this assert
9024 */
9025 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9026 assert(mem->vmp_specialq.next == 0 &&
9027 mem->vmp_specialq.prev == 0);
9028 /*
9029 * Recall that vmp_on_specialq also means a request to put
9030 * it on the special Q. So we don't want to reset that bit
9031 * just because a wiring request came in. We might want to
9032 * put it on the special queue post-unwiring.
9033 *
9034 * &&
9035 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9036 */
9037 return;
9038 }
9039
9040 assert(m_object != compressor_object);
9041 assert(!is_kernel_object(m_object));
9042 assert(!mem->vmp_fictitious);
9043
	switch (mem->vmp_q_state) {
	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
	{
		struct vpl *lq;

		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
		VPL_LOCK(&lq->vpl_lock);
		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
		mem->vmp_local_id = 0;
		lq->vpl_count--;
		if (m_object->internal) {
			lq->vpl_internal_count--;
		} else {
			lq->vpl_external_count--;
		}
		VPL_UNLOCK(&lq->vpl_lock);
		was_pageable = FALSE;
		break;
	}
	case VM_PAGE_ON_ACTIVE_Q:
	{
		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
		vm_page_active_count--;
		break;
	}

	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
		vm_page_anonymous_count--;

		vm_purgeable_q_advance_all();
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
		vm_purgeable_q_advance_all();
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_inactive_count--;
		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
		vm_page_cleaned_count--;
		vm_page_balance_inactive(3);
		break;
	}

	case VM_PAGE_ON_THROTTLED_Q:
	{
		assert(m_object->internal == TRUE);

		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
		vm_page_throttled_count--;
		was_pageable = FALSE;
		break;
	}

	case VM_PAGE_ON_SPECULATIVE_Q:
	{
		assert(m_object->internal == FALSE);

		vm_page_remque(&mem->vmp_pageq);
		vm_page_speculative_count--;
		vm_page_balance_inactive(3);
		break;
	}

#if CONFIG_SECLUDED_MEMORY
	case VM_PAGE_ON_SECLUDED_Q:
	{
		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
		vm_page_secluded_count--;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
		if (m_object == VM_OBJECT_NULL) {
			vm_page_secluded_count_free--;
			was_pageable = FALSE;
		} else {
			assert(!m_object->internal);
			vm_page_secluded_count_inuse--;
			was_pageable = FALSE;
//			was_pageable = TRUE;
		}
		break;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
	default:
	{
		/*
		 * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
		 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
		 * the caller is responsible for determining if the page is on that queue, and if so, must
		 * either first remove it (it needs both the page queues lock and the object lock to do
		 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
		 *
		 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
		 * or any of the undefined states
		 */
		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
		break;
	}
	}
	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;

	if (remove_from_specialq == TRUE) {
		vm_page_remove_from_specialq(mem);
	}
	if (was_pageable) {
		if (m_object->internal) {
			vm_page_pageable_internal_count--;
		} else {
			vm_page_pageable_external_count--;
		}
	}
}

void
vm_page_remove_internal(vm_page_t page)
{
	vm_object_t __object = VM_PAGE_OBJECT(page);
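	/*
	 * If this page is the object's lookup hint, move the hint to a
	 * neighboring page (the next entry in the memq if there is one,
	 * else the previous one, else clear it) so vm_page_lookup()
	 * doesn't chase a stale pointer.
	 */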
	if (page == __object->memq_hint) {
		vm_page_t __new_hint;
		vm_page_queue_entry_t __qe;
		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
		if (vm_page_queue_end(&__object->memq, __qe)) {
			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
			if (vm_page_queue_end(&__object->memq, __qe)) {
				__qe = NULL;
			}
		}
		__new_hint = (vm_page_t)((uintptr_t) __qe);
		__object->memq_hint = __new_hint;
	}
	vm_page_queue_remove(&__object->memq, page, vmp_listq);
#if CONFIG_SECLUDED_MEMORY
	if (__object->eligible_for_secluded) {
		vm_page_secluded.eligible_for_secluded--;
	}
#endif /* CONFIG_SECLUDED_MEMORY */
}

void
vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
{
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->vmp_fictitious);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

	if (m_object->internal) {
		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
		} else {
			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
		}

		vm_page_anonymous_count++;
		vm_page_pageable_internal_count++;
	} else {
		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;

		if (first == TRUE) {
			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
		} else {
			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
		}

		vm_page_pageable_external_count++;
	}
	vm_page_inactive_count++;
	token_new_pagecount++;

	vm_page_add_to_specialq(mem, FALSE);
}

void
vm_page_enqueue_active(vm_page_t mem, boolean_t first)
{
	vm_object_t m_object;

	m_object = VM_PAGE_OBJECT(mem);

	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!mem->vmp_fictitious);
	assert(!mem->vmp_laundry);
	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_check_pageable_safe(mem);

	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
	if (first == TRUE) {
		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
	} else {
		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
	}
	vm_page_active_count++;

	if (m_object->internal) {
		vm_page_pageable_internal_count++;
	} else {
		vm_page_pageable_external_count++;
	}

	vm_page_add_to_specialq(mem, FALSE);
	vm_page_balance_inactive(3);
}

/*
 * Pages from special kernel objects shouldn't
 * be placed on pageable queues.
 */
void
vm_page_check_pageable_safe(vm_page_t page)
{
	vm_object_t page_object;

	page_object = VM_PAGE_OBJECT(page);

	if (is_kernel_object(page_object)) {
		panic("vm_page_check_pageable_safe: trying to add page "
		    "from a kernel object to pageable queue");
	}

	if (page_object == compressor_object) {
		panic("vm_page_check_pageable_safe: trying to add page "
		    "from compressor object (%p) to pageable queue", compressor_object);
	}
}
9293
9294 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9295 * wired page diagnose
9296 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9297
9298 #include <libkern/OSKextLibPrivate.h>
9299
9300 #define KA_SIZE(namelen, subtotalscount) \
9301 (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9302
9303 #define KA_NAME(alloc) \
9304 ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9305
9306 #define KA_NAME_LEN(alloc) \
9307 (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
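
/*
 * A vm_allocation_site is laid out as one contiguous allocation:
 *
 *   [struct vm_allocation_site][subtotals[0 .. subtotalscount-1]][name + NUL]
 *
 * KA_SIZE() computes the total size of that block, and KA_NAME() points
 * just past the variable-length subtotals array, where the NUL-terminated
 * name is stored.
 */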

vm_tag_t
vm_tag_bt(void)
{
	uintptr_t* frameptr;
	uintptr_t* frameptr_next;
	uintptr_t retaddr;
	uintptr_t kstackb, kstackt;
	const vm_allocation_site_t * site;
	thread_t cthread;
	kern_allocation_name_t name;

	cthread = current_thread();
	if (__improbable(cthread == NULL)) {
		return VM_KERN_MEMORY_OSFMK;
	}

	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
		if (!name->tag) {
			vm_tag_alloc(name);
		}
		return name->tag;
	}

	kstackb = cthread->kernel_stack;
	kstackt = kstackb + kernel_stack_size;
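	/*
	 * Walk the frame-pointer chain, staying within the bounds of this
	 * thread's kernel stack. The first return address that falls inside
	 * a kext (built-in kmod text, or outside the core kernel's text
	 * range) identifies the caller, and OSKext maps it to its
	 * allocation site.
	 */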
	/* Load stack frame pointer (EBP on x86) into frameptr */
	frameptr = __builtin_frame_address(0);
	site = NULL;
	while (frameptr != NULL) {
		/* Verify thread stack bounds */
		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
			break;
		}

		/* Next frame pointer is pointed to by the previous one */
		frameptr_next = (uintptr_t*) *frameptr;
#if defined(HAS_APPLE_PAC)
		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
#endif

		/* Pull return address from one spot above the frame pointer */
		retaddr = *(frameptr + 1);

#if defined(HAS_APPLE_PAC)
		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
#endif

		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
			site = OSKextGetAllocationSiteForCaller(retaddr);
			break;
		}
		frameptr = frameptr_next;
	}

	return site ? site->tag : VM_KERN_MEMORY_NONE;
}

static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
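
/*
 * free_tag_bits is a bitmap of unallocated tags kept in "clz order":
 * tag t is free when bit (63 - (t & 63)) of free_tag_bits[t >> 6] is set,
 * so __builtin_clzll() on a nonzero word directly yields the lowest free
 * tag in that word. For example, tag 70 lives in word 1 as bit 63 - 6 = 57.
 */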

void
vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
{
	vm_tag_t tag;
	uint64_t avail;
	uint32_t idx;
	vm_allocation_site_t * prev;

	if (site->tag) {
		return;
	}

	idx = 0;
	while (TRUE) {
		avail = free_tag_bits[idx];
		if (avail) {
			tag = (vm_tag_t)__builtin_clzll(avail);
			avail &= ~(1ULL << (63 - tag));
			free_tag_bits[idx] = avail;
			tag += (idx << 6);
			break;
		}
		idx++;
		if (idx >= ARRAY_COUNT(free_tag_bits)) {
			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
				prev = vm_allocation_sites[idx];
				if (!prev) {
					continue;
				}
				if (!KA_NAME_LEN(prev)) {
					continue;
				}
				if (!prev->tag) {
					continue;
				}
				if (prev->total) {
					continue;
				}
				if (1 != prev->refcount) {
					continue;
				}

				assert(idx == prev->tag);
				tag = (vm_tag_t)idx;
				prev->tag = VM_KERN_MEMORY_NONE;
				*releasesiteP = prev;
				break;
			}
			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
				tag = VM_KERN_MEMORY_ANY;
			}
			break;
		}
	}
	site->tag = tag;

	OSAddAtomic16(1, &site->refcount);

	if (VM_KERN_MEMORY_ANY != tag) {
		vm_allocation_sites[tag] = site;
	}

	if (tag > vm_allocation_tag_highest) {
		vm_allocation_tag_highest = tag;
	}
}

static void
vm_tag_free_locked(vm_tag_t tag)
{
	uint64_t avail;
	uint32_t idx;
	uint64_t bit;

	if (VM_KERN_MEMORY_ANY == tag) {
		return;
	}

	idx = (tag >> 6);
	avail = free_tag_bits[idx];
	tag &= 63;
	bit = (1ULL << (63 - tag));
	assert(!(avail & bit));
	free_tag_bits[idx] = (avail | bit);
}

static void
vm_tag_init(void)
{
	vm_tag_t tag;
	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
		vm_tag_free_locked(tag);
	}

	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
		vm_tag_free_locked(tag);
	}
}

vm_tag_t
vm_tag_alloc(vm_allocation_site_t * site)
{
	vm_allocation_site_t * releasesite;

	if (!site->tag) {
		releasesite = NULL;
		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
		vm_tag_alloc_locked(site, &releasesite);
		lck_ticket_unlock(&vm_allocation_sites_lock);
		if (releasesite) {
			kern_allocation_name_release(releasesite);
		}
	}

	return site->tag;
}

#if VM_BTLOG_TAGS
#define VM_KERN_MEMORY_STR_MAX_LEN (32)
TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
#define VM_TAG_BTLOG_SIZE (16u << 10)
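
/*
 * Usage sketch (on a kernel built with VM_BTLOG_TAGS): the boot-arg
 * vmtaglog=<tag name>, e.g. vmtaglog=IOKIT, selects one of the tag names
 * from vm_kern_memory_strs below; wired-page deltas for objects charged
 * to that tag are then recorded with backtraces in vmtaglog_btlog.
 */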

btlog_t vmtaglog_btlog;
vm_tag_t vmtaglog_tag;

static void
vm_tag_log(vm_object_t object, int64_t delta, void *fp)
{
	if (is_kernel_object(object)) {
		/* kernel object backtraces are tracked in vm entries */
		return;
	}
	if (delta > 0) {
		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
		btlog_record(vmtaglog_btlog, object, 0, ref);
	} else if (object->wired_page_count == 0) {
		btlog_erase(vmtaglog_btlog, object);
	}
}

#ifndef ARRAY_SIZE
#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
#endif /* ARRAY_SIZE */
#define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
const char *vm_kern_memory_strs[] = {
	VM_KERN_MEMORY_ELEM(OSFMK),
	VM_KERN_MEMORY_ELEM(BSD),
	VM_KERN_MEMORY_ELEM(IOKIT),
	VM_KERN_MEMORY_ELEM(LIBKERN),
	VM_KERN_MEMORY_ELEM(OSKEXT),
	VM_KERN_MEMORY_ELEM(KEXT),
	VM_KERN_MEMORY_ELEM(IPC),
	VM_KERN_MEMORY_ELEM(STACK),
	VM_KERN_MEMORY_ELEM(CPU),
	VM_KERN_MEMORY_ELEM(PMAP),
	VM_KERN_MEMORY_ELEM(PTE),
	VM_KERN_MEMORY_ELEM(ZONE),
	VM_KERN_MEMORY_ELEM(KALLOC),
	VM_KERN_MEMORY_ELEM(COMPRESSOR),
	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
	VM_KERN_MEMORY_ELEM(WAITQ),
	VM_KERN_MEMORY_ELEM(DIAG),
	VM_KERN_MEMORY_ELEM(LOG),
	VM_KERN_MEMORY_ELEM(FILE),
	VM_KERN_MEMORY_ELEM(MBUF),
	VM_KERN_MEMORY_ELEM(UBC),
	VM_KERN_MEMORY_ELEM(SECURITY),
	VM_KERN_MEMORY_ELEM(MLOCK),
	VM_KERN_MEMORY_ELEM(REASON),
	VM_KERN_MEMORY_ELEM(SKYWALK),
	VM_KERN_MEMORY_ELEM(LTABLE),
	VM_KERN_MEMORY_ELEM(HV),
	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
	VM_KERN_MEMORY_ELEM(RETIRED),
	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
	VM_KERN_MEMORY_ELEM(TRIAGE),
	VM_KERN_MEMORY_ELEM(RECOUNT),
};

static vm_tag_t
vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
{
	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
			return i;
		}
	}

	printf("Unable to find vm tag %s for btlog\n", tagstr);
	return VM_KERN_MEMORY_NONE;
}

__startup_func
static void
vm_btlog_init(void)
{
	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);

	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
	}
}
STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
#endif /* VM_BTLOG_TAGS */

void
vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
{
	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);

	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
}

uint64_t
vm_tag_get_size(vm_tag_t tag)
{
	vm_allocation_site_t *allocation;

	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);

	allocation = vm_allocation_sites[tag];
	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
}

void
kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
{
	uint64_t value;

	value = os_atomic_add(&allocation->total, delta, relaxed);
	if (delta < 0) {
		assertf(value + (uint64_t)-delta > value,
		    "tag %d, site %p", allocation->tag, allocation);
	}

#if DEBUG || DEVELOPMENT
	if (value > allocation->peak) {
		os_atomic_max(&allocation->peak, value, relaxed);
	}
#endif /* DEBUG || DEVELOPMENT */

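	/*
	 * If this delta took the total from zero to nonzero, the site is
	 * being used for the first time; lazily assign it a tag.
	 */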
	if (value == (uint64_t)delta && !allocation->tag) {
		vm_tag_alloc(allocation);
	}

#if VM_BTLOG_TAGS
	if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
		vm_tag_log(object, delta, __builtin_frame_address(0));
	}
#endif /* VM_BTLOG_TAGS */
}

#if VM_TAG_SIZECLASSES

void
vm_allocation_zones_init(void)
{
	vm_offset_t addr;
	vm_size_t size;

	const vm_tag_t early_tags[] = {
		VM_KERN_MEMORY_DIAG,
		VM_KERN_MEMORY_KALLOC,
		VM_KERN_MEMORY_KALLOC_DATA,
		VM_KERN_MEMORY_KALLOC_TYPE,
		VM_KERN_MEMORY_LIBKERN,
		VM_KERN_MEMORY_OSFMK,
		VM_KERN_MEMORY_RECOUNT,
	};
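
	/*
	 * A single allocation serves two purposes: a table of
	 * VM_MAX_TAG_VALUE per-tag pointers, followed by pre-sized
	 * per-sizeclass stats arrays for the early tags listed above:
	 *
	 *   [pointer table][early_tags[0] stats] ... [early_tags[n-1] stats]
	 */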

	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);

	kmem_alloc(kernel_map, &addr, round_page(size),
	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
	    VM_KERN_MEMORY_DIAG);

	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);

	// prepopulate early tag ranges so allocations
	// in vm_tag_update_zone_size() and early boot won't recurse
	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
	}
}

__attribute__((noinline))
static vm_tag_t
vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
{
	vm_allocation_zone_total_t *stats;
	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;

	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
	stats = kalloc_data(size, flags);
	if (!stats) {
		return VM_KERN_MEMORY_NONE;
	}
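	/*
	 * Publish the stats array with a release-ordered compare-and-swap;
	 * if another thread won the race to install one first, free our
	 * copy and keep theirs.
	 */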
	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
		kfree_data(stats, size);
	}
	return tag;
}

vm_tag_t
vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
{
	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	if (__probable(vm_allocation_zone_totals[tag])) {
		return tag;
	}
	return vm_tag_zone_stats_alloc(tag, zflags);
}

void
vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
{
	vm_allocation_zone_total_t *stats;
	vm_size_t value;

	assert(VM_KERN_MEMORY_NONE != tag);
	assert(tag < VM_MAX_TAG_VALUE);

	if (zidx >= VM_TAG_SIZECLASSES) {
		return;
	}

	stats = vm_allocation_zone_totals[tag];
	assert(stats);
	stats += zidx;

	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
	if (delta < 0) {
		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
		return;
	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
		os_atomic_max(&stats->vazt_peak, value, relaxed);
	}
}

#endif /* VM_TAG_SIZECLASSES */

void
kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
{
	kern_allocation_name_t other;
	struct vm_allocation_total * total;
	uint32_t subidx;

	assert(VM_KERN_MEMORY_NONE != subtag);
	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
	for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
		total = &allocation->subtotals[subidx];
		if (subtag == total->tag) {
			break;
		}
	}
	if (subidx >= allocation->subtotalscount) {
		for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
			total = &allocation->subtotals[subidx];
			if ((VM_KERN_MEMORY_NONE == total->tag)
			    || !total->total) {
				total->tag = (vm_tag_t)subtag;
				break;
			}
		}
	}
	assert(subidx < allocation->subtotalscount);
	if (subidx >= allocation->subtotalscount) {
		lck_ticket_unlock(&vm_allocation_sites_lock);
		return;
	}
	if (delta < 0) {
		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
	}
	OSAddAtomic64(delta, &total->total);
	lck_ticket_unlock(&vm_allocation_sites_lock);

	other = vm_allocation_sites[subtag];
	assert(other);
	if (delta < 0) {
		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
	}
	OSAddAtomic64(delta, &other->mapped);
}

const char *
kern_allocation_get_name(kern_allocation_name_t allocation)
{
	return KA_NAME(allocation);
}

kern_allocation_name_t
kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
{
	kern_allocation_name_t allocation;
	uint16_t namelen;

	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);

	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
	allocation->refcount = 1;
	allocation->subtotalscount = subtotalscount;
	allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
	strlcpy(KA_NAME(allocation), name, namelen + 1);

	vm_tag_alloc(allocation);
	return allocation;
}

void
kern_allocation_name_release(kern_allocation_name_t allocation)
{
	assert(allocation->refcount > 0);
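	/*
	 * OSAddAtomic16 returns the value before the decrement, so seeing 1
	 * here means we just dropped the final reference.
	 */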
	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
		kfree_data(allocation,
		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
	}
}

#if !VM_TAG_ACTIVE_UPDATE
static void
vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
{
	if (!object->wired_page_count) {
		return;
	}
	if (!is_kernel_object(object)) {
		assert(object->wire_tag < num_info);
		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
	}
}

typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
    unsigned int num_info, vm_object_t object);

static void
vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc, purgeable_q_t queue,
    int group)
{
	vm_object_t object;

	for (object = (vm_object_t) queue_first(&queue->objq[group]);
	    !queue_end(&queue->objq[group], (queue_entry_t) object);
	    object = (vm_object_t) queue_next(&object->objq)) {
		proc(info, num_info, object);
	}
}

static void
vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
    vm_page_iterate_proc proc)
{
	vm_object_t object;

	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
	queue_iterate(&vm_objects_wired,
	    object,
	    vm_object_t,
	    wired_objq)
	{
		proc(info, num_info, object);
	}
	lck_spin_unlock(&vm_objects_wired_lock);
}
#endif /* ! VM_TAG_ACTIVE_UPDATE */

static uint64_t
process_account(mach_memory_info_t * info, unsigned int num_info,
    uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
{
	size_t namelen;
	unsigned int idx, count, nextinfo;
	vm_allocation_site_t * site;
	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);

	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
		info[idx].mapped = site->mapped;
		info[idx].tag = site->tag;
		if (!iterated) {
			info[idx].size = site->total;
#if DEBUG || DEVELOPMENT
			info[idx].peak = site->peak;
#endif /* DEBUG || DEVELOPMENT */
		} else {
			if (!site->subtotalscount && (site->total != info[idx].size)) {
				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
				info[idx].size = site->total;
			}
		}
		info[idx].flags |= VM_KERN_SITE_WIRED;
		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
			info[idx].site = idx;
			info[idx].flags |= VM_KERN_SITE_TAG;
			if (VM_KERN_MEMORY_ZONE == idx) {
				info[idx].flags |= VM_KERN_SITE_HIDE;
				info[idx].flags &= ~VM_KERN_SITE_WIRED;
				info[idx].collectable_bytes = zones_collectable_bytes;
			}
		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
			info[idx].site = 0;
			info[idx].flags |= VM_KERN_SITE_NAMED;
			if (namelen > sizeof(info[idx].name)) {
				namelen = sizeof(info[idx].name);
			}
			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
		} else if (VM_TAG_KMOD & site->flags) {
			info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
			info[idx].flags |= VM_KERN_SITE_KMOD;
		} else {
			info[idx].site = VM_KERNEL_UNSLIDE(site);
			info[idx].flags |= VM_KERN_SITE_KERNEL;
		}
	}

	nextinfo = (vm_allocation_tag_highest + 1);
	count = nextinfo;
	if (count >= num_info) {
		count = num_info;
	}

	for (idx = 0; idx < count; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
#if VM_TAG_SIZECLASSES
		vm_allocation_zone_total_t * zone;
		unsigned int zidx;

		if (!redact_info
		    && vm_allocation_zone_totals
		    && (zone = vm_allocation_zone_totals[idx])
		    && (nextinfo < num_info)) {
			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
				if (!zone[zidx].vazt_peak) {
					continue;
				}
				info[nextinfo] = info[idx];
				info[nextinfo].zone = zone_index_from_tag_index(zidx);
				info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
				info[nextinfo].flags |= VM_KERN_SITE_ZONE;
				info[nextinfo].flags |= VM_KERN_SITE_KALLOC;
				info[nextinfo].size = zone[zidx].vazt_total;
				info[nextinfo].peak = zone[zidx].vazt_peak;
				info[nextinfo].mapped = 0;
				nextinfo++;
			}
		}
#endif /* VM_TAG_SIZECLASSES */
		if (site->subtotalscount) {
			uint64_t mapped, mapcost, take;
			uint32_t sub;
			vm_tag_t alloctag;

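			/*
			 * For sites with subtotals, move each subtag's
			 * contribution out of that subtag's entry and charge
			 * it to this named site instead, so the same pages
			 * aren't reported twice; the sum of what was taken
			 * becomes this site's size.
			 */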
			info[idx].size = site->total;
			mapped = info[idx].size;
			info[idx].mapped = mapped;
			mapcost = 0;
			for (sub = 0; sub < site->subtotalscount; sub++) {
				alloctag = site->subtotals[sub].tag;
				assert(alloctag < num_info);
				if (info[alloctag].name[0]) {
					continue;
				}
				take = site->subtotals[sub].total;
				if (take > info[alloctag].size) {
					take = info[alloctag].size;
				}
				if (take > mapped) {
					take = mapped;
				}
				info[alloctag].mapped -= take;
				info[alloctag].size -= take;
				mapped -= take;
				mapcost += take;
			}
			info[idx].size = mapcost;
		}
	}
	lck_ticket_unlock(&vm_allocation_sites_lock);

	return 0;
}

uint32_t
vm_page_diagnose_estimate(void)
{
	vm_allocation_site_t * site;
	uint32_t count = zone_view_count;
	uint32_t idx;

	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
		site = vm_allocation_sites[idx];
		if (!site) {
			continue;
		}
		count++;
#if VM_TAG_SIZECLASSES
		if (vm_allocation_zone_totals) {
			vm_allocation_zone_total_t * zone;
			zone = vm_allocation_zone_totals[idx];
			if (!zone) {
				continue;
			}
			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
				count += (zone[zidx].vazt_peak != 0);
			}
		}
#endif
	}
	lck_ticket_unlock(&vm_allocation_sites_lock);

	/* some slop for new tags created */
	count += 8;
	count += VM_KERN_COUNTER_COUNT;

	return count;
}

static void
vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
    bool percpu)
{
	zpercpu_foreach(zs, zstats) {
		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
	}
	if (percpu) {
		info->size *= zpercpu_count();
	}
	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
}

static void
vm_page_add_info(
	mach_memory_info_t *info,
	zone_stats_t stats,
	bool per_cpu,
	const char *parent_heap_name,
	const char *parent_zone_name,
	const char *view_name)
{
	vm_page_diagnose_zone_stats(info, stats, per_cpu);
	snprintf(info->name, sizeof(info->name),
	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
}

static void
vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
{
	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
	    z->z_name, "raw");
}

static void
vm_page_add_view(
	mach_memory_info_t *info,
	zone_stats_t stats,
	const char *parent_heap_name,
	const char *parent_zone_name,
	const char *view_name)
{
	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
	    view_name);
}

static uint32_t
vm_page_diagnose_heap_views(
	mach_memory_info_t *info,
	kalloc_heap_t kh,
	const char *parent_heap_name,
	const char *parent_zone_name)
{
	uint32_t i = 0;

	while (kh) {
		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
		    parent_zone_name, kh->kh_name);
		kh = kh->kh_views;
		i++;
	}
	return i;
}

static uint32_t
vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
{
	uint32_t i = 0;

	for (; i < KHEAP_NUM_ZONES; i++) {
		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
	}

	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
	    NULL);
	return i;
}

static int
vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
{
	uint32_t idx = 0;
	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
	    "", "raw");
	idx++;

	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
		struct kheap_info heap = kalloc_type_heap_array[i];
		char heap_num_tmp[MAX_ZONE_NAME] = "";
		const char *heap_num;

		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
		heap_num = &heap_num_tmp[0];

		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
				    heap_num, ktv->kt_name);
				idx++;
			}
		}

		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
		    KHEAP_KT_VAR->kh_name, heap_num);
	}

	return idx;
}

kern_return_t
vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
{
	uint64_t wired_size;
	uint64_t wired_managed_size;
	uint64_t wired_reserved_size;
	boolean_t iterate;
	mach_memory_info_t * counts;
	uint32_t i;

	bzero(info, num_info * sizeof(mach_memory_info_t));

	if (!vm_page_wire_count_initial) {
		return KERN_ABORTED;
	}

#if !XNU_TARGET_OS_OSX
	wired_size = ptoa_64(vm_page_wire_count);
	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
#else /* !XNU_TARGET_OS_OSX */
	wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
#endif /* !XNU_TARGET_OS_OSX */
	wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);

	wired_size += booter_size;

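	/*
	 * The tail of the caller-supplied info array is reserved for the
	 * fixed global counters; carve it off before filling in the
	 * per-tag entries.
	 */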
	assert(num_info >= VM_KERN_COUNTER_COUNT);
	num_info -= VM_KERN_COUNTER_COUNT;
	counts = &info[num_info];

#define SET_COUNT(xcount, xsize, xflags) \
	counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
	counts[xcount].site = (xcount); \
	counts[xcount].size = (xsize); \
	counts[xcount].mapped = (xsize); \
	counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;

	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);

#define SET_MAP(xcount, xsize, xfree, xlargest) \
	counts[xcount].site = (xcount); \
	counts[xcount].size = (xsize); \
	counts[xcount].mapped = (xsize); \
	counts[xcount].free = (xfree); \
	counts[xcount].largest = (xlargest); \
	counts[xcount].flags = VM_KERN_SITE_COUNTER;

	vm_map_size_t map_size, map_free, map_largest;

	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);

	zone_map_sizes(&map_size, &map_free, &map_largest);
	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);

	assert(num_info >= zone_view_count);
	num_info -= zone_view_count;
	counts = &info[num_info];
	i = 0;

	if (!redact_info) {
		if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
		}
		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
			i += vm_page_diagnose_kt_heaps(counts + i);
		}
		assert(i <= zone_view_count);

		zone_index_foreach(zidx) {
			zone_t z = &zone_array[zidx];
			zone_security_flags_t zsflags = zone_security_array[zidx];
			zone_view_t zv = z->z_views;

			if (zv == NULL) {
				continue;
			}

			zone_stats_t zv_stats_head = z->z_stats;
			bool has_raw_view = false;

			for (; zv; zv = zv->zv_next) {
				/*
				 * kalloc_types that allocate from the same zone are linked
				 * as views. Only print the ones that have their own stats.
				 */
				if (zv->zv_stats == zv_stats_head) {
					continue;
				}
				has_raw_view = true;
				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
				    z->z_percpu);
				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
				    zone_heap_name(z), z->z_name, zv->zv_name);
				i++;
				assert(i <= zone_view_count);
			}

			/*
			 * Print raw views for non kalloc or kalloc_type zones
			 */
			bool kalloc_type = zsflags.z_kalloc_type;
			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
			    (kalloc_type && has_raw_view)) {
				vm_page_diagnose_zone(counts + i, z);
				i++;
				assert(i <= zone_view_count);
			}
		}
	}

	iterate = !VM_TAG_ACTIVE_UPDATE;
	if (iterate) {
		enum { kMaxKernelDepth = 1 };
		vm_map_t maps[kMaxKernelDepth];
		vm_map_entry_t entries[kMaxKernelDepth];
		vm_map_t map;
		vm_map_entry_t entry;
		vm_object_offset_t offset;
		vm_page_t page;
		int stackIdx, count;

#if !VM_TAG_ACTIVE_UPDATE
		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
#endif /* ! VM_TAG_ACTIVE_UPDATE */

		map = kernel_map;
		stackIdx = 0;
		while (map) {
			vm_map_lock(map);
			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
				if (entry->is_sub_map) {
					assert(stackIdx < kMaxKernelDepth);
					maps[stackIdx] = map;
					entries[stackIdx] = entry;
					stackIdx++;
					map = VME_SUBMAP(entry);
					entry = NULL;
					break;
				}
				if (is_kernel_object(VME_OBJECT(entry))) {
					count = 0;
					vm_object_lock(VME_OBJECT(entry));
					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
						page = vm_page_lookup(VME_OBJECT(entry), offset);
						if (page && VM_PAGE_WIRED(page)) {
							count++;
						}
					}
					vm_object_unlock(VME_OBJECT(entry));

					if (count) {
						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
						assert(VME_ALIAS(entry) < num_info);
						info[VME_ALIAS(entry)].size += ptoa_64(count);
					}
				}
				while (map && (entry == vm_map_last_entry(map))) {
					vm_map_unlock(map);
					if (!stackIdx) {
						map = NULL;
					} else {
						--stackIdx;
						map = maps[stackIdx];
						entry = entries[stackIdx];
					}
				}
			}
		}
	}

	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);

	return KERN_SUCCESS;
}

#if DEBUG || DEVELOPMENT

kern_return_t
vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
{
	kern_return_t ret;
	vm_size_t zsize;
	vm_map_t map;
	vm_map_entry_t entry;

	zsize = zone_element_info((void *) addr, tag);
	if (zsize) {
		*zone_size = *size = zsize;
		return KERN_SUCCESS;
	}

	*zone_size = 0;
	ret = KERN_INVALID_ADDRESS;
	for (map = kernel_map; map;) {
		vm_map_lock(map);
		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
			break;
		}
		if (entry->is_sub_map) {
			if (map != kernel_map) {
				break;
			}
			map = VME_SUBMAP(entry);
			continue;
		}
		if (entry->vme_start != addr) {
			break;
		}
		*tag = (vm_tag_t)VME_ALIAS(entry);
		*size = (entry->vme_end - addr);
		ret = KERN_SUCCESS;
		break;
	}
	if (map != kernel_map) {
		vm_map_unlock(map);
	}
	vm_map_unlock(kernel_map);

	return ret;
}

#endif /* DEBUG || DEVELOPMENT */

uint32_t
vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
{
	vm_allocation_site_t * site;
	uint32_t kmodId;

	kmodId = 0;
	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
	if ((site = vm_allocation_sites[tag])) {
		if (VM_TAG_KMOD & site->flags) {
			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
		}
	}
	lck_ticket_unlock(&vm_allocation_sites_lock);

	return kmodId;
}


#if CONFIG_SECLUDED_MEMORY
/*
 * Note that there's no locking around other accesses to vm_page_secluded_target.
 * That should be OK, since these are the only places where it can be changed after
 * initialization. Other users (like vm_pageout) may see the wrong value briefly,
 * but will eventually get the correct value. This brief mismatch is OK as pageout
 * and page freeing will auto-adjust the vm_page_secluded_count to match the target
 * over time.
 */
unsigned int vm_page_secluded_suppress_cnt = 0;
unsigned int vm_page_secluded_save_target;

LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);

void
start_secluded_suppression(task_t task)
{
	if (task->task_suppressed_secluded) {
		return;
	}
	lck_spin_lock(&secluded_suppress_slock);
	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
		task->task_suppressed_secluded = TRUE;
		vm_page_secluded_save_target = vm_page_secluded_target;
		vm_page_secluded_target = 0;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	}
	lck_spin_unlock(&secluded_suppress_slock);
}

void
stop_secluded_suppression(task_t task)
{
	lck_spin_lock(&secluded_suppress_slock);
	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
		task->task_suppressed_secluded = FALSE;
		vm_page_secluded_target = vm_page_secluded_save_target;
		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
	}
	lck_spin_unlock(&secluded_suppress_slock);
}

#endif /* CONFIG_SECLUDED_MEMORY */

/*
 * Move the list of retired pages on the vm_page_queue_retired to
 * their final resting place on retired_pages_object.
 */
void
vm_retire_boot_pages(void)
{
}

/*
 * This holds the reported physical address if an ECC error leads to a panic.
 * SMC will store it in PMU SRAM under the 'sECC' key.
 */
uint64_t ecc_panic_physical_address = 0;


boolean_t
vm_page_created(vm_page_t page)
{
	return (page < &vm_pages[0]) || (page >= &vm_pages[vm_pages_count]);
}
