xref: /xnu-11215.1.10/osfmk/vm/vm_resident.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68 
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/ecc.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_init_xnu.h>
85 #include <vm/vm_map_internal.h>
86 #include <vm/vm_page_internal.h>
87 #include <vm/vm_pageout_internal.h>
88 #include <vm/vm_kern_xnu.h>                 /* kmem_alloc() */
89 #include <vm/vm_compressor_pager_internal.h>
90 #include <kern/misc_protos.h>
91 #include <mach_debug/zone_info.h>
92 #include <vm/cpm_internal.h>
93 #include <pexpert/pexpert.h>
94 #include <pexpert/device_tree.h>
95 #include <san/kasan.h>
96 #include <os/log.h>
97 
98 #include <vm/vm_protos_internal.h>
99 #include <vm/memory_object.h>
100 #include <vm/vm_purgeable_internal.h>
101 #include <vm/vm_compressor_internal.h>
102 #include <vm/vm_iokit.h>
103 #include <vm/vm_object_internal.h>
104 #if defined (__x86_64__)
105 #include <i386/misc_protos.h>
106 #endif
107 
108 #if CONFIG_PHANTOM_CACHE
109 #include <vm/vm_phantom_cache_internal.h>
110 #endif
111 
112 #if HIBERNATION
113 #include <IOKit/IOHibernatePrivate.h>
114 #include <machine/pal_hibernate.h>
115 #endif /* HIBERNATION */
116 
117 #include <sys/kdebug.h>
118 
119 #if defined(HAS_APPLE_PAC)
120 #include <ptrauth.h>
121 #endif
122 #if defined(__arm64__)
123 #include <arm/cpu_internal.h>
124 #endif /* defined(__arm64__) */
125 
126 #if MACH_ASSERT
127 
128 TUNABLE(bool, vm_check_refs_on_free, "vm_check_refs_on_free", true);
129 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
130 
131 #else /* MACH_ASSERT */
132 
133 #define ASSERT_PMAP_FREE(mem) /* nothing */
134 
135 #endif /* MACH_ASSERT */
136 
137 extern boolean_t vm_pageout_running;
138 extern thread_t  vm_pageout_scan_thread;
139 extern bool vps_dynamic_priority_enabled;
140 
141 char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
142 char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
143 char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
144 char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
145 
146 #if CONFIG_SECLUDED_MEMORY
147 struct vm_page_secluded_data vm_page_secluded;
148 #endif /* CONFIG_SECLUDED_MEMORY */
149 
150 #if DEVELOPMENT || DEBUG
151 extern struct memory_object_pager_ops shared_region_pager_ops;
152 unsigned int shared_region_pagers_resident_count = 0;
153 unsigned int shared_region_pagers_resident_peak = 0;
154 #endif /* DEVELOPMENT || DEBUG */
155 
156 
157 
158 int             PERCPU_DATA(start_color);
159 vm_page_t       PERCPU_DATA(free_pages);
160 boolean_t       hibernate_cleaning_in_progress = FALSE;
161 
162 uint32_t        vm_lopage_free_count = 0;
163 uint32_t        vm_lopage_free_limit = 0;
164 uint32_t        vm_lopage_lowater    = 0;
165 boolean_t       vm_lopage_refill = FALSE;
166 boolean_t       vm_lopage_needed = FALSE;
167 
168 int             speculative_age_index = 0;
169 int             speculative_steal_index = 0;
170 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];
171 
172 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
173                                                           * Updated and checked behind the vm_page_queues_lock. */
174 
175 static void             vm_page_free_prepare(vm_page_t  page);
176 static vm_page_t        vm_page_grab_fictitious_common(ppnum_t, boolean_t);
177 
178 static void vm_tag_init(void);
179 
180 /* for debugging purposes */
181 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
182     VM_PAGE_PACKED_FROM_ARRAY;
183 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
184     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
185 
186 /*
187  *	Associated with each page of user-allocatable memory is a
188  *	page structure.
189  */
190 
191 /*
192  *	These variables record the values returned by vm_page_bootstrap,
193  *	for debugging purposes.  The implementation of pmap_steal_memory
194  *	and pmap_startup here also uses them internally.
195  */
196 
197 vm_offset_t virtual_space_start;
198 vm_offset_t virtual_space_end;
199 uint32_t        vm_page_pages;
200 
201 /*
202  *	The vm_page_lookup() routine, which provides for fast
203  *	(virtual memory object, offset) to page lookup, employs
204  *	the following hash table.  The vm_page_{insert,remove}
205  *	routines install and remove associations in the table.
206  *	[This table is often called the virtual-to-physical,
207  *	or VP, table.]
208  */
209 typedef struct {
210 	vm_page_packed_t page_list;
211 #if     MACH_PAGE_HASH_STATS
212 	int             cur_count;              /* current count */
213 	int             hi_count;               /* high water mark */
214 #endif /* MACH_PAGE_HASH_STATS */
215 } vm_page_bucket_t;
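/*
 * Rough sketch of a lookup against these buckets (the hash function and
 * the lookup code live later in this file): hash the (object, offset)
 * pair, mask it with vm_page_hash_mask to pick a bucket, take the
 * bucket's spin lock (each lock in vm_page_bucket_locks covers
 * BUCKETS_PER_LOCK consecutive buckets), then walk page_list through the
 * packed page pointers.
 */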
216 
217 
218 #define BUCKETS_PER_LOCK        16
219 
220 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
221 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
222 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
223 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
224 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
225 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
226 
227 #ifndef VM_TAG_ACTIVE_UPDATE
228 #error VM_TAG_ACTIVE_UPDATE
229 #endif
230 #ifndef VM_TAG_SIZECLASSES
231 #error VM_TAG_SIZECLASSES
232 #endif
233 
234 /* for debugging */
235 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
236 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
237 
238 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
239 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
240 #if VM_TAG_SIZECLASSES
241 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
242 #endif /* VM_TAG_SIZECLASSES */
243 
244 vm_tag_t vm_allocation_tag_highest;
245 
246 #if VM_PAGE_BUCKETS_CHECK
247 boolean_t vm_page_buckets_check_ready = FALSE;
248 #if VM_PAGE_FAKE_BUCKETS
249 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
250 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
251 #endif /* VM_PAGE_FAKE_BUCKETS */
252 #endif /* VM_PAGE_BUCKETS_CHECK */
253 
254 #if     MACH_PAGE_HASH_STATS
255 /* This routine is only for debug.  It is intended to be called by
256  * hand by a developer using a kernel debugger.  This routine prints
257  * out vm_page_hash table statistics to the kernel debug console.
258  */
259 void
260 hash_debug(void)
261 {
262 	int     i;
263 	int     numbuckets = 0;
264 	int     highsum = 0;
265 	int     maxdepth = 0;
266 
267 	for (i = 0; i < vm_page_bucket_count; i++) {
268 		if (vm_page_buckets[i].hi_count) {
269 			numbuckets++;
270 			highsum += vm_page_buckets[i].hi_count;
271 			if (vm_page_buckets[i].hi_count > maxdepth) {
272 				maxdepth = vm_page_buckets[i].hi_count;
273 			}
274 		}
275 	}
276 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
277 	printf("Number used buckets:     %d = %d%%\n",
278 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
279 	printf("Number unused buckets:   %d = %d%%\n",
280 	    vm_page_bucket_count - numbuckets,
281 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
282 	printf("Sum of bucket max depth: %d\n", highsum);
283 	printf("Average bucket depth:    %d.%2d\n",
284 	    highsum / vm_page_bucket_count,
285 	    highsum % vm_page_bucket_count);
286 	printf("Maximum bucket depth:    %d\n", maxdepth);
287 }
288 #endif /* MACH_PAGE_HASH_STATS */
289 
290 /*
291  *	The virtual page size is currently implemented as a runtime
292  *	variable, but is constant once initialized using vm_set_page_size.
293  *	This initialization must be done in the machine-dependent
294  *	bootstrap sequence, before calling other machine-independent
295  *	initializations.
296  *
297  *	All references to the virtual page size outside this
298  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
299  *	constants.
300  */
301 #if defined(__arm64__)
302 vm_size_t       page_size;
303 vm_size_t       page_mask;
304 int             page_shift;
305 #else
306 vm_size_t       page_size  = PAGE_SIZE;
307 vm_size_t       page_mask  = PAGE_MASK;
308 int             page_shift = PAGE_SHIFT;
309 #endif
310 
311 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
312 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
313 vm_page_t                          vm_page_array_ending_addr;
314 
315 unsigned int    vm_pages_count = 0;
316 
317 /*
318  *	Resident pages that represent real memory
319  *	are allocated from a set of free lists,
320  *	one per color.
321  */
322 unsigned int    vm_colors;
323 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
324 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
325 unsigned int    vm_free_magazine_refill_limit = 0;
326 
327 
328 struct vm_page_queue_free_head {
329 	vm_page_queue_head_t    qhead;
330 } VM_PAGE_PACKED_ALIGNED;
331 
332 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
333 
334 
335 unsigned int    vm_page_free_wanted;
336 unsigned int    vm_page_free_wanted_privileged;
337 #if CONFIG_SECLUDED_MEMORY
338 unsigned int    vm_page_free_wanted_secluded;
339 #endif /* CONFIG_SECLUDED_MEMORY */
340 unsigned int    vm_page_free_count;
341 
342 unsigned int    vm_page_realtime_count;
343 
344 /*
345  *	Occasionally, the virtual memory system uses
346  *	resident page structures that do not refer to
347  *	real pages, for example to leave a page with
348  *	important state information in the VP table.
349  *
350  *	These page structures are allocated the way
351  *	most other kernel structures are.
352  */
353 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
354 vm_locks_array_t vm_page_locks;
355 
356 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
357 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
358 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
359 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
360 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
361 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
362 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
363 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
364 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
365 
366 unsigned int    vm_page_local_q_soft_limit = 250;
367 unsigned int    vm_page_local_q_hard_limit = 500;
368 struct vpl     *__zpercpu vm_page_local_q;
369 
370 /* N.B. Guard and fictitious pages must not
371  * be assigned a zero phys_page value.
372  */
373 /*
374  *	Fictitious pages don't have a physical address,
375  *	but we must initialize phys_page to something.
376  *	For debugging, this should be a strange value
377  *	that the pmap module can recognize in assertions.
378  */
379 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
380 
381 /*
382  *	Guard pages are not accessible so they don't
383  *      need a physical address, but we need to enter
384  *	one in the pmap.
385  *	Let's make it recognizable and make sure that
386  *	we don't use a real physical page with that
387  *	physical address.
388  */
389 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
390 
391 /*
392  *	Resident page structures are also chained on
393  *	queues that are used by the page replacement
394  *	system (pageout daemon).  These queues are
395  *	defined here, but are shared by the pageout
396  *	module.  The inactive queue is broken into
397  *	file backed and anonymous for convenience as the
398  *	pageout daemon often assigns a higher
399  *	importance to anonymous pages (less likely to pick)
400  */
401 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
402 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
403 #if CONFIG_SECLUDED_MEMORY
404 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
405 #endif /* CONFIG_SECLUDED_MEMORY */
406 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
407 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
408 
409 queue_head_t    vm_objects_wired;
410 
411 vm_page_queue_head_t    vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
412 uint32_t        vm_page_donate_mode;
413 uint32_t        vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
414 uint32_t        vm_page_donate_count;
415 bool            vm_page_donate_queue_ripe;
416 
417 
418 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
419 uint32_t        vm_page_background_target;
420 uint32_t        vm_page_background_target_snapshot;
421 uint32_t        vm_page_background_count;
422 uint64_t        vm_page_background_promoted_count;
423 
424 uint32_t        vm_page_background_internal_count;
425 uint32_t        vm_page_background_external_count;
426 
427 uint32_t        vm_page_background_mode;
428 uint32_t        vm_page_background_exclude_external;
429 
430 unsigned int    vm_page_active_count;
431 unsigned int    vm_page_inactive_count;
432 unsigned int    vm_page_kernelcache_count;
433 #if CONFIG_SECLUDED_MEMORY
434 unsigned int    vm_page_secluded_count;
435 unsigned int    vm_page_secluded_count_free;
436 unsigned int    vm_page_secluded_count_inuse;
437 unsigned int    vm_page_secluded_count_over_target;
438 #endif /* CONFIG_SECLUDED_MEMORY */
439 unsigned int    vm_page_anonymous_count;
440 unsigned int    vm_page_throttled_count;
441 unsigned int    vm_page_speculative_count;
442 
443 unsigned int    vm_page_wire_count;
444 unsigned int    vm_page_wire_count_on_boot = 0;
445 unsigned int    vm_page_stolen_count = 0;
446 unsigned int    vm_page_wire_count_initial;
447 unsigned int    vm_page_gobble_count = 0;
448 unsigned int    vm_page_kern_lpage_count = 0;
449 
450 uint64_t        booter_size;  /* external so it can be found in core dumps */
451 
452 #define VM_PAGE_WIRE_COUNT_WARNING      0
453 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
454 
455 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
456 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
457 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
458 
459 unsigned int    vm_page_xpmapped_external_count = 0;
460 unsigned int    vm_page_external_count = 0;
461 unsigned int    vm_page_internal_count = 0;
462 unsigned int    vm_page_pageable_external_count = 0;
463 unsigned int    vm_page_pageable_internal_count = 0;
464 
465 #if DEVELOPMENT || DEBUG
466 unsigned int    vm_page_speculative_recreated = 0;
467 unsigned int    vm_page_speculative_created = 0;
468 unsigned int    vm_page_speculative_used = 0;
469 #endif
470 
471 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
472 
473 unsigned int    vm_page_cleaned_count = 0;
474 
475 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
476 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
477 
478 
479 /*
480  *	Several page replacement parameters are also
481  *	shared with this module, so that page allocation
482  *	(done here in vm_page_alloc) can trigger the
483  *	pageout daemon.
484  */
485 unsigned int    vm_page_free_target = 0;
486 unsigned int    vm_page_free_min = 0;
487 unsigned int    vm_page_throttle_limit = 0;
488 unsigned int    vm_page_inactive_target = 0;
489 #if CONFIG_SECLUDED_MEMORY
490 unsigned int    vm_page_secluded_target = 0;
491 #endif /* CONFIG_SECLUDED_MEMORY */
492 unsigned int    vm_page_anonymous_min = 0;
493 unsigned int    vm_page_free_reserved = 0;
494 
495 
496 /*
497  *	The VM system has a couple of heuristics for deciding
498  *	that pages are "uninteresting" and should be placed
499  *	on the inactive queue as likely candidates for replacement.
500  *	These variables let the heuristics be controlled at run-time
501  *	to make experimentation easier.
502  */
503 
504 boolean_t vm_page_deactivate_hint = TRUE;
505 
506 struct vm_page_stats_reusable vm_page_stats_reusable;
507 
508 /*
509  *	vm_set_page_size:
510  *
511  *	Sets the page size, perhaps based upon the memory
512  *	size.  Must be called before any use of page-size
513  *	dependent functions.
514  *
515  *	Sets page_shift and page_mask from page_size.
516  */
517 void
518 vm_set_page_size(void)
519 {
520 	page_size  = PAGE_SIZE;
521 	page_mask  = PAGE_MASK;
522 	page_shift = PAGE_SHIFT;
523 
524 	if ((page_mask & page_size) != 0) {
525 		panic("vm_set_page_size: page size not a power of two");
526 	}
527 
528 	for (page_shift = 0;; page_shift++) {
529 		if ((1U << page_shift) == page_size) {
530 			break;
531 		}
532 	}
533 }
534 
535 #if defined (__x86_64__)
536 
537 #define MAX_CLUMP_SIZE      16
538 #define DEFAULT_CLUMP_SIZE  4
539 
540 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
541 
542 #if DEVELOPMENT || DEBUG
543 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
544 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
545 
546 static inline void
547 vm_clump_update_stats(unsigned int c)
548 {
549 	assert(c <= vm_clump_size);
550 	if (c > 0 && c <= vm_clump_size) {
551 		vm_clump_stats[c] += c;
552 	}
553 	vm_clump_allocs += c;
554 }
555 #endif  /*  if DEVELOPMENT || DEBUG */
556 
557 /* Called once to setup the VM clump knobs */
558 static void
559 vm_page_setup_clump( void )
560 {
561 	unsigned int override, n;
562 
563 	vm_clump_size = DEFAULT_CLUMP_SIZE;
564 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
565 		vm_clump_size = override;
566 	}
567 
568 	if (vm_clump_size > MAX_CLUMP_SIZE) {
569 		panic("vm_page_setup_clump:: clump_size is too large!");
570 	}
571 	if (vm_clump_size < 1) {
572 		panic("vm_page_setup_clump:: clump_size must be >= 1");
573 	}
574 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
575 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
576 	}
577 
578 	vm_clump_promote_threshold = vm_clump_size;
579 	vm_clump_mask = vm_clump_size - 1;
580 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
581 		;
582 	}
583 
584 #if DEVELOPMENT || DEBUG
585 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
586 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
587 #endif  /*  if DEVELOPMENT || DEBUG */
588 }
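/*
 * For example, with the default clump_size of 4 (no "clump_size" boot-arg):
 * vm_clump_size == 4, vm_clump_mask == 0x3, vm_clump_shift == 2 and
 * vm_clump_promote_threshold == 4.
 */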
589 
590 #endif  /* #if defined (__x86_64__) */
591 
592 #define COLOR_GROUPS_TO_STEAL   4
593 
594 /* Called once during startup, once the cache geometry is known.
595  */
596 static void
597 vm_page_set_colors( void )
598 {
599 	unsigned int    n, override;
600 
601 #if defined (__x86_64__)
602 	/* adjust #colors because we need to color outside the clump boundary */
603 	vm_cache_geometry_colors >>= vm_clump_shift;
604 #endif
605 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
606 		n = override;
607 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
608 		n = vm_cache_geometry_colors;
609 	} else {
610 		n = DEFAULT_COLORS;                             /* use default if all else fails */
611 	}
612 	if (n == 0) {
613 		n = 1;
614 	}
615 	if (n > MAX_COLORS) {
616 		n = MAX_COLORS;
617 	}
618 
619 	/* the count must be a power of 2  */
620 	if ((n & (n - 1)) != 0) {
621 		n = DEFAULT_COLORS;                             /* use default if all else fails */
622 	}
623 	vm_colors = n;
624 	vm_color_mask = n - 1;
625 
626 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
627 
628 #if defined (__x86_64__)
629 	/* adjust for reduction in colors due to clumping and multiple cores */
630 	if (real_ncpus) {
631 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
632 	}
633 #endif
634 }
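/*
 * Worked example (no "colors" boot-arg): if the cache geometry yields
 * n == 8 after the x86_64 clump adjustment above, then vm_colors == 8,
 * vm_color_mask == 7 and vm_free_magazine_refill_limit ==
 * 8 * COLOR_GROUPS_TO_STEAL == 32, which x86_64 further scales by
 * vm_clump_size * real_ncpus.
 */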
635 
636 /*
637  * During single-threaded early boot we don't initialize all pages.
638  * This avoids some delay during boot. They'll be initialized and
639  * added to the free list as needed, or once we are multithreaded, by
640  * what becomes the pageout thread.
641  */
642 static boolean_t fill = FALSE;
643 static unsigned int fillval;
644 uint_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
645 ppnum_t delay_above_pnum = PPNUM_MAX;
646 
647 /*
648  * For x86, the first 8 Gig initializes quickly and gives us lots of lowmem + mem above to start off with.
649  * If ARM ever uses delayed page initialization, this value may need to be quite different.
650  */
651 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
652 
653 /*
654  * When we have to dip into more delayed pages due to low memory, free up
655  * a large chunk to get things back to normal. This avoids contention on the
656  * delayed code allocating page by page.
657  */
658 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
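/* i.e. 1GB worth of pages: 262144 pages with a 4K page size, 65536 with 16K */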
659 
660 /*
661  * Get and initialize the next delayed page.
662  */
663 static vm_page_t
664 vm_get_delayed_page(int grab_options)
665 {
666 	vm_page_t p;
667 	ppnum_t   pnum;
668 
669 	/*
670 	 * Get a new page if we have one.
671 	 */
672 	vm_free_page_lock();
673 	if (vm_delayed_count == 0) {
674 		vm_free_page_unlock();
675 		return NULL;
676 	}
677 
678 	if (!pmap_next_page(&pnum)) {
679 		vm_delayed_count = 0;
680 		vm_free_page_unlock();
681 		return NULL;
682 	}
683 
684 
685 	assert(vm_delayed_count > 0);
686 	--vm_delayed_count;
687 
688 #if defined(__x86_64__)
689 	/* x86 cluster code requires increasing phys_page in vm_pages[] */
690 	if (vm_pages_count > 0) {
691 		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
692 	}
693 #endif
694 	p = &vm_pages[vm_pages_count];
695 	assert(p < vm_page_array_ending_addr);
696 	vm_page_init(p, pnum, FALSE);
697 	++vm_pages_count;
698 	++vm_page_pages;
699 	vm_free_page_unlock();
700 
701 	/*
702 	 * These pages were initially counted as wired, undo that now.
703 	 */
704 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
705 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
706 	} else {
707 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
708 		vm_page_lockspin_queues();
709 	}
710 	--vm_page_wire_count;
711 	--vm_page_wire_count_initial;
712 	if (vm_page_wire_count_on_boot != 0) {
713 		--vm_page_wire_count_on_boot;
714 	}
715 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
716 		vm_page_unlock_queues();
717 	}
718 
719 
720 	if (fill) {
721 		fillPage(pnum, fillval);
722 	}
723 	return p;
724 }
725 
726 static void vm_page_module_init_delayed(void);
727 
728 /*
729  * Free all remaining delayed pages to the free lists.
730  */
731 void
732 vm_free_delayed_pages(void)
733 {
734 	vm_page_t   p;
735 	vm_page_t   list = NULL;
736 	uint_t      cnt = 0;
737 	vm_offset_t start_free_va;
738 	int64_t     free_size;
739 
740 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
741 		if (vm_himemory_mode) {
742 			vm_page_release(p, FALSE);
743 		} else {
744 			p->vmp_snext = list;
745 			list = p;
746 		}
747 		++cnt;
748 	}
749 
750 	/*
751 	 * Free the pages in reverse order if not himemory mode.
752 	 * Hence the low memory pages will be first on free lists. (LIFO)
753 	 */
754 	while (list != NULL) {
755 		p = list;
756 		list = p->vmp_snext;
757 		p->vmp_snext = NULL;
758 		vm_page_release(p, FALSE);
759 	}
760 #if DEVELOPMENT || DEBUG
761 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
762 #endif
763 
764 	/*
765 	 * Free up any unused full pages at the end of the vm_pages[] array
766 	 */
767 	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
768 
769 #if defined(__x86_64__)
770 	/*
771 	 * Since x86 might have used large pages for vm_pages[], we can't
772 	 * free starting in the middle of a partially used large page.
773 	 */
774 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
775 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
776 	}
777 #endif
778 	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
779 		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
780 		if (free_size > 0) {
781 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
782 			vm_page_array_ending_addr = (void *)start_free_va;
783 
784 			/*
785 			 * Note there's no locking here, as only this thread will ever change this value.
786 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
787 			 */
788 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
789 
790 #if DEVELOPMENT || DEBUG
791 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
792 			    (long)free_size, (long)start_free_va);
793 #endif
794 		}
795 	}
796 
797 
798 	/*
799 	 * now we can create the VM page array zone
800 	 */
801 	vm_page_module_init_delayed();
802 }
803 
804 /*
805  * Try and free up enough delayed pages to match a contig memory allocation.
806  */
807 static void
808 vm_free_delayed_pages_contig(
809 	uint_t    npages,
810 	ppnum_t   max_pnum,
811 	ppnum_t   pnum_mask)
812 {
813 	vm_page_t p;
814 	ppnum_t   pnum;
815 	uint_t    cnt = 0;
816 
817 	/*
818 	 * Treat 0 as the absolute max page number.
819 	 */
820 	if (max_pnum == 0) {
821 		max_pnum = PPNUM_MAX;
822 	}
823 
824 	/*
825 	 * Free till we get a properly aligned start page
826 	 */
827 	for (;;) {
828 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
829 		if (p == NULL) {
830 			return;
831 		}
832 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
833 		vm_page_release(p, FALSE);
834 		if (pnum >= max_pnum) {
835 			return;
836 		}
837 		if ((pnum & pnum_mask) == 0) {
838 			break;
839 		}
840 	}
841 
842 	/*
843 	 * Having a healthy pool of free pages will help performance. We don't
844 	 * want to fall back to the delayed code for every page allocation.
845 	 */
846 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
847 		npages += VM_DELAY_PAGE_CHUNK;
848 	}
849 
850 	/*
851 	 * Now free up the pages
852 	 */
853 	for (cnt = 1; cnt < npages; ++cnt) {
854 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
855 		if (p == NULL) {
856 			return;
857 		}
858 		vm_page_release(p, FALSE);
859 	}
860 }
861 
862 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
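/* Rounds X up to the next power of two, e.g. ROUNDUP_NEXTP2(5) == 8 and ROUNDUP_NEXTP2(8) == 8; X must be >= 2 since __builtin_clz(0) is undefined. */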
863 
864 void
865 vm_page_init_local_q(unsigned int num_cpus)
866 {
867 	struct vpl *t_local_q;
868 
869 	/*
870 	 * no point in this for a uni-processor system
871 	 */
872 	if (num_cpus >= 2) {
873 		ml_cpu_info_t cpu_info;
874 
875 		/*
876 		 * Force the allocation alignment to a cacheline,
877 		 * because the `vpl` struct has a lock and will be taken
878 		 * cross CPU so we want to isolate the rest of the per-CPU
879 		 * data to avoid false sharing due to this lock being taken.
880 		 */
881 
882 		ml_cpu_get_info(&cpu_info);
883 
884 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
885 		    cpu_info.cache_line_size - 1);
886 
887 		zpercpu_foreach(lq, t_local_q) {
888 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
889 			vm_page_queue_init(&lq->vpl_queue);
890 		}
891 
892 		/* make the initialization visible to all cores */
893 		os_atomic_store(&vm_page_local_q, t_local_q, release);
894 	}
895 }
896 
897 /*
898  * vm_init_before_launchd
899  *
900  * This should be called right before launchd is loaded.
901  */
902 void
903 vm_init_before_launchd()
904 {
905 	vm_page_lockspin_queues();
906 	vm_page_wire_count_on_boot = vm_page_wire_count;
907 	vm_page_unlock_queues();
908 }
909 
910 
911 /*
912  *	vm_page_bootstrap:
913  *
914  *	Initializes the resident memory module.
915  *
916  *	Allocates memory for the page cells, and
917  *	for the object/offset-to-page hash table headers.
918  *	Each page cell is initialized and placed on the free list.
919  *	Returns the range of available kernel virtual memory.
920  */
921 __startup_func
922 void
923 vm_page_bootstrap(
924 	vm_offset_t             *startp,
925 	vm_offset_t             *endp)
926 {
927 	unsigned int            i;
928 	unsigned int            log1;
929 	unsigned int            log2;
930 	unsigned int            size;
931 
932 	/*
933 	 *	Initialize the page queues.
934 	 */
935 
936 	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
937 	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
938 	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
939 
940 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
941 		int group;
942 
943 		purgeable_queues[i].token_q_head = 0;
944 		purgeable_queues[i].token_q_tail = 0;
945 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
946 			queue_init(&purgeable_queues[i].objq[group]);
947 		}
948 
949 		purgeable_queues[i].type = i;
950 		purgeable_queues[i].new_pages = 0;
951 #if MACH_ASSERT
952 		purgeable_queues[i].debug_count_tokens = 0;
953 		purgeable_queues[i].debug_count_objects = 0;
954 #endif
955 	}
956 	;
957 	purgeable_nonvolatile_count = 0;
958 	queue_init(&purgeable_nonvolatile_queue);
959 
960 	for (i = 0; i < MAX_COLORS; i++) {
961 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
962 	}
963 
964 	vm_page_queue_init(&vm_lopage_queue_free);
965 	vm_page_queue_init(&vm_page_queue_active);
966 	vm_page_queue_init(&vm_page_queue_inactive);
967 #if CONFIG_SECLUDED_MEMORY
968 	vm_page_queue_init(&vm_page_queue_secluded);
969 #endif /* CONFIG_SECLUDED_MEMORY */
970 	vm_page_queue_init(&vm_page_queue_cleaned);
971 	vm_page_queue_init(&vm_page_queue_throttled);
972 	vm_page_queue_init(&vm_page_queue_anonymous);
973 	queue_init(&vm_objects_wired);
974 
975 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
976 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
977 
978 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
979 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
980 	}
981 
982 	vm_page_queue_init(&vm_page_queue_donate);
983 	vm_page_queue_init(&vm_page_queue_background);
984 
985 	vm_page_background_count = 0;
986 	vm_page_background_internal_count = 0;
987 	vm_page_background_external_count = 0;
988 	vm_page_background_promoted_count = 0;
989 
990 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
991 
992 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
993 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
994 	}
995 
996 #if    defined(__LP64__)
997 	vm_page_background_mode = VM_PAGE_BG_ENABLED;
998 	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
999 #else
1000 	vm_page_background_mode = VM_PAGE_BG_DISABLED;
1001 	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1002 #endif
1003 	vm_page_background_exclude_external = 0;
1004 
1005 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1006 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1007 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1008 
1009 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1010 		vm_page_background_mode = VM_PAGE_BG_DISABLED;
1011 	}
1012 
1013 	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1014 	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1015 		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1016 	}
1017 
1018 	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1019 	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1020 	vm_page_donate_target = vm_page_donate_target_high;
1021 	vm_page_donate_count = 0;
1022 
1023 	vm_page_free_wanted = 0;
1024 	vm_page_free_wanted_privileged = 0;
1025 #if CONFIG_SECLUDED_MEMORY
1026 	vm_page_free_wanted_secluded = 0;
1027 #endif /* CONFIG_SECLUDED_MEMORY */
1028 
1029 #if defined (__x86_64__)
1030 	/* this must be called before vm_page_set_colors() */
1031 	vm_page_setup_clump();
1032 #endif
1033 
1034 	vm_page_set_colors();
1035 
1036 	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1037 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1038 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1039 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1040 
1041 	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1042 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1043 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1044 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1045 	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1046 	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1047 	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1048 #if CONFIG_SECLUDED_MEMORY
1049 	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1050 #endif /* CONFIG_SECLUDED_MEMORY */
1051 
1052 	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1053 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1054 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1055 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1056 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1057 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1058 #if CONFIG_SECLUDED_MEMORY
1059 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1060 #endif /* CONFIG_SECLUDED_MEMORY */
1061 
1062 	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1063 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1064 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1065 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1066 	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1067 #if CONFIG_SECLUDED_MEMORY
1068 	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1069 #endif /* CONFIG_SECLUDED_MEMORY */
1070 
1071 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1072 		vm_allocation_sites_static[t].refcount = 2;
1073 		vm_allocation_sites_static[t].tag = t;
1074 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1075 	}
1076 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1077 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1078 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1079 
1080 	/*
1081 	 *	Steal memory for the map and zone subsystems.
1082 	 */
1083 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1084 
1085 	/*
1086 	 *	Allocate (and initialize) the virtual-to-physical
1087 	 *	table hash buckets.
1088 	 *
1089 	 *	The number of buckets should be a power of two to
1090 	 *	get a good hash function.  The following computation
1091 	 *	chooses the first power of two that is greater
1092 	 *	than the number of physical pages in the system.
1093 	 */
1094 
1095 	if (vm_page_bucket_count == 0) {
1096 		unsigned int npages = pmap_free_pages();
1097 
1098 		vm_page_bucket_count = 1;
1099 		while (vm_page_bucket_count < npages) {
1100 			vm_page_bucket_count <<= 1;
1101 		}
1102 	}
1103 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1104 
1105 	vm_page_hash_mask = vm_page_bucket_count - 1;
1106 
1107 	/*
1108 	 *	Calculate object shift value for hashing algorithm:
1109 	 *		O = log2(sizeof(struct vm_object))
1110 	 *		B = log2(vm_page_bucket_count)
1111 	 *	        hash shifts the object left by
1112 	 *		B/2 - O
1113 	 */
1114 	size = vm_page_bucket_count;
1115 	for (log1 = 0; size > 1; log1++) {
1116 		size /= 2;
1117 	}
1118 	size = sizeof(struct vm_object);
1119 	for (log2 = 0; size > 1; log2++) {
1120 		size /= 2;
1121 	}
1122 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1123 
1124 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1125 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1126 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure unique series */
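	/*
	 * Worked example: with 2^20 buckets, log1 == 20, so
	 * vm_page_bucket_hash == (1 << 10) | (1 << 5) | 1 == 0x421.
	 */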
1127 
1128 	if (vm_page_hash_mask & vm_page_bucket_count) {
1129 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1130 	}
1131 
1132 #if VM_PAGE_BUCKETS_CHECK
1133 #if VM_PAGE_FAKE_BUCKETS
1134 	/*
1135 	 * Allocate a decoy set of page buckets, to detect
1136 	 * any stomping there.
1137 	 */
1138 	vm_page_fake_buckets = (vm_page_bucket_t *)
1139 	    pmap_steal_memory(vm_page_bucket_count *
1140 	    sizeof(vm_page_bucket_t), 0);
1141 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1142 	vm_page_fake_buckets_end =
1143 	    vm_map_round_page((vm_page_fake_buckets_start +
1144 	    (vm_page_bucket_count *
1145 	    sizeof(vm_page_bucket_t))),
1146 	    PAGE_MASK);
1147 	char *cp;
1148 	for (cp = (char *)vm_page_fake_buckets_start;
1149 	    cp < (char *)vm_page_fake_buckets_end;
1150 	    cp++) {
1151 		*cp = 0x5a;
1152 	}
1153 #endif /* VM_PAGE_FAKE_BUCKETS */
1154 #endif /* VM_PAGE_BUCKETS_CHECK */
1155 
1156 	kernel_debug_string_early("vm_page_buckets");
1157 	vm_page_buckets = (vm_page_bucket_t *)
1158 	    pmap_steal_memory(vm_page_bucket_count *
1159 	    sizeof(vm_page_bucket_t), 0);
1160 
1161 	kernel_debug_string_early("vm_page_bucket_locks");
1162 	vm_page_bucket_locks = (lck_spin_t *)
1163 	    pmap_steal_memory(vm_page_bucket_lock_count *
1164 	    sizeof(lck_spin_t), 0);
1165 
1166 	for (i = 0; i < vm_page_bucket_count; i++) {
1167 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1168 
1169 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1170 #if     MACH_PAGE_HASH_STATS
1171 		bucket->cur_count = 0;
1172 		bucket->hi_count = 0;
1173 #endif /* MACH_PAGE_HASH_STATS */
1174 	}
1175 
1176 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1177 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1178 	}
1179 
1180 	vm_tag_init();
1181 
1182 #if VM_PAGE_BUCKETS_CHECK
1183 	vm_page_buckets_check_ready = TRUE;
1184 #endif /* VM_PAGE_BUCKETS_CHECK */
1185 
1186 	/*
1187 	 *	Machine-dependent code allocates the resident page table.
1188 	 *	It uses vm_page_init to initialize the page frames.
1189 	 *	The code also returns to us the virtual space available
1190 	 *	to the kernel.  We don't trust the pmap module
1191 	 *	to get the alignment right.
1192 	 */
1193 
1194 	kernel_debug_string_early("pmap_startup");
1195 	pmap_startup(&virtual_space_start, &virtual_space_end);
1196 	virtual_space_start = round_page(virtual_space_start);
1197 	virtual_space_end = trunc_page(virtual_space_end);
1198 
1199 	*startp = virtual_space_start;
1200 	*endp = virtual_space_end;
1201 
1202 	/*
1203 	 *	Compute the initial "wire" count.
1204 	 *	Up until now, the pages which have been set aside are not under
1205 	 *	the VM system's control, so although they aren't explicitly
1206 	 *	wired, they nonetheless can't be moved. At this moment,
1207 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1208 	 */
1209 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1210 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1211 	    vm_page_free_count - vm_lopage_free_count;
1212 #if CONFIG_SECLUDED_MEMORY
1213 	vm_page_wire_count -= vm_page_secluded_count;
1214 #endif
1215 	vm_page_wire_count_initial = vm_page_wire_count;
1216 
1217 	/* capture this for later use */
1218 	booter_size = ml_get_booter_memory_size();
1219 
1220 	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1221 	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1222 
1223 	kernel_debug_string_early("vm_page_bootstrap complete");
1224 }
1225 
1226 #ifndef MACHINE_PAGES
1227 /*
1228  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1229  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1230  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1231  */
1232 static void *
1233 pmap_steal_memory_internal(
1234 	vm_size_t size,
1235 	vm_size_t alignment,
1236 	boolean_t might_free,
1237 	unsigned int flags,
1238 	pmap_mapping_type_t mapping_type)
1239 {
1240 	kern_return_t kr;
1241 	vm_offset_t addr;
1242 	vm_offset_t map_addr;
1243 	ppnum_t phys_page;
1244 	unsigned int pmap_flags;
1245 
1246 	/*
1247 	 * Size needs to be aligned to word size.
1248 	 */
1249 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1250 
1251 	/*
1252 	 * Alignment defaults to word size if not specified.
1253 	 */
1254 	if (alignment == 0) {
1255 		alignment = sizeof(void*);
1256 	}
1257 
1258 	/*
1259 	 * Alignment must be no greater than a page and must be a power of two.
1260 	 */
1261 	assert(alignment <= PAGE_SIZE);
1262 	assert((alignment & (alignment - 1)) == 0);
1263 
1264 	/*
1265 	 * On the first call, get the initial values for virtual address space
1266 	 * and page align them.
1267 	 */
1268 	if (virtual_space_start == virtual_space_end) {
1269 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1270 		virtual_space_start = round_page(virtual_space_start);
1271 		virtual_space_end = trunc_page(virtual_space_end);
1272 
1273 #if defined(__x86_64__)
1274 		/*
1275 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1276 		 * that map it. This makes the VA available for large page mappings.
1277 		 */
1278 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1279 #endif
1280 	}
1281 
1282 	/*
1283 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1284 	 * address if the size is big enough to back with at least 1 large page.
1285 	 */
1286 #if defined(__x86_64__)
1287 	if (size >= I386_LPGBYTES) {
1288 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1289 	}
1290 #endif
1291 	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
1292 	addr = virtual_space_start;
1293 	virtual_space_start += size;
1294 
1295 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1296 
1297 	/*
1298 	 * Allocate and map physical pages to back the new virtual space.
1299 	 */
1300 	map_addr = round_page(addr);
1301 	while (map_addr < addr + size) {
1302 #if defined(__x86_64__)
1303 		/*
1304 		 * Back with a large page if properly aligned on x86
1305 		 */
1306 		if ((map_addr & I386_LPGMASK) == 0 &&
1307 		    map_addr + I386_LPGBYTES <= addr + size &&
1308 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1309 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1310 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1311 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1312 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);
1313 
1314 			if (kr != KERN_SUCCESS) {
1315 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1316 				    (unsigned long)map_addr, phys_page);
1317 			}
1318 			map_addr += I386_LPGBYTES;
1319 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1320 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1321 			vm_page_kern_lpage_count++;
1322 			continue;
1323 		}
1324 #endif
1325 
1326 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1327 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1328 		}
1329 
1330 #if defined(__x86_64__)
1331 		pmap_pre_expand(kernel_pmap, map_addr);
1332 #endif
1333 		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
1334 
1335 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1336 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1337 		    pmap_flags, FALSE, mapping_type);
1338 
1339 		if (kr != KERN_SUCCESS) {
1340 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1341 			    (unsigned long)map_addr, phys_page);
1342 		}
1343 		map_addr += PAGE_SIZE;
1344 
1345 		/*
1346 		 * Account for newly stolen memory
1347 		 */
1348 		vm_page_wire_count++;
1349 		vm_page_stolen_count++;
1350 	}
1351 
1352 #if defined(__x86_64__)
1353 	/*
1354 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1355 	 * Notify the pmap layer to record which high pages were allocated so far.
1356 	 */
1357 	if (might_free) {
1358 		pmap_hi_pages_done();
1359 	}
1360 #endif
1361 #if KASAN
1362 	kasan_notify_address(round_page(addr), size);
1363 #endif
1364 	return (void *) addr;
1365 }
1366 
1367 void *
1368 pmap_steal_memory(
1369 	vm_size_t size,
1370 	vm_size_t alignment)
1371 {
1372 	return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1373 }
1374 
1375 void *
1376 pmap_steal_freeable_memory(
1377 	vm_size_t size)
1378 {
1379 	return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1380 }
1381 
1382 
1383 
1384 
1385 #if CONFIG_SECLUDED_MEMORY
1386 /* boot-args to control secluded memory */
1387 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
1388 /* IOKit can use secluded memory */
1389 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
1390 /* apps can use secluded memory */
1391 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
1392 /* filecache can use secluded memory */
1393 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
1394 uint64_t secluded_shutoff_trigger = 0;
1395 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1396 #endif /* CONFIG_SECLUDED_MEMORY */
1397 
1398 
1399 #if defined(__arm64__)
1400 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1401 unsigned int vm_first_phys_ppnum = 0;
1402 #endif
1403 
1404 void vm_page_release_startup(vm_page_t mem);
1405 void
1406 pmap_startup(
1407 	vm_offset_t     *startp,
1408 	vm_offset_t     *endp)
1409 {
1410 	unsigned int    i, npages;
1411 	ppnum_t         phys_page;
1412 	uint64_t        mem_sz;
1413 	uint64_t        start_ns;
1414 	uint64_t        now_ns;
1415 	uint_t          low_page_count = 0;
1416 
1417 #if    defined(__LP64__)
1418 	/*
1419 	 * make sure we are aligned on a 64 byte boundary
1420 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1421 	 * 6 bits of the pointer)
1422 	 */
1423 	if (virtual_space_start != virtual_space_end) {
1424 		virtual_space_start = round_page(virtual_space_start);
1425 	}
1426 #endif
1427 
1428 	/*
1429 	 * We calculate how many page frames we will have
1430 	 * and then allocate the page structures in one chunk.
1431 	 *
1432 	 * Note that the calculation here doesn't take into account
1433 	 * the memory needed to map what's being allocated, i.e. the page
1434 	 * table entries. So the actual number of pages we get will be
1435 	 * less than this. To do someday: include that in the computation.
1436 	 *
1437 	 * Also for ARM, we don't use the count of free_pages, but rather the
1438 	 * range from last page to first page (ignore holes due to retired pages).
1439 	 */
1440 #if defined(__arm64__)
1441 	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1442 #else /* defined(__arm64__) */
1443 	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1444 #endif /* defined(__arm64__) */
1445 	mem_sz += round_page(virtual_space_start) - virtual_space_start;        /* Account for any slop */
1446 	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));    /* scaled to include the vm_page_ts */
1447 
1448 
1449 	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1450 
1451 	/*
1452 	 * Check if we want to initialize pages to a known value
1453 	 */
1454 	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1455 		fill = TRUE;
1456 	}
1457 #if     DEBUG
1458 	/* This slows down booting the DEBUG kernel, particularly on
1459 	 * large memory systems, but is worthwhile in deterministically
1460 	 * trapping uninitialized memory usage.
1461 	 */
1462 	if (!fill) {
1463 		fill = TRUE;
1464 		fillval = 0xDEB8F177;
1465 	}
1466 #endif
1467 	if (fill) {
1468 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1469 	}
1470 
1471 #if CONFIG_SECLUDED_MEMORY
1472 	/*
1473 	 * Figure out how much secluded memory to have before we start
1474  * releasing pages to the free lists.
1475 	 * The default, if specified nowhere else, is no secluded mem.
1476 	 */
1477 	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
1478 
1479 	/*
1480 	 * Allow a really large app to effectively use secluded memory until it exits.
1481 	 */
1482 	if (vm_page_secluded_target != 0) {
1483 		/*
1484 		 * Get an amount from boot-args, else use 1/2 of max_mem.
1485 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1486 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
1487 		 */
1488 		int secluded_shutoff_mb;
1489 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1490 		    sizeof(secluded_shutoff_mb))) {
1491 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1492 		} else {
1493 			secluded_shutoff_trigger = max_mem / 2;
1494 		}
1495 
1496 		/* ensure the headroom value is sensible and avoid underflows */
1497 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1498 	}
1499 
1500 #endif /* CONFIG_SECLUDED_MEMORY */
1501 
1502 #if defined(__x86_64__)
1503 
1504 	/*
1505 	 * Decide how much memory we delay freeing at boot time.
1506 	 */
1507 	uint32_t delay_above_gb;
1508 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1509 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1510 	}
1511 
1512 	if (delay_above_gb == 0) {
1513 		delay_above_pnum = PPNUM_MAX;
1514 	} else {
1515 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1516 	}
1517 
1518 	/* make sure we have sane breathing room: 1G above low memory */
1519 	if (delay_above_pnum <= max_valid_low_ppnum) {
1520 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1521 	}
1522 
1523 	if (delay_above_pnum < PPNUM_MAX) {
1524 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1525 	}
1526 
1527 #endif /* defined(__x86_64__) */
1528 
1529 	/*
1530 	 * Initialize and release the page frames.
1531 	 */
1532 	kernel_debug_string_early("page_frame_init");
1533 
1534 	vm_page_array_beginning_addr = &vm_pages[0];
1535 	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
1536 #if VM_PAGE_PACKED_FROM_ARRAY
1537 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1538 		panic("pmap_startup(): too many pages to support vm_page packing");
1539 	}
1540 #endif
1541 
1542 	vm_delayed_count = 0;
1543 
1544 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1545 	vm_pages_count = 0;
1546 	for (i = 0; i < npages; i++) {
1547 		/* Did we run out of pages? */
1548 		if (!pmap_next_page(&phys_page)) {
1549 			break;
1550 		}
1551 
1552 		if (phys_page < max_valid_low_ppnum) {
1553 			++low_page_count;
1554 		}
1555 
1556 		/* Are we at high enough pages to delay the rest? */
1557 		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1558 			vm_delayed_count = pmap_free_pages();
1559 			break;
1560 		}
1561 
1562 #if defined(__arm64__)
1563 		if (i == 0) {
1564 			vm_first_phys_ppnum = phys_page;
1565 			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1566 			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1567 		}
1568 #endif /* defined(__arm64__) */
1569 
1570 #if defined(__x86_64__)
1571 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
1572 		if (i > 0) {
1573 			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1574 		}
1575 #endif
1576 		++vm_pages_count;
1577 		vm_page_init(&vm_pages[i], phys_page, FALSE);
1578 		if (fill) {
1579 			fillPage(phys_page, fillval);
1580 		}
1581 		if (vm_himemory_mode) {
1582 			vm_page_release_startup(&vm_pages[i]);
1583 		}
1584 	}
1585 	vm_page_pages = vm_pages_count; /* used to report to user space */
1586 
1587 	if (!vm_himemory_mode) {
1588 		do {
1589 			if (!VMP_ERROR_GET(&vm_pages[--i])) {               /* skip retired pages */
1590 				vm_page_release_startup(&vm_pages[i]);
1591 			}
1592 		} while (i != 0);
1593 	}
1594 
1595 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1596 	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1597 	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1598 
1599 #if defined(__LP64__)
1600 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1601 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1602 	}
1603 
1604 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1605 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1606 	}
1607 #endif
1608 
1609 	VM_CHECK_MEMORYSTATUS;
1610 
1611 	/*
1612 	 * We have to re-align virtual_space_start,
1613 	 * because pmap_steal_memory has been using it.
1614 	 */
1615 	virtual_space_start = round_page(virtual_space_start);
1616 	*startp = virtual_space_start;
1617 	*endp = virtual_space_end;
1618 }
1619 #endif  /* MACHINE_PAGES */
1620 
1621 /*
1622  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1623  * or frees to this zone. It's just here for reporting purposes via the zprint command.
1624  * This needs to be done after all initially delayed pages are put on the free lists.
1625  */
1626 static void
1627 vm_page_module_init_delayed(void)
1628 {
1629 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1630 	    ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1631 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1632 
1633 		zone_set_exhaustible(z, 0, true);
1634 		/*
1635 		 * Reflect size and usage information for vm_pages[].
1636 		 */
1637 
1638 		z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1639 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
1640 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1641 		vm_pages_count * sizeof(struct vm_page);
1642 		vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1643 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1644 		z->z_wired_cur += vm_page_zone_pages;
1645 		z->z_wired_hwm = z->z_wired_cur;
1646 		z->z_va_cur = z->z_wired_cur;
1647 		/* since zone accounts for these, take them out of stolen */
1648 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1649 	});
1650 }
1651 
1652 /*
1653  * Create the vm_pages zone. This is used for the vm_page structures for the pages
1654  * that are scavanged from other boot time usages by ml_static_mfree(). As such,
1655  * this needs to happen in early VM bootstrap.
1656  */
1657 
1658 __startup_func
1659 static void
1660 vm_page_module_init(void)
1661 {
1662 	vm_size_t vm_page_with_ppnum_size;
1663 
1664 	/*
1665 	 * Since the pointers to elements in this zone will be packed, they
1666 	 * must have appropriate size. Not strictly what sizeof() reports.
1667 	 */
1668 	vm_page_with_ppnum_size =
1669 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1670 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
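	/*
	 * Editor's note: the expression above is the standard round-up-to-
	 * alignment idiom for a power-of-2 alignment.  As a hypothetical
	 * example, a 100-byte struct with a 64-byte packed-pointer alignment
	 * rounds to (100 + 63) & ~63 == 128, so every zone element sits on a
	 * 64-byte boundary and its low pointer bits are available for packing.
	 */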
1671 
1672 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1673 	    ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NO_TBI_TAG,
1674 	    ZONE_ID_ANY, ^(zone_t z) {
1675 		/*
1676 		 * The number "10" is a small number that is larger than the number
1677 		 * of fictitious pages that any single caller will attempt to allocate
1678 		 * without blocking.
1679 		 *
1680 		 * The largest such number at the moment is kmem_alloc()
1681 		 * when 2 guard pages are asked for. 10 is simply a somewhat larger number,
1682 		 * taking into account the 50% hysteresis the zone allocator uses.
1683 		 *
1684 		 * Note: this works at all because the zone allocator
1685 		 *       doesn't ever allocate fictitious pages.
1686 		 */
1687 		zone_raise_reserve(z, 10);
1688 	});
1689 }
1690 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1691 
1692 /*
1693  *	Routine:	vm_page_create
1694  *	Purpose:
1695  *		After the VM system is up, machine-dependent code
1696  *		may stumble across more physical memory.  For example,
1697  *		memory that it was reserving for a frame buffer.
1698  *		vm_page_create turns this memory into available pages.
1699  */
1700 
1701 void
1702 vm_page_create(
1703 	ppnum_t start,
1704 	ppnum_t end)
1705 {
1706 	ppnum_t         phys_page;
1707 	vm_page_t       m;
1708 
1709 	for (phys_page = start;
1710 	    phys_page < end;
1711 	    phys_page++) {
1712 		m = vm_page_grab_fictitious_common(phys_page, TRUE);
1713 		m->vmp_fictitious = FALSE;
1714 		pmap_clear_noencrypt(phys_page);
1715 
1716 
1717 		vm_free_page_lock();
1718 		vm_page_pages++;
1719 		vm_free_page_unlock();
1720 		vm_page_release(m, FALSE);
1721 	}
1722 }
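/*
 * Illustrative usage sketch (editor's note, names hypothetical): a driver
 * returning a reserved physical range [base, base + size) to the VM might
 * call
 *
 *	vm_page_create(atop(base), atop(base + size));
 *
 * after which those pages are released to the free lists like any other
 * resident memory.
 */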
1723 
1724 
1725 /*
1726  *	vm_page_hash:
1727  *
1728  *	Distributes the object/offset key pair among hash buckets.
1729  *
1730  *	NOTE:	The bucket count must be a power of 2
1731  */
1732 #define vm_page_hash(object, offset) (\
1733 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1734 	 & vm_page_hash_mask)
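/*
 * Editor's note (illustrative only): because the bucket count is a power
 * of 2, vm_page_hash_mask == bucket_count - 1 and the final AND reduces
 * the mixed value to a valid bucket index.  For a hypothetical bucket
 * count of 1024 (mask 0x3ff), an offset of 0x4000 with 4 KB pages
 * contributes atop_64(0x4000) == 4 to the hash before masking.
 */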
1735 
1736 
1737 /*
1738  *	vm_page_insert:		[ internal use only ]
1739  *
1740  *	Inserts the given mem entry into the object/object-page
1741  *	table and object list.
1742  *
1743  *	The object must be locked.
1744  */
1745 void
1746 vm_page_insert(
1747 	vm_page_t               mem,
1748 	vm_object_t             object,
1749 	vm_object_offset_t      offset)
1750 {
1751 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1752 }
1753 
1754 void
1755 vm_page_insert_wired(
1756 	vm_page_t               mem,
1757 	vm_object_t             object,
1758 	vm_object_offset_t      offset,
1759 	vm_tag_t                tag)
1760 {
1761 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1762 }
1763 
1764 void
1765 vm_page_insert_internal(
1766 	vm_page_t               mem,
1767 	vm_object_t             object,
1768 	vm_object_offset_t      offset,
1769 	vm_tag_t                tag,
1770 	boolean_t               queues_lock_held,
1771 	boolean_t               insert_in_hash,
1772 	boolean_t               batch_pmap_op,
1773 	boolean_t               batch_accounting,
1774 	uint64_t                *delayed_ledger_update)
1775 {
1776 	vm_page_bucket_t        *bucket;
1777 	lck_spin_t              *bucket_lock;
1778 	int                     hash_id;
1779 	task_t                  owner;
1780 	int                     ledger_idx_volatile;
1781 	int                     ledger_idx_nonvolatile;
1782 	int                     ledger_idx_volatile_compressed;
1783 	int                     ledger_idx_nonvolatile_compressed;
1784 	int                     ledger_idx_composite;
1785 	int                     ledger_idx_external_wired;
1786 	boolean_t               do_footprint;
1787 
1788 #if 0
1789 	/*
1790 	 * we may not hold the page queue lock
1791 	 * so this check isn't safe to make
1792 	 */
1793 	VM_PAGE_CHECK(mem);
1794 #endif
1795 
1796 	assertf(page_aligned(offset), "0x%llx\n", offset);
1797 
1798 	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1799 
1800 	vm_object_lock_assert_exclusive(object);
1801 	LCK_MTX_ASSERT(&vm_page_queue_lock,
1802 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
1803 	    : LCK_MTX_ASSERT_NOTOWNED);
1804 
1805 	if (queues_lock_held == FALSE) {
1806 		assert(!VM_PAGE_PAGEABLE(mem));
1807 	}
1808 
1809 	if (insert_in_hash == TRUE) {
1810 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1811 		if (mem->vmp_tabled || mem->vmp_object) {
1812 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1813 			    "already in (obj=%p,off=0x%llx)",
1814 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1815 		}
1816 #endif
1817 		if (object->internal && (offset >= object->vo_size)) {
1818 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1819 			    mem, object, offset, object->vo_size);
1820 		}
1821 
1822 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1823 
1824 		/*
1825 		 *	Record the object/offset pair in this page
1826 		 */
1827 
1828 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1829 		mem->vmp_offset = offset;
1830 
1831 #if CONFIG_SECLUDED_MEMORY
1832 		if (object->eligible_for_secluded) {
1833 			vm_page_secluded.eligible_for_secluded++;
1834 		}
1835 #endif /* CONFIG_SECLUDED_MEMORY */
1836 
1837 		/*
1838 		 *	Insert it into the object_object/offset hash table
1839 		 */
1840 		hash_id = vm_page_hash(object, offset);
1841 		bucket = &vm_page_buckets[hash_id];
1842 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1843 
1844 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1845 
1846 		mem->vmp_next_m = bucket->page_list;
1847 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
1848 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1849 
1850 #if     MACH_PAGE_HASH_STATS
1851 		if (++bucket->cur_count > bucket->hi_count) {
1852 			bucket->hi_count = bucket->cur_count;
1853 		}
1854 #endif /* MACH_PAGE_HASH_STATS */
1855 		mem->vmp_hashed = TRUE;
1856 		lck_spin_unlock(bucket_lock);
1857 	}
1858 
1859 	{
1860 		unsigned int    cache_attr;
1861 
1862 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
1863 
1864 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
1865 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1866 		}
1867 	}
1868 	/*
1869 	 *	Now link into the object's list of backed pages.
1870 	 */
1871 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
1872 	object->memq_hint = mem;
1873 	mem->vmp_tabled = TRUE;
1874 
1875 	/*
1876 	 *	Show that the object has one more resident page.
1877 	 */
1878 
1879 	object->resident_page_count++;
1880 	if (VM_PAGE_WIRED(mem)) {
1881 		assert(mem->vmp_wire_count > 0);
1882 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1883 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1884 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1885 	}
1886 	assert(object->resident_page_count >= object->wired_page_count);
1887 
1888 #if DEVELOPMENT || DEBUG
1889 	if (object->object_is_shared_cache &&
1890 	    object->pager != NULL &&
1891 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1892 		int new, old;
1893 		assert(!object->internal);
1894 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1895 		do {
1896 			old = shared_region_pagers_resident_peak;
1897 		} while (old < new &&
1898 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1899 	}
1900 #endif /* DEVELOPMENT || DEBUG */
1901 
1902 	if (batch_accounting == FALSE) {
1903 		if (object->internal) {
1904 			OSAddAtomic(1, &vm_page_internal_count);
1905 		} else {
1906 			OSAddAtomic(1, &vm_page_external_count);
1907 		}
1908 	}
1909 
1910 	/*
1911 	 * It wouldn't make sense to insert a "reusable" page in
1912 	 * an object (the page would have been marked "reusable" only
1913 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1914 	 * in the object at that time).
1915 	 * But a page could be inserted in a "all_reusable" object, if
1916 	 * something faults it in (a vm_read() from another task or a
1917 	 * "use-after-free" issue in user space, for example).  It can
1918 	 * also happen if we're relocating a page from that object to
1919 	 * a different physical page during a physically-contiguous
1920 	 * allocation.
1921 	 */
1922 	assert(!mem->vmp_reusable);
1923 	if (object->all_reusable) {
1924 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1925 	}
1926 
1927 	if (object->purgable == VM_PURGABLE_DENY &&
1928 	    !object->vo_ledger_tag) {
1929 		owner = TASK_NULL;
1930 	} else {
1931 		owner = VM_OBJECT_OWNER(object);
1932 		vm_object_ledger_tag_ledgers(object,
1933 		    &ledger_idx_volatile,
1934 		    &ledger_idx_nonvolatile,
1935 		    &ledger_idx_volatile_compressed,
1936 		    &ledger_idx_nonvolatile_compressed,
1937 		    &ledger_idx_composite,
1938 		    &ledger_idx_external_wired,
1939 		    &do_footprint);
1940 	}
1941 	if (owner &&
1942 	    object->internal &&
1943 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
1944 	    object->purgable == VM_PURGABLE_DENY ||
1945 	    VM_PAGE_WIRED(mem))) {
1946 		if (delayed_ledger_update) {
1947 			*delayed_ledger_update += PAGE_SIZE;
1948 		} else {
1949 			/* more non-volatile bytes */
1950 			ledger_credit(owner->ledger,
1951 			    ledger_idx_nonvolatile,
1952 			    PAGE_SIZE);
1953 			if (do_footprint) {
1954 				/* more footprint */
1955 				ledger_credit(owner->ledger,
1956 				    task_ledgers.phys_footprint,
1957 				    PAGE_SIZE);
1958 			} else if (ledger_idx_composite != -1) {
1959 				ledger_credit(owner->ledger,
1960 				    ledger_idx_composite,
1961 				    PAGE_SIZE);
1962 			}
1963 		}
1964 	} else if (owner &&
1965 	    object->internal &&
1966 	    (object->purgable == VM_PURGABLE_VOLATILE ||
1967 	    object->purgable == VM_PURGABLE_EMPTY)) {
1968 		assert(!VM_PAGE_WIRED(mem));
1969 		/* more volatile bytes */
1970 		ledger_credit(owner->ledger,
1971 		    ledger_idx_volatile,
1972 		    PAGE_SIZE);
1973 	}
1974 
1975 	if (object->purgable == VM_PURGABLE_VOLATILE) {
1976 		if (VM_PAGE_WIRED(mem)) {
1977 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1978 		} else {
1979 			OSAddAtomic(+1, &vm_page_purgeable_count);
1980 		}
1981 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
1982 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1983 		/*
1984 		 * This page belongs to a purged VM object but hasn't
1985 		 * been purged (because it was "busy").
1986 		 * It's in the "throttled" queue and hence not
1987 		 * visible to vm_pageout_scan().  Move it to a pageable
1988 		 * queue, so that it can eventually be reclaimed, instead
1989 		 * of lingering in the "empty" object.
1990 		 */
1991 		if (queues_lock_held == FALSE) {
1992 			vm_page_lockspin_queues();
1993 		}
1994 		vm_page_deactivate(mem);
1995 		if (queues_lock_held == FALSE) {
1996 			vm_page_unlock_queues();
1997 		}
1998 	}
1999 
2000 #if VM_OBJECT_TRACKING_OP_MODIFIED
2001 	if (vm_object_tracking_btlog &&
2002 	    object->internal &&
2003 	    object->resident_page_count == 0 &&
2004 	    object->pager == NULL &&
2005 	    object->shadow != NULL &&
2006 	    object->shadow->vo_copy == object) {
2007 		btlog_record(vm_object_tracking_btlog, object,
2008 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2009 		    btref_get(__builtin_frame_address(0), 0));
2010 	}
2011 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2012 }
2013 
2014 /*
2015  *	vm_page_replace:
2016  *
2017  *	Exactly like vm_page_insert, except that we first
2018  *	remove any existing page at the given offset in object.
2019  *
2020  *	The object must be locked.
2021  */
2022 void
2023 vm_page_replace(
2024 	vm_page_t               mem,
2025 	vm_object_t             object,
2026 	vm_object_offset_t      offset)
2027 {
2028 	vm_page_bucket_t *bucket;
2029 	vm_page_t        found_m = VM_PAGE_NULL;
2030 	lck_spin_t      *bucket_lock;
2031 	int             hash_id;
2032 
2033 #if 0
2034 	/*
2035 	 * we don't hold the page queue lock
2036 	 * so this check isn't safe to make
2037 	 */
2038 	VM_PAGE_CHECK(mem);
2039 #endif
2040 	vm_object_lock_assert_exclusive(object);
2041 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2042 	if (mem->vmp_tabled || mem->vmp_object) {
2043 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2044 		    "already in (obj=%p,off=0x%llx)",
2045 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2046 	}
2047 #endif
2048 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2049 
2050 	assert(!VM_PAGE_PAGEABLE(mem));
2051 
2052 	/*
2053 	 *	Record the object/offset pair in this page
2054 	 */
2055 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2056 	mem->vmp_offset = offset;
2057 
2058 	/*
2059 	 *	Insert it into the object_object/offset hash table,
2060 	 *	replacing any page that might have been there.
2061 	 */
2062 
2063 	hash_id = vm_page_hash(object, offset);
2064 	bucket = &vm_page_buckets[hash_id];
2065 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2066 
2067 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2068 
2069 	if (bucket->page_list) {
2070 		vm_page_packed_t *mp = &bucket->page_list;
2071 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2072 
2073 		do {
2074 			/*
2075 			 * compare packed object pointers
2076 			 */
2077 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2078 				/*
2079 				 * Remove old page from hash list
2080 				 */
2081 				*mp = m->vmp_next_m;
2082 				m->vmp_hashed = FALSE;
2083 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2084 
2085 				found_m = m;
2086 				break;
2087 			}
2088 			mp = &m->vmp_next_m;
2089 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2090 
2091 		mem->vmp_next_m = bucket->page_list;
2092 	} else {
2093 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2094 	}
2095 	/*
2096 	 * insert new page at head of hash list
2097 	 */
2098 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2099 	mem->vmp_hashed = TRUE;
2100 
2101 	lck_spin_unlock(bucket_lock);
2102 
2103 	if (found_m) {
2104 		/*
2105 		 * there was already a page at the specified
2106 		 * offset for this object... remove it from
2107 		 * the object and free it back to the free list
2108 		 */
2109 		vm_page_free_unlocked(found_m, FALSE);
2110 	}
2111 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2112 }
2113 
2114 /*
2115  *	vm_page_remove:		[ internal use only ]
2116  *
2117  *	Removes the given mem entry from the object/offset-page
2118  *	table and the object page list.
2119  *
2120  *	The object must be locked.
2121  */
2122 
2123 void
2124 vm_page_remove(
2125 	vm_page_t       mem,
2126 	boolean_t       remove_from_hash)
2127 {
2128 	vm_page_bucket_t *bucket;
2129 	vm_page_t       this;
2130 	lck_spin_t      *bucket_lock;
2131 	int             hash_id;
2132 	task_t          owner;
2133 	vm_object_t     m_object;
2134 	int             ledger_idx_volatile;
2135 	int             ledger_idx_nonvolatile;
2136 	int             ledger_idx_volatile_compressed;
2137 	int             ledger_idx_nonvolatile_compressed;
2138 	int             ledger_idx_composite;
2139 	int             ledger_idx_external_wired;
2140 	int             do_footprint;
2141 
2142 	m_object = VM_PAGE_OBJECT(mem);
2143 
2144 	vm_object_lock_assert_exclusive(m_object);
2145 	assert(mem->vmp_tabled);
2146 	assert(!mem->vmp_cleaning);
2147 	assert(!mem->vmp_laundry);
2148 
2149 	if (VM_PAGE_PAGEABLE(mem)) {
2150 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2151 	}
2152 #if 0
2153 	/*
2154 	 * we don't hold the page queue lock
2155 	 * so this check isn't safe to make
2156 	 */
2157 	VM_PAGE_CHECK(mem);
2158 #endif
2159 	if (remove_from_hash == TRUE) {
2160 		/*
2161 		 *	Remove from the object_object/offset hash table
2162 		 */
2163 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2164 		bucket = &vm_page_buckets[hash_id];
2165 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2166 
2167 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2168 
2169 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2170 			/* optimize for common case */
2171 
2172 			bucket->page_list = mem->vmp_next_m;
2173 		} else {
2174 			vm_page_packed_t        *prev;
2175 
2176 			for (prev = &this->vmp_next_m;
2177 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2178 			    prev = &this->vmp_next_m) {
2179 				continue;
2180 			}
2181 			*prev = this->vmp_next_m;
2182 		}
2183 #if     MACH_PAGE_HASH_STATS
2184 		bucket->cur_count--;
2185 #endif /* MACH_PAGE_HASH_STATS */
2186 		mem->vmp_hashed = FALSE;
2187 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2188 		lck_spin_unlock(bucket_lock);
2189 	}
2190 	/*
2191 	 *	Now remove from the object's list of backed pages.
2192 	 */
2193 
2194 	vm_page_remove_internal(mem);
2195 
2196 	/*
2197 	 *	And show that the object has one fewer resident
2198 	 *	page.
2199 	 */
2200 
2201 	assert(m_object->resident_page_count > 0);
2202 	m_object->resident_page_count--;
2203 
2204 #if DEVELOPMENT || DEBUG
2205 	if (m_object->object_is_shared_cache &&
2206 	    m_object->pager != NULL &&
2207 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2208 		assert(!m_object->internal);
2209 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
2210 	}
2211 #endif /* DEVELOPMENT || DEBUG */
2212 
2213 	if (m_object->internal) {
2214 #if DEBUG
2215 		assert(vm_page_internal_count);
2216 #endif /* DEBUG */
2217 
2218 		OSAddAtomic(-1, &vm_page_internal_count);
2219 	} else {
2220 		assert(vm_page_external_count);
2221 		OSAddAtomic(-1, &vm_page_external_count);
2222 
2223 		if (mem->vmp_xpmapped) {
2224 			assert(vm_page_xpmapped_external_count);
2225 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2226 		}
2227 	}
2228 	if (!m_object->internal &&
2229 	    m_object->cached_list.next &&
2230 	    m_object->cached_list.prev) {
2231 		if (m_object->resident_page_count == 0) {
2232 			vm_object_cache_remove(m_object);
2233 		}
2234 	}
2235 
2236 	if (VM_PAGE_WIRED(mem)) {
2237 		assert(mem->vmp_wire_count > 0);
2238 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2239 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2240 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2241 	}
2242 	assert(m_object->resident_page_count >=
2243 	    m_object->wired_page_count);
2244 	if (mem->vmp_reusable) {
2245 		assert(m_object->reusable_page_count > 0);
2246 		m_object->reusable_page_count--;
2247 		assert(m_object->reusable_page_count <=
2248 		    m_object->resident_page_count);
2249 		mem->vmp_reusable = FALSE;
2250 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2251 		vm_page_stats_reusable.reused_remove++;
2252 	} else if (m_object->all_reusable) {
2253 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2254 		vm_page_stats_reusable.reused_remove++;
2255 	}
2256 
2257 	if (m_object->purgable == VM_PURGABLE_DENY &&
2258 	    !m_object->vo_ledger_tag) {
2259 		owner = TASK_NULL;
2260 	} else {
2261 		owner = VM_OBJECT_OWNER(m_object);
2262 		vm_object_ledger_tag_ledgers(m_object,
2263 		    &ledger_idx_volatile,
2264 		    &ledger_idx_nonvolatile,
2265 		    &ledger_idx_volatile_compressed,
2266 		    &ledger_idx_nonvolatile_compressed,
2267 		    &ledger_idx_composite,
2268 		    &ledger_idx_external_wired,
2269 		    &do_footprint);
2270 	}
2271 	if (owner &&
2272 	    m_object->internal &&
2273 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2274 	    m_object->purgable == VM_PURGABLE_DENY ||
2275 	    VM_PAGE_WIRED(mem))) {
2276 		/* less non-volatile bytes */
2277 		ledger_debit(owner->ledger,
2278 		    ledger_idx_nonvolatile,
2279 		    PAGE_SIZE);
2280 		if (do_footprint) {
2281 			/* less footprint */
2282 			ledger_debit(owner->ledger,
2283 			    task_ledgers.phys_footprint,
2284 			    PAGE_SIZE);
2285 		} else if (ledger_idx_composite != -1) {
2286 			ledger_debit(owner->ledger,
2287 			    ledger_idx_composite,
2288 			    PAGE_SIZE);
2289 		}
2290 	} else if (owner &&
2291 	    m_object->internal &&
2292 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
2293 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
2294 		assert(!VM_PAGE_WIRED(mem));
2295 		/* less volatile bytes */
2296 		ledger_debit(owner->ledger,
2297 		    ledger_idx_volatile,
2298 		    PAGE_SIZE);
2299 	}
2300 
2301 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2302 		if (VM_PAGE_WIRED(mem)) {
2303 			assert(vm_page_purgeable_wired_count > 0);
2304 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2305 		} else {
2306 			assert(vm_page_purgeable_count > 0);
2307 			OSAddAtomic(-1, &vm_page_purgeable_count);
2308 		}
2309 	}
2310 
2311 	if (m_object->set_cache_attr == TRUE) {
2312 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2313 	}
2314 
2315 	mem->vmp_tabled = FALSE;
2316 	mem->vmp_object = 0;
2317 	mem->vmp_offset = (vm_object_offset_t) -1;
2318 }
2319 
2320 
2321 /*
2322  *	vm_page_lookup:
2323  *
2324  *	Returns the page associated with the object/offset
2325  *	pair specified; if none is found, VM_PAGE_NULL is returned.
2326  *
2327  *	The object must be locked.  No side effects.
2328  */
2329 
2330 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
2331 
2332 #if DEBUG_VM_PAGE_LOOKUP
2333 
2334 struct {
2335 	uint64_t        vpl_total;
2336 	uint64_t        vpl_empty_obj;
2337 	uint64_t        vpl_bucket_NULL;
2338 	uint64_t        vpl_hit_hint;
2339 	uint64_t        vpl_hit_hint_next;
2340 	uint64_t        vpl_hit_hint_prev;
2341 	uint64_t        vpl_fast;
2342 	uint64_t        vpl_slow;
2343 	uint64_t        vpl_hit;
2344 	uint64_t        vpl_miss;
2345 
2346 	uint64_t        vpl_fast_elapsed;
2347 	uint64_t        vpl_slow_elapsed;
2348 } vm_page_lookup_stats __attribute__((aligned(8)));
2349 
2350 #endif
2351 
2352 #define KDP_VM_PAGE_WALK_MAX    1000
2353 
2354 vm_page_t
2355 kdp_vm_page_lookup(
2356 	vm_object_t             object,
2357 	vm_object_offset_t      offset)
2358 {
2359 	vm_page_t cur_page;
2360 	int num_traversed = 0;
2361 
2362 	if (not_in_kdp) {
2363 		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2364 	}
2365 
2366 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2367 		if (cur_page->vmp_offset == offset) {
2368 			return cur_page;
2369 		}
2370 		num_traversed++;
2371 
2372 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2373 			return VM_PAGE_NULL;
2374 		}
2375 	}
2376 
2377 	return VM_PAGE_NULL;
2378 }
2379 
2380 vm_page_t
2381 vm_page_lookup(
2382 	vm_object_t             object,
2383 	vm_object_offset_t      offset)
2384 {
2385 	vm_page_t       mem;
2386 	vm_page_bucket_t *bucket;
2387 	vm_page_queue_entry_t   qe;
2388 	lck_spin_t      *bucket_lock = NULL;
2389 	int             hash_id;
2390 #if DEBUG_VM_PAGE_LOOKUP
2391 	uint64_t        start, elapsed;
2392 
2393 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2394 #endif
2395 
2396 	if (VM_KERNEL_ADDRESS(offset)) {
2397 		offset = VM_KERNEL_STRIP_UPTR(offset);
2398 	}
2399 
2400 	vm_object_lock_assert_held(object);
2401 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2402 
2403 	if (object->resident_page_count == 0) {
2404 #if DEBUG_VM_PAGE_LOOKUP
2405 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2406 #endif
2407 		return VM_PAGE_NULL;
2408 	}
2409 
2410 	mem = object->memq_hint;
2411 
2412 	if (mem != VM_PAGE_NULL) {
2413 		assert(VM_PAGE_OBJECT(mem) == object);
2414 
2415 		if (mem->vmp_offset == offset) {
2416 #if DEBUG_VM_PAGE_LOOKUP
2417 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2418 #endif
2419 			return mem;
2420 		}
2421 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2422 
2423 		if (!vm_page_queue_end(&object->memq, qe)) {
2424 			vm_page_t       next_page;
2425 
2426 			next_page = (vm_page_t)((uintptr_t)qe);
2427 			assert(VM_PAGE_OBJECT(next_page) == object);
2428 
2429 			if (next_page->vmp_offset == offset) {
2430 				object->memq_hint = next_page; /* new hint */
2431 #if DEBUG_VM_PAGE_LOOKUP
2432 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2433 #endif
2434 				return next_page;
2435 			}
2436 		}
2437 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2438 
2439 		if (!vm_page_queue_end(&object->memq, qe)) {
2440 			vm_page_t prev_page;
2441 
2442 			prev_page = (vm_page_t)((uintptr_t)qe);
2443 			assert(VM_PAGE_OBJECT(prev_page) == object);
2444 
2445 			if (prev_page->vmp_offset == offset) {
2446 				object->memq_hint = prev_page; /* new hint */
2447 #if DEBUG_VM_PAGE_LOOKUP
2448 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2449 #endif
2450 				return prev_page;
2451 			}
2452 		}
2453 	}
2454 	/*
2455 	 * Search the hash table for this object/offset pair
2456 	 */
2457 	hash_id = vm_page_hash(object, offset);
2458 	bucket = &vm_page_buckets[hash_id];
2459 
2460 	/*
2461 	 * since we hold the object lock, we are guaranteed that no
2462 	 * new pages can be inserted into this object... this in turn
2463 	 * guarantees that the page we're looking for can't exist
2464 	 * if the bucket it hashes to is currently NULL even when looked
2465 	 * at outside the scope of the hash bucket lock... this is a
2466 	 * really cheap optimization to avoid taking the lock
2467 	 */
2468 	if (!bucket->page_list) {
2469 #if DEBUG_VM_PAGE_LOOKUP
2470 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2471 #endif
2472 		return VM_PAGE_NULL;
2473 	}
2474 
2475 #if DEBUG_VM_PAGE_LOOKUP
2476 	start = mach_absolute_time();
2477 #endif
2478 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2479 		/*
2480 		 * on average, it's roughly 3 times faster to run a short memq list
2481 		 * than to take the spin lock and go through the hash list
2482 		 */
2483 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
2484 
2485 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2486 			if (mem->vmp_offset == offset) {
2487 				break;
2488 			}
2489 
2490 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2491 		}
2492 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2493 			mem = NULL;
2494 		}
2495 	} else {
2496 		vm_page_object_t        packed_object;
2497 
2498 		packed_object = VM_PAGE_PACK_OBJECT(object);
2499 
2500 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2501 
2502 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2503 
2504 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2505 		    mem != VM_PAGE_NULL;
2506 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2507 #if 0
2508 			/*
2509 			 * we don't hold the page queue lock
2510 			 * so this check isn't safe to make
2511 			 */
2512 			VM_PAGE_CHECK(mem);
2513 #endif
2514 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2515 				break;
2516 			}
2517 		}
2518 		lck_spin_unlock(bucket_lock);
2519 	}
2520 
2521 #if DEBUG_VM_PAGE_LOOKUP
2522 	elapsed = mach_absolute_time() - start;
2523 
2524 	if (bucket_lock) {
2525 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2526 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2527 	} else {
2528 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2529 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2530 	}
2531 	if (mem != VM_PAGE_NULL) {
2532 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2533 	} else {
2534 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2535 	}
2536 #endif
2537 	if (mem != VM_PAGE_NULL) {
2538 		assert(VM_PAGE_OBJECT(mem) == object);
2539 
2540 		object->memq_hint = mem;
2541 	}
2542 	return mem;
2543 }
2544 
2545 
2546 /*
2547  *	vm_page_rename:
2548  *
2549  *	Move the given memory entry from its
2550  *	current object to the specified target object/offset.
2551  *
2552  *	The object must be locked.
2553  */
2554 void
2555 vm_page_rename(
2556 	vm_page_t               mem,
2557 	vm_object_t             new_object,
2558 	vm_object_offset_t      new_offset)
2559 {
2560 	boolean_t       internal_to_external, external_to_internal;
2561 	vm_tag_t        tag;
2562 	vm_object_t     m_object;
2563 
2564 	m_object = VM_PAGE_OBJECT(mem);
2565 
2566 	assert(m_object != new_object);
2567 	assert(m_object);
2568 
2569 	/*
2570 	 *	Changes to mem->vmp_object require the page lock because
2571 	 *	the pageout daemon uses that lock to get the object.
2572 	 */
2573 	vm_page_lockspin_queues();
2574 
2575 	internal_to_external = FALSE;
2576 	external_to_internal = FALSE;
2577 
2578 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2579 		/*
2580 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
2581 		 * if we first move the page to the active queue... it's going to end
2582 		 * up there anyway, and we don't call vm_page_rename() frequently enough
2583 		 * for this to matter.
2584 		 */
2585 		vm_page_queues_remove(mem, FALSE);
2586 		vm_page_activate(mem);
2587 	}
2588 	if (VM_PAGE_PAGEABLE(mem)) {
2589 		if (m_object->internal && !new_object->internal) {
2590 			internal_to_external = TRUE;
2591 		}
2592 		if (!m_object->internal && new_object->internal) {
2593 			external_to_internal = TRUE;
2594 		}
2595 	}
2596 
2597 	tag = m_object->wire_tag;
2598 	vm_page_remove(mem, TRUE);
2599 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2600 
2601 	if (internal_to_external) {
2602 		vm_page_pageable_internal_count--;
2603 		vm_page_pageable_external_count++;
2604 	} else if (external_to_internal) {
2605 		vm_page_pageable_external_count--;
2606 		vm_page_pageable_internal_count++;
2607 	}
2608 
2609 	vm_page_unlock_queues();
2610 }
2611 
2612 /*
2613  *	vm_page_init:
2614  *
2615  *	Initialize the fields in a new page.
2616  *	This takes a structure with random values and initializes it
2617  *	so that it can be given to vm_page_release or vm_page_insert.
2618  */
2619 void
2620 vm_page_init(
2621 	vm_page_t mem,
2622 	ppnum_t   phys_page,
2623 	boolean_t lopage)
2624 {
2625 	uint_t    i;
2626 	uintptr_t *p;
2627 
2628 	assert(phys_page);
2629 
2630 #if DEBUG
2631 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2632 		if (!(pmap_valid_page(phys_page))) {
2633 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2634 		}
2635 	}
2636 #endif /* DEBUG */
2637 
2638 	/*
2639 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2640 	 * try to use initial values which match 0. This minimizes the number of writes
2641 	 * needed for boot-time initialization.
2642 	 *
2643 	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2644 	 */
2645 	assert(VM_PAGE_NOT_ON_Q == 0);
2646 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2647 	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2648 		*p++ = 0;
2649 	}
2650 	mem->vmp_offset = (vm_object_offset_t)-1;
2651 	mem->vmp_busy = TRUE;
2652 	mem->vmp_lopage = lopage;
2653 
2654 	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2655 #if 0
2656 	/*
2657 	 * we're leaving this turned off for now... currently pages
2658 	 * come off the free list and are either immediately dirtied/referenced
2659 	 * due to zero-fill or COW faults, or are used to read or write files...
2660 	 * in the file I/O case, the UPL mechanism takes care of clearing
2661 	 * the state of the HW ref/mod bits in a somewhat fragile way.
2662 	 * Since we may change the way this works in the future (to toughen it up),
2663 	 * I'm leaving this as a reminder of where these bits could get cleared
2664 	 */
2665 
2666 	/*
2667 	 * make sure both the h/w referenced and modified bits are
2668 	 * clear at this point... we are especially dependent on
2669 	 * not finding a 'stale' h/w modified in a number of spots
2670 	 * once this page goes back into use
2671 	 */
2672 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2673 #endif
2674 }
2675 
2676 /*
2677  *	vm_page_grab_fictitious:
2678  *
2679  *	Remove a fictitious page from the free list.
2680  *	Returns VM_PAGE_NULL if there are no free pages.
2681  */
2682 
2683 static vm_page_t
2684 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2685 {
2686 	vm_page_t m;
2687 
2688 	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2689 	if (m) {
2690 		vm_page_init(m, phys_addr, FALSE);
2691 		m->vmp_fictitious = TRUE;
2692 	}
2693 	return m;
2694 }
2695 
2696 vm_page_t
2697 vm_page_grab_fictitious(boolean_t canwait)
2698 {
2699 	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2700 }
2701 
2702 int vm_guard_count;
2703 
2704 
2705 vm_page_t
2706 vm_page_grab_guard(boolean_t canwait)
2707 {
2708 	vm_page_t page;
2709 	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2710 	if (page) {
2711 		OSAddAtomic(1, &vm_guard_count);
2712 	}
2713 	return page;
2714 }
2715 
2716 
2717 /*
2718  *	vm_page_release_fictitious:
2719  *
2720  *	Release a fictitious page to the zone pool
2721  */
2722 void
2723 vm_page_release_fictitious(
2724 	vm_page_t m)
2725 {
2726 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2727 	assert(m->vmp_fictitious);
2728 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2729 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2730 	assert(!m->vmp_realtime);
2731 
2732 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2733 		OSAddAtomic(-1, &vm_guard_count);
2734 	}
2735 
2736 	zfree(vm_page_zone, m);
2737 }
2738 
2739 /*
2740  *	vm_pool_low():
2741  *
2742  *	Return true if it is not likely that a non-vm_privileged thread
2743  *	can get memory without blocking.  Advisory only, since the
2744  *	situation may change under us.
2745  */
2746 bool
2747 vm_pool_low(void)
2748 {
2749 	/* No locking, at worst we will fib. */
2750 	return vm_page_free_count <= vm_page_free_reserved;
2751 }
2752 
2753 boolean_t vm_darkwake_mode = FALSE;
2754 
2755 /*
2756  * vm_update_darkwake_mode():
2757  *
2758  * Tells the VM that the system is in / out of darkwake.
2759  *
2760  * Today, the VM only lowers/raises the background queue target
2761  * so as to favor consuming more/less background pages when
2762  * darkwake is ON/OFF.
2763  *
2764  * We might need to do more things in the future.
2765  */
2766 
2767 void
2768 vm_update_darkwake_mode(boolean_t darkwake_mode)
2769 {
2770 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2771 #pragma unused(darkwake_mode)
2772 	assert(vm_darkwake_mode == FALSE);
2773 	/*
2774 	 * Darkwake mode isn't supported on Apple Silicon macOS.
2775 	 */
2776 	return;
2777 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2778 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2779 
2780 	vm_page_lockspin_queues();
2781 
2782 	if (vm_darkwake_mode == darkwake_mode) {
2783 		/*
2784 		 * No change.
2785 		 */
2786 		vm_page_unlock_queues();
2787 		return;
2788 	}
2789 
2790 	vm_darkwake_mode = darkwake_mode;
2791 
2792 	if (vm_darkwake_mode == TRUE) {
2793 		/* save background target to restore later */
2794 		vm_page_background_target_snapshot = vm_page_background_target;
2795 
2796 		/* target is set to 0...no protection for background pages */
2797 		vm_page_background_target = 0;
2798 	} else if (vm_darkwake_mode == FALSE) {
2799 		if (vm_page_background_target_snapshot) {
2800 			vm_page_background_target = vm_page_background_target_snapshot;
2801 		}
2802 	}
2803 	vm_page_unlock_queues();
2804 #endif
2805 }
2806 
2807 void
2808 vm_page_update_special_state(vm_page_t mem)
2809 {
2810 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2811 		return;
2812 	}
2813 
2814 	int mode = mem->vmp_on_specialq;
2815 
2816 	switch (mode) {
2817 	case VM_PAGE_SPECIAL_Q_BG:
2818 	{
2819 		task_t  my_task = current_task_early();
2820 
2821 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2822 			return;
2823 		}
2824 
2825 		if (my_task) {
2826 			if (task_get_darkwake_mode(my_task)) {
2827 				return;
2828 			}
2829 		}
2830 
2831 		if (my_task) {
2832 			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2833 				return;
2834 			}
2835 		}
2836 		vm_page_lockspin_queues();
2837 
2838 		vm_page_background_promoted_count++;
2839 
2840 		vm_page_remove_from_specialq(mem);
2841 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2842 
2843 		vm_page_unlock_queues();
2844 		break;
2845 	}
2846 
2847 	case VM_PAGE_SPECIAL_Q_DONATE:
2848 	{
2849 		task_t  my_task = current_task_early();
2850 
2851 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2852 			return;
2853 		}
2854 
2855 		if (my_task->donates_own_pages == false) {
2856 			vm_page_lockspin_queues();
2857 
2858 			vm_page_remove_from_specialq(mem);
2859 			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2860 
2861 			vm_page_unlock_queues();
2862 		}
2863 		break;
2864 	}
2865 
2866 	default:
2867 	{
2868 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2869 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2870 		break;
2871 	}
2872 	}
2873 }
2874 
2875 
2876 void
2877 vm_page_assign_special_state(vm_page_t mem, int mode)
2878 {
2879 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2880 		return;
2881 	}
2882 
2883 	switch (mode) {
2884 	case VM_PAGE_SPECIAL_Q_BG:
2885 	{
2886 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2887 			return;
2888 		}
2889 
2890 		task_t  my_task = current_task_early();
2891 
2892 		if (my_task) {
2893 			if (task_get_darkwake_mode(my_task)) {
2894 				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2895 				return;
2896 			}
2897 		}
2898 
2899 		if (my_task) {
2900 			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2901 		}
2902 		break;
2903 	}
2904 
2905 	case VM_PAGE_SPECIAL_Q_DONATE:
2906 	{
2907 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2908 			return;
2909 		}
2910 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2911 		break;
2912 	}
2913 
2914 	default:
2915 		break;
2916 	}
2917 }
2918 
2919 
2920 void
2921 vm_page_remove_from_specialq(
2922 	vm_page_t       mem)
2923 {
2924 	vm_object_t     m_object;
2925 	unsigned short  mode;
2926 
2927 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2928 
2929 	mode = mem->vmp_on_specialq;
2930 
2931 	switch (mode) {
2932 	case VM_PAGE_SPECIAL_Q_BG:
2933 	{
2934 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2935 			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2936 
2937 			mem->vmp_specialq.next = 0;
2938 			mem->vmp_specialq.prev = 0;
2939 
2940 			vm_page_background_count--;
2941 
2942 			m_object = VM_PAGE_OBJECT(mem);
2943 
2944 			if (m_object->internal) {
2945 				vm_page_background_internal_count--;
2946 			} else {
2947 				vm_page_background_external_count--;
2948 			}
2949 		}
2950 		break;
2951 	}
2952 
2953 	case VM_PAGE_SPECIAL_Q_DONATE:
2954 	{
2955 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2956 			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2957 			mem->vmp_specialq.next = 0;
2958 			mem->vmp_specialq.prev = 0;
2959 			vm_page_donate_count--;
2960 			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2961 				assert(vm_page_donate_target == vm_page_donate_target_low);
2962 				vm_page_donate_target = vm_page_donate_target_high;
2963 				vm_page_donate_queue_ripe = false;
2964 			}
2965 		}
2966 
2967 		break;
2968 	}
2969 
2970 	default:
2971 	{
2972 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2973 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2974 		break;
2975 	}
2976 	}
2977 }
2978 
2979 
2980 void
2981 vm_page_add_to_specialq(
2982 	vm_page_t       mem,
2983 	boolean_t       first)
2984 {
2985 	vm_object_t     m_object;
2986 
2987 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2988 
2989 	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2990 		return;
2991 	}
2992 
2993 	int mode = mem->vmp_on_specialq;
2994 
2995 	switch (mode) {
2996 	case VM_PAGE_SPECIAL_Q_BG:
2997 	{
2998 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2999 			return;
3000 		}
3001 
3002 		m_object = VM_PAGE_OBJECT(mem);
3003 
3004 		if (vm_page_background_exclude_external && !m_object->internal) {
3005 			return;
3006 		}
3007 
3008 		if (first == TRUE) {
3009 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
3010 		} else {
3011 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
3012 		}
3013 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3014 
3015 		vm_page_background_count++;
3016 
3017 		if (m_object->internal) {
3018 			vm_page_background_internal_count++;
3019 		} else {
3020 			vm_page_background_external_count++;
3021 		}
3022 		break;
3023 	}
3024 
3025 	case VM_PAGE_SPECIAL_Q_DONATE:
3026 	{
3027 		if (first == TRUE) {
3028 			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3029 		} else {
3030 			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3031 		}
3032 		vm_page_donate_count++;
3033 		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3034 			assert(vm_page_donate_target == vm_page_donate_target_high);
3035 			vm_page_donate_target = vm_page_donate_target_low;
3036 			vm_page_donate_queue_ripe = true;
3037 		}
3038 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3039 		break;
3040 	}
3041 
3042 	default:
3043 		break;
3044 	}
3045 }
3046 
3047 /*
3048  * This can be switched to FALSE to help debug drivers
3049  * that are having problems with memory > 4G.
3050  */
3051 boolean_t       vm_himemory_mode = TRUE;
3052 
3053 /*
3054  * this interface exists to support hardware controllers
3055  * incapable of generating DMAs with more than 32 bits
3056  * of address on platforms with physical memory > 4G...
3057  */
3058 unsigned int    vm_lopages_allocated_q = 0;
3059 unsigned int    vm_lopages_allocated_cpm_success = 0;
3060 unsigned int    vm_lopages_allocated_cpm_failed = 0;
3061 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3062 
3063 vm_page_t
3064 vm_page_grablo(void)
3065 {
3066 	vm_page_t       mem;
3067 
3068 	if (vm_lopage_needed == FALSE) {
3069 		return vm_page_grab();
3070 	}
3071 
3072 	vm_free_page_lock_spin();
3073 
3074 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3075 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3076 		assert(vm_lopage_free_count);
3077 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3078 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3079 
3080 		vm_lopage_free_count--;
3081 		vm_lopages_allocated_q++;
3082 
3083 		if (vm_lopage_free_count < vm_lopage_lowater) {
3084 			vm_lopage_refill = TRUE;
3085 		}
3086 
3087 		vm_free_page_unlock();
3088 
3089 		if (current_task()->donates_own_pages) {
3090 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3091 		} else {
3092 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3093 		}
3094 	} else {
3095 		vm_free_page_unlock();
3096 
3097 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3098 			vm_free_page_lock_spin();
3099 			vm_lopages_allocated_cpm_failed++;
3100 			vm_free_page_unlock();
3101 
3102 			return VM_PAGE_NULL;
3103 		}
3104 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3105 
3106 		mem->vmp_busy = TRUE;
3107 
3108 		vm_page_lockspin_queues();
3109 
3110 		mem->vmp_gobbled = FALSE;
3111 		vm_page_gobble_count--;
3112 		vm_page_wire_count--;
3113 
3114 		vm_lopages_allocated_cpm_success++;
3115 		vm_page_unlock_queues();
3116 	}
3117 	assert(mem->vmp_busy);
3118 	assert(!mem->vmp_pmapped);
3119 	assert(!mem->vmp_wpmapped);
3120 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3121 
3122 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3123 
3124 	counter_inc(&vm_page_grab_count);
3125 	VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3126 
3127 	return mem;
3128 }
3129 
3130 /*
3131  *	vm_page_grab:
3132  *
3133  *	first try to grab a page from the per-cpu free list...
3134  *	this must be done while pre-emption is disabled... if
3135  *      a page is available, we're done...
3136  *	if no page is available, grab the vm_page_queue_free_lock
3137  *	and see if current number of free pages would allow us
3138  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3139  *	if there are pages available, disable preemption and
3140  *      recheck the state of the per-cpu free list... we could
3141  *	have been preempted and moved to a different cpu, or
3142  *      some other thread could have re-filled it... if still
3143  *	empty, figure out how many pages we can steal from the
3144  *	global free queue and move to the per-cpu queue...
3145  *	return 1 of these pages when done... only wakeup the
3146  *      pageout_scan thread if we moved pages from the global
3147  *	list... no need for the wakeup if we've satisfied the
3148  *	request from the per-cpu queue.
3149  */
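/*
 * Editor's note: a condensed sketch of the fast path described above, as
 * implemented by vm_page_grab_options() below:
 *
 *	disable_preemption();
 *	if ((mem = *PERCPU_GET(free_pages))) {
 *		// per-cpu hit: pop the page, account for it, return it
 *	}
 *	enable_preemption();
 *	// otherwise refill the per-cpu list from vm_page_queue_free
 *	// under the free-page lock, then restart
 */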
3150 
3151 #if CONFIG_SECLUDED_MEMORY
3152 vm_page_t vm_page_grab_secluded(void);
3153 #endif /* CONFIG_SECLUDED_MEMORY */
3154 
3155 static inline void
3156 vm_page_grab_diags(void);
3157 
3158 /*
3159  *	vm_page_validate_no_references:
3160  *
3161  *	Make sure the physical page has no refcounts.
3162  *
3163  */
3164 static inline void
3165 vm_page_validate_no_references(
3166 	vm_page_t       mem)
3167 {
3168 	bool is_freed;
3169 
3170 	if (mem->vmp_fictitious) {
3171 		return;
3172 	}
3173 
3174 	pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
3175 
3176 #if CONFIG_SPTM
3177 	is_freed = pmap_is_page_free(paddr);
3178 #else
3179 	is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
3180 #endif /* CONFIG_SPTM */
3181 
3182 	if (!is_freed) {
3183 		/*
3184 		 * There is a redundancy here, but we are going to panic anyways,
3185 		 * There is a redundancy here, but we are going to panic anyway,
3186 		 * behavior.
3187 		 */
3188 		ASSERT_PMAP_FREE(mem);
3189 		panic("%s: page 0x%llx is referenced", __func__, paddr);
3190 	}
3191 }
3192 
3193 vm_page_t
3194 vm_page_grab(void)
3195 {
3196 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3197 }
3198 
3199 #if HIBERNATION
3200 boolean_t       hibernate_rebuild_needed = FALSE;
3201 #endif /* HIBERNATION */
3202 
3203 static void
3204 vm_page_finalize_grabed_page(vm_page_t mem)
3205 {
3206 	task_t cur_task = current_task_early();
3207 	if (cur_task && cur_task != kernel_task) {
3208 		/* tag:DONATE this is where the donate state of the page is decided according to what task grabs it */
3209 		if (cur_task->donates_own_pages) {
3210 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3211 		} else {
3212 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3213 		}
3214 	}
3215 }
3216 
3217 vm_page_t
3218 vm_page_grab_options(
3219 	int grab_options)
3220 {
3221 	vm_page_t       mem;
3222 
3223 restart:
3224 	disable_preemption();
3225 
3226 	if ((mem = *PERCPU_GET(free_pages))) {
3227 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3228 
3229 #if HIBERNATION
3230 		if (hibernate_rebuild_needed) {
3231 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3232 		}
3233 #endif /* HIBERNATION */
3234 
3235 		vm_page_grab_diags();
3236 
3237 		vm_offset_t pcpu_base = current_percpu_base();
3238 		counter_inc_preemption_disabled(&vm_page_grab_count);
3239 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3240 		VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3241 
3242 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3243 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3244 		enable_preemption();
3245 
3246 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3247 		assert(mem->vmp_tabled == FALSE);
3248 		assert(mem->vmp_object == 0);
3249 		assert(!mem->vmp_laundry);
3250 		assert(mem->vmp_busy);
3251 		assert(!mem->vmp_pmapped);
3252 		assert(!mem->vmp_wpmapped);
3253 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3254 		assert(!mem->vmp_realtime);
3255 
3256 		vm_page_validate_no_references(mem);
3257 		vm_page_finalize_grabed_page(mem);
3258 		return mem;
3259 	}
3260 	enable_preemption();
3261 
3262 
3263 	/*
3264 	 *	Optionally produce warnings if the wire or gobble
3265 	 *	counts exceed some threshold.
3266 	 */
3267 #if VM_PAGE_WIRE_COUNT_WARNING
3268 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3269 		printf("mk: vm_page_grab(): high wired page count of %d\n",
3270 		    vm_page_wire_count);
3271 	}
3272 #endif
3273 #if VM_PAGE_GOBBLE_COUNT_WARNING
3274 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3275 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3276 		    vm_page_gobble_count);
3277 	}
3278 #endif
3279 
3280 	/*
3281 	 * If free count is low and we have delayed pages from early boot,
3282 	 * get one of those instead.
3283 	 */
3284 	if (__improbable(vm_delayed_count > 0 &&
3285 	    vm_page_free_count <= vm_page_free_target &&
3286 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3287 		assert(!mem->vmp_realtime);
3288 		// TODO: missing vm_page_finalize_grabed_page()?
3289 		return mem;
3290 	}
3291 
3292 	vm_free_page_lock_spin();
3293 
3294 	/*
3295 	 *	Only let privileged threads (involved in pageout)
3296 	 *	dip into the reserved pool.
3297 	 */
3298 	if ((vm_page_free_count < vm_page_free_reserved) &&
3299 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
3300 		/* no page for us in the free queue... */
3301 		vm_free_page_unlock();
3302 		mem = VM_PAGE_NULL;
3303 
3304 #if CONFIG_SECLUDED_MEMORY
3305 		/* ... but can we try and grab from the secluded queue? */
3306 		if (vm_page_secluded_count > 0 &&
3307 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3308 		    task_can_use_secluded_mem(current_task(), TRUE))) {
3309 			mem = vm_page_grab_secluded();
3310 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3311 				vm_page_secluded.grab_for_iokit++;
3312 				if (mem) {
3313 					vm_page_secluded.grab_for_iokit_success++;
3314 				}
3315 			}
3316 			if (mem) {
3317 				VM_CHECK_MEMORYSTATUS;
3318 
3319 				vm_page_grab_diags();
3320 				counter_inc(&vm_page_grab_count);
3321 				VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3322 
3323 				assert(!mem->vmp_realtime);
3324 				// TODO: missing vm_page_finalize_grabed_page()?
3325 				return mem;
3326 			}
3327 		}
3328 #else /* CONFIG_SECLUDED_MEMORY */
3329 		(void) grab_options;
3330 #endif /* CONFIG_SECLUDED_MEMORY */
3331 	} else {
3332 		vm_page_t        head;
3333 		vm_page_t        tail;
3334 		unsigned int     pages_to_steal;
3335 		unsigned int     color;
3336 		unsigned int clump_end, sub_count;
3337 
3338 		/*
3339 		 * Replenishing our per-CPU cache of free pages might take
3340 		 * too long to keep holding the "free_page" lock as a spinlock,
3341 		 * so convert to the full mutex to prevent other threads trying
3342 		 * to acquire the "free_page" lock from timing out spinning on
3343 		 * the mutex interlock.
3344 		 */
3345 		vm_free_page_lock_convert();
3346 
3347 		while (vm_page_free_count == 0) {
3348 			vm_free_page_unlock();
3349 			/*
3350 			 * must be a privileged thread to be
3351 			 * in this state since a non-privileged
3352 			 * thread would have bailed if we were
3353 			 * under the vm_page_free_reserved mark
3354 			 */
3355 			VM_PAGE_WAIT();
3356 			vm_free_page_lock();
3357 		}
3358 
3359 		/*
3360 		 * Need to repopulate the per-CPU free list from the global free list.
3361 		 * Note we don't do any processing of pending retirement pages here.
3362 		 * That'll happen in the code above when the page comes off the per-CPU list.
3363 		 */
3364 		disable_preemption();
3365 
3366 		/*
3367 		 * If we got preempted the cache might now have pages.
3368 		 * If we got preempted, the cache might now have pages.
3369 		if ((mem = *PERCPU_GET(free_pages))) {
3370 			vm_free_page_unlock();
3371 			enable_preemption();
3372 			goto restart;
3373 		}
3374 
3375 		if (vm_page_free_count <= vm_page_free_reserved) {
3376 			pages_to_steal = 1;
3377 		} else {
3378 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3379 				pages_to_steal = vm_free_magazine_refill_limit;
3380 			} else {
3381 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3382 			}
3383 		}
3384 		color = *PERCPU_GET(start_color);
3385 		head = tail = NULL;
3386 
3387 		vm_page_free_count -= pages_to_steal;
3388 		clump_end = sub_count = 0;
3389 
3390 		while (pages_to_steal--) {
3391 			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3392 				color = (color + 1) & vm_color_mask;
3393 			}
3394 #if defined(__x86_64__)
3395 			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3396 			    mem, clump_end);
3397 #else
3398 			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3399 			    mem, vmp_pageq);
3400 #endif
3401 
3402 			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3403 
3404 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3405 
3406 #if defined(__arm64__)
3407 			color = (color + 1) & vm_color_mask;
3408 #else
3409 
3410 #if DEVELOPMENT || DEBUG
3411 
3412 			sub_count++;
3413 			if (clump_end) {
3414 				vm_clump_update_stats(sub_count);
3415 				sub_count = 0;
3416 				color = (color + 1) & vm_color_mask;
3417 			}
3418 #else
3419 			if (clump_end) {
3420 				color = (color + 1) & vm_color_mask;
3421 			}
3422 
3423 #endif /* if DEVELOPMENT || DEBUG */
3424 
3425 #endif  /* if defined(__arm64__) */
3426 
3427 			if (head == NULL) {
3428 				head = mem;
3429 			} else {
3430 				tail->vmp_snext = mem;
3431 			}
3432 			tail = mem;
3433 
3434 			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3435 			assert(mem->vmp_tabled == FALSE);
3436 			assert(mem->vmp_object == 0);
3437 			assert(!mem->vmp_laundry);
3438 
3439 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3440 
3441 			assert(mem->vmp_busy);
3442 			assert(!mem->vmp_pmapped);
3443 			assert(!mem->vmp_wpmapped);
3444 			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3445 			assert(!mem->vmp_realtime);
3446 
3447 			vm_page_validate_no_references(mem);
3448 		}
3449 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3450 		vm_clump_update_stats(sub_count);
3451 #endif
3452 
3453 #if HIBERNATION
3454 		if (hibernate_rebuild_needed) {
3455 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3456 		}
3457 #endif /* HIBERNATION */
3458 		vm_offset_t pcpu_base = current_percpu_base();
3459 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3460 		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3461 
3462 		vm_free_page_unlock();
3463 		enable_preemption();
3464 		goto restart;
3465 	}
3466 
3467 	/*
3468 	 *	Decide if we should poke the pageout daemon.
3469 	 *	We do this if the free count is less than the low
3470 	 *	water mark. VM Pageout Scan will keep running till
3471 	 *	the free_count > free_target (& hence above free_min).
3472 	 *	This wakeup is to catch the possibility of the counts
3473 	 *	dropping between VM Pageout Scan parking and this check.
3474 	 *
3475 	 *	We don't have the counts locked ... if they change a little,
3476 	 *	it doesn't really matter.
3477 	 */
3478 	if (vm_page_free_count < vm_page_free_min) {
3479 		vm_free_page_lock();
3480 		if (vm_pageout_running == FALSE) {
3481 			vm_free_page_unlock();
3482 			thread_wakeup((event_t) &vm_page_free_wanted);
3483 		} else {
3484 			vm_free_page_unlock();
3485 		}
3486 	}
3487 
3488 	VM_CHECK_MEMORYSTATUS;
3489 
3490 	if (mem) {
3491 		assert(!mem->vmp_realtime);
3492 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
3493 
3494 		vm_page_finalize_grabed_page(mem);
3495 	}
3496 	return mem;
3497 }
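#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Shape of the per-CPU free-page cache refilled above: pages are chained
 * through vmp_snext and the head of the chain lives in the per-CPU
 * "free_pages" slot.  A consumer pops the head with preemption disabled,
 * roughly as in this hypothetical helper (the helper name and the exact
 * locking context are assumptions, not the kernel's actual consumer path):
 */
static vm_page_t
example_pop_percpu_free_page(void)
{
	vm_page_t mem;

	disable_preemption();
	if ((mem = *PERCPU_GET(free_pages)) != VM_PAGE_NULL) {
		*PERCPU_GET(free_pages) = mem->vmp_snext;
		mem->vmp_snext = NULL;
	}
	enable_preemption();
	return mem;
}
#endif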
3498 
3499 #if CONFIG_SECLUDED_MEMORY
3500 vm_page_t
3501 vm_page_grab_secluded(void)
3502 {
3503 	vm_page_t       mem;
3504 	vm_object_t     object;
3505 	int             refmod_state;
3506 
3507 	if (vm_page_secluded_count == 0) {
3508 		/* no secluded pages to grab... */
3509 		return VM_PAGE_NULL;
3510 	}
3511 
3512 	/* secluded queue is protected by the VM page queue lock */
3513 	vm_page_lock_queues();
3514 
3515 	if (vm_page_secluded_count == 0) {
3516 		/* no secluded pages to grab... */
3517 		vm_page_unlock_queues();
3518 		return VM_PAGE_NULL;
3519 	}
3520 
3521 #if 00
3522 	/* can we grab from the secluded queue? */
3523 	if (vm_page_secluded_count > vm_page_secluded_target ||
3524 	    (vm_page_secluded_count > 0 &&
3525 	    task_can_use_secluded_mem(current_task(), TRUE))) {
3526 		/* OK */
3527 	} else {
3528 		/* can't grab from secluded queue... */
3529 		vm_page_unlock_queues();
3530 		return VM_PAGE_NULL;
3531 	}
3532 #endif
3533 
3534 	/* we can grab a page from secluded queue! */
3535 	assert((vm_page_secluded_count_free +
3536 	    vm_page_secluded_count_inuse) ==
3537 	    vm_page_secluded_count);
3538 	if (current_task()->task_can_use_secluded_mem) {
3539 		assert(num_tasks_can_use_secluded_mem > 0);
3540 	}
3541 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3542 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3543 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3544 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3545 	vm_page_queues_remove(mem, TRUE);
3546 
3547 	object = VM_PAGE_OBJECT(mem);
3548 
3549 	assert(!mem->vmp_fictitious);
3550 	assert(!VM_PAGE_WIRED(mem));
3551 	if (object == VM_OBJECT_NULL) {
3552 		/* free for grab! */
3553 		vm_page_unlock_queues();
3554 		vm_page_secluded.grab_success_free++;
3555 
3556 		assert(mem->vmp_busy);
3557 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3558 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3559 		assert(mem->vmp_pageq.next == 0);
3560 		assert(mem->vmp_pageq.prev == 0);
3561 		assert(mem->vmp_listq.next == 0);
3562 		assert(mem->vmp_listq.prev == 0);
3563 		assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3564 		assert(mem->vmp_specialq.next == 0);
3565 		assert(mem->vmp_specialq.prev == 0);
3566 		return mem;
3567 	}
3568 
3569 	assert(!object->internal);
3570 //	vm_page_pageable_external_count--;
3571 
3572 	if (!vm_object_lock_try(object)) {
3573 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
3574 		vm_page_secluded.grab_failure_locked++;
3575 reactivate_secluded_page:
3576 		vm_page_activate(mem);
3577 		vm_page_unlock_queues();
3578 		return VM_PAGE_NULL;
3579 	}
3580 	if (mem->vmp_busy ||
3581 	    mem->vmp_cleaning ||
3582 	    mem->vmp_laundry) {
3583 		/* can't steal page in this state... */
3584 		vm_object_unlock(object);
3585 		vm_page_secluded.grab_failure_state++;
3586 		goto reactivate_secluded_page;
3587 	}
3588 	if (mem->vmp_realtime) {
3589 		/* don't steal pages used by realtime threads... */
3590 		vm_object_unlock(object);
3591 		vm_page_secluded.grab_failure_realtime++;
3592 		goto reactivate_secluded_page;
3593 	}
3594 
3595 	mem->vmp_busy = TRUE;
3596 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3597 	if (refmod_state & VM_MEM_REFERENCED) {
3598 		mem->vmp_reference = TRUE;
3599 	}
3600 	if (refmod_state & VM_MEM_MODIFIED) {
3601 		SET_PAGE_DIRTY(mem, FALSE);
3602 	}
3603 	if (mem->vmp_dirty || mem->vmp_precious) {
3604 		/* can't grab a dirty page; re-activate */
3605 //		printf("SECLUDED: dirty page %p\n", mem);
3606 		vm_page_wakeup_done(object, mem);
3607 		vm_page_secluded.grab_failure_dirty++;
3608 		vm_object_unlock(object);
3609 		goto reactivate_secluded_page;
3610 	}
3611 	if (mem->vmp_reference) {
3612 		/* it's been used but we do need to grab a page... */
3613 	}
3614 
3615 	vm_page_unlock_queues();
3616 
3617 
3618 	/* finish what vm_page_free() would have done... */
3619 	vm_page_free_prepare_object(mem, TRUE);
3620 	vm_object_unlock(object);
3621 	object = VM_OBJECT_NULL;
3622 
3623 	vm_page_validate_no_references(mem);
3624 
3625 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3626 	vm_page_secluded.grab_success_other++;
3627 
3628 	assert(mem->vmp_busy);
3629 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3630 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3631 	assert(mem->vmp_pageq.next == 0);
3632 	assert(mem->vmp_pageq.prev == 0);
3633 	assert(mem->vmp_listq.next == 0);
3634 	assert(mem->vmp_listq.prev == 0);
3635 	assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3636 	assert(mem->vmp_specialq.next == 0);
3637 	assert(mem->vmp_specialq.prev == 0);
3638 
3639 	return mem;
3640 }
3641 
3642 uint64_t
3643 vm_page_secluded_drain(void)
3644 {
3645 	vm_page_t local_freeq;
3646 	int local_freed;
3647 	uint64_t num_reclaimed;
3648 	unsigned int saved_secluded_count, saved_secluded_target;
3649 
3650 	num_reclaimed = 0;
3651 	local_freeq = NULL;
3652 	local_freed = 0;
3653 
3654 	vm_page_lock_queues();
3655 
3656 	saved_secluded_count = vm_page_secluded_count;
3657 	saved_secluded_target = vm_page_secluded_target;
3658 	vm_page_secluded_target = 0;
3659 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3660 	while (vm_page_secluded_count) {
3661 		vm_page_t secluded_page;
3662 
3663 		assert((vm_page_secluded_count_free +
3664 		    vm_page_secluded_count_inuse) ==
3665 		    vm_page_secluded_count);
3666 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3667 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3668 
3669 		vm_page_queues_remove(secluded_page, FALSE);
3670 		assert(!secluded_page->vmp_fictitious);
3671 		assert(!VM_PAGE_WIRED(secluded_page));
3672 
3673 		if (secluded_page->vmp_object == 0) {
3674 			/* transfer to free queue */
3675 			assert(secluded_page->vmp_busy);
3676 			secluded_page->vmp_snext = local_freeq;
3677 			local_freeq = secluded_page;
3678 			local_freed += 1;
3679 		} else {
3680 			/* transfer to head of active queue */
3681 			vm_page_enqueue_active(secluded_page, FALSE);
3682 			secluded_page = VM_PAGE_NULL;
3683 		}
3684 		num_reclaimed++;
3685 	}
3686 	vm_page_secluded_target = saved_secluded_target;
3687 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3688 
3689 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3690 
3691 	vm_page_unlock_queues();
3692 
3693 	if (local_freed) {
3694 		vm_page_free_list(local_freeq, TRUE);
3695 		local_freeq = NULL;
3696 		local_freed = 0;
3697 	}
3698 
3699 	return num_reclaimed;
3700 }
3701 #endif /* CONFIG_SECLUDED_MEMORY */
3702 
3703 static inline void
3704 vm_page_grab_diags()
3705 {
3706 #if DEVELOPMENT || DEBUG
3707 	task_t task = current_task_early();
3708 	if (task == NULL) {
3709 		return;
3710 	}
3711 
3712 	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3713 #endif /* DEVELOPMENT || DEBUG */
3714 }
3715 
3716 /*
3717  *	vm_page_release:
3718  *
3719  *	Return a page to the free list.
3720  */
3721 
3722 void
3723 vm_page_release(
3724 	vm_page_t       mem,
3725 	boolean_t       page_queues_locked)
3726 {
3727 	unsigned int    color;
3728 	int     need_wakeup = 0;
3729 	int     need_priv_wakeup = 0;
3730 #if CONFIG_SECLUDED_MEMORY
3731 	int     need_secluded_wakeup = 0;
3732 #endif /* CONFIG_SECLUDED_MEMORY */
3733 	event_t wakeup_event = NULL;
3734 
3735 	if (page_queues_locked) {
3736 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3737 	} else {
3738 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3739 	}
3740 
3741 	assert(!mem->vmp_private && !mem->vmp_fictitious);
3742 
3743 #if MACH_ASSERT
3744 	if (vm_check_refs_on_free) {
3745 		vm_page_validate_no_references(mem);
3746 	}
3747 #endif /* MACH_ASSERT */
3748 
3749 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
3750 
3751 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3752 
3753 	if (__improbable(mem->vmp_realtime)) {
3754 		if (!page_queues_locked) {
3755 			vm_page_lock_queues();
3756 		}
3757 		if (mem->vmp_realtime) {
3758 			mem->vmp_realtime = false;
3759 			vm_page_realtime_count--;
3760 		}
3761 		if (!page_queues_locked) {
3762 			vm_page_unlock_queues();
3763 		}
3764 	}
3765 
3766 	vm_free_page_lock_spin();
3767 
3768 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3769 	assert(mem->vmp_busy);
3770 	assert(!mem->vmp_laundry);
3771 	assert(mem->vmp_object == 0);
3772 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3773 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3774 	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3775 
3776 	/* Clear any specialQ hints before releasing page to the free pool */
3777 	mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3778 
3779 	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3780 	    vm_lopage_free_count < vm_lopage_free_limit &&
3781 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3782 		/*
3783 		 * this exists to support hardware controllers
3784 		 * incapable of generating DMAs with more than 32 bits
3785 		 * of address on platforms with physical memory > 4G...
3786 		 */
3787 		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3788 		vm_lopage_free_count++;
3789 
3790 		if (vm_lopage_free_count >= vm_lopage_free_limit) {
3791 			vm_lopage_refill = FALSE;
3792 		}
3793 
3794 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3795 		mem->vmp_lopage = TRUE;
3796 #if CONFIG_SECLUDED_MEMORY
3797 	} else if (vm_page_free_count > vm_page_free_reserved &&
3798 	    vm_page_secluded_count < vm_page_secluded_target &&
3799 	    num_tasks_can_use_secluded_mem == 0) {
3800 		/*
3801 		 * XXX FBDP TODO: also avoid refilling secluded queue
3802 		 * when some IOKit objects are already grabbing from it...
3803 		 */
3804 		if (!page_queues_locked) {
3805 			if (!vm_page_trylock_queues()) {
3806 				/* take locks in right order */
3807 				vm_free_page_unlock();
3808 				vm_page_lock_queues();
3809 				vm_free_page_lock_spin();
3810 			}
3811 		}
3812 		mem->vmp_lopage = FALSE;
3813 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3814 		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3815 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3816 		vm_page_secluded_count++;
3817 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3818 		vm_page_secluded_count_free++;
3819 		if (!page_queues_locked) {
3820 			vm_page_unlock_queues();
3821 		}
3822 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3823 		if (vm_page_free_wanted_secluded > 0) {
3824 			vm_page_free_wanted_secluded--;
3825 			need_secluded_wakeup = 1;
3826 		}
3827 #endif /* CONFIG_SECLUDED_MEMORY */
3828 	} else {
3829 		mem->vmp_lopage = FALSE;
3830 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3831 
3832 		color = VM_PAGE_GET_COLOR(mem);
3833 #if defined(__x86_64__)
3834 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3835 #else
3836 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3837 #endif
3838 		vm_page_free_count++;
3839 		/*
3840 		 *	Check if we should wake up someone waiting for page.
3841 		 *	But don't bother waking them unless they can allocate.
3842 		 *
3843 		 *	We wakeup only one thread, to prevent starvation.
3844 		 *	Because the scheduling system handles wait queues FIFO,
3845 		 *	if we wakeup all waiting threads, one greedy thread
3846 		 *	can starve multiple niceguy threads.  When the threads
3847 		 *	all wake up, the greedy thread runs first, grabs the page,
3848 		 *	and waits for another page.  It will be the first to run
3849 		 *	when the next page is freed.
3850 		 *
3851 		 *	However, there is a slight danger here.
3852 		 *	The thread we wake might not use the free page.
3853 		 *	Then the other threads could wait indefinitely
3854 		 *	while the page goes unused.  To forestall this,
3855 		 *	the pageout daemon will keep making free pages
3856 		 *	as long as vm_page_free_wanted is non-zero.
3857 		 */
3858 
3859 		assert(vm_page_free_count > 0);
3860 		if (vm_page_free_wanted_privileged > 0) {
3861 			vm_page_free_wanted_privileged--;
3862 			need_priv_wakeup = 1;
3863 #if CONFIG_SECLUDED_MEMORY
3864 		} else if (vm_page_free_wanted_secluded > 0 &&
3865 		    vm_page_free_count > vm_page_free_reserved) {
3866 			vm_page_free_wanted_secluded--;
3867 			need_secluded_wakeup = 1;
3868 #endif /* CONFIG_SECLUDED_MEMORY */
3869 		} else if (vm_page_free_wanted > 0 &&
3870 		    vm_page_free_count > vm_page_free_reserved) {
3871 			vm_page_free_wanted--;
3872 			need_wakeup = 1;
3873 		}
3874 	}
3875 	vm_pageout_vminfo.vm_page_pages_freed++;
3876 
3877 	vm_free_page_unlock();
3878 
3879 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3880 
3881 	if (need_priv_wakeup) {
3882 		wakeup_event = &vm_page_free_wanted_privileged;
3883 	}
3884 #if CONFIG_SECLUDED_MEMORY
3885 	else if (need_secluded_wakeup) {
3886 		wakeup_event = &vm_page_free_wanted_secluded;
3887 	}
3888 #endif /* CONFIG_SECLUDED_MEMORY */
3889 	else if (need_wakeup) {
3890 		wakeup_event = &vm_page_free_count;
3891 	}
3892 
3893 	if (wakeup_event) {
3894 		if (vps_dynamic_priority_enabled) {
3895 			wakeup_one_with_inheritor((event_t) wakeup_event,
3896 			    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
3897 			    NULL);
3898 		} else {
3899 			thread_wakeup_one((event_t) wakeup_event);
3900 		}
3901 	}
3902 
3903 	VM_CHECK_MEMORYSTATUS;
3904 }
3905 
3906 /*
3907  * This version of vm_page_release() is used only at startup
3908  * when we are single-threaded and pages are being released
3909  * for the first time. Hence, no locking or unnecessary checks are made.
3910  * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
3911  */
3912 void
3913 vm_page_release_startup(
3914 	vm_page_t       mem)
3915 {
3916 	vm_page_queue_t queue_free;
3917 
3918 	if (vm_lopage_free_count < vm_lopage_free_limit &&
3919 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3920 		mem->vmp_lopage = TRUE;
3921 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3922 		vm_lopage_free_count++;
3923 		queue_free = &vm_lopage_queue_free;
3924 #if CONFIG_SECLUDED_MEMORY
3925 	} else if (vm_page_secluded_count < vm_page_secluded_target) {
3926 		mem->vmp_lopage = FALSE;
3927 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3928 		vm_page_secluded_count++;
3929 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3930 		vm_page_secluded_count_free++;
3931 		queue_free = &vm_page_queue_secluded;
3932 #endif /* CONFIG_SECLUDED_MEMORY */
3933 	} else {
3934 		mem->vmp_lopage = FALSE;
3935 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3936 		vm_page_free_count++;
3937 		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3938 	}
3939 	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3940 #if defined(__x86_64__)
3941 		vm_page_queue_enter_clump(queue_free, mem);
3942 #else
3943 		vm_page_queue_enter(queue_free, mem, vmp_pageq);
3944 #endif
3945 	} else {
3946 		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3947 	}
3948 }
3949 
3950 /*
3951  *	vm_page_wait:
3952  *
3953  *	Wait for a page to become available.
3954  *	If there are plenty of free pages, then we don't sleep.
3955  *
3956  *	Returns:
3957  *		TRUE:  There may be another page, try again
3958  *		FALSE: We were interrupted out of our wait, don't try again
3959  */
3960 
3961 boolean_t
3962 vm_page_wait(
3963 	int     interruptible )
3964 {
3965 	/*
3966 	 *	We can't use vm_page_free_reserved to make this
3967 	 *	determination.  Consider: some thread might
3968 	 *	need to allocate two pages.  The first allocation
3969 	 *	succeeds, the second fails.  After the first page is freed,
3970 	 *	a call to vm_page_wait must really block.
3971 	 */
3972 	kern_return_t   wait_result;
3973 	int             need_wakeup = 0;
3974 	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3975 	event_t         wait_event = NULL;
3976 
3977 	vm_free_page_lock_spin();
3978 
3979 	if (is_privileged && vm_page_free_count) {
3980 		vm_free_page_unlock();
3981 		return TRUE;
3982 	}
3983 
3984 	if (vm_page_free_count >= vm_page_free_target) {
3985 		vm_free_page_unlock();
3986 		return TRUE;
3987 	}
3988 
3989 	if (is_privileged) {
3990 		if (vm_page_free_wanted_privileged++ == 0) {
3991 			need_wakeup = 1;
3992 		}
3993 		wait_event = (event_t)&vm_page_free_wanted_privileged;
3994 #if CONFIG_SECLUDED_MEMORY
3995 	} else if (secluded_for_apps &&
3996 	    task_can_use_secluded_mem(current_task(), FALSE)) {
3997 #if 00
3998 		/* XXX FBDP: need pageq lock for this... */
3999 		/* XXX FBDP: might wait even if pages available, */
4000 		/* XXX FBDP: hopefully not for too long... */
4001 		if (vm_page_secluded_count > 0) {
4002 			vm_free_page_unlock();
4003 			return TRUE;
4004 		}
4005 #endif
4006 		if (vm_page_free_wanted_secluded++ == 0) {
4007 			need_wakeup = 1;
4008 		}
4009 		wait_event = (event_t)&vm_page_free_wanted_secluded;
4010 #endif /* CONFIG_SECLUDED_MEMORY */
4011 	} else {
4012 		if (vm_page_free_wanted++ == 0) {
4013 			need_wakeup = 1;
4014 		}
4015 		wait_event = (event_t)&vm_page_free_count;
4016 	}
4017 
4018 	/*
4019 	 * We don't do a vm_pageout_scan wakeup if we already have
4020 	 * some waiters because vm_pageout_scan checks for waiters
4021 	 * before it returns and does so behind the vm_page_queue_free_lock,
4022 	 * which we own when we bump the waiter counts.
4023 	 */
4024 
4025 	if (vps_dynamic_priority_enabled) {
4026 		/*
4027 		 * We are waking up vm_pageout_scan here. If it needs
4028 		 * the vm_page_queue_free_lock before we unlock it
4029 		 * we'll end up just blocking and incur an extra
4030 		 * context switch. Could be a perf. issue.
4031 		 */
4032 
4033 		if (need_wakeup) {
4034 			thread_wakeup((event_t)&vm_page_free_wanted);
4035 		}
4036 
4037 		/*
4038 		 * LD: This event is going to get recorded every time because
4039 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
4040 		 * We just block in that routine.
4041 		 */
4042 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4043 		    vm_page_free_wanted_privileged,
4044 		    vm_page_free_wanted,
4045 #if CONFIG_SECLUDED_MEMORY
4046 		    vm_page_free_wanted_secluded,
4047 #else /* CONFIG_SECLUDED_MEMORY */
4048 		    0,
4049 #endif /* CONFIG_SECLUDED_MEMORY */
4050 		    0);
4051 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
4052 		    LCK_SLEEP_UNLOCK,
4053 		    wait_event,
4054 		    vm_pageout_scan_thread,
4055 		    interruptible,
4056 		    0);
4057 	} else {
4058 		wait_result = assert_wait(wait_event, interruptible);
4059 
4060 		vm_free_page_unlock();
4061 
4062 		if (need_wakeup) {
4063 			thread_wakeup((event_t)&vm_page_free_wanted);
4064 		}
4065 
4066 		if (wait_result == THREAD_WAITING) {
4067 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4068 			    vm_page_free_wanted_privileged,
4069 			    vm_page_free_wanted,
4070 #if CONFIG_SECLUDED_MEMORY
4071 			    vm_page_free_wanted_secluded,
4072 #else /* CONFIG_SECLUDED_MEMORY */
4073 			    0,
4074 #endif /* CONFIG_SECLUDED_MEMORY */
4075 			    0);
4076 			wait_result = thread_block(THREAD_CONTINUE_NULL);
4077 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4078 			    DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4079 		}
4080 	}
4081 
4082 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4083 }
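#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Typical caller pattern pairing vm_page_grab() with vm_page_wait():
 * retry the grab until a page is available, and stop retrying if the
 * wait is interrupted (vm_page_wait() returns FALSE).  The helper name
 * is hypothetical.
 */
static vm_page_t
example_grab_page_retry(void)
{
	vm_page_t page;

	while ((page = vm_page_grab()) == VM_PAGE_NULL) {
		if (!vm_page_wait(THREAD_UNINT)) {
			/* interrupted out of the wait: don't try again */
			return VM_PAGE_NULL;
		}
	}
	return page;
}
#endif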
4084 
4085 /*
4086  *	vm_page_alloc:
4087  *
4088  *	Allocate and return a memory cell associated
4089  *	with this VM object/offset pair.
4090  *
4091  *	Object must be locked.
4092  */
4093 
4094 vm_page_t
4095 vm_page_alloc(
4096 	vm_object_t             object,
4097 	vm_object_offset_t      offset)
4098 {
4099 	vm_page_t       mem;
4100 	int             grab_options;
4101 
4102 	vm_object_lock_assert_exclusive(object);
4103 	grab_options = 0;
4104 #if CONFIG_SECLUDED_MEMORY
4105 	if (object->can_grab_secluded) {
4106 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4107 	}
4108 #endif /* CONFIG_SECLUDED_MEMORY */
4109 	mem = vm_page_grab_options(grab_options);
4110 	if (mem == VM_PAGE_NULL) {
4111 		return VM_PAGE_NULL;
4112 	}
4113 
4114 	vm_page_insert(mem, object, offset);
4115 
4116 	return mem;
4117 }
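#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Example use of vm_page_alloc(): the object must be locked exclusively
 * across the call, and a VM_PAGE_NULL return only means no page was
 * available right now -- the usual response is to unlock, wait, and
 * retry.  The helper name is hypothetical.
 */
static vm_page_t
example_alloc_at_offset(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t mem;

	vm_object_lock(object);
	while ((mem = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
		vm_object_unlock(object);
		if (!vm_page_wait(THREAD_UNINT)) {
			return VM_PAGE_NULL;
		}
		vm_object_lock(object);
	}
	vm_object_unlock(object);
	return mem;
}
#endif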
4118 
4119 /*
4120  *	vm_page_free_prepare:
4121  *
4122  *	Removes page from any queue it may be on
4123  *	and disassociates it from its VM object.
4124  *
4125  *	Object and page queues must be locked prior to entry.
4126  */
4127 static void
4128 vm_page_free_prepare(
4129 	vm_page_t       mem)
4130 {
4131 #if CONFIG_SPTM
4132 	/**
4133 	 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
4134 	 *            created and destroyed. In order to catch potential cases where this
4135 	 *            does not happen, add an appropriate assert here. This code should be
4136 	 *            executed on every frame that is about to be released to the VM.
4137 	 */
4138 	const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
4139 	__unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
4140 
4141 	assert(frame_type == XNU_DEFAULT);
4142 #endif /* CONFIG_SPTM */
4143 
4144 	vm_page_free_prepare_queues(mem);
4145 	vm_page_free_prepare_object(mem, TRUE);
4146 }
4147 
4148 
4149 void
4150 vm_page_free_prepare_queues(
4151 	vm_page_t       mem)
4152 {
4153 	vm_object_t     m_object;
4154 
4155 	VM_PAGE_CHECK(mem);
4156 
4157 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4158 	assert(!mem->vmp_cleaning);
4159 	m_object = VM_PAGE_OBJECT(mem);
4160 
4161 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4162 	if (m_object) {
4163 		vm_object_lock_assert_exclusive(m_object);
4164 	}
4165 	if (mem->vmp_laundry) {
4166 		/*
4167 		 * We may have to free a page while it's being laundered
4168 		 * if we lost its pager (due to a forced unmount, for example).
4169 		 * We need to call vm_pageout_steal_laundry() before removing
4170 		 * the page from its VM object, so that we can remove it
4171 		 * from its pageout queue and adjust the laundry accounting
4172 		 */
4173 		vm_pageout_steal_laundry(mem, TRUE);
4174 	}
4175 
4176 	vm_page_queues_remove(mem, TRUE);
4177 
4178 	if (__improbable(mem->vmp_realtime)) {
4179 		mem->vmp_realtime = false;
4180 		vm_page_realtime_count--;
4181 	}
4182 
4183 	if (VM_PAGE_WIRED(mem)) {
4184 		assert(mem->vmp_wire_count > 0);
4185 
4186 		if (m_object) {
4187 			task_t          owner;
4188 			int             ledger_idx_volatile;
4189 			int             ledger_idx_nonvolatile;
4190 			int             ledger_idx_volatile_compressed;
4191 			int             ledger_idx_nonvolatile_compressed;
4192 			int             ledger_idx_composite;
4193 			int             ledger_idx_external_wired;
4194 			boolean_t       do_footprint;
4195 
4196 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4197 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4198 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4199 
4200 			assert(m_object->resident_page_count >=
4201 			    m_object->wired_page_count);
4202 
4203 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4204 				OSAddAtomic(+1, &vm_page_purgeable_count);
4205 				assert(vm_page_purgeable_wired_count > 0);
4206 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4207 			}
4208 			if (m_object->internal &&
4209 			    m_object->vo_owner != TASK_NULL &&
4210 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4211 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
4212 				owner = VM_OBJECT_OWNER(m_object);
4213 				vm_object_ledger_tag_ledgers(
4214 					m_object,
4215 					&ledger_idx_volatile,
4216 					&ledger_idx_nonvolatile,
4217 					&ledger_idx_volatile_compressed,
4218 					&ledger_idx_nonvolatile_compressed,
4219 					&ledger_idx_composite,
4220 					&ledger_idx_external_wired,
4221 					&do_footprint);
4222 				/*
4223 				 * While wired, this page was accounted
4224 				 * as "non-volatile" but it should now
4225 				 * be accounted as "volatile".
4226 				 */
4227 				/* one less "non-volatile"... */
4228 				ledger_debit(owner->ledger,
4229 				    ledger_idx_nonvolatile,
4230 				    PAGE_SIZE);
4231 				if (do_footprint) {
4232 					/* ... and "phys_footprint" */
4233 					ledger_debit(owner->ledger,
4234 					    task_ledgers.phys_footprint,
4235 					    PAGE_SIZE);
4236 				} else if (ledger_idx_composite != -1) {
4237 					ledger_debit(owner->ledger,
4238 					    ledger_idx_composite,
4239 					    PAGE_SIZE);
4240 				}
4241 				/* one more "volatile" */
4242 				ledger_credit(owner->ledger,
4243 				    ledger_idx_volatile,
4244 				    PAGE_SIZE);
4245 			}
4246 		}
4247 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4248 			vm_page_wire_count--;
4249 		}
4250 
4251 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4252 		mem->vmp_wire_count = 0;
4253 		assert(!mem->vmp_gobbled);
4254 	} else if (mem->vmp_gobbled) {
4255 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4256 			vm_page_wire_count--;
4257 		}
4258 		vm_page_gobble_count--;
4259 	}
4260 }
4261 
4262 
4263 void
4264 vm_page_free_prepare_object(
4265 	vm_page_t       mem,
4266 	boolean_t       remove_from_hash)
4267 {
4268 	assert(!mem->vmp_realtime);
4269 	if (mem->vmp_tabled) {
4270 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4271 	}
4272 	vm_page_wakeup(VM_OBJECT_NULL, mem);               /* clears wanted */
4273 
4274 	if (mem->vmp_private) {
4275 		mem->vmp_private = FALSE;
4276 		mem->vmp_fictitious = TRUE;
4277 		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4278 	}
4279 	if (!mem->vmp_fictitious) {
4280 		assert(mem->vmp_pageq.next == 0);
4281 		assert(mem->vmp_pageq.prev == 0);
4282 		assert(mem->vmp_listq.next == 0);
4283 		assert(mem->vmp_listq.prev == 0);
4284 		assert(mem->vmp_specialq.next == 0);
4285 		assert(mem->vmp_specialq.prev == 0);
4286 		assert(mem->vmp_next_m == 0);
4287 
4288 #if MACH_ASSERT
4289 		if (vm_check_refs_on_free) {
4290 			vm_page_validate_no_references(mem);
4291 		}
4292 #endif /* MACH_ASSERT */
4293 
4294 		{
4295 			vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4296 		}
4297 	}
4298 }
4299 
4300 /*
4301  *	vm_page_free:
4302  *
4303  *	Returns the given page to the free list,
4304  *	disassociating it with any VM object.
4305  *
4306  *	Object and page queues must be locked prior to entry.
4307  */
4308 void
4309 vm_page_free(
4310 	vm_page_t       mem)
4311 {
4312 	vm_page_free_prepare(mem);
4313 
4314 	if (mem->vmp_fictitious) {
4315 		vm_page_release_fictitious(mem);
4316 	} else {
4317 		vm_page_release(mem, TRUE);  /* page queues are locked */
4318 	}
4319 }
4320 
4321 
4322 void
4323 vm_page_free_unlocked(
4324 	vm_page_t       mem,
4325 	boolean_t       remove_from_hash)
4326 {
4327 	vm_page_lockspin_queues();
4328 	vm_page_free_prepare_queues(mem);
4329 	vm_page_unlock_queues();
4330 
4331 	vm_page_free_prepare_object(mem, remove_from_hash);
4332 
4333 	if (mem->vmp_fictitious) {
4334 		vm_page_release_fictitious(mem);
4335 	} else {
4336 		vm_page_release(mem, FALSE); /* page queues are not locked */
4337 	}
4338 }
4339 
4340 
4341 /*
4342  * Free a list of pages.  The list can be up to several hundred pages,
4343  * as blocked up by vm_pageout_scan().
4344  * The big win is not having to take the free list lock once
4345  * per page.
4346  *
4347  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4348  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4349  */
4350 void
4351 vm_page_free_list(
4352 	vm_page_t       freeq,
4353 	boolean_t       prepare_object)
4354 {
4355 	vm_page_t       mem;
4356 	vm_page_t       nxt;
4357 	vm_page_t       local_freeq;
4358 	int             pg_count;
4359 
4360 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4361 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4362 
4363 	while (freeq) {
4364 		pg_count = 0;
4365 		local_freeq = VM_PAGE_NULL;
4366 		mem = freeq;
4367 
4368 		/*
4369 		 * break up the processing into smaller chunks so
4370 		 * that we can 'pipeline' the pages onto the
4371 		 * free list w/o introducing too much
4372 		 * contention on the global free queue lock
4373 		 */
4374 		while (mem && pg_count < 64) {
4375 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4376 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4377 			assert(mem->vmp_specialq.next == 0 &&
4378 			    mem->vmp_specialq.prev == 0);
4379 			/*
4380 			 * &&
4381 			 *   mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4382 			 */
4383 			nxt = mem->vmp_snext;
4384 			mem->vmp_snext = NULL;
4385 			assert(mem->vmp_pageq.prev == 0);
4386 
4387 #if MACH_ASSERT
4388 			if (vm_check_refs_on_free) {
4389 				if (!mem->vmp_fictitious && !mem->vmp_private) {
4390 					vm_page_validate_no_references(mem);
4391 				}
4392 			}
4393 #endif /* MACH_ASSERT */
4394 
4395 			if (__improbable(mem->vmp_realtime)) {
4396 				vm_page_lock_queues();
4397 				if (mem->vmp_realtime) {
4398 					mem->vmp_realtime = false;
4399 					vm_page_realtime_count--;
4400 				}
4401 				vm_page_unlock_queues();
4402 			}
4403 
4404 			if (prepare_object == TRUE) {
4405 				vm_page_free_prepare_object(mem, TRUE);
4406 			}
4407 
4408 			if (!mem->vmp_fictitious) {
4409 				assert(mem->vmp_busy);
4410 
4411 				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4412 				    vm_lopage_free_count < vm_lopage_free_limit &&
4413 				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4414 					vm_page_release(mem, FALSE); /* page queues are not locked */
4415 #if CONFIG_SECLUDED_MEMORY
4416 				} else if (vm_page_secluded_count < vm_page_secluded_target &&
4417 				    num_tasks_can_use_secluded_mem == 0) {
4418 					vm_page_release(mem,
4419 					    FALSE);             /* page queues are not locked */
4420 #endif /* CONFIG_SECLUDED_MEMORY */
4421 				} else {
4422 					/*
4423 					 * IMPORTANT: we can't set the page "free" here
4424 					 * because that would make the page eligible for
4425 					 * a physically-contiguous allocation (see
4426 					 * vm_page_find_contiguous()) right away (we don't
4427 					 * hold the vm_page_queue_free lock).  That would
4428 					 * cause trouble because the page is not actually
4429 					 * in the free queue yet...
4430 					 */
4431 					mem->vmp_snext = local_freeq;
4432 					local_freeq = mem;
4433 					pg_count++;
4434 
4435 					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4436 				}
4437 			} else {
4438 				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4439 				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4440 				vm_page_release_fictitious(mem);
4441 			}
4442 			mem = nxt;
4443 		}
4444 		freeq = mem;
4445 
4446 		if ((mem = local_freeq)) {
4447 			unsigned int    avail_free_count;
4448 			unsigned int    need_wakeup = 0;
4449 			unsigned int    need_priv_wakeup = 0;
4450 #if CONFIG_SECLUDED_MEMORY
4451 			unsigned int    need_wakeup_secluded = 0;
4452 #endif /* CONFIG_SECLUDED_MEMORY */
4453 			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4454 			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4455 
4456 			vm_free_page_lock_spin();
4457 
4458 			while (mem) {
4459 				int     color;
4460 
4461 				nxt = mem->vmp_snext;
4462 
4463 				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4464 				assert(mem->vmp_busy);
4465 				assert(!mem->vmp_realtime);
4466 				mem->vmp_lopage = FALSE;
4467 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4468 
4469 				color = VM_PAGE_GET_COLOR(mem);
4470 #if defined(__x86_64__)
4471 				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4472 #else
4473 				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4474 				    mem, vmp_pageq);
4475 #endif
4476 				mem = nxt;
4477 			}
4478 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4479 			vm_page_free_count += pg_count;
4480 			avail_free_count = vm_page_free_count;
4481 
4482 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4483 
4484 			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4485 				if (avail_free_count < vm_page_free_wanted_privileged) {
4486 					need_priv_wakeup = avail_free_count;
4487 					vm_page_free_wanted_privileged -= avail_free_count;
4488 					avail_free_count = 0;
4489 				} else {
4490 					need_priv_wakeup = vm_page_free_wanted_privileged;
4491 					avail_free_count -= vm_page_free_wanted_privileged;
4492 					vm_page_free_wanted_privileged = 0;
4493 				}
4494 			}
4495 #if CONFIG_SECLUDED_MEMORY
4496 			if (vm_page_free_wanted_secluded > 0 &&
4497 			    avail_free_count > vm_page_free_reserved) {
4498 				unsigned int available_pages;
4499 				available_pages = (avail_free_count -
4500 				    vm_page_free_reserved);
4501 				if (available_pages <
4502 				    vm_page_free_wanted_secluded) {
4503 					need_wakeup_secluded = available_pages;
4504 					vm_page_free_wanted_secluded -=
4505 					    available_pages;
4506 					avail_free_count -= available_pages;
4507 				} else {
4508 					need_wakeup_secluded =
4509 					    vm_page_free_wanted_secluded;
4510 					avail_free_count -=
4511 					    vm_page_free_wanted_secluded;
4512 					vm_page_free_wanted_secluded = 0;
4513 				}
4514 			}
4515 #endif /* CONFIG_SECLUDED_MEMORY */
4516 			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4517 				unsigned int  available_pages;
4518 
4519 				available_pages = avail_free_count - vm_page_free_reserved;
4520 
4521 				if (available_pages >= vm_page_free_wanted) {
4522 					need_wakeup = vm_page_free_wanted;
4523 					vm_page_free_wanted = 0;
4524 				} else {
4525 					need_wakeup = available_pages;
4526 					vm_page_free_wanted -= available_pages;
4527 				}
4528 			}
4529 			vm_free_page_unlock();
4530 
4531 			priv_wakeup_event = NULL;
4532 			secluded_wakeup_event = NULL;
4533 			normal_wakeup_event = NULL;
4534 
4535 			priv_wakeup_all = FALSE;
4536 			secluded_wakeup_all = FALSE;
4537 			normal_wakeup_all = FALSE;
4538 
4539 
4540 			if (need_priv_wakeup != 0) {
4541 				/*
4542 				 * There shouldn't be that many VM-privileged threads,
4543 				 * so let's wake them all up, even if we don't quite
4544 				 * have enough pages to satisfy them all.
4545 				 */
4546 				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4547 				priv_wakeup_all = TRUE;
4548 			}
4549 #if CONFIG_SECLUDED_MEMORY
4550 			if (need_wakeup_secluded != 0 &&
4551 			    vm_page_free_wanted_secluded == 0) {
4552 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4553 				secluded_wakeup_all = TRUE;
4554 				need_wakeup_secluded = 0;
4555 			} else {
4556 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4557 			}
4558 #endif /* CONFIG_SECLUDED_MEMORY */
4559 			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4560 				/*
4561 				 * We don't expect to have any more waiters
4562 				 * after this, so let's wake them all up at
4563 				 * once.
4564 				 */
4565 				normal_wakeup_event = (event_t) &vm_page_free_count;
4566 				normal_wakeup_all = TRUE;
4567 				need_wakeup = 0;
4568 			} else {
4569 				normal_wakeup_event = (event_t) &vm_page_free_count;
4570 			}
4571 
4572 			if (priv_wakeup_event ||
4573 #if CONFIG_SECLUDED_MEMORY
4574 			    secluded_wakeup_event ||
4575 #endif /* CONFIG_SECLUDED_MEMORY */
4576 			    normal_wakeup_event) {
4577 				if (vps_dynamic_priority_enabled) {
4578 					if (priv_wakeup_all == TRUE) {
4579 						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4580 					}
4581 
4582 #if CONFIG_SECLUDED_MEMORY
4583 					if (secluded_wakeup_all == TRUE) {
4584 						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4585 					}
4586 
4587 					while (need_wakeup_secluded-- != 0) {
4588 						/*
4589 						 * Wake up one waiter per page we just released.
4590 						 */
4591 						wakeup_one_with_inheritor(secluded_wakeup_event,
4592 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
4593 					}
4594 #endif /* CONFIG_SECLUDED_MEMORY */
4595 
4596 					if (normal_wakeup_all == TRUE) {
4597 						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4598 					}
4599 
4600 					while (need_wakeup-- != 0) {
4601 						/*
4602 						 * Wake up one waiter per page we just released.
4603 						 */
4604 						wakeup_one_with_inheritor(normal_wakeup_event,
4605 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
4606 						    NULL);
4607 					}
4608 				} else {
4609 					/*
4610 					 * Non-priority-aware wakeups.
4611 					 */
4612 
4613 					if (priv_wakeup_all == TRUE) {
4614 						thread_wakeup(priv_wakeup_event);
4615 					}
4616 
4617 #if CONFIG_SECLUDED_MEMORY
4618 					if (secluded_wakeup_all == TRUE) {
4619 						thread_wakeup(secluded_wakeup_event);
4620 					}
4621 
4622 					while (need_wakeup_secluded-- != 0) {
4623 						/*
4624 						 * Wake up one waiter per page we just released.
4625 						 */
4626 						thread_wakeup_one(secluded_wakeup_event);
4627 					}
4628 
4629 #endif /* CONFIG_SECLUDED_MEMORY */
4630 					if (normal_wakeup_all == TRUE) {
4631 						thread_wakeup(normal_wakeup_event);
4632 					}
4633 
4634 					while (need_wakeup-- != 0) {
4635 						/*
4636 						 * Wake up one waiter per page we just released.
4637 						 */
4638 						thread_wakeup_one(normal_wakeup_event);
4639 					}
4640 				}
4641 			}
4642 
4643 			VM_CHECK_MEMORYSTATUS;
4644 		}
4645 	}
4646 }
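#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Example of batching pages for vm_page_free_list(): callers chain pages
 * through vmp_snext and free the whole list in one call, taking the
 * free-queue lock once per batch instead of once per page.  The pages
 * are assumed to already be off their paging queues, and neither the
 * page-queues lock nor the free-queue lock may be held here.  The helper
 * name is hypothetical.
 */
static void
example_free_page_batch(vm_page_t *pages, int npages)
{
	vm_page_t local_freeq = VM_PAGE_NULL;

	for (int i = 0; i < npages; i++) {
		pages[i]->vmp_snext = local_freeq;
		local_freeq = pages[i];
	}
	vm_page_free_list(local_freeq, TRUE);	/* TRUE: prepare_object for each page */
}
#endif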
4647 
4648 
4649 /*
4650  *	vm_page_wire:
4651  *
4652  *	Mark this page as wired down by yet
4653  *	another map, removing it from paging queues
4654  *	as necessary.
4655  *
4656  *	The page's object and the page queues must be locked.
4657  */
4658 
4659 
4660 void
4661 vm_page_wire(
4662 	vm_page_t mem,
4663 	vm_tag_t           tag,
4664 	boolean_t          check_memorystatus)
4665 {
4666 	vm_object_t     m_object;
4667 
4668 	m_object = VM_PAGE_OBJECT(mem);
4669 
4670 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
4671 
4672 	VM_PAGE_CHECK(mem);
4673 	if (m_object) {
4674 		vm_object_lock_assert_exclusive(m_object);
4675 	} else {
4676 		/*
4677 		 * In theory, the page should be in an object before it
4678 		 * gets wired, since we need to hold the object lock
4679 		 * to update some fields in the page structure.
4680 		 * However, some code (i386 pmap, for example) might want
4681 		 * to wire a page before it gets inserted into an object.
4682 		 * That's somewhat OK, as long as nobody else can get to
4683 		 * that page and update it at the same time.
4684 		 */
4685 	}
4686 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4687 	if (!VM_PAGE_WIRED(mem)) {
4688 		if (mem->vmp_laundry) {
4689 			vm_pageout_steal_laundry(mem, TRUE);
4690 		}
4691 
4692 		vm_page_queues_remove(mem, TRUE);
4693 
4694 		assert(mem->vmp_wire_count == 0);
4695 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
4696 
4697 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4698 		if (mem->vmp_unmodified_ro == true) {
4699 			/* Object and PageQ locks are held*/
4700 			mem->vmp_unmodified_ro = false;
4701 			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4702 			vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
4703 		}
4704 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4705 
4706 		if (m_object) {
4707 			task_t          owner;
4708 			int             ledger_idx_volatile;
4709 			int             ledger_idx_nonvolatile;
4710 			int             ledger_idx_volatile_compressed;
4711 			int             ledger_idx_nonvolatile_compressed;
4712 			int             ledger_idx_composite;
4713 			int             ledger_idx_external_wired;
4714 			boolean_t       do_footprint;
4715 
4716 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4717 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4718 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4719 
4720 			assert(m_object->resident_page_count >=
4721 			    m_object->wired_page_count);
4722 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4723 				assert(vm_page_purgeable_count > 0);
4724 				OSAddAtomic(-1, &vm_page_purgeable_count);
4725 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
4726 			}
4727 			if (m_object->internal &&
4728 			    m_object->vo_owner != TASK_NULL &&
4729 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4730 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
4731 				owner = VM_OBJECT_OWNER(m_object);
4732 				vm_object_ledger_tag_ledgers(
4733 					m_object,
4734 					&ledger_idx_volatile,
4735 					&ledger_idx_nonvolatile,
4736 					&ledger_idx_volatile_compressed,
4737 					&ledger_idx_nonvolatile_compressed,
4738 					&ledger_idx_composite,
4739 					&ledger_idx_external_wired,
4740 					&do_footprint);
4741 				/* less volatile bytes */
4742 				ledger_debit(owner->ledger,
4743 				    ledger_idx_volatile,
4744 				    PAGE_SIZE);
4745 				/* more not-quite-volatile bytes */
4746 				ledger_credit(owner->ledger,
4747 				    ledger_idx_nonvolatile,
4748 				    PAGE_SIZE);
4749 				if (do_footprint) {
4750 					/* more footprint */
4751 					ledger_credit(owner->ledger,
4752 					    task_ledgers.phys_footprint,
4753 					    PAGE_SIZE);
4754 				} else if (ledger_idx_composite != -1) {
4755 					ledger_credit(owner->ledger,
4756 					    ledger_idx_composite,
4757 					    PAGE_SIZE);
4758 				}
4759 			}
4760 
4761 			if (m_object->all_reusable) {
4762 				/*
4763 				 * Wired pages are not counted as "re-usable"
4764 				 * in "all_reusable" VM objects, so nothing
4765 				 * to do here.
4766 				 */
4767 			} else if (mem->vmp_reusable) {
4768 				/*
4769 				 * This page is not "re-usable" when it's
4770 				 * wired, so adjust its state and the
4771 				 * accounting.
4772 				 */
4773 				vm_page_lockconvert_queues();
4774 				vm_object_reuse_pages(m_object,
4775 				    mem->vmp_offset,
4776 				    mem->vmp_offset + PAGE_SIZE_64,
4777 				    FALSE);
4778 			}
4779 		}
4780 		assert(!mem->vmp_reusable);
4781 
4782 		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4783 			vm_page_wire_count++;
4784 		}
4785 		if (mem->vmp_gobbled) {
4786 			vm_page_gobble_count--;
4787 		}
4788 		mem->vmp_gobbled = FALSE;
4789 
4790 		if (check_memorystatus == TRUE) {
4791 			VM_CHECK_MEMORYSTATUS;
4792 		}
4793 	}
4794 	assert(!mem->vmp_gobbled);
4795 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4796 	mem->vmp_wire_count++;
4797 	if (__improbable(mem->vmp_wire_count == 0)) {
4798 		panic("vm_page_wire(%p): wire_count overflow", mem);
4799 	}
4800 	VM_PAGE_CHECK(mem);
4801 }
4802 
4803 /*
4804  *	vm_page_unwire:
4805  *
4806  *	Release one wiring of this page, potentially
4807  *	enabling it to be paged again.
4808  *
4809  *	The page's object and the page queues must be locked.
4810  */
4811 void
4812 vm_page_unwire(
4813 	vm_page_t       mem,
4814 	boolean_t       queueit)
4815 {
4816 	vm_object_t     m_object;
4817 
4818 	m_object = VM_PAGE_OBJECT(mem);
4819 
4820 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
4821 
4822 	VM_PAGE_CHECK(mem);
4823 	assert(VM_PAGE_WIRED(mem));
4824 	assert(mem->vmp_wire_count > 0);
4825 	assert(!mem->vmp_gobbled);
4826 	assert(m_object != VM_OBJECT_NULL);
4827 	vm_object_lock_assert_exclusive(m_object);
4828 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4829 	if (--mem->vmp_wire_count == 0) {
4830 		task_t          owner;
4831 		int             ledger_idx_volatile;
4832 		int             ledger_idx_nonvolatile;
4833 		int             ledger_idx_volatile_compressed;
4834 		int             ledger_idx_nonvolatile_compressed;
4835 		int             ledger_idx_composite;
4836 		int             ledger_idx_external_wired;
4837 		boolean_t       do_footprint;
4838 
4839 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4840 
4841 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4842 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4843 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4844 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4845 			vm_page_wire_count--;
4846 		}
4847 
4848 		assert(m_object->resident_page_count >=
4849 		    m_object->wired_page_count);
4850 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4851 			OSAddAtomic(+1, &vm_page_purgeable_count);
4852 			assert(vm_page_purgeable_wired_count > 0);
4853 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4854 		}
4855 		if (m_object->internal &&
4856 		    m_object->vo_owner != TASK_NULL &&
4857 		    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4858 		    m_object->purgable == VM_PURGABLE_EMPTY)) {
4859 			owner = VM_OBJECT_OWNER(m_object);
4860 			vm_object_ledger_tag_ledgers(
4861 				m_object,
4862 				&ledger_idx_volatile,
4863 				&ledger_idx_nonvolatile,
4864 				&ledger_idx_volatile_compressed,
4865 				&ledger_idx_nonvolatile_compressed,
4866 				&ledger_idx_composite,
4867 				&ledger_idx_external_wired,
4868 				&do_footprint);
4869 			/* more volatile bytes */
4870 			ledger_credit(owner->ledger,
4871 			    ledger_idx_volatile,
4872 			    PAGE_SIZE);
4873 			/* less not-quite-volatile bytes */
4874 			ledger_debit(owner->ledger,
4875 			    ledger_idx_nonvolatile,
4876 			    PAGE_SIZE);
4877 			if (do_footprint) {
4878 				/* less footprint */
4879 				ledger_debit(owner->ledger,
4880 				    task_ledgers.phys_footprint,
4881 				    PAGE_SIZE);
4882 			} else if (ledger_idx_composite != -1) {
4883 				ledger_debit(owner->ledger,
4884 				    ledger_idx_composite,
4885 				    PAGE_SIZE);
4886 			}
4887 		}
4888 		assert(!is_kernel_object(m_object));
4889 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4890 
4891 		if (queueit == TRUE) {
4892 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
4893 				vm_page_deactivate(mem);
4894 			} else {
4895 				vm_page_activate(mem);
4896 			}
4897 		}
4898 
4899 		VM_CHECK_MEMORYSTATUS;
4900 	}
4901 	VM_PAGE_CHECK(mem);
4902 }
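#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Example wire/unwire pairing: both calls require the page's object and
 * the page queues to be locked, and every vm_page_wire() must eventually
 * be balanced by a vm_page_unwire().  The tag VM_KERN_MEMORY_NONE and
 * the helper name are placeholder assumptions.
 */
static void
example_wire_then_unwire(vm_object_t object, vm_page_t mem)
{
	vm_object_lock(object);
	vm_page_lock_queues();
	vm_page_wire(mem, VM_KERN_MEMORY_NONE, TRUE);	/* TRUE: check memorystatus */
	vm_page_unlock_queues();

	/* ... operate on the wired page ... */

	vm_page_lock_queues();
	vm_page_unwire(mem, TRUE);	/* TRUE: re-queue the page when wire_count hits 0 */
	vm_page_unlock_queues();
	vm_object_unlock(object);
}
#endif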
4903 
4904 /*
4905  *	vm_page_deactivate:
4906  *
4907  *	Returns the given page to the inactive list,
4908  *	indicating that no physical maps have access
4909  *	to this page.  [Used by the physical mapping system.]
4910  *
4911  *	The page queues must be locked.
4912  */
4913 void
4914 vm_page_deactivate(
4915 	vm_page_t       m)
4916 {
4917 	vm_page_deactivate_internal(m, TRUE);
4918 }
4919 
4920 
4921 void
4922 vm_page_deactivate_internal(
4923 	vm_page_t       m,
4924 	boolean_t       clear_hw_reference)
4925 {
4926 	vm_object_t     m_object;
4927 
4928 	m_object = VM_PAGE_OBJECT(m);
4929 
4930 	VM_PAGE_CHECK(m);
4931 	assert(!is_kernel_object(m_object));
4932 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4933 
4934 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
4935 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4936 	/*
4937 	 *	This page is no longer very interesting.  If it was
4938 	 *	interesting (active or inactive/referenced), then we
4939 	 *	clear the reference bit and (re)enter it in the
4940 	 *	inactive queue.  Note wired pages should not have
4941 	 *	their reference bit cleared.
4942 	 */
4943 	assert( !(m->vmp_absent && !m->vmp_unusual));
4944 
4945 	if (m->vmp_gobbled) {           /* can this happen? */
4946 		assert( !VM_PAGE_WIRED(m));
4947 
4948 		if (!m->vmp_private && !m->vmp_fictitious) {
4949 			vm_page_wire_count--;
4950 		}
4951 		vm_page_gobble_count--;
4952 		m->vmp_gobbled = FALSE;
4953 	}
4954 	/*
4955 	 * if this page is currently on the pageout queue, we can't do the
4956 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4957 	 * and we can't remove it manually since we would need the object lock
4958 	 * (which is not required here) to decrement the activity_in_progress
4959 	 * reference which is held on the object while the page is in the pageout queue...
4960 	 * just let the normal laundry processing proceed
4961 	 */
4962 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4963 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4964 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4965 	    VM_PAGE_WIRED(m)) {
4966 		return;
4967 	}
4968 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
4969 		vm_page_lockconvert_queues();
4970 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4971 	}
4972 
4973 	m->vmp_reference = FALSE;
4974 	m->vmp_no_cache = FALSE;
4975 
4976 	if (!VM_PAGE_INACTIVE(m)) {
4977 		vm_page_queues_remove(m, FALSE);
4978 
4979 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
4980 		    m->vmp_dirty && m_object->internal &&
4981 		    (m_object->purgable == VM_PURGABLE_DENY ||
4982 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4983 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
4984 			vm_page_check_pageable_safe(m);
4985 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4986 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4987 			vm_page_throttled_count++;
4988 		} else {
4989 			if (m_object->named && m_object->ref_count == 1) {
4990 				vm_page_speculate(m, FALSE);
4991 #if DEVELOPMENT || DEBUG
4992 				vm_page_speculative_recreated++;
4993 #endif
4994 			} else {
4995 				vm_page_enqueue_inactive(m, FALSE);
4996 			}
4997 		}
4998 	}
4999 }
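#if 0	/* Illustrative sketch only -- not part of the original source. */
/*
 * Example call site for vm_page_deactivate(): the page queues must be
 * held across the call.  The page's reference bit is cleared and it is
 * (re)entered on the inactive queue unless it is wired, in laundry,
 * private, or fictitious.  The helper name is hypothetical.
 */
static void
example_deactivate_page(vm_page_t m)
{
	vm_page_lock_queues();
	vm_page_deactivate(m);
	vm_page_unlock_queues();
}
#endif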
5000 
5001 /*
5002  * vm_page_enqueue_cleaned
5003  *
5004  * Put the page on the cleaned queue, mark it cleaned, etc.
5005  * Being on the cleaned queue (and having m->clean_queue set)
5006  * does ** NOT ** guarantee that the page is clean!
5007  *
5008  * Call with the queues lock held.
5009  */
5010 
5011 void
5012 vm_page_enqueue_cleaned(vm_page_t m)
5013 {
5014 	vm_object_t     m_object;
5015 
5016 	m_object = VM_PAGE_OBJECT(m);
5017 
5018 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5019 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5020 	assert( !(m->vmp_absent && !m->vmp_unusual));
5021 
5022 	if (VM_PAGE_WIRED(m)) {
5023 		return;
5024 	}
5025 
5026 	if (m->vmp_gobbled) {
5027 		if (!m->vmp_private && !m->vmp_fictitious) {
5028 			vm_page_wire_count--;
5029 		}
5030 		vm_page_gobble_count--;
5031 		m->vmp_gobbled = FALSE;
5032 	}
5033 	/*
5034 	 * if this page is currently on the pageout queue, we can't do the
5035 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5036 	 * and we can't remove it manually since we would need the object lock
5037 	 * (which is not required here) to decrement the activity_in_progress
5038 	 * reference which is held on the object while the page is in the pageout queue...
5039 	 * just let the normal laundry processing proceed
5040 	 */
5041 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5042 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
5043 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5044 		return;
5045 	}
5046 	vm_page_queues_remove(m, FALSE);
5047 
5048 	vm_page_check_pageable_safe(m);
5049 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
5050 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
5051 	vm_page_cleaned_count++;
5052 
5053 	vm_page_inactive_count++;
5054 	if (m_object->internal) {
5055 		vm_page_pageable_internal_count++;
5056 	} else {
5057 		vm_page_pageable_external_count++;
5058 	}
5059 	vm_page_add_to_specialq(m, TRUE);
5060 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
5061 }
5062 
5063 /*
5064  *	vm_page_activate:
5065  *
5066  *	Put the specified page on the active list (if appropriate).
5067  *
5068  *	The page queues must be locked.
5069  */
5070 
5071 void
5072 vm_page_activate(
5073 	vm_page_t       m)
5074 {
5075 	vm_object_t     m_object;
5076 
5077 	m_object = VM_PAGE_OBJECT(m);
5078 
5079 	VM_PAGE_CHECK(m);
5080 #ifdef  FIXME_4778297
5081 	assert(!is_kernel_object(m_object));
5082 #endif
5083 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5084 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5085 	assert( !(m->vmp_absent && !m->vmp_unusual));
5086 
5087 	if (m->vmp_gobbled) {
5088 		assert( !VM_PAGE_WIRED(m));
5089 		if (!m->vmp_private && !m->vmp_fictitious) {
5090 			vm_page_wire_count--;
5091 		}
5092 		vm_page_gobble_count--;
5093 		m->vmp_gobbled = FALSE;
5094 	}
5095 	/*
5096 	 * if this page is currently on the pageout queue, we can't do the
5097 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5098 	 * and we can't remove it manually since we would need the object lock
5099 	 * (which is not required here) to decrement the activity_in_progress
5100 	 * reference which is held on the object while the page is in the pageout queue...
5101 	 * just let the normal laundry processing proceed
5102 	 */
5103 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5104 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5105 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5106 		return;
5107 	}
5108 
5109 #if DEBUG
5110 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
5111 		panic("vm_page_activate: already active");
5112 	}
5113 #endif
5114 
5115 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5116 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5117 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5118 	}
5119 
5120 	/*
5121 	 * A freshly activated page should be promoted in the donation queue.
5122 	 * So we remove it here while preserving its hint and we will enqueue
5123 	 * it again in vm_page_enqueue_active.
5124 	 */
5125 	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5126 
5127 	if (!VM_PAGE_WIRED(m)) {
5128 		vm_page_check_pageable_safe(m);
5129 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5130 		    m->vmp_dirty && m_object->internal &&
5131 		    (m_object->purgable == VM_PURGABLE_DENY ||
5132 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5133 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5134 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5135 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5136 			vm_page_throttled_count++;
5137 		} else {
5138 #if CONFIG_SECLUDED_MEMORY
5139 			if (secluded_for_filecache &&
5140 			    vm_page_secluded_target != 0 &&
5141 			    num_tasks_can_use_secluded_mem == 0 &&
5142 			    m_object->eligible_for_secluded &&
5143 			    !m->vmp_realtime) {
5144 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5145 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5146 				vm_page_secluded_count++;
5147 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5148 				vm_page_secluded_count_inuse++;
5149 				assert(!m_object->internal);
5150 //				vm_page_pageable_external_count++;
5151 			} else
5152 #endif /* CONFIG_SECLUDED_MEMORY */
5153 			vm_page_enqueue_active(m, FALSE);
5154 		}
5155 		m->vmp_reference = TRUE;
5156 		m->vmp_no_cache = FALSE;
5157 	}
5158 	VM_PAGE_CHECK(m);
5159 }
5160 
5161 
5162 /*
5163  *      vm_page_speculate:
5164  *
5165  *      Put the specified page on the speculative list (if appropriate).
5166  *
5167  *      The page queues must be locked.
5168  */
5169 void
5170 vm_page_speculate(
5171 	vm_page_t       m,
5172 	boolean_t       new)
5173 {
5174 	struct vm_speculative_age_q     *aq;
5175 	vm_object_t     m_object;
5176 
5177 	m_object = VM_PAGE_OBJECT(m);
5178 
5179 	VM_PAGE_CHECK(m);
5180 	vm_page_check_pageable_safe(m);
5181 
5182 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5183 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5184 	assert( !(m->vmp_absent && !m->vmp_unusual));
5185 	assert(m_object->internal == FALSE);
5186 
5187 	/*
5188 	 * if this page is currently on the pageout queue, we can't do the
5189 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5190 	 * and we can't remove it manually since we would need the object lock
5191 	 * (which is not required here) to decrement the activity_in_progress
5192 	 * reference which is held on the object while the page is in the pageout queue...
5193 	 * just let the normal laundry processing proceed
5194 	 */
5195 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5196 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5197 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5198 		return;
5199 	}
5200 
5201 	vm_page_queues_remove(m, FALSE);
5202 
5203 	if (!VM_PAGE_WIRED(m)) {
5204 		mach_timespec_t         ts;
5205 		clock_sec_t sec;
5206 		clock_nsec_t nsec;
5207 
5208 		clock_get_system_nanotime(&sec, &nsec);
5209 		ts.tv_sec = (unsigned int) sec;
5210 		ts.tv_nsec = nsec;
5211 
5212 		if (vm_page_speculative_count == 0) {
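		/*
		 * Note: speculative pages are grouped into aging bins, each with
		 * its own deadline (age_ts).  The current bin is filled until its
		 * deadline passes; after that we advance to the next bin (wrapping
		 * at vm_page_max_speculative_age_q) and, when the new index collides
		 * with the steal index, push the steal index forward so pageout_scan
		 * keeps claiming from the oldest bin.
		 */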
5213 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5214 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5215 
5216 			aq = &vm_page_queue_speculative[speculative_age_index];
5217 
5218 			/*
5219 			 * set the timer to begin a new group
5220 			 */
5221 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5222 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5223 
5224 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5225 		} else {
5226 			aq = &vm_page_queue_speculative[speculative_age_index];
5227 
5228 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5229 				speculative_age_index++;
5230 
5231 				if (speculative_age_index > vm_page_max_speculative_age_q) {
5232 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5233 				}
5234 				if (speculative_age_index == speculative_steal_index) {
5235 					speculative_steal_index = speculative_age_index + 1;
5236 
5237 					if (speculative_steal_index > vm_page_max_speculative_age_q) {
5238 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5239 					}
5240 				}
5241 				aq = &vm_page_queue_speculative[speculative_age_index];
5242 
5243 				if (!vm_page_queue_empty(&aq->age_q)) {
5244 					vm_page_speculate_ageit(aq);
5245 				}
5246 
5247 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5248 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5249 
5250 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5251 			}
5252 		}
5253 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5254 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5255 		vm_page_speculative_count++;
5256 		vm_page_pageable_external_count++;
5257 
5258 		if (new == TRUE) {
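		/*
		 * new == TRUE means the page was freshly created for this object
		 * rather than re-entered into the speculative queues, so charge it
		 * to the object's pages_created count (used for speculative
		 * accounting); this requires the object lock held exclusively.
		 */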
5259 			vm_object_lock_assert_exclusive(m_object);
5260 
5261 			m_object->pages_created++;
5262 #if DEVELOPMENT || DEBUG
5263 			vm_page_speculative_created++;
5264 #endif
5265 		}
5266 	}
5267 	VM_PAGE_CHECK(m);
5268 }
5269 
5270 
5271 /*
5272  * move pages from the specified aging bin to
5273  * the speculative bin that pageout_scan claims from
5274  *
5275  *      The page queues must be locked.
5276  */
5277 void
5278 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5279 {
5280 	struct vm_speculative_age_q     *sq;
5281 	vm_page_t       t;
5282 
5283 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5284 
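	/*
	 * Splice the whole aging bin 'aq' onto the tail of the AGED queue 'sq':
	 * if sq is empty we simply adopt aq's head and tail, otherwise we link
	 * aq's first page after sq's last page and move the tail pointer; in
	 * both cases the packed prev/next pointers of the moved endpoints are
	 * repointed at sq's queue head.
	 */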
5285 	if (vm_page_queue_empty(&sq->age_q)) {
5286 		sq->age_q.next = aq->age_q.next;
5287 		sq->age_q.prev = aq->age_q.prev;
5288 
5289 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5290 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5291 
5292 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5293 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5294 	} else {
5295 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5296 		t->vmp_pageq.next = aq->age_q.next;
5297 
5298 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5299 		t->vmp_pageq.prev = sq->age_q.prev;
5300 
5301 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5302 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5303 
5304 		sq->age_q.prev = aq->age_q.prev;
5305 	}
5306 	vm_page_queue_init(&aq->age_q);
5307 }
5308 
5309 
5310 void
5311 vm_page_lru(
5312 	vm_page_t       m)
5313 {
5314 	VM_PAGE_CHECK(m);
5315 	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5316 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5317 
5318 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5319 
5320 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5321 		/*
5322 		 * we don't need to do all the other work that
5323 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5324 		 * bring along for the ride
5325 		 */
5326 		assert(!m->vmp_laundry);
5327 		assert(!m->vmp_private);
5328 
5329 		m->vmp_no_cache = FALSE;
5330 
5331 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5332 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5333 
5334 		return;
5335 	}
5336 	/*
5337 	 * if this page is currently on the pageout queue, we can't do the
5338 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5339 	 * and we can't remove it manually since we would need the object lock
5340 	 * (which is not required here) to decrement the activity_in_progress
5341 	 * reference which is held on the object while the page is in the pageout queue...
5342 	 * just let the normal laundry processing proceed
5343 	 */
5344 	if (m->vmp_laundry || m->vmp_private ||
5345 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5346 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5347 	    VM_PAGE_WIRED(m)) {
5348 		return;
5349 	}
5350 
5351 	m->vmp_no_cache = FALSE;
5352 
5353 	vm_page_queues_remove(m, FALSE);
5354 
5355 	vm_page_enqueue_inactive(m, FALSE);
5356 }
5357 
5358 
5359 void
5360 vm_page_reactivate_all_throttled(void)
5361 {
5362 	vm_page_t       first_throttled, last_throttled;
5363 	vm_page_t       first_active;
5364 	vm_page_t       m;
5365 	int             extra_active_count;
5366 	int             extra_internal_count, extra_external_count;
5367 	vm_object_t     m_object;
5368 
5369 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5370 		return;
5371 	}
5372 
5373 	extra_active_count = 0;
5374 	extra_internal_count = 0;
5375 	extra_external_count = 0;
5376 	vm_page_lock_queues();
5377 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5378 		/*
5379 		 * Switch "throttled" pages to "active".
5380 		 */
5381 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5382 			VM_PAGE_CHECK(m);
5383 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5384 
5385 			m_object = VM_PAGE_OBJECT(m);
5386 
5387 			extra_active_count++;
5388 			if (m_object->internal) {
5389 				extra_internal_count++;
5390 			} else {
5391 				extra_external_count++;
5392 			}
5393 
5394 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5395 			VM_PAGE_CHECK(m);
5396 			vm_page_add_to_specialq(m, FALSE);
5397 		}
5398 
5399 		/*
5400 		 * Transfer the entire throttled queue to the regular LRU page queues.
5401 		 * We insert it at the head of the active queue, so that these pages
5402 		 * get re-evaluated by the LRU algorithm first, since they've been
5403 		 * completely out of it until now.
5404 		 */
5405 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5406 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5407 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5408 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5409 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5410 		} else {
5411 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5412 		}
5413 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5414 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5415 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5416 
5417 #if DEBUG
5418 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5419 #endif
5420 		vm_page_queue_init(&vm_page_queue_throttled);
5421 		/*
5422 		 * Adjust the global page counts.
5423 		 */
5424 		vm_page_active_count += extra_active_count;
5425 		vm_page_pageable_internal_count += extra_internal_count;
5426 		vm_page_pageable_external_count += extra_external_count;
5427 		vm_page_throttled_count = 0;
5428 	}
5429 	assert(vm_page_throttled_count == 0);
5430 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5431 	vm_page_unlock_queues();
5432 }
5433 
5434 
5435 /*
5436  * move pages from the indicated local queue to the global active queue
5437  * it's ok to fail if we're below the hard limit and force == FALSE
5438  * the nolocks == TRUE case is to allow this function to be run on
5439  * the hibernate path
5440  */
5441 
5442 void
5443 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5444 {
5445 	struct vpl      *lq;
5446 	vm_page_t       first_local, last_local;
5447 	vm_page_t       first_active;
5448 	vm_page_t       m;
5449 	uint32_t        count = 0;
5450 
5451 	if (vm_page_local_q == NULL) {
5452 		return;
5453 	}
5454 
5455 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5456 
5457 	if (nolocks == FALSE) {
5458 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5459 			if (!vm_page_trylockspin_queues()) {
5460 				return;
5461 			}
5462 		} else {
5463 			vm_page_lockspin_queues();
5464 		}
5465 
5466 		VPL_LOCK(&lq->vpl_lock);
5467 	}
5468 	if (lq->vpl_count) {
5469 		/*
5470 		 * Switch "local" pages to "active".
5471 		 */
5472 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5473 
5474 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5475 			VM_PAGE_CHECK(m);
5476 			vm_page_check_pageable_safe(m);
5477 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5478 			assert(!m->vmp_fictitious);
5479 
5480 			if (m->vmp_local_id != lid) {
5481 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5482 			}
5483 
5484 			m->vmp_local_id = 0;
5485 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5486 			VM_PAGE_CHECK(m);
5487 			vm_page_add_to_specialq(m, FALSE);
5488 			count++;
5489 		}
5490 		if (count != lq->vpl_count) {
5491 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5492 		}
5493 
5494 		/*
5495 		 * Transfer the entire local queue to the regular LRU page queues,
5495 		 * splicing it at the head of the active queue.
5496 		 */
5497 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5498 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5499 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5500 
5501 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5502 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5503 		} else {
5504 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5505 		}
5506 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5507 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5508 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5509 
5510 		vm_page_queue_init(&lq->vpl_queue);
5511 		/*
5512 		 * Adjust the global page counts.
5513 		 */
5514 		vm_page_active_count += lq->vpl_count;
5515 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5516 		vm_page_pageable_external_count += lq->vpl_external_count;
5517 		lq->vpl_count = 0;
5518 		lq->vpl_internal_count = 0;
5519 		lq->vpl_external_count = 0;
5520 	}
5521 	assert(vm_page_queue_empty(&lq->vpl_queue));
5522 
5523 	if (nolocks == FALSE) {
5524 		VPL_UNLOCK(&lq->vpl_lock);
5525 
5526 		vm_page_balance_inactive(count / 4);
5527 		vm_page_unlock_queues();
5528 	}
5529 }
5530 
5531 /*
5532  *	vm_page_part_zero_fill:
5533  *
5534  *	Zero-fill a part of the page.
5535  */
5536 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
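/*
 * When PMAP_ZERO_PART_PAGE_IMPLEMENTED is defined, the pmap layer zeroes
 * the sub-range directly; the (normally unused) fallback below instead
 * builds the result in a scratch page and copies it back over 'm'.
 */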
5537 void
5538 vm_page_part_zero_fill(
5539 	vm_page_t       m,
5540 	vm_offset_t     m_pa,
5541 	vm_size_t       len)
5542 {
5543 #if 0
5544 	/*
5545 	 * we don't hold the page queue lock
5546 	 * so this check isn't safe to make
5547 	 */
5548 	VM_PAGE_CHECK(m);
5549 #endif
5550 
5551 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5552 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5553 #else
5554 	vm_page_t       tmp;
5555 	while (1) {
5556 		tmp = vm_page_grab();
5557 		if (tmp == VM_PAGE_NULL) {
5558 			vm_page_wait(THREAD_UNINT);
5559 			continue;
5560 		}
5561 		break;
5562 	}
5563 	vm_page_zero_fill(tmp);
5564 	if (m_pa != 0) {
5565 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
5566 	}
5567 	if ((m_pa + len) < PAGE_SIZE) {
5568 		vm_page_part_copy(m, m_pa + len, tmp,
5569 		    m_pa + len, PAGE_SIZE - (m_pa + len));
5570 	}
5571 	vm_page_copy(tmp, m);
5572 	VM_PAGE_FREE(tmp);
5573 #endif
5574 }
5575 
5576 /*
5577  *	vm_page_zero_fill:
5578  *
5579  *	Zero-fill the specified page.
5580  */
5581 void
5582 vm_page_zero_fill(
5583 	vm_page_t       m)
5584 {
5585 #if 0
5586 	/*
5587 	 * we don't hold the page queue lock
5588 	 * so this check isn't safe to make
5589 	 */
5590 	VM_PAGE_CHECK(m);
5591 #endif
5592 
5593 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
5594 	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5595 }
5596 
5597 /*
5598  *	vm_page_part_copy:
5599  *
5600  *	copy part of one page to another
5601  */
5602 
5603 void
5604 vm_page_part_copy(
5605 	vm_page_t       src_m,
5606 	vm_offset_t     src_pa,
5607 	vm_page_t       dst_m,
5608 	vm_offset_t     dst_pa,
5609 	vm_size_t       len)
5610 {
5611 #if 0
5612 	/*
5613 	 * we don't hold the page queue lock
5614 	 * so this check isn't safe to make
5615 	 */
5616 	VM_PAGE_CHECK(src_m);
5617 	VM_PAGE_CHECK(dst_m);
5618 #endif
5619 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5620 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5621 }
5622 
5623 /*
5624  *	vm_page_copy:
5625  *
5626  *	Copy one page to another
5627  */
5628 
5629 int vm_page_copy_cs_validations = 0;
5630 int vm_page_copy_cs_tainted = 0;
5631 
5632 void
5633 vm_page_copy(
5634 	vm_page_t       src_m,
5635 	vm_page_t       dest_m)
5636 {
5637 	vm_object_t     src_m_object;
5638 
5639 	src_m_object = VM_PAGE_OBJECT(src_m);
5640 
5641 #if 0
5642 	/*
5643 	 * we don't hold the page queue lock
5644 	 * so this check isn't safe to make
5645 	 */
5646 	VM_PAGE_CHECK(src_m);
5647 	VM_PAGE_CHECK(dest_m);
5648 #endif
5649 	vm_object_lock_assert_held(src_m_object);
5650 
5651 	if (src_m_object != VM_OBJECT_NULL &&
5652 	    src_m_object->code_signed) {
5653 		/*
5654 		 * We're copying a page from a code-signed object.
5655 		 * Whoever ends up mapping the copy page might care about
5656 		 * the original page's integrity, so let's validate the
5657 		 * source page now.
5658 		 */
5659 		vm_page_copy_cs_validations++;
5660 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5661 #if DEVELOPMENT || DEBUG
5662 		DTRACE_VM4(codesigned_copy,
5663 		    vm_object_t, src_m_object,
5664 		    vm_object_offset_t, src_m->vmp_offset,
5665 		    int, src_m->vmp_cs_validated,
5666 		    int, src_m->vmp_cs_tainted);
5667 #endif /* DEVELOPMENT || DEBUG */
5668 	}
5669 
5670 	/*
5671 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
5672 	 * the cs_validated bit.
5673 	 */
5674 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5675 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5676 	if (dest_m->vmp_cs_tainted) {
5677 		vm_page_copy_cs_tainted++;
5678 	}
5679 	dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5680 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5681 }
5682 
5683 #if MACH_ASSERT
5684 static void
5685 _vm_page_print(
5686 	vm_page_t       p)
5687 {
5688 	printf("vm_page %p: \n", p);
5689 	printf("  pageq: next=%p prev=%p\n",
5690 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5691 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5692 	printf("  listq: next=%p prev=%p\n",
5693 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5694 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5695 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5696 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5697 	printf("  wire_count=%u\n", p->vmp_wire_count);
5698 	printf("  q_state=%u\n", p->vmp_q_state);
5699 
5700 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
5701 	    (p->vmp_laundry ? "" : "!"),
5702 	    (p->vmp_reference ? "" : "!"),
5703 	    (p->vmp_gobbled ? "" : "!"),
5704 	    (p->vmp_private ? "" : "!"));
5705 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5706 	    (p->vmp_busy ? "" : "!"),
5707 	    (p->vmp_wanted ? "" : "!"),
5708 	    (p->vmp_tabled ? "" : "!"),
5709 	    (p->vmp_fictitious ? "" : "!"),
5710 	    (p->vmp_pmapped ? "" : "!"),
5711 	    (p->vmp_wpmapped ? "" : "!"));
5712 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5713 	    (p->vmp_free_when_done ? "" : "!"),
5714 	    (p->vmp_absent ? "" : "!"),
5715 	    (VMP_ERROR_GET(p) ? "" : "!"),
5716 	    (p->vmp_dirty ? "" : "!"),
5717 	    (p->vmp_cleaning ? "" : "!"),
5718 	    (p->vmp_precious ? "" : "!"),
5719 	    (p->vmp_clustered ? "" : "!"));
5720 	printf("  %soverwriting, %srestart, %sunusual\n",
5721 	    (p->vmp_overwriting ? "" : "!"),
5722 	    (p->vmp_restart ? "" : "!"),
5723 	    (p->vmp_unusual ? "" : "!"));
5724 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5725 	    p->vmp_cs_validated,
5726 	    p->vmp_cs_tainted,
5727 	    p->vmp_cs_nx,
5728 	    (p->vmp_no_cache ? "" : "!"));
5729 
5730 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5731 }
5732 
5733 /*
5734  *	Check that the list of pages is ordered by
5735  *	ascending physical address and has no holes.
5736  */
5737 static int
5738 vm_page_verify_contiguous(
5739 	vm_page_t       pages,
5740 	unsigned int    npages)
5741 {
5742 	vm_page_t               m;
5743 	unsigned int            page_count;
5744 	vm_offset_t             prev_addr;
5745 
5746 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5747 	page_count = 1;
5748 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5749 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5750 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5751 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5752 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5753 			panic("vm_page_verify_contiguous:  not contiguous!");
5754 		}
5755 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5756 		++page_count;
5757 	}
5758 	if (page_count != npages) {
5759 		printf("pages %p actual count 0x%x but requested 0x%x\n",
5760 		    pages, page_count, npages);
5761 		panic("vm_page_verify_contiguous:  count error");
5762 	}
5763 	return 1;
5764 }
5765 
5766 
5767 /*
5768  *	Check the free lists for proper length etc.
5769  */
5770 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5771 static unsigned int
5772 vm_page_verify_free_list(
5773 	vm_page_queue_head_t    *vm_page_queue,
5774 	unsigned int    color,
5775 	vm_page_t       look_for_page,
5776 	boolean_t       expect_page)
5777 {
5778 	unsigned int    npages;
5779 	vm_page_t       m;
5780 	vm_page_t       prev_m;
5781 	boolean_t       found_page;
5782 
5783 	if (!vm_page_verify_this_free_list_enabled) {
5784 		return 0;
5785 	}
5786 
5787 	found_page = FALSE;
5788 	npages = 0;
5789 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5790 
5791 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5792 		if (m == look_for_page) {
5793 			found_page = TRUE;
5794 		}
5795 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5796 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5797 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5798 		}
5799 		if (!m->vmp_busy) {
5800 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5801 			    color, npages, m);
5802 		}
5803 		if (color != (unsigned int) -1) {
5804 			if (VM_PAGE_GET_COLOR(m) != color) {
5805 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5806 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
5807 			}
5808 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5809 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5810 				    color, npages, m, m->vmp_q_state);
5811 			}
5812 		} else {
5813 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5814 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5815 				    npages, m, m->vmp_q_state);
5816 			}
5817 		}
5818 		++npages;
5819 		prev_m = m;
5820 	}
5821 	if (look_for_page != VM_PAGE_NULL) {
5822 		unsigned int other_color;
5823 
5824 		if (expect_page && !found_page) {
5825 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5826 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5827 			_vm_page_print(look_for_page);
5828 			for (other_color = 0;
5829 			    other_color < vm_colors;
5830 			    other_color++) {
5831 				if (other_color == color) {
5832 					continue;
5833 				}
5834 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5835 				    other_color, look_for_page, FALSE);
5836 			}
5837 			if (color == (unsigned int) -1) {
5838 				vm_page_verify_free_list(&vm_lopage_queue_free,
5839 				    (unsigned int) -1, look_for_page, FALSE);
5840 			}
5841 			panic("vm_page_verify_free_list(color=%u)", color);
5842 		}
5843 		if (!expect_page && found_page) {
5844 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5845 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5846 		}
5847 	}
5848 	return npages;
5849 }
5850 
5851 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5852 static void
5853 vm_page_verify_free_lists( void )
5854 {
5855 	unsigned int    color, npages, nlopages;
5856 	boolean_t       toggle = TRUE;
5857 
5858 	if (!vm_page_verify_all_free_lists_enabled) {
5859 		return;
5860 	}
5861 
5862 	npages = 0;
5863 
5864 	vm_free_page_lock();
5865 
5866 	if (vm_page_verify_this_free_list_enabled == TRUE) {
5867 		/*
5868 		 * This variable has been set globally for extra checking of
5869 		 * each free list Q. Since we didn't set it, we don't own it
5870 		 * and we shouldn't toggle it.
5871 		 */
5872 		toggle = FALSE;
5873 	}
5874 
5875 	if (toggle == TRUE) {
5876 		vm_page_verify_this_free_list_enabled = TRUE;
5877 	}
5878 
5879 	for (color = 0; color < vm_colors; color++) {
5880 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5881 		    color, VM_PAGE_NULL, FALSE);
5882 	}
5883 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5884 	    (unsigned int) -1,
5885 	    VM_PAGE_NULL, FALSE);
5886 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5887 		panic("vm_page_verify_free_lists:  "
5888 		    "npages %u free_count %d nlopages %u lo_free_count %u",
5889 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5890 	}
5891 
5892 	if (toggle == TRUE) {
5893 		vm_page_verify_this_free_list_enabled = FALSE;
5894 	}
5895 
5896 	vm_free_page_unlock();
5897 }
5898 
5899 #endif  /* MACH_ASSERT */
5900 
5901 /*
5902  * wrapper for pmap_enter()
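 *
 * Refuses pages that are flagged with an error and derives the
 * PMAP_OPTIONS_INTERNAL / PMAP_OPTIONS_REUSABLE options from the page's
 * object before handing off to pmap_enter_options().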
5903  */
5904 kern_return_t
5905 pmap_enter_check(
5906 	pmap_t           pmap,
5907 	vm_map_address_t virtual_address,
5908 	vm_page_t        page,
5909 	vm_prot_t        protection,
5910 	vm_prot_t        fault_type,
5911 	unsigned int     flags,
5912 	boolean_t        wired)
5913 {
5914 	int             options = 0;
5915 	vm_object_t     obj;
5916 
5917 	if (VMP_ERROR_GET(page)) {
5918 		return KERN_MEMORY_FAILURE;
5919 	}
5920 	obj = VM_PAGE_OBJECT(page);
5921 	if (obj->internal) {
5922 		options |= PMAP_OPTIONS_INTERNAL;
5923 	}
5924 	if (page->vmp_reusable || obj->all_reusable) {
5925 		options |= PMAP_OPTIONS_REUSABLE;
5926 	}
5927 	return pmap_enter_options(pmap,
5928 	           virtual_address,
5929 	           VM_PAGE_GET_PHYS_PAGE(page),
5930 	           protection,
5931 	           fault_type,
5932 	           flags,
5933 	           wired,
5934 	           options,
5935 	           NULL,
5936 	           PMAP_MAPPING_TYPE_INFER);
5937 }
5938 
5939 
5940 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5941 
5942 /*
5943  *	CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
5944  */
5945 
5946 /*
5947  * Helper function used to determine if a page can be relocated.
5948  * A page is relocatable if it is in a stable, non-transient state.
5949  */
5950 static inline boolean_t
5951 vm_page_is_relocatable(vm_page_t m)
5952 {
5953 
5954 	if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
5955 	    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5956 		/*
5957 		 * Page is in a transient state
5958 		 * or a state we don't want to deal with.
5959 		 */
5960 		return FALSE;
5961 	} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5962 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5963 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5964 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5965 		/*
5966 		 * Page needs to be on one of our queues (other than the pageout or special
5967 		 * free queues) or it needs to belong to the compressor pool (which is now
5968 		 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
5969 		 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
5970 		 * locks we hold at this point...
5971 		 */
5972 		return FALSE;
5973 	} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
5974 	    (!m->vmp_tabled || m->vmp_busy)) {
5975 		/*
5976 		 * pages on the free list are always 'busy'
5977 		 * so we couldn't test for 'busy' in the check
5978 		 * for the transient states... pages that are
5979 		 * 'free' are never 'tabled', so we also couldn't
5980 		 * test for 'tabled'.  So we check here to make
5981 		 * sure that a non-free page is not busy and is
5982 		 * tabled on an object...
5983 		 */
5984 		return FALSE;
5985 	}
5986 	return TRUE;
5987 }
5988 
5989 /*
5990  * Free up the given page by possibly relocating its contents to a new page.
5991  * If the page is on an object, the object lock must be held.
5992  */
5993 static kern_return_t
5994 vm_page_relocate(vm_page_t m1, int *compressed_pages)
5995 {
5996 	int refmod = 0;
5997 	vm_object_t object = VM_PAGE_OBJECT(m1);
5998 	kern_return_t kr;
5999 
6000 	if (object == VM_OBJECT_NULL) {
6001 		return KERN_FAILURE;
6002 	}
6003 
6004 	vm_object_lock_assert_held(object);
6005 
6006 	if (VM_PAGE_WIRED(m1) ||
6007 	    m1->vmp_gobbled ||
6008 	    m1->vmp_laundry ||
6009 	    m1->vmp_wanted ||
6010 	    m1->vmp_cleaning ||
6011 	    m1->vmp_overwriting ||
6012 	    m1->vmp_free_when_done ||
6013 	    m1->vmp_busy ||
6014 	    m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6015 		return KERN_FAILURE;
6016 	}
6017 
6018 	boolean_t disconnected = FALSE;
6019 	boolean_t reusable = FALSE;
6020 
6021 	/*
6022 	 * Pages from reusable objects can be reclaimed directly.
6023 	 */
6024 	if ((m1->vmp_reusable || object->all_reusable) &&
6025 	    m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6026 	    !m1->vmp_reference) {
6027 		/*
6028 		 * reusable page...
6029 		 */
6030 
6031 		refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6032 		disconnected = TRUE;
6033 		if (refmod == 0) {
6034 			/*
6035 			 * ... not reused: can steal without relocating contents.
6036 			 */
6037 			reusable = TRUE;
6038 		}
6039 	}
6040 
6041 	if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6042 		vm_object_offset_t offset;
6043 
6044 		/* page is not reusable, we need to allocate a new page
6045 		 * and move its contents there.
6046 		 */
6047 		vm_page_t m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6048 
6049 		if (m2 == VM_PAGE_NULL) {
6050 			return KERN_RESOURCE_SHORTAGE;
6051 		}
6052 
6053 		if (!disconnected) {
6054 			if (m1->vmp_pmapped) {
6055 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6056 			} else {
6057 				refmod = 0;
6058 			}
6059 		}
6060 
6061 		/* copy the page's contents */
6062 		pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6063 
6064 		/* copy the page's state */
6065 		assert(!VM_PAGE_WIRED(m1));
6066 		assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6067 		assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6068 		assert(!m1->vmp_laundry);
6069 		m2->vmp_reference = m1->vmp_reference;
6070 		assert(!m1->vmp_gobbled);
6071 		assert(!m1->vmp_private);
6072 		m2->vmp_no_cache = m1->vmp_no_cache;
6073 		m2->vmp_xpmapped = 0;
6074 		assert(!m1->vmp_busy);
6075 		assert(!m1->vmp_wanted);
6076 		assert(!m1->vmp_fictitious);
6077 		m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6078 		m2->vmp_wpmapped = m1->vmp_wpmapped;
6079 		assert(!m1->vmp_free_when_done);
6080 		m2->vmp_absent = m1->vmp_absent;
6081 		m2->vmp_error = VMP_ERROR_GET(m1);
6082 		m2->vmp_dirty = m1->vmp_dirty;
6083 		assert(!m1->vmp_cleaning);
6084 		m2->vmp_precious = m1->vmp_precious;
6085 		m2->vmp_clustered = m1->vmp_clustered;
6086 		assert(!m1->vmp_overwriting);
6087 		m2->vmp_restart = m1->vmp_restart;
6088 		m2->vmp_unusual = m1->vmp_unusual;
6089 		m2->vmp_cs_validated = m1->vmp_cs_validated;
6090 		m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6091 		m2->vmp_cs_nx = m1->vmp_cs_nx;
6092 
6093 		m2->vmp_realtime = m1->vmp_realtime;
6094 		m1->vmp_realtime = false;
6095 
6096 		/*
6097 		 * If m1 had really been reusable,
6098 		 * we would have just stolen it, so
6099 		 * let's not propagate its "reusable"
6100 		 * bit and assert that m2 is not
6101 		 * marked as "reusable".
6102 		 */
6103 		// m2->vmp_reusable	= m1->vmp_reusable;
6104 		assert(!m2->vmp_reusable);
6105 
6106 		// assert(!m1->vmp_lopage);
6107 
6108 		if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6109 			m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6110 			/*
6111 			 * We just grabbed m2 up above and so it isn't
6112 			 * going to be on any special Q as yet and so
6113 			 * we don't need to 'remove' it from the special
6114 			 * queues. Just resetting the state should be enough.
6115 			 */
6116 			m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6117 		}
6118 
6119 		/*
6120 		 * page may need to be flushed if
6121 		 * it is marshalled into a UPL
6122 		 * that is going to be used by a device
6123 		 * that doesn't support coherency
6124 		 */
6125 		m2->vmp_written_by_kernel = TRUE;
6126 
6127 		/*
6128 		 * make sure we clear the ref/mod state
6129 		 * from the pmap layer... else we risk
6130 		 * inheriting state from the last time
6131 		 * this page was used...
6132 		 */
6133 		pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6134 		    VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6135 
6136 		if (refmod & VM_MEM_REFERENCED) {
6137 			m2->vmp_reference = TRUE;
6138 		}
6139 		if (refmod & VM_MEM_MODIFIED) {
6140 			SET_PAGE_DIRTY(m2, TRUE);
6141 		}
6142 		offset = m1->vmp_offset;
6143 
6144 		/*
6145 		 * completely cleans up the state
6146 		 * of the page so that it is ready
6147 		 * to be put onto the free list, or
6148 		 * for this purpose it looks like it
6149 		 * just came off of the free list
6150 		 */
6151 		vm_page_free_prepare(m1);
6152 
6153 		/*
6154 		 * now put the substitute page on the object
6155 		 */
6156 		vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6157 		    TRUE, FALSE, FALSE, NULL);
6158 
6159 		if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6160 			m2->vmp_pmapped = TRUE;
6161 			m2->vmp_wpmapped = TRUE;
6162 
6163 			kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6164 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6165 
6166 			assert(kr == KERN_SUCCESS);
6167 
6168 			if (compressed_pages) {
6169 				++*compressed_pages;
6170 			}
6171 		} else {
6172 			/* relocated page was not used by the compressor
6173 			 * put it on either the active or inactive lists */
6174 			if (m2->vmp_reference) {
6175 				vm_page_activate(m2);
6176 			} else {
6177 				vm_page_deactivate(m2);
6178 			}
6179 		}
6180 
6181 		/* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6182 		vm_page_wakeup_done(object, m2);
6183 
6184 		return KERN_SUCCESS;
6185 	} else {
6186 		assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6187 
6188 		/*
6189 		 * completely cleans up the state
6190 		 * of the page so that it is ready
6191 		 * to be put onto the free list, or
6192 		 * for this purpose it looks like it
6193 		 * just came off of the free list
6194 		 */
6195 		vm_page_free_prepare(m1);
6196 
6197 		/* we're done here */
6198 		return KERN_SUCCESS;
6199 	}
6200 
6201 	return KERN_FAILURE;
6202 }
6203 
6204 /*
6205  *	CONTIGUOUS PAGE ALLOCATION
6206  *
6207  *	Find a region large enough to contain at least n pages
6208  *	of contiguous physical memory.
6209  *
6210  *	This is done by traversing the vm_page_t array in a linear fashion
6211  *	we assume that the vm_page_t array has the available physical pages in an
6212  *	ordered, ascending list... this is currently true of all our implementations
6213  *      and must remain so... there can be 'holes' in the array...  we also can
6214  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6215  *      which used to happen via 'vm_page_convert'... that function was no longer
6216  *      being called and was removed...
6217  *
6218  *	The basic flow consists of stabilizing some of the interesting state of
6219  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6220  *	sweep at the beginning of the array looking for pages that meet our criteria
6221  *	for a 'stealable' page... currently we are pretty conservative... if the page
6222  *	meets this criteria and is physically contiguous to the previous page in the 'run'
6223  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
6224  *	and start to develop a new run... if at this point we've already considered
6225  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
6226  *	and mutex_pause (which will yield the processor), to keep the latency low w/r
6227  *	to other threads trying to acquire free pages (or move pages from q to q),
6228  *	and then continue from the spot we left off... we only make 1 pass through the
6229  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
6230  *      which steals the pages from the queues they're currently on... pages on the free
6231  *	queue can be stolen directly... pages that are on any of the other queues
6232  *	must be removed from the object they are tabled on... this requires taking the
6233  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
6234  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
6235  *	dump the pages we've currently stolen back to the free list, and pick up our
6236  *	scan from the point where we aborted the 'current' run.
6237  *
6238  *
6239  *	Requirements:
6240  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
6241  *
6242  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
6243  *
6244  * Algorithm:
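 *	(roughly, as implemented below)
 *	1) scan vm_pages[] linearly, growing a run of physically contiguous,
 *	   relocatable pages, dropping the locks and yielding after every
 *	   MAX_CONSIDERED_BEFORE_YIELD pages considered without progress;
 *	2) once the run reaches contig_pages, pull its free pages off the
 *	   free queues, then relocate or steal the in-use pages, aborting
 *	   (and returning the run to the free list) if an object can't be
 *	   locked or a page's state has changed;
 *	3) wire (or gobble) the resulting list and return it, retrying after
 *	   a zone_gc() if nothing suitable was found.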
6245  */
6246 
6247 #define MAX_CONSIDERED_BEFORE_YIELD     1000
6248 
6249 
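/*
 * Reset the state of the current candidate run: forget its starting
 * physical page number, zero the run length and the counts of free and
 * substitute pages considered, and set prevcontaddr to an impossible
 * value so the next page cannot appear contiguous to a stale predecessor.
 */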
6250 #define RESET_STATE_OF_RUN()    \
6251 	MACRO_BEGIN             \
6252 	prevcontaddr = -2;      \
6253 	start_pnum = -1;        \
6254 	free_considered = 0;    \
6255 	substitute_needed = 0;  \
6256 	npages = 0;             \
6257 	MACRO_END
6258 
6259 /*
6260  * Can we steal in-use (i.e. not free) pages when searching for
6261  * physically-contiguous pages ?
6262  */
6263 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
6264 
6265 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
6266 #if DEBUG
6267 int vm_page_find_contig_debug = 0;
6268 #endif
6269 
6270 static vm_page_t
6271 vm_page_find_contiguous(
6272 	unsigned int    contig_pages,
6273 	ppnum_t         max_pnum,
6274 	ppnum_t         pnum_mask,
6275 	boolean_t       wire,
6276 	int             flags)
6277 {
6278 	vm_page_t       m = NULL;
6279 	ppnum_t         prevcontaddr = 0;
6280 	ppnum_t         start_pnum = 0;
6281 	unsigned int    npages = 0, considered = 0, scanned = 0;
6282 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
6283 	unsigned int    idx_last_contig_page_found = 0;
6284 	int             free_considered = 0, free_available = 0;
6285 	int             substitute_needed = 0;
6286 	int             zone_gc_called = 0;
6287 	boolean_t       wrapped;
6288 	kern_return_t   kr;
6289 #if DEBUG
6290 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
6291 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
6292 #endif
6293 
6294 	int             yielded = 0;
6295 	int             dumped_run = 0;
6296 	int             stolen_pages = 0;
6297 	int             compressed_pages = 0;
6298 
6299 
6300 	if (contig_pages == 0) {
6301 		return VM_PAGE_NULL;
6302 	}
6303 
6304 full_scan_again:
6305 
6306 #if MACH_ASSERT
6307 	vm_page_verify_free_lists();
6308 #endif
6309 #if DEBUG
6310 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
6311 #endif
6312 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6313 
6314 	/*
6315 	 * If there are still delayed pages, try to free up some that match.
6316 	 */
6317 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
6318 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
6319 	}
6320 
6321 	vm_page_lock_queues();
6322 	vm_free_page_lock();
6323 
6324 	RESET_STATE_OF_RUN();
6325 
6326 	scanned = 0;
6327 	considered = 0;
6328 	free_available = vm_page_free_count - vm_page_free_reserved;
6329 
6330 	wrapped = FALSE;
6331 
6332 	if (flags & KMA_LOMEM) {
6333 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
6334 	} else {
6335 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
6336 	}
6337 
6338 	orig_last_idx = idx_last_contig_page_found;
6339 	last_idx = orig_last_idx;
6340 
6341 	for (page_idx = last_idx, start_idx = last_idx;
6342 	    npages < contig_pages && page_idx < vm_pages_count;
6343 	    page_idx++) {
6344 retry:
6345 		if (wrapped &&
6346 		    npages == 0 &&
6347 		    page_idx >= orig_last_idx) {
6348 			/*
6349 			 * We're back where we started and we haven't
6350 			 * found any suitable contiguous range.  Let's
6351 			 * give up.
6352 			 */
6353 			break;
6354 		}
6355 		scanned++;
6356 		m = &vm_pages[page_idx];
6357 
6358 		assert(!m->vmp_fictitious);
6359 		assert(!m->vmp_private);
6360 
6361 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
6362 			/* no more low pages... */
6363 			break;
6364 		}
6365 		if (!npages & ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
6366 			/*
6367 			 * not aligned
6368 			 */
6369 			RESET_STATE_OF_RUN();
6370 		} else if (!vm_page_is_relocatable(m)) {
6371 			/*
6372 			 * page is not relocatable */
6373 			RESET_STATE_OF_RUN();
6374 		} else {
6375 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6376 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6377 					RESET_STATE_OF_RUN();
6378 					goto did_consider;
6379 				} else {
6380 					npages = 1;
6381 					start_idx = page_idx;
6382 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6383 				}
6384 			} else {
6385 				npages++;
6386 			}
6387 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6388 
6389 			VM_PAGE_CHECK(m);
6390 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6391 				free_considered++;
6392 			} else {
6393 				/*
6394 				 * This page is not free.
6395 				 * If we can't steal used pages,
6396 				 * we have to give up this run
6397 				 * and keep looking.
6398 				 * Otherwise, we might need to
6399 				 * move the contents of this page
6400 				 * into a substitute page.
6401 				 */
6402 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6403 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6404 					substitute_needed++;
6405 				}
6406 #else
6407 				RESET_STATE_OF_RUN();
6408 #endif
6409 			}
6410 
6411 			if ((free_considered + substitute_needed) > free_available) {
6412 				/*
6413 				 * if we let this run continue
6414 				 * we will end up dropping the vm_page_free_count
6415 				 * below the reserve limit... we need to abort
6416 				 * this run, but we can at least re-consider this
6417 				 * page... thus the jump back to 'retry'
6418 				 */
6419 				RESET_STATE_OF_RUN();
6420 
6421 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6422 					considered++;
6423 					goto retry;
6424 				}
6425 				/*
6426 				 * free_available == 0
6427 				 * so can't consider any free pages... if
6428 				 * we went to retry in this case, we'd
6429 				 * get stuck looking at the same page
6430 				 * w/o making any forward progress
6431 				 * we also want to take this path if we've already
6432 				 * reached our limit that controls the lock latency
6433 				 */
6434 			}
6435 		}
6436 did_consider:
6437 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6438 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6439 
6440 			vm_free_page_unlock();
6441 			vm_page_unlock_queues();
6442 
6443 			mutex_pause(0);
6444 
6445 			PAGE_REPLACEMENT_ALLOWED(TRUE);
6446 
6447 			vm_page_lock_queues();
6448 			vm_free_page_lock();
6449 
6450 			RESET_STATE_OF_RUN();
6451 			/*
6452 			 * reset our free page limit since we
6453 			 * dropped the lock protecting the vm_page_free_queue
6454 			 */
6455 			free_available = vm_page_free_count - vm_page_free_reserved;
6456 			considered = 0;
6457 
6458 			yielded++;
6459 
6460 			goto retry;
6461 		}
6462 		considered++;
6463 	} /* main for-loop end */
6464 
6465 	m = VM_PAGE_NULL;
6466 
6467 	if (npages != contig_pages) {
6468 		if (!wrapped) {
6469 			/*
6470 			 * We didn't find a contiguous range but we didn't
6471 			 * start from the very first page.
6472 			 * Start again from the very first page.
6473 			 */
6474 			RESET_STATE_OF_RUN();
6475 			if (flags & KMA_LOMEM) {
6476 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6477 			} else {
6478 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6479 			}
6480 			last_idx = 0;
6481 			page_idx = last_idx;
6482 			wrapped = TRUE;
6483 			goto retry;
6484 		}
6485 		vm_free_page_unlock();
6486 	} else {
6487 		vm_page_t m1;
6488 		unsigned int cur_idx;
6489 		unsigned int tmp_start_idx;
6490 		vm_object_t locked_object = VM_OBJECT_NULL;
6491 		boolean_t abort_run = FALSE;
6492 
6493 		assert(page_idx - start_idx == contig_pages);
6494 
6495 		tmp_start_idx = start_idx;
6496 
6497 		/*
6498 		 * first pass through to pull the free pages
6499 		 * off of the free queue so that in case we
6500 		 * need substitute pages, we won't grab any
6501 		 * of the free pages in the run... we'll clear
6502 		 * the 'free' bit in the 2nd pass, and even in
6503 		 * an abort_run case, we'll collect all of the
6504 		 * free pages in this run and return them to the free list
6505 		 */
6506 		while (start_idx < page_idx) {
6507 			m1 = &vm_pages[start_idx++];
6508 
6509 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6510 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6511 #endif
6512 
6513 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6514 				unsigned int color;
6515 
6516 				color = VM_PAGE_GET_COLOR(m1);
6517 #if MACH_ASSERT
6518 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6519 #endif
6520 				vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6521 
6522 				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6523 #if MACH_ASSERT
6524 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6525 #endif
6526 				/*
6527 				 * Clear the "free" bit so that this page
6528 				 * does not get considered for another
6529 				 * concurrent physically-contiguous allocation.
6530 				 */
6531 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6532 				assert(m1->vmp_busy);
6533 
6534 				vm_page_free_count--;
6535 			}
6536 		}
6537 		if (flags & KMA_LOMEM) {
6538 			vm_page_lomem_find_contiguous_last_idx = page_idx;
6539 		} else {
6540 			vm_page_find_contiguous_last_idx = page_idx;
6541 		}
6542 
6543 		/*
6544 		 * we can drop the free queue lock at this point since
6545 		 * we've pulled any 'free' candidates off of the list
6546 		 * we need it dropped so that we can do a vm_page_grab
6547 		 * when substituting for pmapped/dirty pages
6548 		 */
6549 		vm_free_page_unlock();
6550 
6551 		start_idx = tmp_start_idx;
6552 		cur_idx = page_idx - 1;
6553 
6554 		while (start_idx++ < page_idx) {
6555 			/*
6556 			 * must go through the list from back to front
6557 			 * so that the page list is created in the
6558 			 * correct order - low -> high phys addresses
6559 			 */
6560 			m1 = &vm_pages[cur_idx--];
6561 
6562 			if (m1->vmp_object == 0) {
6563 				/*
6564 				 * page has already been removed from
6565 				 * the free list in the 1st pass
6566 				 */
6567 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6568 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
6569 				assert(m1->vmp_busy);
6570 				assert(!m1->vmp_wanted);
6571 				assert(!m1->vmp_laundry);
6572 			} else {
6573 				/*
6574 				 * try to relocate/steal the page
6575 				 */
6576 				if (abort_run == TRUE) {
6577 					continue;
6578 				}
6579 
6580 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6581 
6582 				vm_object_t object = VM_PAGE_OBJECT(m1);
6583 
6584 				if (object != locked_object) {
6585 					if (locked_object) {
6586 						vm_object_unlock(locked_object);
6587 						locked_object = VM_OBJECT_NULL;
6588 					}
6589 					if (vm_object_lock_try(object)) {
6590 						locked_object = object;
6591 					} else {
6592 						/* object must be locked to relocate its pages */
6593 						tmp_start_idx = cur_idx;
6594 						abort_run = TRUE;
6595 						continue;
6596 					}
6597 				}
6598 
6599 				kr = vm_page_relocate(m1, &compressed_pages);
6600 				if (kr != KERN_SUCCESS) {
6601 					if (locked_object) {
6602 						vm_object_unlock(locked_object);
6603 						locked_object = VM_OBJECT_NULL;
6604 					}
6605 					tmp_start_idx = cur_idx;
6606 					abort_run = TRUE;
6607 					continue;
6608 				}
6609 
6610 				stolen_pages++;
6611 			}
6612 
6613 			/* m1 is ours at this point ... */
6614 
6615 			if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6616 				/*
6617 				 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6618 				 * change it for pages marked as used-by-compressor.
6619 				 */
6620 				vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6621 			}
6622 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6623 			m1->vmp_snext = m;
6624 			m = m1;
6625 		}
6626 
6627 		if (locked_object) {
6628 			vm_object_unlock(locked_object);
6629 			locked_object = VM_OBJECT_NULL;
6630 		}
6631 
6632 		if (abort_run == TRUE) {
6633 			/*
6634 			 * want the index of the last
6635 			 * page in this run that was
6636 			 * successfully 'stolen', so back
6637 			 * it up 1 for the auto-decrement on use
6638 			 * and 1 more to bump back over this page
6639 			 */
6640 			page_idx = tmp_start_idx + 2;
6641 			if (page_idx >= vm_pages_count) {
6642 				if (wrapped) {
6643 					if (m != VM_PAGE_NULL) {
6644 						vm_page_unlock_queues();
6645 						vm_page_free_list(m, FALSE);
6646 						vm_page_lock_queues();
6647 						m = VM_PAGE_NULL;
6648 					}
6649 					dumped_run++;
6650 					goto done_scanning;
6651 				}
6652 				page_idx = last_idx = 0;
6653 				wrapped = TRUE;
6654 			}
6655 			abort_run = FALSE;
6656 
6657 			/*
6658 			 * We didn't find a contiguous range but we didn't
6659 			 * start from the very first page.
6660 			 * Start again from the very first page.
6661 			 */
6662 			RESET_STATE_OF_RUN();
6663 
6664 			if (flags & KMA_LOMEM) {
6665 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
6666 			} else {
6667 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6668 			}
6669 
6670 			last_idx = page_idx;
6671 
6672 			if (m != VM_PAGE_NULL) {
6673 				vm_page_unlock_queues();
6674 				vm_page_free_list(m, FALSE);
6675 				vm_page_lock_queues();
6676 				m = VM_PAGE_NULL;
6677 			}
6678 			dumped_run++;
6679 
6680 			vm_free_page_lock();
6681 			/*
6682 			 * reset our free page limit since we
6683 			 * dropped the lock protecting the vm_page_free_queue
6684 			 */
6685 			free_available = vm_page_free_count - vm_page_free_reserved;
6686 			goto retry;
6687 		}
6688 
6689 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6690 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6691 			assert(m1->vmp_wire_count == 0);
6692 
6693 			if (wire == TRUE) {
6694 				m1->vmp_wire_count++;
6695 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
6696 			} else {
6697 				m1->vmp_gobbled = TRUE;
6698 			}
6699 		}
6700 		if (wire == FALSE) {
6701 			vm_page_gobble_count += npages;
6702 		}
6703 
6704 		/*
6705 		 * gobbled pages are also counted as wired pages
6706 		 */
6707 		vm_page_wire_count += npages;
6708 
6709 		assert(vm_page_verify_contiguous(m, npages));
6710 	}
6711 done_scanning:
6712 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6713 
6714 	vm_page_unlock_queues();
6715 
6716 #if DEBUG
6717 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6718 
6719 	tv_end_sec -= tv_start_sec;
6720 	if (tv_end_usec < tv_start_usec) {
6721 		tv_end_sec--;
6722 		tv_end_usec += 1000000;
6723 	}
6724 	tv_end_usec -= tv_start_usec;
6725 	if (tv_end_usec >= 1000000) {
6726 		tv_end_sec++;
6727 		tv_end_sec -= 1000000;
6728 	}
6729 	if (vm_page_find_contig_debug) {
6730 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
6731 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6732 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
6733 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6734 	}
6735 
6736 #endif
6737 #if MACH_ASSERT
6738 	vm_page_verify_free_lists();
6739 #endif
6740 	if (m == NULL && zone_gc_called < 2) {
6741 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6742 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6743 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6744 
6745 		if (consider_buffer_cache_collect != NULL) {
6746 			(void)(*consider_buffer_cache_collect)(1);
6747 		}
6748 
6749 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6750 
6751 		zone_gc_called++;
6752 
6753 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6754 		goto full_scan_again;
6755 	}
6756 
6757 	return m;
6758 }
6759 
6760 /*
6761  *	Allocate a list of contiguous, wired pages.
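 *	size must be a multiple of PAGE_SIZE.  On success the pages are
 *	returned in ascending physical-address order and are either wired
 *	(wire == TRUE) or gobbled (wire == FALSE); gobbled pages are still
 *	charged against vm_page_wire_count.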
6762  */
6763 kern_return_t
6764 cpm_allocate(
6765 	vm_size_t       size,
6766 	vm_page_t       *list,
6767 	ppnum_t         max_pnum,
6768 	ppnum_t         pnum_mask,
6769 	boolean_t       wire,
6770 	int             flags)
6771 {
6772 	vm_page_t               pages;
6773 	unsigned int            npages;
6774 
6775 	if (size % PAGE_SIZE != 0) {
6776 		return KERN_INVALID_ARGUMENT;
6777 	}
6778 
6779 	npages = (unsigned int) (size / PAGE_SIZE);
6780 	if (npages != size / PAGE_SIZE) {
6781 		/* 32-bit overflow */
6782 		return KERN_INVALID_ARGUMENT;
6783 	}
6784 
6785 	/*
6786 	 *	Obtain a pointer to a subset of the free
6787 	 *	list large enough to satisfy the request;
6788 	 *	the region will be physically contiguous.
6789 	 */
6790 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6791 
6792 	if (pages == VM_PAGE_NULL) {
6793 		return KERN_NO_SPACE;
6794 	}
6795 	/*
6796 	 * determine need for wakeups: grabbing this run may have pushed the free count below the minimum
6797 	 */
6798 	if (vm_page_free_count < vm_page_free_min) {
6799 		vm_free_page_lock();
6800 		if (vm_pageout_running == FALSE) {
6801 			vm_free_page_unlock();
6802 			thread_wakeup((event_t) &vm_page_free_wanted);
6803 		} else {
6804 			vm_free_page_unlock();
6805 		}
6806 	}
6807 
6808 	VM_CHECK_MEMORYSTATUS;
6809 
6810 	/*
6811 	 *	The CPM pages should now be available and
6812 	 *	ordered by ascending physical address.
6813 	 */
6814 	assert(vm_page_verify_contiguous(pages, npages));
6815 
6816 	if (flags & KMA_ZERO) {
6817 		for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6818 			vm_page_zero_fill(m);
6819 		}
6820 	}
6821 
6822 	*list = pages;
6823 	return KERN_SUCCESS;
6824 }
6825 
6826 
6827 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6828 
6829 /*
6830  * when working on a 'run' of pages, it is necessary to hold
6831  * the vm_page_queue_lock (a hot global lock) for certain operations
6832  * on the page... however, the majority of the work can be done
6833  * while merely holding the object lock... in fact there are certain
6834  * collections of pages that don't require any work brokered by the
6835  * vm_page_queue_lock... to mitigate the time spent behind the global
6836  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6837  * while doing all of the work that doesn't require the vm_page_queue_lock...
6838  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6839  * necessary work for each page... we will grab the busy bit on the page
6840  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6841  * if it can't immediately take the vm_page_queue_lock in order to compete
6842  * for the locks in the same order that vm_pageout_scan takes them.
6843  * the operation names are modeled after the names of the routines that
6844  * need to be called in order to make the changes very obvious in the
6845  * original loop
6846  */
6847 
6848 void
6849 vm_page_do_delayed_work(
6850 	vm_object_t     object,
6851 	vm_tag_t        tag,
6852 	struct vm_page_delayed_work *dwp,
6853 	int             dw_count)
6854 {
6855 	int             j;
6856 	vm_page_t       m;
6857 	vm_page_t       local_free_q = VM_PAGE_NULL;
6858 
6859 	/*
6860 	 * pageout_scan takes the vm_page_lock_queues first
6861 	 * then tries for the object lock... to avoid what
6862 	 * is effectively a lock inversion, we'll go to the
6863 	 * trouble of taking them in that same order... otherwise
6864 	 * if this object contains the majority of the pages resident
6865 	 * in the UBC (or a small set of large objects actively being
6866 	 * worked on contain the majority of the pages), we could
6867 	 * cause the pageout_scan thread to 'starve' in its attempt
6868 	 * to find pages to move to the free queue, since it has to
6869 	 * successfully acquire the object lock of any candidate page
6870 	 * before it can steal/clean it.
6871 	 */
6872 	if (!vm_page_trylock_queues()) {
6873 		vm_object_unlock(object);
6874 
6875 		/*
6876 		 * "Turnstile enabled vm_pageout_scan" can be runnable
6877 		 * for a very long time without getting on a core.
6878 		 * If this is a higher priority thread it could end up
6879 		 * waiting here for a very long time, deferring to the
6880 		 * fact that pageout_scan would like this object after
6881 		 * VPS does a mutex_pause(0).
6882 		 * So we cap the number of yields in the vm_object_lock_avoid()
6883 		 * case to a single mutex_pause(0) which will give vm_pageout_scan
6884 		 * 10us to run and grab the object if needed.
6885 		 */
6886 		vm_page_lock_queues();
6887 
6888 		for (j = 0;; j++) {
6889 			if ((!vm_object_lock_avoid(object) ||
6890 			    (vps_dynamic_priority_enabled && (j > 0))) &&
6891 			    _vm_object_lock_try(object)) {
6892 				break;
6893 			}
6894 			vm_page_unlock_queues();
6895 			mutex_pause(j);
6896 			vm_page_lock_queues();
6897 		}
6898 	}
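	/*
	 * At this point both the object lock and the vm_page_queue_lock are
	 * held; apply the operations queued in each entry's dw_mask.
	 */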
6899 	for (j = 0; j < dw_count; j++, dwp++) {
6900 		m = dwp->dw_m;
6901 
6902 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6903 			vm_pageout_throttle_up(m);
6904 		}
6905 #if CONFIG_PHANTOM_CACHE
6906 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6907 			vm_phantom_cache_update(m);
6908 		}
6909 #endif
6910 		if (dwp->dw_mask & DW_vm_page_wire) {
6911 			vm_page_wire(m, tag, FALSE);
6912 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
6913 			boolean_t       queueit;
6914 
6915 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
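			/*
			 * If this same delayed-work entry is also going to free
			 * or explicitly deactivate the page, there is no point
			 * putting it back on a paging queue when its wire count
			 * drops to zero.
			 */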
6916 
6917 			vm_page_unwire(m, queueit);
6918 		}
6919 		if (dwp->dw_mask & DW_vm_page_free) {
6920 			vm_page_free_prepare_queues(m);
6921 
6922 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6923 			/*
6924 			 * Add this page to our list of reclaimed pages,
6925 			 * to be freed later.
6926 			 */
6927 			m->vmp_snext = local_free_q;
6928 			local_free_q = m;
6929 		} else {
6930 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6931 				vm_page_deactivate_internal(m, FALSE);
6932 			} else if (dwp->dw_mask & DW_vm_page_activate) {
6933 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6934 					vm_page_activate(m);
6935 				}
6936 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
6937 				vm_page_speculate(m, TRUE);
6938 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
6939 				/*
6940 				 * if we didn't hold the object lock and did this,
6941 				 * we might disconnect the page, then someone might
6942 				 * soft fault it back in, then we would put it on the
6943 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
6944 				 * page on that queue, which we don't want
6945 				 */
6946 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6947 
6948 				if ((refmod_state & VM_MEM_REFERENCED)) {
6949 					/*
6950 					 * this page has been touched since it got cleaned; let's activate it
6951 					 * if it hasn't already been
6952 					 */
6953 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6954 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6955 
6956 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6957 						vm_page_activate(m);
6958 					}
6959 				} else {
6960 					m->vmp_reference = FALSE;
6961 					vm_page_enqueue_cleaned(m);
6962 				}
6963 			} else if (dwp->dw_mask & DW_vm_page_lru) {
6964 				vm_page_lru(m);
6965 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6966 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6967 					vm_page_queues_remove(m, TRUE);
6968 				}
6969 			}
6970 			if (dwp->dw_mask & DW_set_reference) {
6971 				m->vmp_reference = TRUE;
6972 			} else if (dwp->dw_mask & DW_clear_reference) {
6973 				m->vmp_reference = FALSE;
6974 			}
6975 
6976 			if (dwp->dw_mask & DW_move_page) {
6977 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6978 					vm_page_queues_remove(m, FALSE);
6979 
6980 					assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
6981 
6982 					vm_page_enqueue_inactive(m, FALSE);
6983 				}
6984 			}
6985 			if (dwp->dw_mask & DW_clear_busy) {
6986 				m->vmp_busy = FALSE;
6987 			}
6988 
6989 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6990 				vm_page_wakeup(object, m);
6991 			}
6992 		}
6993 	}
6994 	vm_page_unlock_queues();
6995 
6996 	if (local_free_q) {
6997 		vm_page_free_list(local_free_q, TRUE);
6998 	}
6999 
7000 	VM_CHECK_MEMORYSTATUS;
7001 }
7002 
7003 __abortlike
7004 static void
7005 __vm_page_alloc_list_failed_panic(
7006 	vm_size_t       page_count,
7007 	kma_flags_t     flags,
7008 	kern_return_t   kr)
7009 {
7010 	panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7011 	    (size_t)page_count, flags, kr);
7012 }
7013 
7014 kern_return_t
7015 vm_page_alloc_list(
7016 	vm_size_t   page_count,
7017 	kma_flags_t flags,
7018 	vm_page_t  *list)
7019 {
7020 	vm_page_t       page_list = VM_PAGE_NULL;
7021 	vm_page_t       mem;
7022 	kern_return_t   kr = KERN_SUCCESS;
7023 	int             page_grab_count = 0;
7024 #if DEVELOPMENT || DEBUG
7025 	task_t          task;
7026 #endif /* DEVELOPMENT || DEBUG */
7027 
7028 	for (vm_size_t i = 0; i < page_count; i++) {
7029 		for (;;) {
7030 			if (flags & KMA_LOMEM) {
7031 				mem = vm_page_grablo();
7032 			} else {
7033 				uint_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7034 				mem = vm_page_grab_options(options);
7035 			}
7036 
7037 			if (mem != VM_PAGE_NULL) {
7038 				break;
7039 			}
7040 
7041 			if (flags & KMA_NOPAGEWAIT) {
7042 				kr = KERN_RESOURCE_SHORTAGE;
7043 				goto out;
7044 			}
7045 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
7046 				kr = KERN_RESOURCE_SHORTAGE;
7047 				goto out;
7048 			}
7049 
7050 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
7051 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
7052 
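			/*
			 * For very large requests (more than a quarter of
			 * physical memory), fail up front with
			 * KERN_RESOURCE_SHORTAGE if the request plus the
			 * already-unavailable memory could never be satisfied,
			 * rather than waiting for pages that will never come.
			 */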
7053 			if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7054 				uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7055 				if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7056 					kr = KERN_RESOURCE_SHORTAGE;
7057 					goto out;
7058 				}
7059 			}
7060 			VM_PAGE_WAIT();
7061 		}
7062 
7063 		page_grab_count++;
7064 		mem->vmp_snext = page_list;
7065 		page_list = mem;
7066 	}
7067 
7068 	if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7069 		for (mem = page_list; mem; mem = mem->vmp_snext) {
7070 			vm_page_zero_fill(mem);
7071 		}
7072 	}
7073 
7074 out:
7075 #if DEBUG || DEVELOPMENT
7076 	task = current_task_early();
7077 	if (task != NULL) {
7078 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
7079 	}
7080 #endif
7081 
7082 	if (kr == KERN_SUCCESS) {
7083 		*list = page_list;
7084 	} else if (flags & KMA_NOFAIL) {
7085 		__vm_page_alloc_list_failed_panic(page_count, flags, kr);
7086 	} else {
7087 		vm_page_free_list(page_list, FALSE);
7088 	}
7089 
7090 	return kr;
7091 }
7092 
7093 void
7094 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7095 {
7096 	page->vmp_offset = offset;
7097 }
7098 
7099 vm_page_t
7100 vm_page_get_next(vm_page_t page)
7101 {
7102 	return page->vmp_snext;
7103 }
7104 
7105 vm_object_offset_t
7106 vm_page_get_offset(vm_page_t page)
7107 {
7108 	return page->vmp_offset;
7109 }
7110 
7111 ppnum_t
7112 vm_page_get_phys_page(vm_page_t page)
7113 {
7114 	return VM_PAGE_GET_PHYS_PAGE(page);
7115 }
7116 
7117 
7118 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7119 
7120 #if HIBERNATION
7121 
7122 static vm_page_t hibernate_gobble_queue;
7123 
7124 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
7125 static int  hibernate_flush_dirty_pages(int);
7126 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
7127 
7128 void hibernate_flush_wait(void);
7129 void hibernate_mark_in_progress(void);
7130 void hibernate_clear_in_progress(void);
7131 
7132 void            hibernate_free_range(int, int);
7133 void            hibernate_hash_insert_page(vm_page_t);
7134 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
7135 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7136 ppnum_t         hibernate_lookup_paddr(unsigned int);
7137 
7138 struct hibernate_statistics {
7139 	int hibernate_considered;
7140 	int hibernate_reentered_on_q;
7141 	int hibernate_found_dirty;
7142 	int hibernate_skipped_cleaning;
7143 	int hibernate_skipped_transient;
7144 	int hibernate_skipped_precious;
7145 	int hibernate_skipped_external;
7146 	int hibernate_queue_nolock;
7147 	int hibernate_queue_paused;
7148 	int hibernate_throttled;
7149 	int hibernate_throttle_timeout;
7150 	int hibernate_drained;
7151 	int hibernate_drain_timeout;
7152 	int cd_lock_failed;
7153 	int cd_found_precious;
7154 	int cd_found_wired;
7155 	int cd_found_busy;
7156 	int cd_found_unusual;
7157 	int cd_found_cleaning;
7158 	int cd_found_laundry;
7159 	int cd_found_dirty;
7160 	int cd_found_xpmapped;
7161 	int cd_skipped_xpmapped;
7162 	int cd_local_free;
7163 	int cd_total_free;
7164 	int cd_vm_page_wire_count;
7165 	int cd_vm_struct_pages_unneeded;
7166 	int cd_pages;
7167 	int cd_discarded;
7168 	int cd_count_wire;
7169 } hibernate_stats;
7170 
7171 #if CONFIG_SPTM
7172 /**
7173  * On SPTM-based systems don't save any executable pages into the hibernation
7174  * image. The SPTM has stronger guarantees around not allowing write access to
7175  * the executable pages than on older systems, which prevents XNU from being
7176  * able to restore any pages mapped as executable.
7177  */
7178 #define HIBERNATE_XPMAPPED_LIMIT        0ULL
7179 #else /* CONFIG_SPTM */
7180 /*
7181  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7182  * so that we don't overrun the estimated image size, which would
7183  * result in a hibernation failure.
7184  *
7185  * We use a size value instead of pages because we don't want to take up more space
7186  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7187  * to have that additional space available.
7188  *
7189  * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7190  * xpmapped size.
7191  */
7192 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
7193 #endif /* CONFIG_SPTM */
7194 
7195 static int
7196 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7197 {
7198 	wait_result_t   wait_result;
7199 
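	/*
	 * Wait, in 5 second slices, for the queue's pending work to drain.
	 * A timeout on the external queue is tolerated (return 0); a timeout
	 * on any other queue is treated as a failure (return 1).
	 */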
7200 	vm_page_lock_queues();
7201 
7202 	while (!vm_page_queue_empty(&q->pgo_pending)) {
7203 		q->pgo_draining = TRUE;
7204 
7205 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7206 
7207 		vm_page_unlock_queues();
7208 
7209 		wait_result = thread_block(THREAD_CONTINUE_NULL);
7210 
7211 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7212 			hibernate_stats.hibernate_drain_timeout++;
7213 
7214 			if (q == &vm_pageout_queue_external) {
7215 				return 0;
7216 			}
7217 
7218 			return 1;
7219 		}
7220 		vm_page_lock_queues();
7221 
7222 		hibernate_stats.hibernate_drained++;
7223 	}
7224 	vm_page_unlock_queues();
7225 
7226 	return 0;
7227 }
7228 
7229 
7230 boolean_t hibernate_skip_external = FALSE;
7231 
7232 static int
7233 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7234 {
7235 	vm_page_t       m;
7236 	vm_object_t     l_object = NULL;
7237 	vm_object_t     m_object = NULL;
7238 	int             refmod_state = 0;
7239 	int             try_failed_count = 0;
7240 	int             retval = 0;
7241 	int             current_run = 0;
7242 	struct  vm_pageout_queue *iq;
7243 	struct  vm_pageout_queue *eq;
7244 	struct  vm_pageout_queue *tq;
7245 
7246 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7247 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7248 
7249 	iq = &vm_pageout_queue_internal;
7250 	eq = &vm_pageout_queue_external;
7251 
7252 	vm_page_lock_queues();
7253 
7254 	while (qcount && !vm_page_queue_empty(q)) {
7255 		if (current_run++ == 1000) {
7256 			if (hibernate_should_abort()) {
7257 				retval = 1;
7258 				break;
7259 			}
7260 			current_run = 0;
7261 		}
7262 
7263 		m = (vm_page_t) vm_page_queue_first(q);
7264 		m_object = VM_PAGE_OBJECT(m);
7265 
7266 		/*
7267 		 * check to see if we currently are working
7268 		 * with the same object... if so, we've
7269 		 * already got the lock
7270 		 */
7271 		if (m_object != l_object) {
7272 			/*
7273 			 * the object associated with candidate page is
7274 			 * different from the one we were just working
7275 			 * with... dump the lock if we still own it
7276 			 */
7277 			if (l_object != NULL) {
7278 				vm_object_unlock(l_object);
7279 				l_object = NULL;
7280 			}
7281 			/*
7282 			 * Try to lock object; since we've already got the
7283 			 * page queues lock, we can only 'try' for this one.
7284 			 * if the 'try' fails, we need to do a mutex_pause
7285 			 * to allow the owner of the object lock a chance to
7286 			 * run...
7287 			 */
7288 			if (!vm_object_lock_try_scan(m_object)) {
7289 				if (try_failed_count > 20) {
7290 					hibernate_stats.hibernate_queue_nolock++;
7291 
7292 					goto reenter_pg_on_q;
7293 				}
7294 
7295 				vm_page_unlock_queues();
7296 				mutex_pause(try_failed_count++);
7297 				vm_page_lock_queues();
7298 
7299 				hibernate_stats.hibernate_queue_paused++;
7300 				continue;
7301 			} else {
7302 				l_object = m_object;
7303 			}
7304 		}
7305 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7306 			/*
7307 			 * page is not to be cleaned
7308 			 * put it back on the head of its queue
7309 			 */
7310 			if (m->vmp_cleaning) {
7311 				hibernate_stats.hibernate_skipped_cleaning++;
7312 			} else {
7313 				hibernate_stats.hibernate_skipped_transient++;
7314 			}
7315 
7316 			goto reenter_pg_on_q;
7317 		}
7318 		if (m_object->vo_copy == VM_OBJECT_NULL) {
7319 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7320 				/*
7321 				 * let the normal hibernate image path
7322 				 * deal with these
7323 				 */
7324 				goto reenter_pg_on_q;
7325 			}
7326 		}
7327 		if (!m->vmp_dirty && m->vmp_pmapped) {
7328 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7329 
7330 			if ((refmod_state & VM_MEM_MODIFIED)) {
7331 				SET_PAGE_DIRTY(m, FALSE);
7332 			}
7333 		} else {
7334 			refmod_state = 0;
7335 		}
7336 
7337 		if (!m->vmp_dirty) {
7338 			/*
7339 			 * page is not to be cleaned
7340 			 * put it back on the head of its queue
7341 			 */
7342 			if (m->vmp_precious) {
7343 				hibernate_stats.hibernate_skipped_precious++;
7344 			}
7345 
7346 			goto reenter_pg_on_q;
7347 		}
7348 
7349 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7350 			hibernate_stats.hibernate_skipped_external++;
7351 
7352 			goto reenter_pg_on_q;
7353 		}
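		/*
		 * Select the pageout queue this page would be cleaned through;
		 * if that queue is throttled, wait below for its laundry to
		 * drain before clustering more pages onto it.
		 */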
7354 		tq = NULL;
7355 
7356 		if (m_object->internal) {
7357 			if (VM_PAGE_Q_THROTTLED(iq)) {
7358 				tq = iq;
7359 			}
7360 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7361 			tq = eq;
7362 		}
7363 
7364 		if (tq != NULL) {
7365 			wait_result_t   wait_result;
7366 			int             wait_count = 5;
7367 
7368 			if (l_object != NULL) {
7369 				vm_object_unlock(l_object);
7370 				l_object = NULL;
7371 			}
7372 
7373 			while (retval == 0) {
7374 				tq->pgo_throttled = TRUE;
7375 
7376 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7377 
7378 				vm_page_unlock_queues();
7379 
7380 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7381 
7382 				vm_page_lock_queues();
7383 
7384 				if (wait_result != THREAD_TIMED_OUT) {
7385 					break;
7386 				}
7387 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7388 					break;
7389 				}
7390 
7391 				if (hibernate_should_abort()) {
7392 					retval = 1;
7393 				}
7394 
7395 				if (--wait_count == 0) {
7396 					hibernate_stats.hibernate_throttle_timeout++;
7397 
7398 					if (tq == eq) {
7399 						hibernate_skip_external = TRUE;
7400 						break;
7401 					}
7402 					retval = 1;
7403 				}
7404 			}
7405 			if (retval) {
7406 				break;
7407 			}
7408 
7409 			hibernate_stats.hibernate_throttled++;
7410 
7411 			continue;
7412 		}
7413 		/*
7414 		 * we've already factored out pages in the laundry which
7415 		 * means this page can't be on the pageout queue so it's
7416 		 * safe to do the vm_page_queues_remove
7417 		 */
7418 		vm_page_queues_remove(m, TRUE);
7419 
7420 		if (m_object->internal == TRUE) {
7421 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7422 		}
7423 
7424 		vm_pageout_cluster(m);
7425 
7426 		hibernate_stats.hibernate_found_dirty++;
7427 
7428 		goto next_pg;
7429 
7430 reenter_pg_on_q:
7431 		vm_page_queue_remove(q, m, vmp_pageq);
7432 		vm_page_queue_enter(q, m, vmp_pageq);
7433 
7434 		hibernate_stats.hibernate_reentered_on_q++;
7435 next_pg:
7436 		hibernate_stats.hibernate_considered++;
7437 
7438 		qcount--;
7439 		try_failed_count = 0;
7440 	}
7441 	if (l_object != NULL) {
7442 		vm_object_unlock(l_object);
7443 		l_object = NULL;
7444 	}
7445 
7446 	vm_page_unlock_queues();
7447 
7448 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7449 
7450 	return retval;
7451 }
7452 
7453 
7454 static int
7455 hibernate_flush_dirty_pages(int pass)
7456 {
7457 	struct vm_speculative_age_q     *aq;
7458 	uint32_t        i;
7459 
7460 	if (vm_page_local_q) {
7461 		zpercpu_foreach_cpu(lid) {
7462 			vm_page_reactivate_local(lid, TRUE, FALSE);
7463 		}
7464 	}
7465 
7466 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
7467 		int             qcount;
7468 		vm_page_t       m;
7469 
7470 		aq = &vm_page_queue_speculative[i];
7471 
7472 		if (vm_page_queue_empty(&aq->age_q)) {
7473 			continue;
7474 		}
7475 		qcount = 0;
7476 
7477 		vm_page_lockspin_queues();
7478 
7479 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7480 			qcount++;
7481 		}
7482 		vm_page_unlock_queues();
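		/*
		 * qcount is just a snapshot taken under the lock; it acts as an
		 * upper bound on how many pages hibernate_flush_queue() will
		 * look at on this speculative age queue.
		 */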
7483 
7484 		if (qcount) {
7485 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7486 				return 1;
7487 			}
7488 		}
7489 	}
7490 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7491 		return 1;
7492 	}
7493 	/* XXX FBDP TODO: flush secluded queue */
7494 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7495 		return 1;
7496 	}
7497 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7498 		return 1;
7499 	}
7500 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7501 		return 1;
7502 	}
7503 
7504 	if (pass == 1) {
7505 		vm_compressor_record_warmup_start();
7506 	}
7507 
7508 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7509 		if (pass == 1) {
7510 			vm_compressor_record_warmup_end();
7511 		}
7512 		return 1;
7513 	}
7514 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7515 		if (pass == 1) {
7516 			vm_compressor_record_warmup_end();
7517 		}
7518 		return 1;
7519 	}
7520 	if (pass == 1) {
7521 		vm_compressor_record_warmup_end();
7522 	}
7523 
7524 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7525 		return 1;
7526 	}
7527 
7528 	return 0;
7529 }
7530 
7531 
7532 void
7533 hibernate_reset_stats()
7534 {
7535 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7536 }
7537 
7538 
7539 int
7540 hibernate_flush_memory()
7541 {
7542 	int     retval;
7543 
7544 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7545 
7546 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7547 
7548 	hibernate_cleaning_in_progress = TRUE;
7549 	hibernate_skip_external = FALSE;
7550 
7551 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7552 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7553 
7554 		vm_compressor_flush();
7555 
7556 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7557 
7558 		if (consider_buffer_cache_collect != NULL) {
7559 			unsigned int orig_wire_count;
7560 
7561 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7562 			orig_wire_count = vm_page_wire_count;
7563 
7564 			(void)(*consider_buffer_cache_collect)(1);
7565 			zone_gc(ZONE_GC_DRAIN);
7566 
7567 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7568 
7569 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7570 		}
7571 	}
7572 	hibernate_cleaning_in_progress = FALSE;
7573 
7574 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7575 
7576 	if (retval) {
7577 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7578 	}
7579 
7580 
7581 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7582 	    hibernate_stats.hibernate_considered,
7583 	    hibernate_stats.hibernate_reentered_on_q,
7584 	    hibernate_stats.hibernate_found_dirty);
7585 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7586 	    hibernate_stats.hibernate_skipped_cleaning,
7587 	    hibernate_stats.hibernate_skipped_transient,
7588 	    hibernate_stats.hibernate_skipped_precious,
7589 	    hibernate_stats.hibernate_skipped_external,
7590 	    hibernate_stats.hibernate_queue_nolock);
7591 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7592 	    hibernate_stats.hibernate_queue_paused,
7593 	    hibernate_stats.hibernate_throttled,
7594 	    hibernate_stats.hibernate_throttle_timeout,
7595 	    hibernate_stats.hibernate_drained,
7596 	    hibernate_stats.hibernate_drain_timeout);
7597 
7598 	return retval;
7599 }
7600 
7601 
7602 static void
7603 hibernate_page_list_zero(hibernate_page_list_t *list)
7604 {
7605 	uint32_t             bank;
7606 	hibernate_bitmap_t * bitmap;
7607 
7608 	bitmap = &list->bank_bitmap[0];
7609 	for (bank = 0; bank < list->bank_count; bank++) {
7610 		uint32_t last_bit;
7611 
7612 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7613 		// set out-of-bound bits at end of bitmap.
7614 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7615 		if (last_bit) {
7616 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7617 		}
7618 
7619 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7620 	}
7621 }
7622 
7623 void
7624 hibernate_free_gobble_pages(void)
7625 {
7626 	vm_page_t m, next;
7627 	uint32_t  count = 0;
7628 
7629 	m = (vm_page_t) hibernate_gobble_queue;
7630 	while (m) {
7631 		next = m->vmp_snext;
7632 		vm_page_free(m);
7633 		count++;
7634 		m = next;
7635 	}
7636 	hibernate_gobble_queue = VM_PAGE_NULL;
7637 
7638 	if (count) {
7639 		HIBLOG("Freed %d pages\n", count);
7640 	}
7641 }
7642 
7643 static boolean_t
7644 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7645 {
7646 	vm_object_t object = NULL;
7647 	int                  refmod_state;
7648 	boolean_t            discard = FALSE;
7649 
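	/*
	 * Single-pass do/while(FALSE) block: each 'break' below rejects the
	 * page as not discardable and falls through to the unlock/return.
	 */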
7650 	do {
7651 		if (m->vmp_private) {
7652 			panic("hibernate_consider_discard: private");
7653 		}
7654 
7655 		object = VM_PAGE_OBJECT(m);
7656 
7657 		if (!vm_object_lock_try(object)) {
7658 			object = NULL;
7659 			if (!preflight) {
7660 				hibernate_stats.cd_lock_failed++;
7661 			}
7662 			break;
7663 		}
7664 		if (VM_PAGE_WIRED(m)) {
7665 			if (!preflight) {
7666 				hibernate_stats.cd_found_wired++;
7667 			}
7668 			break;
7669 		}
7670 		if (m->vmp_precious) {
7671 			if (!preflight) {
7672 				hibernate_stats.cd_found_precious++;
7673 			}
7674 			break;
7675 		}
7676 		if (m->vmp_busy || !object->alive) {
7677 			/*
7678 			 *	Somebody is playing with this page.
7679 			 */
7680 			if (!preflight) {
7681 				hibernate_stats.cd_found_busy++;
7682 			}
7683 			break;
7684 		}
7685 		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7686 			/*
7687 			 * If it's unusual in any way, ignore it
7688 			 */
7689 			if (!preflight) {
7690 				hibernate_stats.cd_found_unusual++;
7691 			}
7692 			break;
7693 		}
7694 		if (m->vmp_cleaning) {
7695 			if (!preflight) {
7696 				hibernate_stats.cd_found_cleaning++;
7697 			}
7698 			break;
7699 		}
7700 		if (m->vmp_laundry) {
7701 			if (!preflight) {
7702 				hibernate_stats.cd_found_laundry++;
7703 			}
7704 			break;
7705 		}
7706 		if (!m->vmp_dirty) {
7707 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7708 
7709 			if (refmod_state & VM_MEM_REFERENCED) {
7710 				m->vmp_reference = TRUE;
7711 			}
7712 			if (refmod_state & VM_MEM_MODIFIED) {
7713 				SET_PAGE_DIRTY(m, FALSE);
7714 			}
7715 		}
7716 
7717 		/*
7718 		 * If it's clean or purgeable we can discard the page on wakeup.
7719 		 */
7720 		discard = (!m->vmp_dirty)
7721 		    || (VM_PURGABLE_VOLATILE == object->purgable)
7722 		    || (VM_PURGABLE_EMPTY == object->purgable);
7723 
7724 
7725 		if (discard == FALSE) {
7726 			if (!preflight) {
7727 				hibernate_stats.cd_found_dirty++;
7728 			}
7729 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7730 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7731 				if (!preflight) {
7732 					hibernate_stats.cd_found_xpmapped++;
7733 				}
7734 				discard = FALSE;
7735 			} else {
7736 				if (!preflight) {
7737 					hibernate_stats.cd_skipped_xpmapped++;
7738 				}
7739 			}
7740 		}
7741 	} while (FALSE);
7742 
7743 	if (object) {
7744 		vm_object_unlock(object);
7745 	}
7746 
7747 	return discard;
7748 }
7749 
7750 
7751 static void
7752 hibernate_discard_page(vm_page_t m)
7753 {
7754 	vm_object_t m_object;
7755 
7756 	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7757 		/*
7758 		 * If it's unusual in any way, ignore it
7759 		 */
7760 		return;
7761 	}
7762 
7763 	m_object = VM_PAGE_OBJECT(m);
7764 
7765 #if MACH_ASSERT || DEBUG
7766 	if (!vm_object_lock_try(m_object)) {
7767 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7768 	}
7769 #else
7770 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
7771 	 *  makes sure these locks are uncontended before sleep */
7772 #endif /* MACH_ASSERT || DEBUG */
7773 
7774 	if (m->vmp_pmapped == TRUE) {
7775 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7776 	}
7777 
7778 	if (m->vmp_laundry) {
7779 		panic("hibernate_discard_page(%p) laundry", m);
7780 	}
7781 	if (m->vmp_private) {
7782 		panic("hibernate_discard_page(%p) private", m);
7783 	}
7784 	if (m->vmp_fictitious) {
7785 		panic("hibernate_discard_page(%p) fictitious", m);
7786 	}
7787 
7788 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7789 		/* object should be on a queue */
7790 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7791 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7792 		assert(old_queue);
7793 		if (m_object->purgeable_when_ripe) {
7794 			vm_purgeable_token_delete_first(old_queue);
7795 		}
7796 		vm_object_lock_assert_exclusive(m_object);
7797 		VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);
7798 
7799 		/*
7800 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
7801 		 * accounted in the "volatile" ledger, so no change here.
7802 		 * We have to update vm_page_purgeable_count, though, since we're
7803 		 * effectively purging this object.
7804 		 */
7805 		unsigned int delta;
7806 		assert(m_object->resident_page_count >= m_object->wired_page_count);
7807 		delta = (m_object->resident_page_count - m_object->wired_page_count);
7808 		assert(vm_page_purgeable_count >= delta);
7809 		assert(delta > 0);
7810 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7811 	}
7812 
7813 	vm_page_free(m);
7814 
7815 #if MACH_ASSERT || DEBUG
7816 	vm_object_unlock(m_object);
7817 #endif  /* MACH_ASSERT || DEBUG */
7818 }
7819 
7820 /*
7821  *  Grab locks for hibernate_page_list_setall()
7822  */
7823 void
7824 hibernate_vm_lock_queues(void)
7825 {
7826 	vm_object_lock(compressor_object);
7827 	vm_page_lock_queues();
7828 	vm_free_page_lock();
7829 	lck_mtx_lock(&vm_purgeable_queue_lock);
7830 
7831 	if (vm_page_local_q) {
7832 		zpercpu_foreach(lq, vm_page_local_q) {
7833 			VPL_LOCK(&lq->vpl_lock);
7834 		}
7835 	}
7836 }
7837 
7838 void
7839 hibernate_vm_unlock_queues(void)
7840 {
7841 	if (vm_page_local_q) {
7842 		zpercpu_foreach(lq, vm_page_local_q) {
7843 			VPL_UNLOCK(&lq->vpl_lock);
7844 		}
7845 	}
7846 	lck_mtx_unlock(&vm_purgeable_queue_lock);
7847 	vm_free_page_unlock();
7848 	vm_page_unlock_queues();
7849 	vm_object_unlock(compressor_object);
7850 }
7851 
7852 #if CONFIG_SPTM
7853 static bool
7854 hibernate_sptm_should_force_page_to_wired_pagelist(sptm_paddr_t paddr)
7855 {
7856 	const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
7857 
7858 	return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG;
7859 }
7860 #endif
7861 
7862 /*
7863  *  Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7864  *  pages known to VM to not need saving are subtracted.
7865  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
7866  */
7867 
7868 void
7869 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7870     hibernate_page_list_t * page_list_wired,
7871     hibernate_page_list_t * page_list_pal,
7872     boolean_t preflight,
7873     boolean_t will_discard,
7874     uint32_t * pagesOut)
7875 {
7876 	uint64_t start, end, nsec;
7877 	vm_page_t m;
7878 	vm_page_t next;
7879 	uint32_t pages = page_list->page_count;
7880 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7881 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7882 	uint32_t count_wire = pages;
7883 	uint32_t count_discard_active    = 0;
7884 	uint32_t count_discard_inactive  = 0;
7885 	uint32_t count_retired = 0;
7886 	uint32_t count_discard_cleaned   = 0;
7887 	uint32_t count_discard_purgeable = 0;
7888 	uint32_t count_discard_speculative = 0;
7889 	uint32_t count_discard_vm_struct_pages = 0;
7890 	uint32_t i;
7891 	uint32_t             bank;
7892 	hibernate_bitmap_t * bitmap;
7893 	hibernate_bitmap_t * bitmap_wired;
7894 	boolean_t                    discard_all;
7895 	boolean_t            discard = FALSE;
7896 
7897 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7898 
7899 	if (preflight) {
7900 		page_list       = NULL;
7901 		page_list_wired = NULL;
7902 		page_list_pal   = NULL;
7903 		discard_all     = FALSE;
7904 	} else {
7905 		discard_all     = will_discard;
7906 	}
7907 
7908 #if MACH_ASSERT || DEBUG
7909 	if (!preflight) {
7910 		assert(hibernate_vm_locks_are_safe());
7911 		vm_page_lock_queues();
7912 		if (vm_page_local_q) {
7913 			zpercpu_foreach(lq, vm_page_local_q) {
7914 				VPL_LOCK(&lq->vpl_lock);
7915 			}
7916 		}
7917 	}
7918 #endif  /* MACH_ASSERT || DEBUG */
7919 
7920 
7921 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7922 
7923 	clock_get_uptime(&start);
7924 
7925 	if (!preflight) {
7926 		hibernate_page_list_zero(page_list);
7927 		hibernate_page_list_zero(page_list_wired);
7928 		hibernate_page_list_zero(page_list_pal);
7929 
7930 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7931 		hibernate_stats.cd_pages = pages;
7932 	}
7933 
7934 	if (vm_page_local_q) {
7935 		zpercpu_foreach_cpu(lid) {
7936 			vm_page_reactivate_local(lid, TRUE, !preflight);
7937 		}
7938 	}
7939 
7940 	if (preflight) {
7941 		vm_object_lock(compressor_object);
7942 		vm_page_lock_queues();
7943 		vm_free_page_lock();
7944 	}
7945 
7946 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7947 
7948 	hibernation_vmqueues_inspection = TRUE;
7949 
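	/*
	 * Pages parked on the hibernate gobble queue are wired, but their
	 * contents don't need to be preserved: mark them as not-to-be-saved
	 * in both bitmaps and drop them from the page and wire counts.
	 */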
7950 	m = (vm_page_t) hibernate_gobble_queue;
7951 	while (m) {
7952 		pages--;
7953 		count_wire--;
7954 		if (!preflight) {
7955 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7956 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7957 		}
7958 		m = m->vmp_snext;
7959 	}
7960 
7961 	if (!preflight) {
7962 		percpu_foreach(free_pages_head, free_pages) {
7963 			for (m = *free_pages_head; m; m = m->vmp_snext) {
7964 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7965 
7966 				pages--;
7967 				count_wire--;
7968 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7969 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7970 
7971 				hibernate_stats.cd_local_free++;
7972 				hibernate_stats.cd_total_free++;
7973 			}
7974 		}
7975 	}
7976 
7977 	for (i = 0; i < vm_colors; i++) {
7978 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7979 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7980 
7981 			pages--;
7982 			count_wire--;
7983 			if (!preflight) {
7984 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7985 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7986 
7987 				hibernate_stats.cd_total_free++;
7988 			}
7989 		}
7990 	}
7991 
7992 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7993 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7994 
7995 		pages--;
7996 		count_wire--;
7997 		if (!preflight) {
7998 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7999 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8000 
8001 			hibernate_stats.cd_total_free++;
8002 		}
8003 	}
8004 
8005 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
8006 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
8007 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
8008 
8009 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8010 		discard = FALSE;
8011 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
8012 		    && hibernate_consider_discard(m, preflight)) {
8013 			if (!preflight) {
8014 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8015 			}
8016 			count_discard_inactive++;
8017 			discard = discard_all;
8018 		} else {
8019 			count_throttled++;
8020 		}
8021 		count_wire--;
8022 		if (!preflight) {
8023 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8024 		}
8025 
8026 		if (discard) {
8027 			hibernate_discard_page(m);
8028 		}
8029 		m = next;
8030 	}
8031 
8032 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
8033 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8034 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8035 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8036 #if CONFIG_SPTM
8037 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(ptoa_64(VM_PAGE_GET_PHYS_PAGE(m)));
8038 #endif
8039 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8040 		discard = FALSE;
8041 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8042 		    hibernate_consider_discard(m, preflight)) {
8043 			if (!preflight) {
8044 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8045 			}
8046 			if (m->vmp_dirty) {
8047 				count_discard_purgeable++;
8048 			} else {
8049 				count_discard_inactive++;
8050 			}
8051 			discard = discard_all;
8052 		} else {
8053 			/*
8054 			 * If the page must be force-added to the wired page list, prevent it from appearing
8055 			 * in the unwired page list.
8056 			 */
8057 			if (force_to_wired_list) {
8058 				if (!preflight) {
8059 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8060 				}
8061 			} else {
8062 				count_anonymous++;
8063 			}
8064 		}
8065 		/*
8066 		 * If the page is NOT being forced into the wired page list, remove it from the
8067 		 * wired page list here.
8068 		 */
8069 		if (!force_to_wired_list) {
8070 			count_wire--;
8071 			if (!preflight) {
8072 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8073 			}
8074 		}
8075 		if (discard) {
8076 			hibernate_discard_page(m);
8077 		}
8078 		m = next;
8079 	}
8080 
8081 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8082 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8083 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8084 
8085 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8086 		discard = FALSE;
8087 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8088 		    hibernate_consider_discard(m, preflight)) {
8089 			if (!preflight) {
8090 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8091 			}
8092 			if (m->vmp_dirty) {
8093 				count_discard_purgeable++;
8094 			} else {
8095 				count_discard_cleaned++;
8096 			}
8097 			discard = discard_all;
8098 		} else {
8099 			count_cleaned++;
8100 		}
8101 		count_wire--;
8102 		if (!preflight) {
8103 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8104 		}
8105 		if (discard) {
8106 			hibernate_discard_page(m);
8107 		}
8108 		m = next;
8109 	}
8110 
8111 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8112 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8113 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8114 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8115 #if CONFIG_SPTM
8116 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(ptoa_64(VM_PAGE_GET_PHYS_PAGE(m)));
8117 #endif
8118 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8119 		discard = FALSE;
8120 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
8121 		    hibernate_consider_discard(m, preflight)) {
8122 			if (!preflight) {
8123 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8124 			}
8125 			if (m->vmp_dirty) {
8126 				count_discard_purgeable++;
8127 			} else {
8128 				count_discard_active++;
8129 			}
8130 			discard = discard_all;
8131 		} else {
8132 			/*
8133 			 * If the page must be force-added to the wired page list, prevent it from appearing
8134 			 * in the unwired page list.
8135 			 */
8136 			if (force_to_wired_list) {
8137 				if (!preflight) {
8138 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8139 				}
8140 			} else {
8141 				count_active++;
8142 			}
8143 		}
8144 		/*
8145 		 * If the page is NOT being forced into the wired page list, remove it from the
8146 		 * wired page list here.
8147 		 */
8148 		if (!force_to_wired_list) {
8149 			count_wire--;
8150 			if (!preflight) {
8151 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8152 			}
8153 		}
8154 		if (discard) {
8155 			hibernate_discard_page(m);
8156 		}
8157 		m = next;
8158 	}
8159 
8160 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8161 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8162 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8163 		bool force_to_wired_list = false;        /* Default to NOT forcing page into the wired page list */
8164 #if CONFIG_SPTM
8165 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(ptoa_64(VM_PAGE_GET_PHYS_PAGE(m)));
8166 #endif
8167 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8168 		discard = FALSE;
8169 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8170 		    hibernate_consider_discard(m, preflight)) {
8171 			if (!preflight) {
8172 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8173 			}
8174 			if (m->vmp_dirty) {
8175 				count_discard_purgeable++;
8176 			} else {
8177 				count_discard_inactive++;
8178 			}
8179 			discard = discard_all;
8180 		} else {
8181 			/*
8182 			 * If the page must be force-added to the wired page list, prevent it from appearing
8183 			 * in the unwired page list.
8184 			 */
8185 			if (force_to_wired_list) {
8186 				if (!preflight) {
8187 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8188 				}
8189 			} else {
8190 				count_inactive++;
8191 			}
8192 		}
8193 		/*
8194 		 * If the page is NOT being forced into the wired page list, remove it from the
8195 		 * wired page list here.
8196 		 */
8197 		if (!force_to_wired_list) {
8198 			count_wire--;
8199 			if (!preflight) {
8200 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8201 			}
8202 		}
8203 		if (discard) {
8204 			hibernate_discard_page(m);
8205 		}
8206 		m = next;
8207 	}
8208 	/* XXX FBDP TODO: secluded queue */
8209 
8210 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8211 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8212 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8213 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
8214 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
8215 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
8216 
8217 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8218 			discard = FALSE;
8219 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8220 			    hibernate_consider_discard(m, preflight)) {
8221 				if (!preflight) {
8222 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8223 				}
8224 				count_discard_speculative++;
8225 				discard = discard_all;
8226 			} else {
8227 				count_speculative++;
8228 			}
8229 			count_wire--;
8230 			if (!preflight) {
8231 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8232 			}
8233 			if (discard) {
8234 				hibernate_discard_page(m);
8235 			}
8236 			m = next;
8237 		}
8238 	}
8239 
8240 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
8241 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
8242 
8243 		count_compressor++;
8244 		count_wire--;
8245 		if (!preflight) {
8246 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8247 		}
8248 	}
8249 
8250 
8251 	if (preflight == FALSE && discard_all == TRUE) {
8252 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
8253 
8254 		HIBLOG("hibernate_teardown started\n");
8255 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8256 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8257 
8258 		pages -= count_discard_vm_struct_pages;
8259 		count_wire -= count_discard_vm_struct_pages;
8260 
8261 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8262 
8263 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8264 	}
8265 
8266 	if (!preflight) {
8267 		// pull wired from hibernate_bitmap
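		/*
		 * A zero bit means "this page must be saved".  OR-ing each word
		 * of the pageable bitmap with the complement of the wired
		 * bitmap marks as not-to-be-saved in page_list any page that
		 * is still to be saved via page_list_wired, so no page ends up
		 * in both lists.
		 */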
8268 		bitmap = &page_list->bank_bitmap[0];
8269 		bitmap_wired = &page_list_wired->bank_bitmap[0];
8270 		for (bank = 0; bank < page_list->bank_count; bank++) {
8271 			for (i = 0; i < bitmap->bitmapwords; i++) {
8272 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8273 			}
8274 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8275 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8276 		}
8277 	}
8278 
8279 	// machine dependent adjustments
8280 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8281 
8282 	if (!preflight) {
8283 		hibernate_stats.cd_count_wire = count_wire;
8284 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8285 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8286 	}
8287 
8288 	clock_get_uptime(&end);
8289 	absolutetime_to_nanoseconds(end - start, &nsec);
8290 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8291 
8292 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8293 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8294 	    discard_all ? "did" : "could",
8295 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8296 
8297 	if (hibernate_stats.cd_skipped_xpmapped) {
8298 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8299 	}
8300 
8301 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8302 
8303 	if (preflight && will_discard) {
8304 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8305 		/*
8306 		 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT xpmapped pages around in the hibernation image
8307 		 * even if these are clean and so we need to size the hibernation image accordingly.
8308 		 *
8309 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8310 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8311 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8312 		 * clean xpmapped pages.
8313 		 *
8314 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8315 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8316 		 */
8317 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
8318 	}
8319 
8320 	hibernation_vmqueues_inspection = FALSE;
8321 
8322 #if MACH_ASSERT || DEBUG
8323 	if (!preflight) {
8324 		if (vm_page_local_q) {
8325 			zpercpu_foreach(lq, vm_page_local_q) {
8326 				VPL_UNLOCK(&lq->vpl_lock);
8327 			}
8328 		}
8329 		vm_page_unlock_queues();
8330 	}
8331 #endif  /* MACH_ASSERT || DEBUG */
8332 
8333 	if (preflight) {
8334 		vm_free_page_unlock();
8335 		vm_page_unlock_queues();
8336 		vm_object_unlock(compressor_object);
8337 	}
8338 
8339 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8340 }
8341 
8342 void
8343 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8344 {
8345 	uint64_t  start, end, nsec;
8346 	vm_page_t m;
8347 	vm_page_t next;
8348 	uint32_t  i;
8349 	uint32_t  count_discard_active    = 0;
8350 	uint32_t  count_discard_inactive  = 0;
8351 	uint32_t  count_discard_purgeable = 0;
8352 	uint32_t  count_discard_cleaned   = 0;
8353 	uint32_t  count_discard_speculative = 0;
8354 
8355 
8356 #if MACH_ASSERT || DEBUG
8357 	vm_page_lock_queues();
8358 	if (vm_page_local_q) {
8359 		zpercpu_foreach(lq, vm_page_local_q) {
8360 			VPL_LOCK(&lq->vpl_lock);
8361 		}
8362 	}
8363 #endif  /* MACH_ASSERT || DEBUG */
8364 
8365 	clock_get_uptime(&start);
8366 
8367 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8368 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8369 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8370 
8371 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8372 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8373 			if (m->vmp_dirty) {
8374 				count_discard_purgeable++;
8375 			} else {
8376 				count_discard_inactive++;
8377 			}
8378 			hibernate_discard_page(m);
8379 		}
8380 		m = next;
8381 	}
8382 
8383 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8384 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8385 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8386 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8387 
8388 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8389 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8390 				count_discard_speculative++;
8391 				hibernate_discard_page(m);
8392 			}
8393 			m = next;
8394 		}
8395 	}
8396 
8397 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8398 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8399 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8400 
8401 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8402 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8403 			if (m->vmp_dirty) {
8404 				count_discard_purgeable++;
8405 			} else {
8406 				count_discard_inactive++;
8407 			}
8408 			hibernate_discard_page(m);
8409 		}
8410 		m = next;
8411 	}
8412 	/* XXX FBDP TODO: secluded queue */
8413 
8414 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8415 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8416 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8417 
8418 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8419 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8420 			if (m->vmp_dirty) {
8421 				count_discard_purgeable++;
8422 			} else {
8423 				count_discard_active++;
8424 			}
8425 			hibernate_discard_page(m);
8426 		}
8427 		m = next;
8428 	}
8429 
8430 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8431 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8432 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8433 
8434 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8435 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8436 			if (m->vmp_dirty) {
8437 				count_discard_purgeable++;
8438 			} else {
8439 				count_discard_cleaned++;
8440 			}
8441 			hibernate_discard_page(m);
8442 		}
8443 		m = next;
8444 	}
8445 
8446 #if MACH_ASSERT || DEBUG
8447 	if (vm_page_local_q) {
8448 		zpercpu_foreach(lq, vm_page_local_q) {
8449 			VPL_UNLOCK(&lq->vpl_lock);
8450 		}
8451 	}
8452 	vm_page_unlock_queues();
8453 #endif  /* MACH_ASSERT || DEBUG */
8454 
8455 	clock_get_uptime(&end);
8456 	absolutetime_to_nanoseconds(end - start, &nsec);
8457 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8458 	    nsec / 1000000ULL,
8459 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8460 }
8461 
8462 boolean_t       hibernate_paddr_map_inited = FALSE;
8463 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8464 vm_page_t       hibernate_rebuild_hash_list = NULL;
8465 
8466 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8467 unsigned int    hibernate_teardown_found_created_pages = 0;
8468 unsigned int    hibernate_teardown_found_free_pages = 0;
8469 unsigned int    hibernate_teardown_vm_page_free_count;
8470 
8471 
8472 struct ppnum_mapping {
8473 	struct ppnum_mapping    *ppnm_next;
8474 	ppnum_t                 ppnm_base_paddr;
8475 	unsigned int            ppnm_sindx;
8476 	unsigned int            ppnm_eindx;
8477 };
8478 
8479 struct ppnum_mapping    *ppnm_head;
8480 struct ppnum_mapping    *ppnm_last_found = NULL;
8481 
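/*
 * Each ppnum_mapping records a run of consecutive vm_pages[] entries whose
 * physical page numbers are themselves consecutive; hibernate_lookup_paddr()
 * uses this list to translate a vm_pages[] index back to a physical page
 * number.
 */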
8482 
8483 void
8484 hibernate_create_paddr_map(void)
8485 {
8486 	unsigned int    i;
8487 	ppnum_t         next_ppnum_in_run = 0;
8488 	struct ppnum_mapping *ppnm = NULL;
8489 
8490 	if (hibernate_paddr_map_inited == FALSE) {
8491 		for (i = 0; i < vm_pages_count; i++) {
8492 			if (ppnm) {
8493 				ppnm->ppnm_eindx = i;
8494 			}
8495 
8496 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8497 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
8498 
8499 				ppnm->ppnm_next = ppnm_head;
8500 				ppnm_head = ppnm;
8501 
8502 				ppnm->ppnm_sindx = i;
8503 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8504 			}
8505 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8506 		}
8507 		ppnm->ppnm_eindx = vm_pages_count;
8508 
8509 		hibernate_paddr_map_inited = TRUE;
8510 	}
8511 }
8512 
8513 ppnum_t
8514 hibernate_lookup_paddr(unsigned int indx)
8515 {
8516 	struct ppnum_mapping *ppnm = NULL;
8517 
8518 	ppnm = ppnm_last_found;
8519 
8520 	if (ppnm) {
8521 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8522 			goto done;
8523 		}
8524 	}
8525 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8526 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8527 			ppnm_last_found = ppnm;
8528 			break;
8529 		}
8530 	}
8531 	if (ppnm == NULL) {
8532 		panic("hibernate_lookup_paddr of %d failed", indx);
8533 	}
8534 done:
8535 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8536 }
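
/*
 * Worked example (hypothetical values, for illustration only): for a run
 * recorded as { ppnm_sindx = 100, ppnm_eindx = 200, ppnm_base_paddr = 0x8000 },
 * an index of 150 falls inside [100, 200) and the lookup returns
 * 0x8000 + (150 - 100) == 0x8032, i.e. the 50th physical page of that run.
 */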
8537 
8538 
8539 uint32_t
8540 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8541 {
8542 	addr64_t        saddr_aligned;
8543 	addr64_t        eaddr_aligned;
8544 	addr64_t        addr;
8545 	ppnum_t         paddr;
8546 	unsigned int    mark_as_unneeded_pages = 0;
8547 
8548 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8549 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
8550 
8551 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8552 		paddr = pmap_find_phys(kernel_pmap, addr);
8553 
8554 		assert(paddr);
8555 
8556 		hibernate_page_bitset(page_list, TRUE, paddr);
8557 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
8558 
8559 		mark_as_unneeded_pages++;
8560 	}
8561 	return mark_as_unneeded_pages;
8562 }
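
/*
 * For illustration (hypothetical addresses, assuming 4 KiB pages): with
 * saddr == 0x1234 and eaddr == 0x5678, saddr_aligned rounds up to 0x2000 and
 * eaddr_aligned rounds down to 0x5000, so only the fully contained pages at
 * 0x2000, 0x3000 and 0x4000 are marked in both bitmaps and the function
 * returns 3.
 */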
8563 
8564 
8565 void
8566 hibernate_hash_insert_page(vm_page_t mem)
8567 {
8568 	vm_page_bucket_t *bucket;
8569 	int             hash_id;
8570 	vm_object_t     m_object;
8571 
8572 	m_object = VM_PAGE_OBJECT(mem);
8573 
8574 	assert(mem->vmp_hashed);
8575 	assert(m_object);
8576 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
8577 
8578 	/*
8579 	 *	Insert it into the object/offset hash table
8580 	 */
8581 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
8582 	bucket = &vm_page_buckets[hash_id];
8583 
8584 	mem->vmp_next_m = bucket->page_list;
8585 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
8586 }
8587 
8588 
8589 void
8590 hibernate_free_range(int sindx, int eindx)
8591 {
8592 	vm_page_t       mem;
8593 	unsigned int    color;
8594 
8595 	while (sindx < eindx) {
8596 		mem = &vm_pages[sindx];
8597 
8598 		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8599 
8600 		mem->vmp_lopage = FALSE;
8601 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8602 
8603 		color = VM_PAGE_GET_COLOR(mem);
8604 #if defined(__x86_64__)
8605 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8606 #else
8607 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8608 #endif
8609 		vm_page_free_count++;
8610 
8611 		sindx++;
8612 	}
8613 }
8614 
8615 void
8616 hibernate_rebuild_vm_structs(void)
8617 {
8618 	int             i, cindx, sindx, eindx;
8619 	vm_page_t       mem, tmem, mem_next;
8620 	AbsoluteTime    startTime, endTime;
8621 	uint64_t        nsec;
8622 
8623 	if (hibernate_rebuild_needed == FALSE) {
8624 		return;
8625 	}
8626 
8627 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8628 	HIBLOG("hibernate_rebuild started\n");
8629 
8630 	clock_get_uptime(&startTime);
8631 
8632 	pal_hib_rebuild_pmap_structs();
8633 
8634 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8635 	eindx = vm_pages_count;
8636 
8637 	/*
8638 	 * Mark all the vm_pages[] that have not been initialized yet as being
8639 	 * transient. This is needed to ensure that the buddy page search is correct.
8640 	 * Without this, random data in these vm_pages[] can trip up the buddy search.
8641 	 */
8642 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8643 		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8644 	}
8645 
8646 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8647 		mem = &vm_pages[cindx];
8648 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8649 		/*
8650 		 * hibernate_teardown_vm_structs leaves the location where
8651 		 * this vm_page_t must be located in "next".
8652 		 */
8653 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8654 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8655 
8656 		sindx = (int)(tmem - &vm_pages[0]);
8657 
8658 		if (mem != tmem) {
8659 			/*
8660 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8661 			 * so move it back to its real location
8662 			 */
8663 			*tmem = *mem;
8664 			mem = tmem;
8665 		}
8666 		if (mem->vmp_hashed) {
8667 			hibernate_hash_insert_page(mem);
8668 		}
8669 		/*
8670 		 * the 'hole' between this vm_page_t and the previous
8671 		 * vm_page_t we moved needs to be initialized as
8672 		 * a range of free vm_page_t's
8673 		 */
8674 		hibernate_free_range(sindx + 1, eindx);
8675 
8676 		eindx = sindx;
8677 	}
8678 	if (sindx) {
8679 		hibernate_free_range(0, sindx);
8680 	}
8681 
8682 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8683 
8684 	/*
8685 	 * process the list of vm_page_t's that were entered in the hash,
8686 	 * but were not located in the vm_pages array... these are
8687 	 * vm_page_t's that were created on the fly (i.e. fictitious)
8688 	 */
8689 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8690 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8691 
8692 		mem->vmp_next_m = 0;
8693 		hibernate_hash_insert_page(mem);
8694 	}
8695 	hibernate_rebuild_hash_list = NULL;
8696 
8697 	clock_get_uptime(&endTime);
8698 	SUB_ABSOLUTETIME(&endTime, &startTime);
8699 	absolutetime_to_nanoseconds(endTime, &nsec);
8700 
8701 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8702 
8703 	hibernate_rebuild_needed = FALSE;
8704 
8705 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8706 }
8707 
8708 uint32_t
8709 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8710 {
8711 	unsigned int    i;
8712 	unsigned int    compact_target_indx;
8713 	vm_page_t       mem, mem_next;
8714 	vm_page_bucket_t *bucket;
8715 	unsigned int    mark_as_unneeded_pages = 0;
8716 	unsigned int    unneeded_vm_page_bucket_pages = 0;
8717 	unsigned int    unneeded_vm_pages_pages = 0;
8718 	unsigned int    unneeded_pmap_pages = 0;
8719 	addr64_t        start_of_unneeded = 0;
8720 	addr64_t        end_of_unneeded = 0;
8721 
8722 
8723 	if (hibernate_should_abort()) {
8724 		return 0;
8725 	}
8726 
8727 	hibernate_rebuild_needed = TRUE;
8728 
8729 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8730 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8731 	    vm_page_cleaned_count, compressor_object->resident_page_count);
8732 
8733 	for (i = 0; i < vm_page_bucket_count; i++) {
8734 		bucket = &vm_page_buckets[i];
8735 
8736 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8737 			assert(mem->vmp_hashed);
8738 
8739 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8740 
8741 			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8742 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8743 				hibernate_rebuild_hash_list = mem;
8744 			}
8745 		}
8746 	}
8747 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8748 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8749 
8750 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
8751 
8752 	compact_target_indx = 0;
8753 
8754 	for (i = 0; i < vm_pages_count; i++) {
8755 		mem = &vm_pages[i];
8756 
8757 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8758 			unsigned int color;
8759 
8760 			assert(mem->vmp_busy);
8761 			assert(!mem->vmp_lopage);
8762 
8763 			color = VM_PAGE_GET_COLOR(mem);
8764 
8765 			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8766 
8767 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8768 
8769 			vm_page_free_count--;
8770 
8771 			hibernate_teardown_found_free_pages++;
8772 
8773 			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8774 				compact_target_indx = i;
8775 			}
8776 		} else {
8777 			/*
8778 			 * record this vm_page_t's original location
8779 			 * we need this even if it doesn't get moved
8780 			 * as an indicator to the rebuild function that
8781 			 * we don't have to move it
8782 			 */
8783 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8784 
8785 			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8786 				/*
8787 				 * we've got a hole to fill, so
8788 				 * move this vm_page_t to its new home
8789 				 */
8790 				vm_pages[compact_target_indx] = *mem;
8791 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8792 
8793 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8794 				compact_target_indx++;
8795 			} else {
8796 				hibernate_teardown_last_valid_compact_indx = i;
8797 			}
8798 		}
8799 	}
8800 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8801 	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8802 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
8803 
8804 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8805 
8806 	if (start_of_unneeded) {
8807 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8808 		mark_as_unneeded_pages += unneeded_pmap_pages;
8809 	}
8810 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8811 
8812 	return mark_as_unneeded_pages;
8813 }
8814 
8815 
8816 #endif /* HIBERNATION */
8817 
8818 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8819 
8820 #include <mach_vm_debug.h>
8821 #if     MACH_VM_DEBUG
8822 
8823 #include <mach_debug/hash_info.h>
8824 #include <vm/vm_debug_internal.h>
8825 
8826 /*
8827  *	Routine:	vm_page_info
8828  *	Purpose:
8829  *		Return information about the global VP table.
8830  *		Fills the buffer with as much information as possible
8831  *		and returns the desired size of the buffer.
8832  *	Conditions:
8833  *		Nothing locked.  The caller should provide
8834  *		possibly-pageable memory.
8835  */
8836 
8837 unsigned int
8838 vm_page_info(
8839 	hash_info_bucket_t *info,
8840 	unsigned int count)
8841 {
8842 	unsigned int i;
8843 	lck_spin_t      *bucket_lock;
8844 
8845 	if (vm_page_bucket_count < count) {
8846 		count = vm_page_bucket_count;
8847 	}
8848 
8849 	for (i = 0; i < count; i++) {
8850 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
8851 		unsigned int bucket_count = 0;
8852 		vm_page_t m;
8853 
8854 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8855 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8856 
8857 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8858 		    m != VM_PAGE_NULL;
8859 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8860 			bucket_count++;
8861 		}
8862 
8863 		lck_spin_unlock(bucket_lock);
8864 
8865 		/* don't touch pageable memory while holding locks */
8866 		info[i].hib_count = bucket_count;
8867 	}
8868 
8869 	return vm_page_bucket_count;
8870 }
8871 #endif  /* MACH_VM_DEBUG */
8872 
8873 #if VM_PAGE_BUCKETS_CHECK
8874 void
8875 vm_page_buckets_check(void)
8876 {
8877 	unsigned int i;
8878 	vm_page_t p;
8879 	unsigned int p_hash;
8880 	vm_page_bucket_t *bucket;
8881 	lck_spin_t      *bucket_lock;
8882 
8883 	if (!vm_page_buckets_check_ready) {
8884 		return;
8885 	}
8886 
8887 #if HIBERNATION
8888 	if (hibernate_rebuild_needed ||
8889 	    hibernate_rebuild_hash_list) {
8890 		panic("BUCKET_CHECK: hibernation in progress: "
8891 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
8892 		    hibernate_rebuild_needed,
8893 		    hibernate_rebuild_hash_list);
8894 	}
8895 #endif /* HIBERNATION */
8896 
8897 #if VM_PAGE_FAKE_BUCKETS
8898 	char *cp;
8899 	for (cp = (char *) vm_page_fake_buckets_start;
8900 	    cp < (char *) vm_page_fake_buckets_end;
8901 	    cp++) {
8902 		if (*cp != 0x5a) {
8903 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
8904 			    "[0x%llx:0x%llx]\n",
8905 			    cp,
8906 			    (uint64_t) vm_page_fake_buckets_start,
8907 			    (uint64_t) vm_page_fake_buckets_end);
8908 		}
8909 	}
8910 #endif /* VM_PAGE_FAKE_BUCKETS */
8911 
8912 	for (i = 0; i < vm_page_bucket_count; i++) {
8913 		vm_object_t     p_object;
8914 
8915 		bucket = &vm_page_buckets[i];
8916 		if (!bucket->page_list) {
8917 			continue;
8918 		}
8919 
8920 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8921 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8922 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8923 
8924 		while (p != VM_PAGE_NULL) {
8925 			p_object = VM_PAGE_OBJECT(p);
8926 
8927 			if (!p->vmp_hashed) {
8928 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8929 				    "hash %d in bucket %d at %p "
8930 				    "is not hashed\n",
8931 				    p, p_object, p->vmp_offset,
8932 				    p_hash, i, bucket);
8933 			}
8934 			p_hash = vm_page_hash(p_object, p->vmp_offset);
8935 			if (p_hash != i) {
8936 				panic("BUCKET_CHECK: corruption in bucket %d "
8937 				    "at %p: page %p object %p offset 0x%llx "
8938 				    "hash %d\n",
8939 				    i, bucket, p, p_object, p->vmp_offset,
8940 				    p_hash);
8941 			}
8942 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8943 		}
8944 		lck_spin_unlock(bucket_lock);
8945 	}
8946 
8947 //	printf("BUCKET_CHECK: checked buckets\n");
8948 }
8949 #endif /* VM_PAGE_BUCKETS_CHECK */
8950 
8951 /*
8952  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8953  * local queues if they exist... it's the only spot in the system where we add pages
8954  * to those queues...  once on those queues, those pages can only move to one of the
8955  * global page queues or the free queues... they NEVER move from local q to local q.
8956  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8957  * the global vm_page_queue_lock at this point...  we still need to take the local lock
8958  * in case this operation is being run on a different CPU than the local queue's identity,
8959  * but we don't have to worry about the page moving to a global queue or becoming wired
8960  * while we're grabbing the local lock since those operations would require the global
8961  * vm_page_queue_lock to be held, and we already own it.
8962  *
8963  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8964  * 'wired' and local are ALWAYS mutually exclusive conditions.
8965  */
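
/*
 * A minimal sketch of the expected calling pattern (hypothetical caller, not
 * taken from this file): the global queue lock is acquired first, which is
 * what makes the 'local' state stable as described above.
 *
 *	vm_page_lock_queues();
 *	vm_page_queues_remove(m, TRUE);
 *	vm_page_unlock_queues();
 */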
8966 
8967 void
8968 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8969 {
8970 	boolean_t       was_pageable = TRUE;
8971 	vm_object_t     m_object;
8972 
8973 	m_object = VM_PAGE_OBJECT(mem);
8974 
8975 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8976 
8977 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8978 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8979 		if (remove_from_specialq == TRUE) {
8980 			vm_page_remove_from_specialq(mem);
8981 		}
8982 		/*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
8983 		 *       assert(mem->vmp_specialq.next != 0);
8984 		 *       assert(mem->vmp_specialq.prev != 0);
8985 		 *  } else {*/
8986 		if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
8987 			assert(mem->vmp_specialq.next == 0);
8988 			assert(mem->vmp_specialq.prev == 0);
8989 		}
8990 		return;
8991 	}
8992 
8993 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8994 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8995 		assert(mem->vmp_specialq.next == 0 &&
8996 		    mem->vmp_specialq.prev == 0 &&
8997 		    mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8998 		return;
8999 	}
9000 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9001 		/*
9002 		 * might put these guys on a list for debugging purposes
9003 		 * if we do, we'll need to remove this assert
9004 		 */
9005 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9006 		assert(mem->vmp_specialq.next == 0 &&
9007 		    mem->vmp_specialq.prev == 0);
9008 		/*
9009 		 * Recall that vmp_on_specialq also means a request to put
9010 		 * it on the special Q. So we don't want to reset that bit
9011 		 * just because a wiring request came in. We might want to
9012 		 * put it on the special queue post-unwiring.
9013 		 *
9014 		 * &&
9015 		 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9016 		 */
9017 		return;
9018 	}
9019 
9020 	assert(m_object != compressor_object);
9021 	assert(!is_kernel_object(m_object));
9022 	assert(!mem->vmp_fictitious);
9023 
9024 	switch (mem->vmp_q_state) {
9025 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
9026 	{
9027 		struct vpl      *lq;
9028 
9029 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
9030 		VPL_LOCK(&lq->vpl_lock);
9031 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
9032 		mem->vmp_local_id = 0;
9033 		lq->vpl_count--;
9034 		if (m_object->internal) {
9035 			lq->vpl_internal_count--;
9036 		} else {
9037 			lq->vpl_external_count--;
9038 		}
9039 		VPL_UNLOCK(&lq->vpl_lock);
9040 		was_pageable = FALSE;
9041 		break;
9042 	}
9043 	case VM_PAGE_ON_ACTIVE_Q:
9044 	{
9045 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
9046 		vm_page_active_count--;
9047 		break;
9048 	}
9049 
9050 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
9051 	{
9052 		assert(m_object->internal == TRUE);
9053 
9054 		vm_page_inactive_count--;
9055 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
9056 		vm_page_anonymous_count--;
9057 
9058 		vm_purgeable_q_advance_all();
9059 		vm_page_balance_inactive(3);
9060 		break;
9061 	}
9062 
9063 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
9064 	{
9065 		assert(m_object->internal == FALSE);
9066 
9067 		vm_page_inactive_count--;
9068 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
9069 		vm_purgeable_q_advance_all();
9070 		vm_page_balance_inactive(3);
9071 		break;
9072 	}
9073 
9074 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
9075 	{
9076 		assert(m_object->internal == FALSE);
9077 
9078 		vm_page_inactive_count--;
9079 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
9080 		vm_page_cleaned_count--;
9081 		vm_page_balance_inactive(3);
9082 		break;
9083 	}
9084 
9085 	case VM_PAGE_ON_THROTTLED_Q:
9086 	{
9087 		assert(m_object->internal == TRUE);
9088 
9089 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
9090 		vm_page_throttled_count--;
9091 		was_pageable = FALSE;
9092 		break;
9093 	}
9094 
9095 	case VM_PAGE_ON_SPECULATIVE_Q:
9096 	{
9097 		assert(m_object->internal == FALSE);
9098 
9099 		vm_page_remque(&mem->vmp_pageq);
9100 		vm_page_speculative_count--;
9101 		vm_page_balance_inactive(3);
9102 		break;
9103 	}
9104 
9105 #if CONFIG_SECLUDED_MEMORY
9106 	case VM_PAGE_ON_SECLUDED_Q:
9107 	{
9108 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
9109 		vm_page_secluded_count--;
9110 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9111 		if (m_object == VM_OBJECT_NULL) {
9112 			vm_page_secluded_count_free--;
9113 			was_pageable = FALSE;
9114 		} else {
9115 			assert(!m_object->internal);
9116 			vm_page_secluded_count_inuse--;
9117 			was_pageable = FALSE;
9118 //			was_pageable = TRUE;
9119 		}
9120 		break;
9121 	}
9122 #endif /* CONFIG_SECLUDED_MEMORY */
9123 
9124 	default:
9125 	{
9126 		/*
9127 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
9128 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
9129 		 *              the caller is responsible for determing if the page is on that queue, and if so, must
9130 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
9131 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
9132 		 *
9133 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
9134 		 *	or any of the undefined states
9135 		 */
9136 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
9137 		break;
9138 	}
9139 	}
9140 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
9141 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
9142 
9143 	if (remove_from_specialq == TRUE) {
9144 		vm_page_remove_from_specialq(mem);
9145 	}
9146 	if (was_pageable) {
9147 		if (m_object->internal) {
9148 			vm_page_pageable_internal_count--;
9149 		} else {
9150 			vm_page_pageable_external_count--;
9151 		}
9152 	}
9153 }
9154 
9155 void
9156 vm_page_remove_internal(vm_page_t page)
9157 {
9158 	vm_object_t __object = VM_PAGE_OBJECT(page);
9159 	if (page == __object->memq_hint) {
9160 		vm_page_t       __new_hint;
9161 		vm_page_queue_entry_t   __qe;
9162 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
9163 		if (vm_page_queue_end(&__object->memq, __qe)) {
9164 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
9165 			if (vm_page_queue_end(&__object->memq, __qe)) {
9166 				__qe = NULL;
9167 			}
9168 		}
9169 		__new_hint = (vm_page_t)((uintptr_t) __qe);
9170 		__object->memq_hint = __new_hint;
9171 	}
9172 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
9173 #if CONFIG_SECLUDED_MEMORY
9174 	if (__object->eligible_for_secluded) {
9175 		vm_page_secluded.eligible_for_secluded--;
9176 	}
9177 #endif /* CONFIG_SECLUDED_MEMORY */
9178 }
9179 
9180 void
9181 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
9182 {
9183 	vm_object_t     m_object;
9184 
9185 	m_object = VM_PAGE_OBJECT(mem);
9186 
9187 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9188 	assert(!mem->vmp_fictitious);
9189 	assert(!mem->vmp_laundry);
9190 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9191 	vm_page_check_pageable_safe(mem);
9192 
9193 	if (m_object->internal) {
9194 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
9195 
9196 		if (first == TRUE) {
9197 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
9198 		} else {
9199 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
9200 		}
9201 
9202 		vm_page_anonymous_count++;
9203 		vm_page_pageable_internal_count++;
9204 	} else {
9205 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
9206 
9207 		if (first == TRUE) {
9208 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
9209 		} else {
9210 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
9211 		}
9212 
9213 		vm_page_pageable_external_count++;
9214 	}
9215 	vm_page_inactive_count++;
9216 	token_new_pagecount++;
9217 
9218 	vm_page_add_to_specialq(mem, FALSE);
9219 }
9220 
9221 void
9222 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
9223 {
9224 	vm_object_t     m_object;
9225 
9226 	m_object = VM_PAGE_OBJECT(mem);
9227 
9228 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9229 	assert(!mem->vmp_fictitious);
9230 	assert(!mem->vmp_laundry);
9231 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9232 	vm_page_check_pageable_safe(mem);
9233 
9234 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
9235 	if (first == TRUE) {
9236 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
9237 	} else {
9238 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
9239 	}
9240 	vm_page_active_count++;
9241 
9242 	if (m_object->internal) {
9243 		vm_page_pageable_internal_count++;
9244 	} else {
9245 		vm_page_pageable_external_count++;
9246 	}
9247 
9248 	vm_page_add_to_specialq(mem, FALSE);
9249 	vm_page_balance_inactive(3);
9250 }
9251 
9252 /*
9253  * Pages from special kernel objects shouldn't
9254  * be placed on pageable queues.
9255  */
9256 void
9257 vm_page_check_pageable_safe(vm_page_t page)
9258 {
9259 	vm_object_t     page_object;
9260 
9261 	page_object = VM_PAGE_OBJECT(page);
9262 
9263 	if (is_kernel_object(page_object)) {
9264 		panic("vm_page_check_pageable_safe: trying to add page"
9265 		    " from a kernel object to pageable queue");
9266 	}
9267 
9268 	if (page_object == compressor_object) {
9269 		panic("vm_page_check_pageable_safe: trying to add page"
9270 		    " from compressor object (%p) to pageable queue", compressor_object);
9271 	}
9272 }
9273 
9274 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9275 * wired page diagnose
9276 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9277 
9278 #include <libkern/OSKextLibPrivate.h>
9279 
9280 #define KA_SIZE(namelen, subtotalscount)        \
9281 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9282 
9283 #define KA_NAME(alloc)  \
9284 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9285 
9286 #define KA_NAME_LEN(alloc)      \
9287     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
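
/*
 * Layout sketch implied by the macros above (illustrative sizes only): a site
 * allocated with KA_SIZE(namelen, subtotalscount) packs the fixed header,
 * then 'subtotalscount' struct vm_allocation_total entries, then the name and
 * its NUL terminator. For example, KA_SIZE(4, 2) reserves
 * sizeof(struct vm_allocation_site) + 2 * sizeof(struct vm_allocation_total) + 5
 * bytes, and KA_NAME() evaluates to (char *)&alloc->subtotals[2], the first
 * byte past the subtotal array.
 */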
9288 
9289 vm_tag_t
9290 vm_tag_bt(void)
9291 {
9292 	uintptr_t* frameptr;
9293 	uintptr_t* frameptr_next;
9294 	uintptr_t retaddr;
9295 	uintptr_t kstackb, kstackt;
9296 	const vm_allocation_site_t * site;
9297 	thread_t cthread;
9298 	kern_allocation_name_t name;
9299 
9300 	cthread = current_thread();
9301 	if (__improbable(cthread == NULL)) {
9302 		return VM_KERN_MEMORY_OSFMK;
9303 	}
9304 
9305 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9306 		if (!name->tag) {
9307 			vm_tag_alloc(name);
9308 		}
9309 		return name->tag;
9310 	}
9311 
9312 	kstackb = cthread->kernel_stack;
9313 	kstackt = kstackb + kernel_stack_size;
9314 
9315 	/* Load stack frame pointer (EBP on x86) into frameptr */
9316 	frameptr = __builtin_frame_address(0);
9317 	site = NULL;
9318 	while (frameptr != NULL) {
9319 		/* Verify thread stack bounds */
9320 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9321 			break;
9322 		}
9323 
9324 		/* Next frame pointer is pointed to by the previous one */
9325 		frameptr_next = (uintptr_t*) *frameptr;
9326 #if defined(HAS_APPLE_PAC)
9327 		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
9328 #endif
9329 
9330 		/* Pull return address from one spot above the frame pointer */
9331 		retaddr = *(frameptr + 1);
9332 
9333 #if defined(HAS_APPLE_PAC)
9334 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9335 #endif
9336 
9337 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9338 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9339 			site = OSKextGetAllocationSiteForCaller(retaddr);
9340 			break;
9341 		}
9342 		frameptr = frameptr_next;
9343 	}
9344 
9345 	return site ? site->tag : VM_KERN_MEMORY_NONE;
9346 }
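
/*
 * Frame layout assumed by the walk above (standard frame-pointer convention):
 * frameptr[0] holds the caller's saved frame pointer and frameptr[1] the
 * return address, which is why the bounds check requires (frameptr + 2) to
 * still lie within [kstackb, kstackt] before either slot is dereferenced.
 */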
9347 
9348 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9349 
9350 void
9351 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9352 {
9353 	vm_tag_t tag;
9354 	uint64_t avail;
9355 	uint32_t idx;
9356 	vm_allocation_site_t * prev;
9357 
9358 	if (site->tag) {
9359 		return;
9360 	}
9361 
9362 	idx = 0;
9363 	while (TRUE) {
9364 		avail = free_tag_bits[idx];
9365 		if (avail) {
9366 			tag = (vm_tag_t)__builtin_clzll(avail);
9367 			avail &= ~(1ULL << (63 - tag));
9368 			free_tag_bits[idx] = avail;
9369 			tag += (idx << 6);
9370 			break;
9371 		}
9372 		idx++;
9373 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
9374 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9375 				prev = vm_allocation_sites[idx];
9376 				if (!prev) {
9377 					continue;
9378 				}
9379 				if (!KA_NAME_LEN(prev)) {
9380 					continue;
9381 				}
9382 				if (!prev->tag) {
9383 					continue;
9384 				}
9385 				if (prev->total) {
9386 					continue;
9387 				}
9388 				if (1 != prev->refcount) {
9389 					continue;
9390 				}
9391 
9392 				assert(idx == prev->tag);
9393 				tag = (vm_tag_t)idx;
9394 				prev->tag = VM_KERN_MEMORY_NONE;
9395 				*releasesiteP = prev;
9396 				break;
9397 			}
9398 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9399 				tag = VM_KERN_MEMORY_ANY;
9400 			}
9401 			break;
9402 		}
9403 	}
9404 	site->tag = tag;
9405 
9406 	OSAddAtomic16(1, &site->refcount);
9407 
9408 	if (VM_KERN_MEMORY_ANY != tag) {
9409 		vm_allocation_sites[tag] = site;
9410 	}
9411 
9412 	if (tag > vm_allocation_tag_highest) {
9413 		vm_allocation_tag_highest = tag;
9414 	}
9415 }
9416 
9417 static void
9418 vm_tag_free_locked(vm_tag_t tag)
9419 {
9420 	uint64_t avail;
9421 	uint32_t idx;
9422 	uint64_t bit;
9423 
9424 	if (VM_KERN_MEMORY_ANY == tag) {
9425 		return;
9426 	}
9427 
9428 	idx = (tag >> 6);
9429 	avail = free_tag_bits[idx];
9430 	tag &= 63;
9431 	bit = (1ULL << (63 - tag));
9432 	assert(!(avail & bit));
9433 	free_tag_bits[idx] = (avail | bit);
9434 }
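
/*
 * Bitmap arithmetic example (hypothetical tag value): for tag == 70,
 * idx == 70 >> 6 == 1 and the in-word position is 70 & 63 == 6, so the tag is
 * tracked by bit (1ULL << (63 - 6)) of free_tag_bits[1]. Conversely, the
 * allocation path above recovers the lowest free in-word tag with
 * __builtin_clzll(avail), since the highest set bit corresponds to the
 * smallest tag number within the word.
 */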
9435 
9436 static void
9437 vm_tag_init(void)
9438 {
9439 	vm_tag_t tag;
9440 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9441 		vm_tag_free_locked(tag);
9442 	}
9443 
9444 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9445 		vm_tag_free_locked(tag);
9446 	}
9447 }
9448 
9449 vm_tag_t
9450 vm_tag_alloc(vm_allocation_site_t * site)
9451 {
9452 	vm_allocation_site_t * releasesite;
9453 
9454 	if (!site->tag) {
9455 		releasesite = NULL;
9456 		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9457 		vm_tag_alloc_locked(site, &releasesite);
9458 		lck_ticket_unlock(&vm_allocation_sites_lock);
9459 		if (releasesite) {
9460 			kern_allocation_name_release(releasesite);
9461 		}
9462 	}
9463 
9464 	return site->tag;
9465 }
9466 
9467 #if VM_BTLOG_TAGS
9468 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
9469 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
9470 #define VM_TAG_BTLOG_SIZE (16u << 10)
9471 
9472 btlog_t vmtaglog_btlog;
9473 vm_tag_t vmtaglog_tag;
9474 
9475 static void
9476 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
9477 {
9478 	if (is_kernel_object(object)) {
9479 		/* kernel object backtraces are tracked in vm entries */
9480 		return;
9481 	}
9482 	if (delta > 0) {
9483 		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
9484 		btlog_record(vmtaglog_btlog, object, 0, ref);
9485 	} else if (object->wired_page_count == 0) {
9486 		btlog_erase(vmtaglog_btlog, object);
9487 	}
9488 }
9489 
9490 #ifndef ARRAY_SIZE
9491 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
9492 #endif /* ARRAY_SIZE */
9493 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
9494 const char *vm_kern_memory_strs[] = {
9495 	VM_KERN_MEMORY_ELEM(OSFMK),
9496 	VM_KERN_MEMORY_ELEM(BSD),
9497 	VM_KERN_MEMORY_ELEM(IOKIT),
9498 	VM_KERN_MEMORY_ELEM(LIBKERN),
9499 	VM_KERN_MEMORY_ELEM(OSKEXT),
9500 	VM_KERN_MEMORY_ELEM(KEXT),
9501 	VM_KERN_MEMORY_ELEM(IPC),
9502 	VM_KERN_MEMORY_ELEM(STACK),
9503 	VM_KERN_MEMORY_ELEM(CPU),
9504 	VM_KERN_MEMORY_ELEM(PMAP),
9505 	VM_KERN_MEMORY_ELEM(PTE),
9506 	VM_KERN_MEMORY_ELEM(ZONE),
9507 	VM_KERN_MEMORY_ELEM(KALLOC),
9508 	VM_KERN_MEMORY_ELEM(COMPRESSOR),
9509 	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
9510 	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
9511 	VM_KERN_MEMORY_ELEM(WAITQ),
9512 	VM_KERN_MEMORY_ELEM(DIAG),
9513 	VM_KERN_MEMORY_ELEM(LOG),
9514 	VM_KERN_MEMORY_ELEM(FILE),
9515 	VM_KERN_MEMORY_ELEM(MBUF),
9516 	VM_KERN_MEMORY_ELEM(UBC),
9517 	VM_KERN_MEMORY_ELEM(SECURITY),
9518 	VM_KERN_MEMORY_ELEM(MLOCK),
9519 	VM_KERN_MEMORY_ELEM(REASON),
9520 	VM_KERN_MEMORY_ELEM(SKYWALK),
9521 	VM_KERN_MEMORY_ELEM(LTABLE),
9522 	VM_KERN_MEMORY_ELEM(HV),
9523 	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
9524 	VM_KERN_MEMORY_ELEM(RETIRED),
9525 	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
9526 	VM_KERN_MEMORY_ELEM(TRIAGE),
9527 	VM_KERN_MEMORY_ELEM(RECOUNT),
9528 };
9529 
9530 static vm_tag_t
9531 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
9532 {
9533 	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
9534 		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
9535 			return i;
9536 		}
9537 	}
9538 
9539 	printf("Unable to find vm tag %s for btlog\n", tagstr);
9540 	return VM_KERN_MEMORY_NONE;
9541 }
9542 
9543 __startup_func
9544 static void
9545 vm_btlog_init(void)
9546 {
9547 	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
9548 
9549 	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
9550 		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
9551 	}
9552 }
9553 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
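
/*
 * Usage note (illustrative): on a VM_BTLOG_TAGS build, booting with the
 * boot-arg vmtaglog=IOKIT would make vm_tag_str_to_idx() resolve the string
 * to VM_KERN_MEMORY_IOKIT and create the backtrace log, so wired-size updates
 * charged to that tag are recorded through vm_tag_log().
 */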
9554 #endif /* VM_BTLOG_TAGS */
9555 
9556 void
9557 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
9558 {
9559 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9560 
9561 	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
9562 }
9563 
9564 uint64_t
9565 vm_tag_get_size(vm_tag_t tag)
9566 {
9567 	vm_allocation_site_t *allocation;
9568 
9569 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9570 
9571 	allocation = vm_allocation_sites[tag];
9572 	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
9573 }
9574 
9575 void
9576 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
9577 {
9578 	uint64_t value;
9579 
9580 	value = os_atomic_add(&allocation->total, delta, relaxed);
9581 	if (delta < 0) {
9582 		assertf(value + (uint64_t)-delta > value,
9583 		    "tag %d, site %p", allocation->tag, allocation);
9584 	}
9585 
9586 #if DEBUG || DEVELOPMENT
9587 	if (value > allocation->peak) {
9588 		os_atomic_max(&allocation->peak, value, relaxed);
9589 	}
9590 #endif /* DEBUG || DEVELOPMENT */
9591 
9592 	if (value == (uint64_t)delta && !allocation->tag) {
9593 		vm_tag_alloc(allocation);
9594 	}
9595 
9596 #if VM_BTLOG_TAGS
9597 	if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
9598 		vm_tag_log(object, delta, __builtin_frame_address(0));
9599 	}
9600 #endif /* VM_BTLOG_TAGS */
9601 }
9602 
9603 #if VM_TAG_SIZECLASSES
9604 
9605 void
9606 vm_allocation_zones_init(void)
9607 {
9608 	vm_offset_t   addr;
9609 	vm_size_t     size;
9610 
9611 	const vm_tag_t early_tags[] = {
9612 		VM_KERN_MEMORY_DIAG,
9613 		VM_KERN_MEMORY_KALLOC,
9614 		VM_KERN_MEMORY_KALLOC_DATA,
9615 		VM_KERN_MEMORY_KALLOC_TYPE,
9616 		VM_KERN_MEMORY_LIBKERN,
9617 		VM_KERN_MEMORY_OSFMK,
9618 		VM_KERN_MEMORY_RECOUNT,
9619 	};
9620 
9621 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9622 	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9623 
9624 	kmem_alloc(kernel_map, &addr, round_page(size),
9625 	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
9626 	    VM_KERN_MEMORY_DIAG);
9627 
9628 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9629 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9630 
9631 	// prepopulate early tag ranges so allocations
9632 	// in vm_tag_update_zone_size() and early boot won't recurse
9633 	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
9634 		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
9635 		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9636 	}
9637 }
9638 
9639 __attribute__((noinline))
9640 static vm_tag_t
9641 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9642 {
9643 	vm_allocation_zone_total_t *stats;
9644 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9645 
9646 	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
9647 	stats = kalloc_data(size, flags);
9648 	if (!stats) {
9649 		return VM_KERN_MEMORY_NONE;
9650 	}
9651 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9652 		kfree_data(stats, size);
9653 	}
9654 	return tag;
9655 }
9656 
9657 vm_tag_t
9658 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
9659 {
9660 	assert(VM_KERN_MEMORY_NONE != tag);
9661 	assert(tag < VM_MAX_TAG_VALUE);
9662 
9663 	if (__probable(vm_allocation_zone_totals[tag])) {
9664 		return tag;
9665 	}
9666 	return vm_tag_zone_stats_alloc(tag, zflags);
9667 }
9668 
9669 void
9670 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9671 {
9672 	vm_allocation_zone_total_t *stats;
9673 	vm_size_t value;
9674 
9675 	assert(VM_KERN_MEMORY_NONE != tag);
9676 	assert(tag < VM_MAX_TAG_VALUE);
9677 
9678 	if (zidx >= VM_TAG_SIZECLASSES) {
9679 		return;
9680 	}
9681 
9682 	stats = vm_allocation_zone_totals[tag];
9683 	assert(stats);
9684 	stats += zidx;
9685 
9686 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9687 	if (delta < 0) {
9688 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9689 		return;
9690 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9691 		os_atomic_max(&stats->vazt_peak, value, relaxed);
9692 	}
9693 }
9694 
9695 #endif /* VM_TAG_SIZECLASSES */
9696 
9697 void
9698 kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
9699 {
9700 	kern_allocation_name_t other;
9701 	struct vm_allocation_total * total;
9702 	uint32_t subidx;
9703 
9704 	assert(VM_KERN_MEMORY_NONE != subtag);
9705 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9706 	for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
9707 		total = &allocation->subtotals[subidx];
9708 		if (subtag == total->tag) {
9709 			break;
9710 		}
9711 	}
9712 	if (subidx >= allocation->subtotalscount) {
9713 		for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
9714 			total = &allocation->subtotals[subidx];
9715 			if ((VM_KERN_MEMORY_NONE == total->tag)
9716 			    || !total->total) {
9717 				total->tag = (vm_tag_t)subtag;
9718 				break;
9719 			}
9720 		}
9721 	}
9722 	assert(subidx < allocation->subtotalscount);
9723 	if (subidx >= allocation->subtotalscount) {
9724 		lck_ticket_unlock(&vm_allocation_sites_lock);
9725 		return;
9726 	}
9727 	if (delta < 0) {
9728 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9729 	}
9730 	OSAddAtomic64(delta, &total->total);
9731 	lck_ticket_unlock(&vm_allocation_sites_lock);
9732 
9733 	other = vm_allocation_sites[subtag];
9734 	assert(other);
9735 	if (delta < 0) {
9736 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9737 	}
9738 	OSAddAtomic64(delta, &other->mapped);
9739 }
9740 
9741 const char *
9742 kern_allocation_get_name(kern_allocation_name_t allocation)
9743 {
9744 	return KA_NAME(allocation);
9745 }
9746 
9747 kern_allocation_name_t
9748 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9749 {
9750 	kern_allocation_name_t allocation;
9751 	uint16_t namelen;
9752 
9753 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9754 
9755 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9756 	allocation->refcount       = 1;
9757 	allocation->subtotalscount = subtotalscount;
9758 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9759 	strlcpy(KA_NAME(allocation), name, namelen + 1);
9760 
9761 	vm_tag_alloc(allocation);
9762 	return allocation;
9763 }
9764 
9765 void
9766 kern_allocation_name_release(kern_allocation_name_t allocation)
9767 {
9768 	assert(allocation->refcount > 0);
9769 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9770 		kfree_data(allocation,
9771 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9772 	}
9773 }
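
/*
 * A minimal usage sketch (hypothetical caller and name, not from this file):
 * a subsystem can create a named accounting site with no subtotals, account
 * wired memory against it, and drop it when done.
 *
 *	kern_allocation_name_t name = kern_allocation_name_allocate("examplefs", 0);
 *	kern_allocation_update_size(name, ptoa_64(1), VM_OBJECT_NULL);
 *	...
 *	kern_allocation_update_size(name, -ptoa_64(1), VM_OBJECT_NULL);
 *	kern_allocation_name_release(name);
 */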
9774 
9775 #if !VM_TAG_ACTIVE_UPDATE
9776 static void
9777 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9778 {
9779 	if (!object->wired_page_count) {
9780 		return;
9781 	}
9782 	if (!is_kernel_object(object)) {
9783 		assert(object->wire_tag < num_info);
9784 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9785 	}
9786 }
9787 
9788 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9789     unsigned int num_info, vm_object_t object);
9790 
9791 static void
9792 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9793     vm_page_iterate_proc proc, purgeable_q_t queue,
9794     int group)
9795 {
9796 	vm_object_t object;
9797 
9798 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
9799 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
9800 	    object = (vm_object_t) queue_next(&object->objq)) {
9801 		proc(info, num_info, object);
9802 	}
9803 }
9804 
9805 static void
9806 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9807     vm_page_iterate_proc proc)
9808 {
9809 	vm_object_t     object;
9810 
9811 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9812 	queue_iterate(&vm_objects_wired,
9813 	    object,
9814 	    vm_object_t,
9815 	    wired_objq)
9816 	{
9817 		proc(info, num_info, object);
9818 	}
9819 	lck_spin_unlock(&vm_objects_wired_lock);
9820 }
9821 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9822 
9823 static uint64_t
9824 process_account(mach_memory_info_t * info, unsigned int num_info,
9825     uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
9826 {
9827 	size_t                 namelen;
9828 	unsigned int           idx, count, nextinfo;
9829 	vm_allocation_site_t * site;
9830 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9831 
9832 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9833 		site = vm_allocation_sites[idx];
9834 		if (!site) {
9835 			continue;
9836 		}
9837 		info[idx].mapped = site->mapped;
9838 		info[idx].tag    = site->tag;
9839 		if (!iterated) {
9840 			info[idx].size = site->total;
9841 #if DEBUG || DEVELOPMENT
9842 			info[idx].peak = site->peak;
9843 #endif /* DEBUG || DEVELOPMENT */
9844 		} else {
9845 			if (!site->subtotalscount && (site->total != info[idx].size)) {
9846 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9847 				info[idx].size = site->total;
9848 			}
9849 		}
9850 		info[idx].flags |= VM_KERN_SITE_WIRED;
9851 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9852 			info[idx].site   = idx;
9853 			info[idx].flags |= VM_KERN_SITE_TAG;
9854 			if (VM_KERN_MEMORY_ZONE == idx) {
9855 				info[idx].flags |= VM_KERN_SITE_HIDE;
9856 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
9857 				info[idx].collectable_bytes = zones_collectable_bytes;
9858 			}
9859 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9860 			info[idx].site   = 0;
9861 			info[idx].flags |= VM_KERN_SITE_NAMED;
9862 			if (namelen > sizeof(info[idx].name)) {
9863 				namelen = sizeof(info[idx].name);
9864 			}
9865 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9866 		} else if (VM_TAG_KMOD & site->flags) {
9867 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
9868 			info[idx].flags |= VM_KERN_SITE_KMOD;
9869 		} else {
9870 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
9871 			info[idx].flags |= VM_KERN_SITE_KERNEL;
9872 		}
9873 	}
9874 
9875 	nextinfo = (vm_allocation_tag_highest + 1);
9876 	count    = nextinfo;
9877 	if (count >= num_info) {
9878 		count = num_info;
9879 	}
9880 
9881 	for (idx = 0; idx < count; idx++) {
9882 		site = vm_allocation_sites[idx];
9883 		if (!site) {
9884 			continue;
9885 		}
9886 #if VM_TAG_SIZECLASSES
9887 		vm_allocation_zone_total_t * zone;
9888 		unsigned int                 zidx;
9889 
9890 		if (!redact_info
9891 		    && vm_allocation_zone_totals
9892 		    && (zone = vm_allocation_zone_totals[idx])
9893 		    && (nextinfo < num_info)) {
9894 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9895 				if (!zone[zidx].vazt_peak) {
9896 					continue;
9897 				}
9898 				info[nextinfo]        = info[idx];
9899 				info[nextinfo].zone   = zone_index_from_tag_index(zidx);
9900 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
9901 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
9902 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
9903 				info[nextinfo].size   = zone[zidx].vazt_total;
9904 				info[nextinfo].peak   = zone[zidx].vazt_peak;
9905 				info[nextinfo].mapped = 0;
9906 				nextinfo++;
9907 			}
9908 		}
9909 #endif /* VM_TAG_SIZECLASSES */
9910 		if (site->subtotalscount) {
9911 			uint64_t mapped, mapcost, take;
9912 			uint32_t sub;
9913 			vm_tag_t alloctag;
9914 
9915 			info[idx].size = site->total;
9916 			mapped = info[idx].size;
9917 			info[idx].mapped = mapped;
9918 			mapcost = 0;
9919 			for (sub = 0; sub < site->subtotalscount; sub++) {
9920 				alloctag = site->subtotals[sub].tag;
9921 				assert(alloctag < num_info);
9922 				if (info[alloctag].name[0]) {
9923 					continue;
9924 				}
9925 				take = site->subtotals[sub].total;
9926 				if (take > info[alloctag].size) {
9927 					take = info[alloctag].size;
9928 				}
9929 				if (take > mapped) {
9930 					take = mapped;
9931 				}
9932 				info[alloctag].mapped  -= take;
9933 				info[alloctag].size    -= take;
9934 				mapped                 -= take;
9935 				mapcost                += take;
9936 			}
9937 			info[idx].size = mapcost;
9938 		}
9939 	}
9940 	lck_ticket_unlock(&vm_allocation_sites_lock);
9941 
9942 	return 0;
9943 }
9944 
9945 uint32_t
9946 vm_page_diagnose_estimate(void)
9947 {
9948 	vm_allocation_site_t * site;
9949 	uint32_t               count = zone_view_count;
9950 	uint32_t               idx;
9951 
9952 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9953 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9954 		site = vm_allocation_sites[idx];
9955 		if (!site) {
9956 			continue;
9957 		}
9958 		count++;
9959 #if VM_TAG_SIZECLASSES
9960 		if (vm_allocation_zone_totals) {
9961 			vm_allocation_zone_total_t * zone;
9962 			zone = vm_allocation_zone_totals[idx];
9963 			if (!zone) {
9964 				continue;
9965 			}
9966 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9967 				count += (zone[zidx].vazt_peak != 0);
9968 			}
9969 		}
9970 #endif
9971 	}
9972 	lck_ticket_unlock(&vm_allocation_sites_lock);
9973 
9974 	/* some slop for new tags created */
9975 	count += 8;
9976 	count += VM_KERN_COUNTER_COUNT;
9977 
9978 	return count;
9979 }
9980 
9981 static void
9982 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9983     bool percpu)
9984 {
9985 	zpercpu_foreach(zs, zstats) {
9986 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9987 	}
9988 	if (percpu) {
9989 		info->size *= zpercpu_count();
9990 	}
9991 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9992 }
9993 
9994 static void
9995 vm_page_add_info(
9996 	mach_memory_info_t     *info,
9997 	zone_stats_t            stats,
9998 	bool                    per_cpu,
9999 	const char             *parent_heap_name,
10000 	const char             *parent_zone_name,
10001 	const char             *view_name)
10002 {
10003 	vm_page_diagnose_zone_stats(info, stats, per_cpu);
10004 	snprintf(info->name, sizeof(info->name),
10005 	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
10006 }
10007 
10008 static void
10009 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
10010 {
10011 	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
10012 	    z->z_name, "raw");
10013 }
10014 
10015 static void
10016 vm_page_add_view(
10017 	mach_memory_info_t     *info,
10018 	zone_stats_t            stats,
10019 	const char             *parent_heap_name,
10020 	const char             *parent_zone_name,
10021 	const char             *view_name)
10022 {
10023 	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
10024 	    view_name);
10025 }
10026 
10027 static uint32_t
10028 vm_page_diagnose_heap_views(
10029 	mach_memory_info_t     *info,
10030 	kalloc_heap_t           kh,
10031 	const char             *parent_heap_name,
10032 	const char             *parent_zone_name)
10033 {
10034 	uint32_t i = 0;
10035 
10036 	while (kh) {
10037 		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
10038 		    parent_zone_name, kh->kh_name);
10039 		kh = kh->kh_views;
10040 		i++;
10041 	}
10042 	return i;
10043 }
10044 
10045 static uint32_t
10046 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
10047 {
10048 	uint32_t i = 0;
10049 
10050 	for (; i < KHEAP_NUM_ZONES; i++) {
10051 		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
10052 	}
10053 
10054 	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
10055 	    NULL);
10056 	return i;
10057 }
10058 
10059 static int
10060 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
10061 {
10062 	uint32_t idx = 0;
10063 	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
10064 	    "", "raw");
10065 	idx++;
10066 
10067 	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
10068 		struct kheap_info heap = kalloc_type_heap_array[i];
10069 		char heap_num_tmp[MAX_ZONE_NAME] = "";
10070 		const char *heap_num;
10071 
10072 		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
10073 		heap_num = &heap_num_tmp[0];
10074 
10075 		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
10076 		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
10077 			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
10078 				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
10079 				    heap_num, ktv->kt_name);
10080 				idx++;
10081 			}
10082 		}
10083 
10084 		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
10085 		    KHEAP_KT_VAR->kh_name, heap_num);
10086 	}
10087 
10088 	return idx;
10089 }
10090 
10091 kern_return_t
10092 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
10093 {
10094 	uint64_t                 wired_size;
10095 	uint64_t                 wired_managed_size;
10096 	uint64_t                 wired_reserved_size;
10097 	boolean_t                iterate;
10098 	mach_memory_info_t     * counts;
10099 	uint32_t                 i;
10100 
10101 	bzero(info, num_info * sizeof(mach_memory_info_t));
10102 
10103 	if (!vm_page_wire_count_initial) {
10104 		return KERN_ABORTED;
10105 	}
10106 
10107 #if !XNU_TARGET_OS_OSX
10108 	wired_size          = ptoa_64(vm_page_wire_count);
10109 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
10110 #else /* !XNU_TARGET_OS_OSX */
10111 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
10112 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
10113 #endif /* !XNU_TARGET_OS_OSX */
10114 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
10115 
10116 	wired_size += booter_size;
10117 
10118 	assert(num_info >= VM_KERN_COUNTER_COUNT);
10119 	num_info -= VM_KERN_COUNTER_COUNT;
10120 	counts = &info[num_info];
10121 
10122 #define SET_COUNT(xcount, xsize, xflags)                        \
10123     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
10124     counts[xcount].site  = (xcount);                            \
10125     counts[xcount].size  = (xsize);                                 \
10126     counts[xcount].mapped  = (xsize);                           \
10127     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
10128 
10129 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
10130 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
10131 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
10132 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
10133 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
10134 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
10135 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
10136 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
10137 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
10138 
10139 #define SET_MAP(xcount, xsize, xfree, xlargest) \
10140     counts[xcount].site    = (xcount);                  \
10141     counts[xcount].size    = (xsize);                   \
10142     counts[xcount].mapped  = (xsize);                   \
10143     counts[xcount].free    = (xfree);                   \
10144     counts[xcount].largest = (xlargest);                \
10145     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
10146 
10147 	vm_map_size_t map_size, map_free, map_largest;
10148 
10149 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
10150 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
10151 
10152 	zone_map_sizes(&map_size, &map_free, &map_largest);
10153 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
10154 
10155 	assert(num_info >= zone_view_count);
10156 	num_info -= zone_view_count;
10157 	counts = &info[num_info];
10158 	i = 0;
10159 
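	/*
	 * Per-heap and per-zone-view breakdowns are only reported when the
	 * caller did not ask for redacted info.
	 */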
10160 	if (!redact_info) {
10161 		if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
10162 			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
10163 		}
10164 		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
10165 			i += vm_page_diagnose_kt_heaps(counts + i);
10166 		}
10167 		assert(i <= zone_view_count);
10168 
10169 		zone_index_foreach(zidx) {
10170 			zone_t z = &zone_array[zidx];
10171 			zone_security_flags_t zsflags = zone_security_array[zidx];
10172 			zone_view_t zv = z->z_views;
10173 
10174 			if (zv == NULL) {
10175 				continue;
10176 			}
10177 
10178 			zone_stats_t zv_stats_head = z->z_stats;
10179 			bool has_raw_view = false;
10180 
10181 			for (; zv; zv = zv->zv_next) {
10182 				/*
10183 				 * kalloc_types that allocate from the same zone are linked
10184 				 * as views. Only print the ones that have their own stats.
10185 				 */
10186 				if (zv->zv_stats == zv_stats_head) {
10187 					continue;
10188 				}
10189 				has_raw_view = true;
10190 				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
10191 				    z->z_percpu);
10192 				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
10193 				    zone_heap_name(z), z->z_name, zv->zv_name);
10194 				i++;
10195 				assert(i <= zone_view_count);
10196 			}
10197 
10198 			/*
10199 			 * Print the raw view for zones that belong to no kheap and are not kalloc_type, and for kalloc_type zones whose views were printed above.
10200 			 */
10201 			bool kalloc_type = zsflags.z_kalloc_type;
10202 			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
10203 			    (kalloc_type && has_raw_view)) {
10204 				vm_page_diagnose_zone(counts + i, z);
10205 				i++;
10206 				assert(i <= zone_view_count);
10207 			}
10208 		}
10209 	}
10210 
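	/*
	 * When allocation tags are not actively maintained (VM_TAG_ACTIVE_UPDATE
	 * is 0), fall back to walking kernel_map -- descending at most one
	 * submap level -- and count the wired pages of entries backed by the
	 * kernel object, charging them to the entry's VME alias tag.  Note the
	 * inner `for` loop tests `map`, so termination is driven by the submap
	 * push/pop logic at the bottom of the loop.
	 */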
10211 	iterate = !VM_TAG_ACTIVE_UPDATE;
10212 	if (iterate) {
10213 		enum                       { kMaxKernelDepth = 1 };
10214 		vm_map_t                     maps[kMaxKernelDepth];
10215 		vm_map_entry_t               entries[kMaxKernelDepth];
10216 		vm_map_t                     map;
10217 		vm_map_entry_t               entry;
10218 		vm_object_offset_t           offset;
10219 		vm_page_t                    page;
10220 		int                          stackIdx, count;
10221 
10222 #if !VM_TAG_ACTIVE_UPDATE
10223 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
10224 #endif /* ! VM_TAG_ACTIVE_UPDATE */
10225 
10226 		map = kernel_map;
10227 		stackIdx = 0;
10228 		while (map) {
10229 			vm_map_lock(map);
10230 			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
10231 				if (entry->is_sub_map) {
10232 					assert(stackIdx < kMaxKernelDepth);
10233 					maps[stackIdx] = map;
10234 					entries[stackIdx] = entry;
10235 					stackIdx++;
10236 					map = VME_SUBMAP(entry);
10237 					entry = NULL;
10238 					break;
10239 				}
10240 				if (is_kernel_object(VME_OBJECT(entry))) {
10241 					count = 0;
10242 					vm_object_lock(VME_OBJECT(entry));
10243 					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
10244 						page = vm_page_lookup(VME_OBJECT(entry), offset);
10245 						if (page && VM_PAGE_WIRED(page)) {
10246 							count++;
10247 						}
10248 					}
10249 					vm_object_unlock(VME_OBJECT(entry));
10250 
10251 					if (count) {
10252 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
10253 						assert(VME_ALIAS(entry) < num_info);
10254 						info[VME_ALIAS(entry)].size += ptoa_64(count);
10255 					}
10256 				}
10257 				while (map && (entry == vm_map_last_entry(map))) {
10258 					vm_map_unlock(map);
10259 					if (!stackIdx) {
10260 						map = NULL;
10261 					} else {
10262 						--stackIdx;
10263 						map = maps[stackIdx];
10264 						entry = entries[stackIdx];
10265 					}
10266 				}
10267 			}
10268 		}
10269 	}
10270 
10271 	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);
10272 
10273 	return KERN_SUCCESS;
10274 }
10275 
10276 #if DEBUG || DEVELOPMENT
10277 
10278 kern_return_t
10279 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
10280 {
10281 	kern_return_t  ret;
10282 	vm_size_t      zsize;
10283 	vm_map_t       map;
10284 	vm_map_entry_t entry;
10285 
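	/*
	 * First see whether the address is a zone element; if so, the zone
	 * layer reports its size and owning tag directly.
	 */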
10286 	zsize = zone_element_info((void *) addr, tag);
10287 	if (zsize) {
10288 		*zone_size = *size = zsize;
10289 		return KERN_SUCCESS;
10290 	}
10291 
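	/*
	 * Otherwise walk kernel_map (descending into submaps rooted directly in
	 * kernel_map) looking for an entry that starts exactly at `addr`, and
	 * report that entry's size and alias tag.
	 */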
10292 	*zone_size = 0;
10293 	ret = KERN_INVALID_ADDRESS;
10294 	for (map = kernel_map; map;) {
10295 		vm_map_lock(map);
10296 		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
10297 			break;
10298 		}
10299 		if (entry->is_sub_map) {
10300 			if (map != kernel_map) {
10301 				break;
10302 			}
10303 			map = VME_SUBMAP(entry);
10304 			continue;
10305 		}
10306 		if (entry->vme_start != addr) {
10307 			break;
10308 		}
10309 		*tag = (vm_tag_t)VME_ALIAS(entry);
10310 		*size = (entry->vme_end - addr);
10311 		ret = KERN_SUCCESS;
10312 		break;
10313 	}
10314 	if (map != kernel_map) {
10315 		vm_map_unlock(map);
10316 	}
10317 	vm_map_unlock(kernel_map);
10318 
10319 	return ret;
10320 }
10321 
10322 #endif /* DEBUG || DEVELOPMENT */
10323 
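/*
 * If `tag` belongs to a kext allocation site, return the owning kmod id
 * (and its name, via OSKextGetKmodIDForSite); return 0 otherwise.
 */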
10324 uint32_t
10325 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
10326 {
10327 	vm_allocation_site_t * site;
10328 	uint32_t               kmodId;
10329 
10330 	kmodId = 0;
10331 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10332 	if ((site = vm_allocation_sites[tag])) {
10333 		if (VM_TAG_KMOD & site->flags) {
10334 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
10335 		}
10336 	}
10337 	lck_ticket_unlock(&vm_allocation_sites_lock);
10338 
10339 	return kmodId;
10340 }
10341 
10342 
10343 #if CONFIG_SECLUDED_MEMORY
10344 /*
10345  * Note that there's no locking around other accesses to vm_page_secluded_target.
10346  * That should be OK, since these are the only place where it can be changed after
10347  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
10348  * but will eventually get the correct value. This brief mismatch is OK as pageout
10349  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
10350  * over time.
10351  */
10352 unsigned int vm_page_secluded_suppress_cnt = 0;
10353 unsigned int vm_page_secluded_save_target;
10354 
10355 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
10356 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
10357 
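/*
 * The first task to enter suppression saves the current secluded target and
 * forces it to 0; the last task to leave (in stop_secluded_suppression below)
 * restores the saved value.  The per-task flag and the global count are kept
 * consistent under secluded_suppress_slock.
 */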
10358 void
10359 start_secluded_suppression(task_t task)
10360 {
10361 	if (task->task_suppressed_secluded) {
10362 		return;
10363 	}
10364 	lck_spin_lock(&secluded_suppress_slock);
10365 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
10366 		task->task_suppressed_secluded = TRUE;
10367 		vm_page_secluded_save_target = vm_page_secluded_target;
10368 		vm_page_secluded_target = 0;
10369 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10370 	}
10371 	lck_spin_unlock(&secluded_suppress_slock);
10372 }
10373 
10374 void
10375 stop_secluded_suppression(task_t task)
10376 {
10377 	lck_spin_lock(&secluded_suppress_slock);
10378 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
10379 		task->task_suppressed_secluded = FALSE;
10380 		vm_page_secluded_target = vm_page_secluded_save_target;
10381 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10382 	}
10383 	lck_spin_unlock(&secluded_suppress_slock);
10384 }
10385 
10386 #endif /* CONFIG_SECLUDED_MEMORY */
10387 
10388 /*
10389  * Move the list of retired pages on the vm_page_queue_retired to
10390  * their final resting place on retired_pages_object (currently a no-op).
10391  */
10392 void
10393 vm_retire_boot_pages(void)
10394 {
10395 }
10396 
10397 /*
10398  * This holds the reported physical address if an ECC error leads to a panic.
10399  * SMC will store it in PMU SRAM under the 'sECC' key.
10400  */
10401 uint64_t ecc_panic_physical_address = 0;
10402 
10403 
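/*
 * Returns TRUE when the vm_page structure lies outside the boot-time
 * vm_pages[] array, i.e. it was created separately rather than carved out
 * of that array.
 */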
10404 boolean_t
10405 vm_page_created(vm_page_t page)
10406 {
10407 	return (page < &vm_pages[0]) || (page >= &vm_pages[vm_pages_count]);
10408 }
10409