xref: /xnu-10002.1.13/osfmk/vm/vm_resident.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68 
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/ecc.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_init.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_kern.h>                 /* kmem_alloc() */
89 #include <kern/misc_protos.h>
90 #include <mach_debug/zone_info.h>
91 #include <vm/cpm.h>
92 #include <pexpert/pexpert.h>
93 #include <pexpert/device_tree.h>
94 #include <san/kasan.h>
95 
96 #include <vm/vm_protos.h>
97 #include <vm/memory_object.h>
98 #include <vm/vm_purgeable_internal.h>
99 #include <vm/vm_compressor.h>
100 #if defined (__x86_64__)
101 #include <i386/misc_protos.h>
102 #endif
103 
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache.h>
106 #endif
107 
108 #if HIBERNATION
109 #include <IOKit/IOHibernatePrivate.h>
110 #include <machine/pal_hibernate.h>
111 #endif /* HIBERNATION */
112 
113 #include <sys/kdebug.h>
114 
115 #if defined(HAS_APPLE_PAC)
116 #include <ptrauth.h>
117 #endif
118 #if defined(__arm64__)
119 #include <arm/cpu_internal.h>
120 #endif /* defined(__arm64__) */
121 
122 #if MACH_ASSERT
123 
124 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
125 
126 #else /* MACH_ASSERT */
127 
128 #define ASSERT_PMAP_FREE(mem) /* nothing */
129 
130 #endif /* MACH_ASSERT */
131 
132 extern boolean_t vm_pageout_running;
133 extern thread_t  vm_pageout_scan_thread;
134 extern bool vps_dynamic_priority_enabled;
135 
136 char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
137 char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
138 char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
139 char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
140 
141 #if CONFIG_SECLUDED_MEMORY
142 struct vm_page_secluded_data vm_page_secluded;
143 #endif /* CONFIG_SECLUDED_MEMORY */
144 
145 #if DEVELOPMENT || DEBUG
146 extern struct memory_object_pager_ops shared_region_pager_ops;
147 unsigned int shared_region_pagers_resident_count = 0;
148 unsigned int shared_region_pagers_resident_peak = 0;
149 #endif /* DEVELOPMENT || DEBUG */
150 
151 
152 
153 int             PERCPU_DATA(start_color);
154 vm_page_t       PERCPU_DATA(free_pages);
155 boolean_t       hibernate_cleaning_in_progress = FALSE;
156 boolean_t       vm_page_free_verify = TRUE;
157 
158 uint32_t        vm_lopage_free_count = 0;
159 uint32_t        vm_lopage_free_limit = 0;
160 uint32_t        vm_lopage_lowater    = 0;
161 boolean_t       vm_lopage_refill = FALSE;
162 boolean_t       vm_lopage_needed = FALSE;
163 
164 int             speculative_age_index = 0;
165 int             speculative_steal_index = 0;
166 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
167 
168 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
169                                                           * Updated and checked behind the vm_page_queues_lock. */
170 
171 static void             vm_page_free_prepare(vm_page_t  page);
172 static vm_page_t        vm_page_grab_fictitious_common(ppnum_t, boolean_t);
173 
174 static void vm_tag_init(void);
175 
176 /* for debugging purposes */
177 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
178     VM_PAGE_PACKED_FROM_ARRAY;
179 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
180     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
181 
182 /*
183  *	Associated with each page of user-allocatable memory is a
184  *	page structure.
185  */
186 
187 /*
188  *	These variables record the values returned by vm_page_bootstrap,
189  *	for debugging purposes.  The implementation of pmap_steal_memory
190  *	and pmap_startup here also uses them internally.
191  */
192 
193 vm_offset_t virtual_space_start;
194 vm_offset_t virtual_space_end;
195 uint32_t        vm_page_pages;
196 
197 /*
198  *	The vm_page_lookup() routine, which provides for fast
199  *	(virtual memory object, offset) to page lookup, employs
200  *	the following hash table.  The vm_page_{insert,remove}
201  *	routines install and remove associations in the table.
202  *	[This table is often called the virtual-to-physical,
203  *	or VP, table.]
204  */
205 typedef struct {
206 	vm_page_packed_t page_list;
207 #if     MACH_PAGE_HASH_STATS
208 	int             cur_count;              /* current count */
209 	int             hi_count;               /* high water mark */
210 #endif /* MACH_PAGE_HASH_STATS */
211 } vm_page_bucket_t;
212 
213 
214 #define BUCKETS_PER_LOCK        16
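/*
 * Illustrative note: each spinlock in vm_page_bucket_locks covers 16
 * consecutive hash buckets, so vm_page_bucket_lock_count works out to
 * ceil(vm_page_bucket_count / BUCKETS_PER_LOCK); see the computation in
 * vm_page_bootstrap() below.
 */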
215 
216 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
217 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
218 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
219 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
220 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
221 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
222 
223 #ifndef VM_TAG_ACTIVE_UPDATE
224 #error VM_TAG_ACTIVE_UPDATE
225 #endif
226 #ifndef VM_TAG_SIZECLASSES
227 #error VM_TAG_SIZECLASSES
228 #endif
229 
230 /* for debugging */
231 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
232 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
233 
234 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
235 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
236 #if VM_TAG_SIZECLASSES
237 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
238 #endif /* VM_TAG_SIZECLASSES */
239 
240 vm_tag_t vm_allocation_tag_highest;
241 
242 #if VM_PAGE_BUCKETS_CHECK
243 boolean_t vm_page_buckets_check_ready = FALSE;
244 #if VM_PAGE_FAKE_BUCKETS
245 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
246 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
247 #endif /* VM_PAGE_FAKE_BUCKETS */
248 #endif /* VM_PAGE_BUCKETS_CHECK */
249 
250 #if     MACH_PAGE_HASH_STATS
251 /* This routine is only for debug.  It is intended to be called by
252  * hand by a developer using a kernel debugger.  This routine prints
253  * out vm_page_hash table statistics to the kernel debug console.
254  */
255 void
256 hash_debug(void)
257 {
258 	int     i;
259 	int     numbuckets = 0;
260 	int     highsum = 0;
261 	int     maxdepth = 0;
262 
263 	for (i = 0; i < vm_page_bucket_count; i++) {
264 		if (vm_page_buckets[i].hi_count) {
265 			numbuckets++;
266 			highsum += vm_page_buckets[i].hi_count;
267 			if (vm_page_buckets[i].hi_count > maxdepth) {
268 				maxdepth = vm_page_buckets[i].hi_count;
269 			}
270 		}
271 	}
272 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
273 	printf("Number used buckets:     %d = %d%%\n",
274 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
275 	printf("Number unused buckets:   %d = %d%%\n",
276 	    vm_page_bucket_count - numbuckets,
277 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
278 	printf("Sum of bucket max depth: %d\n", highsum);
279 	printf("Average bucket depth:    %d.%2d\n",
280 	    highsum / vm_page_bucket_count,
281 	    highsum % vm_page_bucket_count);
282 	printf("Maximum bucket depth:    %d\n", maxdepth);
283 }
284 #endif /* MACH_PAGE_HASH_STATS */
285 
286 /*
287  *	The virtual page size is currently implemented as a runtime
288  *	variable, but is constant once initialized using vm_set_page_size.
289  *	This initialization must be done in the machine-dependent
290  *	bootstrap sequence, before calling other machine-independent
291  *	initializations.
292  *
293  *	All references to the virtual page size outside this
294  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
295  *	constants.
296  */
297 #if defined(__arm64__)
298 vm_size_t       page_size;
299 vm_size_t       page_mask;
300 int             page_shift;
301 #else
302 vm_size_t       page_size  = PAGE_SIZE;
303 vm_size_t       page_mask  = PAGE_MASK;
304 int             page_shift = PAGE_SHIFT;
305 #endif
306 
307 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
308 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
309 vm_page_t                          vm_page_array_ending_addr;
310 
311 unsigned int    vm_pages_count = 0;
312 
313 /*
314  *	Resident pages that represent real memory
315  *	are allocated from a set of free lists,
316  *	one per color.
317  */
318 unsigned int    vm_colors;
319 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
320 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
321 unsigned int    vm_free_magazine_refill_limit = 0;
322 
323 
324 struct vm_page_queue_free_head {
325 	vm_page_queue_head_t    qhead;
326 } VM_PAGE_PACKED_ALIGNED;
327 
328 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
329 
330 
331 unsigned int    vm_page_free_wanted;
332 unsigned int    vm_page_free_wanted_privileged;
333 #if CONFIG_SECLUDED_MEMORY
334 unsigned int    vm_page_free_wanted_secluded;
335 #endif /* CONFIG_SECLUDED_MEMORY */
336 unsigned int    vm_page_free_count;
337 
338 unsigned int    vm_page_realtime_count;
339 
340 /*
341  *	Occasionally, the virtual memory system uses
342  *	resident page structures that do not refer to
343  *	real pages, for example to leave a page with
344  *	important state information in the VP table.
345  *
346  *	These page structures are allocated the way
347  *	most other kernel structures are.
348  */
349 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
350 vm_locks_array_t vm_page_locks;
351 
352 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
353 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
354 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
355 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
356 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
357 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
358 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
359 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
360 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
361 
362 unsigned int    vm_page_local_q_soft_limit = 250;
363 unsigned int    vm_page_local_q_hard_limit = 500;
364 struct vpl     *__zpercpu vm_page_local_q;
365 
366 /* N.B. Guard and fictitious pages must not
367  * be assigned a zero phys_page value.
368  */
369 /*
370  *	Fictitious pages don't have a physical address,
371  *	but we must initialize phys_page to something.
372  *	For debugging, this should be a strange value
373  *	that the pmap module can recognize in assertions.
374  */
375 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
376 
377 /*
378  *	Guard pages are not accessible so they don't
379  *      need a physical address, but we need to enter
380  *	one in the pmap.
381  *	Let's make it recognizable and make sure that
382  *	we don't use a real physical page with that
383  *	physical address.
384  */
385 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
386 
387 /*
388  *	Resident page structures are also chained on
389  *	queues that are used by the page replacement
390  *	system (pageout daemon).  These queues are
391  *	defined here, but are shared by the pageout
392  *	module.  The inactive queue is broken into
393  *	file-backed and anonymous queues for convenience, as the
394  *	pageout daemon often assigns a higher
395  *	importance to anonymous pages (less likely to pick).
396  */
397 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
398 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
399 #if CONFIG_SECLUDED_MEMORY
400 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
401 #endif /* CONFIG_SECLUDED_MEMORY */
402 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
403 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
404 
405 queue_head_t    vm_objects_wired;
406 
407 void vm_update_darkwake_mode(boolean_t);
408 
409 vm_page_queue_head_t    vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
410 uint32_t        vm_page_donate_mode;
411 uint32_t        vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
412 uint32_t        vm_page_donate_count;
413 bool            vm_page_donate_queue_ripe;
414 
415 
416 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
417 uint32_t        vm_page_background_target;
418 uint32_t        vm_page_background_target_snapshot;
419 uint32_t        vm_page_background_count;
420 uint64_t        vm_page_background_promoted_count;
421 
422 uint32_t        vm_page_background_internal_count;
423 uint32_t        vm_page_background_external_count;
424 
425 uint32_t        vm_page_background_mode;
426 uint32_t        vm_page_background_exclude_external;
427 
428 unsigned int    vm_page_active_count;
429 unsigned int    vm_page_inactive_count;
430 unsigned int    vm_page_kernelcache_count;
431 #if CONFIG_SECLUDED_MEMORY
432 unsigned int    vm_page_secluded_count;
433 unsigned int    vm_page_secluded_count_free;
434 unsigned int    vm_page_secluded_count_inuse;
435 unsigned int    vm_page_secluded_count_over_target;
436 #endif /* CONFIG_SECLUDED_MEMORY */
437 unsigned int    vm_page_anonymous_count;
438 unsigned int    vm_page_throttled_count;
439 unsigned int    vm_page_speculative_count;
440 
441 unsigned int    vm_page_wire_count;
442 unsigned int    vm_page_wire_count_on_boot = 0;
443 unsigned int    vm_page_stolen_count = 0;
444 unsigned int    vm_page_wire_count_initial;
445 unsigned int    vm_page_gobble_count = 0;
446 unsigned int    vm_page_kern_lpage_count = 0;
447 
448 uint64_t        booter_size;  /* external so it can be found in core dumps */
449 
450 #define VM_PAGE_WIRE_COUNT_WARNING      0
451 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
452 
453 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
454 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
455 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
456 
457 unsigned int    vm_page_xpmapped_external_count = 0;
458 unsigned int    vm_page_external_count = 0;
459 unsigned int    vm_page_internal_count = 0;
460 unsigned int    vm_page_pageable_external_count = 0;
461 unsigned int    vm_page_pageable_internal_count = 0;
462 
463 #if DEVELOPMENT || DEBUG
464 unsigned int    vm_page_speculative_recreated = 0;
465 unsigned int    vm_page_speculative_created = 0;
466 unsigned int    vm_page_speculative_used = 0;
467 #endif
468 
469 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
470 
471 unsigned int    vm_page_cleaned_count = 0;
472 
473 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
474 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
475 
476 
477 /*
478  *	Several page replacement parameters are also
479  *	shared with this module, so that page allocation
480  *	(done here in vm_page_alloc) can trigger the
481  *	pageout daemon.
482  */
483 unsigned int    vm_page_free_target = 0;
484 unsigned int    vm_page_free_min = 0;
485 unsigned int    vm_page_throttle_limit = 0;
486 unsigned int    vm_page_inactive_target = 0;
487 #if CONFIG_SECLUDED_MEMORY
488 unsigned int    vm_page_secluded_target = 0;
489 #endif /* CONFIG_SECLUDED_MEMORY */
490 unsigned int    vm_page_anonymous_min = 0;
491 unsigned int    vm_page_free_reserved = 0;
492 
493 
494 /*
495  *	The VM system has a couple of heuristics for deciding
496  *	that pages are "uninteresting" and should be placed
497  *	on the inactive queue as likely candidates for replacement.
498  *	These variables let the heuristics be controlled at run-time
499  *	to make experimentation easier.
500  */
501 
502 boolean_t vm_page_deactivate_hint = TRUE;
503 
504 struct vm_page_stats_reusable vm_page_stats_reusable;
505 
506 /*
507  *	vm_set_page_size:
508  *
509  *	Sets the page size, perhaps based upon the memory
510  *	size.  Must be called before any use of page-size
511  *	dependent functions.
512  *
513  *	Sets page_shift and page_mask from page_size.
514  */
515 void
vm_set_page_size(void)516 vm_set_page_size(void)
517 {
518 	page_size  = PAGE_SIZE;
519 	page_mask  = PAGE_MASK;
520 	page_shift = PAGE_SHIFT;
521 
522 	if ((page_mask & page_size) != 0) {
523 		panic("vm_set_page_size: page size not a power of two");
524 	}
525 
526 	for (page_shift = 0;; page_shift++) {
527 		if ((1U << page_shift) == page_size) {
528 			break;
529 		}
530 	}
531 }
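/*
 * Example: with a 16 KB page size (PAGE_SIZE == 16384, PAGE_MASK == 0x3fff),
 * the power-of-two check passes and the loop leaves page_shift == 14.
 */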
532 
533 #if defined (__x86_64__)
534 
535 #define MAX_CLUMP_SIZE      16
536 #define DEFAULT_CLUMP_SIZE  4
537 
538 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
539 
540 #if DEVELOPMENT || DEBUG
541 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
542 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
543 
544 static inline void
545 vm_clump_update_stats(unsigned int c)
546 {
547 	assert(c <= vm_clump_size);
548 	if (c > 0 && c <= vm_clump_size) {
549 		vm_clump_stats[c] += c;
550 	}
551 	vm_clump_allocs += c;
552 }
553 #endif  /*  if DEVELOPMENT || DEBUG */
554 
555 /* Called once to setup the VM clump knobs */
556 static void
557 vm_page_setup_clump( void )
558 {
559 	unsigned int override, n;
560 
561 	vm_clump_size = DEFAULT_CLUMP_SIZE;
562 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
563 		vm_clump_size = override;
564 	}
565 
566 	if (vm_clump_size > MAX_CLUMP_SIZE) {
567 		panic("vm_page_setup_clump:: clump_size is too large!");
568 	}
569 	if (vm_clump_size < 1) {
570 		panic("vm_page_setup_clump:: clump_size must be >= 1");
571 	}
572 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
573 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
574 	}
575 
576 	vm_clump_promote_threshold = vm_clump_size;
577 	vm_clump_mask = vm_clump_size - 1;
578 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
579 		;
580 	}
581 
582 #if DEVELOPMENT || DEBUG
583 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
584 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
585 #endif  /*  if DEVELOPMENT || DEBUG */
586 }
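/*
 * Illustrative: with the default clump_size of 4, this leaves
 * vm_clump_mask == 3, vm_clump_shift == 2 and vm_clump_promote_threshold == 4.
 */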
587 
588 #endif  /* #if defined (__x86_64__) */
589 
590 #define COLOR_GROUPS_TO_STEAL   4
591 
592 /* Called once during startup, once the cache geometry is known.
593  */
594 static void
595 vm_page_set_colors( void )
596 {
597 	unsigned int    n, override;
598 
599 #if defined (__x86_64__)
600 	/* adjust #colors because we need to color outside the clump boundary */
601 	vm_cache_geometry_colors >>= vm_clump_shift;
602 #endif
603 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
604 		n = override;
605 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
606 		n = vm_cache_geometry_colors;
607 	} else {
608 		n = DEFAULT_COLORS;                             /* use default if all else fails */
609 	}
610 	if (n == 0) {
611 		n = 1;
612 	}
613 	if (n > MAX_COLORS) {
614 		n = MAX_COLORS;
615 	}
616 
617 	/* the count must be a power of 2  */
618 	if ((n & (n - 1)) != 0) {
619 		n = DEFAULT_COLORS;                             /* use default if all else fails */
620 	}
621 	vm_colors = n;
622 	vm_color_mask = n - 1;
623 
624 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
625 
626 #if defined (__x86_64__)
627 	/* adjust for reduction in colors due to clumping and multiple cores */
628 	if (real_ncpus) {
629 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
630 	}
631 #endif
632 }
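/*
 * Illustrative (hypothetical numbers): with vm_colors == 8 the base limit is
 * 8 * COLOR_GROUPS_TO_STEAL == 32; on an x86 system with vm_clump_size == 4
 * and real_ncpus == 8 it is then scaled to 32 * (4 * 8) == 1024 pages.
 */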
633 
634 /*
635  * During single-threaded early boot we don't initialize all pages.
636  * This avoids some delay during boot. They'll be initialized and
637  * added to the free list as needed, or after we are multithreaded, by
638  * what becomes the pageout thread.
639  */
640 static boolean_t fill = FALSE;
641 static unsigned int fillval;
642 uint_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
643 ppnum_t delay_above_pnum = PPNUM_MAX;
644 
645 /*
646  * For x86, the first 8 GB initializes quickly and gives us lots of lowmem + memory above it to start off with.
647  * If ARM ever uses delayed page initialization, this value may need to be quite different.
648  */
649 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
650 
651 /*
652  * When we have to dip into more delayed pages due to low memory, free up
653  * a large chunk to get things back to normal. This avoids contention on the
654  * delayed code allocating page by page.
655  */
656 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
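/* i.e. 1 GB worth of pages: 262,144 pages with 4 KB pages, 65,536 with 16 KB pages */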
657 
658 /*
659  * Get and initialize the next delayed page.
660  */
661 static vm_page_t
662 vm_get_delayed_page(int grab_options)
663 {
664 	vm_page_t p;
665 	ppnum_t   pnum;
666 
667 	/*
668 	 * Get a new page if we have one.
669 	 */
670 	vm_free_page_lock();
671 	if (vm_delayed_count == 0) {
672 		vm_free_page_unlock();
673 		return NULL;
674 	}
675 
676 	if (!pmap_next_page(&pnum)) {
677 		vm_delayed_count = 0;
678 		vm_free_page_unlock();
679 		return NULL;
680 	}
681 
682 
683 	assert(vm_delayed_count > 0);
684 	--vm_delayed_count;
685 
686 #if defined(__x86_64__)
687 	/* x86 cluster code requires increasing phys_page in vm_pages[] */
688 	if (vm_pages_count > 0) {
689 		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
690 	}
691 #endif
692 	p = &vm_pages[vm_pages_count];
693 	assert(p < vm_page_array_ending_addr);
694 	vm_page_init(p, pnum, FALSE);
695 	++vm_pages_count;
696 	++vm_page_pages;
697 	vm_free_page_unlock();
698 
699 	/*
700 	 * These pages were initially counted as wired, undo that now.
701 	 */
702 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
703 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
704 	} else {
705 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
706 		vm_page_lockspin_queues();
707 	}
708 	--vm_page_wire_count;
709 	--vm_page_wire_count_initial;
710 	if (vm_page_wire_count_on_boot != 0) {
711 		--vm_page_wire_count_on_boot;
712 	}
713 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
714 		vm_page_unlock_queues();
715 	}
716 
717 
718 	if (fill) {
719 		fillPage(pnum, fillval);
720 	}
721 	return p;
722 }
723 
724 static void vm_page_module_init_delayed(void);
725 
726 /*
727  * Free all remaining delayed pages to the free lists.
728  */
729 void
730 vm_free_delayed_pages(void)
731 {
732 	vm_page_t   p;
733 	vm_page_t   list = NULL;
734 	uint_t      cnt = 0;
735 	vm_offset_t start_free_va;
736 	int64_t     free_size;
737 
738 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
739 		if (vm_himemory_mode) {
740 			vm_page_release(p, FALSE);
741 		} else {
742 			p->vmp_snext = list;
743 			list = p;
744 		}
745 		++cnt;
746 	}
747 
748 	/*
749 	 * Free the pages in reverse order if not in himemory mode,
750 	 * so the low memory pages end up first on the free lists (LIFO).
751 	 */
752 	while (list != NULL) {
753 		p = list;
754 		list = p->vmp_snext;
755 		p->vmp_snext = NULL;
756 		vm_page_release(p, FALSE);
757 	}
758 #if DEVELOPMENT || DEBUG
759 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
760 #endif
761 
762 	/*
763 	 * Free up any unused full pages at the end of the vm_pages[] array
764 	 */
765 	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
766 
767 #if defined(__x86_64__)
768 	/*
769 	 * Since x86 might have used large pages for vm_pages[], we can't
770 	 * free starting in the middle of a partially used large page.
771 	 */
772 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
773 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
774 	}
775 #endif
776 	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
777 		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
778 		if (free_size > 0) {
779 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
780 			vm_page_array_ending_addr = (void *)start_free_va;
781 
782 			/*
783 			 * Note there's no locking here, as only this thread will ever change this value.
784 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
785 			 */
786 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
787 
788 #if DEVELOPMENT || DEBUG
789 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
790 			    (long)free_size, (long)start_free_va);
791 #endif
792 		}
793 	}
794 
795 
796 	/*
797 	 * now we can create the VM page array zone
798 	 */
799 	vm_page_module_init_delayed();
800 }
801 
802 /*
803  * Try and free up enough delayed pages to match a contig memory allocation.
804  */
805 static void
806 vm_free_delayed_pages_contig(
807 	uint_t    npages,
808 	ppnum_t   max_pnum,
809 	ppnum_t   pnum_mask)
810 {
811 	vm_page_t p;
812 	ppnum_t   pnum;
813 	uint_t    cnt = 0;
814 
815 	/*
816 	 * Treat 0 as the absolute max page number.
817 	 */
818 	if (max_pnum == 0) {
819 		max_pnum = PPNUM_MAX;
820 	}
821 
822 	/*
823 	 * Free till we get a properly aligned start page
824 	 */
825 	for (;;) {
826 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
827 		if (p == NULL) {
828 			return;
829 		}
830 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
831 		vm_page_release(p, FALSE);
832 		if (pnum >= max_pnum) {
833 			return;
834 		}
835 		if ((pnum & pnum_mask) == 0) {
836 			break;
837 		}
838 	}
839 
840 	/*
841 	 * Having a healthy pool of free pages will help performance. We don't
842 	 * want to fall back to the delayed code for every page allocation.
843 	 */
844 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
845 		npages += VM_DELAY_PAGE_CHUNK;
846 	}
847 
848 	/*
849 	 * Now free up the pages
850 	 */
851 	for (cnt = 1; cnt < npages; ++cnt) {
852 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
853 		if (p == NULL) {
854 			return;
855 		}
856 		vm_page_release(p, FALSE);
857 	}
858 }
859 
860 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
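/*
 * Rounds X up to the next power of two for X >= 2, e.g.
 * ROUNDUP_NEXTP2(5) == 8 and ROUNDUP_NEXTP2(8) == 8; X <= 1 is not
 * valid since __builtin_clz(0) is undefined.
 */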
861 
862 void
863 vm_page_init_local_q(unsigned int num_cpus)
864 {
865 	struct vpl *t_local_q;
866 
867 	/*
868 	 * no point in this for a uni-processor system
869 	 */
870 	if (num_cpus >= 2) {
871 		ml_cpu_info_t cpu_info;
872 
873 		/*
874 		 * Force the allocation alignment to a cacheline,
875 		 * because the `vpl` struct has a lock that will be taken
876 		 * cross-CPU; we want to isolate the rest of the per-CPU
877 		 * data from it to avoid false sharing on that lock.
878 		 */
879 
880 		ml_cpu_get_info(&cpu_info);
881 
882 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
883 		    cpu_info.cache_line_size - 1);
884 
885 		zpercpu_foreach(lq, t_local_q) {
886 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
887 			vm_page_queue_init(&lq->vpl_queue);
888 		}
889 
890 		/* make the initialization visible to all cores */
891 		os_atomic_store(&vm_page_local_q, t_local_q, release);
892 	}
893 }
894 
895 /*
896  * vm_init_before_launchd
897  *
898  * This should be called right before launchd is loaded.
899  */
900 void
901 vm_init_before_launchd()
902 {
903 	vm_page_lockspin_queues();
904 	vm_page_wire_count_on_boot = vm_page_wire_count;
905 	vm_page_unlock_queues();
906 }
907 
908 
909 /*
910  *	vm_page_bootstrap:
911  *
912  *	Initializes the resident memory module.
913  *
914  *	Allocates memory for the page cells, and
915  *	for the object/offset-to-page hash table headers.
916  *	Each page cell is initialized and placed on the free list.
917  *	Returns the range of available kernel virtual memory.
918  */
919 __startup_func
920 void
921 vm_page_bootstrap(
922 	vm_offset_t             *startp,
923 	vm_offset_t             *endp)
924 {
925 	unsigned int            i;
926 	unsigned int            log1;
927 	unsigned int            log2;
928 	unsigned int            size;
929 
930 	/*
931 	 *	Initialize the page queues.
932 	 */
933 
934 	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
935 	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
936 	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
937 
938 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
939 		int group;
940 
941 		purgeable_queues[i].token_q_head = 0;
942 		purgeable_queues[i].token_q_tail = 0;
943 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
944 			queue_init(&purgeable_queues[i].objq[group]);
945 		}
946 
947 		purgeable_queues[i].type = i;
948 		purgeable_queues[i].new_pages = 0;
949 #if MACH_ASSERT
950 		purgeable_queues[i].debug_count_tokens = 0;
951 		purgeable_queues[i].debug_count_objects = 0;
952 #endif
953 	}
954 	;
955 	purgeable_nonvolatile_count = 0;
956 	queue_init(&purgeable_nonvolatile_queue);
957 
958 	for (i = 0; i < MAX_COLORS; i++) {
959 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
960 	}
961 
962 	vm_page_queue_init(&vm_lopage_queue_free);
963 	vm_page_queue_init(&vm_page_queue_active);
964 	vm_page_queue_init(&vm_page_queue_inactive);
965 #if CONFIG_SECLUDED_MEMORY
966 	vm_page_queue_init(&vm_page_queue_secluded);
967 #endif /* CONFIG_SECLUDED_MEMORY */
968 	vm_page_queue_init(&vm_page_queue_cleaned);
969 	vm_page_queue_init(&vm_page_queue_throttled);
970 	vm_page_queue_init(&vm_page_queue_anonymous);
971 	queue_init(&vm_objects_wired);
972 
973 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
974 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
975 
976 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
977 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
978 	}
979 
980 	vm_page_queue_init(&vm_page_queue_donate);
981 	vm_page_queue_init(&vm_page_queue_background);
982 
983 	vm_page_background_count = 0;
984 	vm_page_background_internal_count = 0;
985 	vm_page_background_external_count = 0;
986 	vm_page_background_promoted_count = 0;
987 
988 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
989 
990 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
991 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
992 	}
993 
994 #if    defined(__LP64__)
995 	vm_page_background_mode = VM_PAGE_BG_ENABLED;
996 	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
997 #else
998 	vm_page_background_mode = VM_PAGE_BG_DISABLED;
999 	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1000 #endif
1001 	vm_page_background_exclude_external = 0;
1002 
1003 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1004 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1005 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1006 
1007 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1008 		vm_page_background_mode = VM_PAGE_BG_DISABLED;
1009 	}
1010 
1011 	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1012 	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1013 		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1014 	}
1015 
1016 	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1017 	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1018 	vm_page_donate_target = vm_page_donate_target_high;
1019 	vm_page_donate_count = 0;
1020 
1021 	vm_page_free_wanted = 0;
1022 	vm_page_free_wanted_privileged = 0;
1023 #if CONFIG_SECLUDED_MEMORY
1024 	vm_page_free_wanted_secluded = 0;
1025 #endif /* CONFIG_SECLUDED_MEMORY */
1026 
1027 #if defined (__x86_64__)
1028 	/* this must be called before vm_page_set_colors() */
1029 	vm_page_setup_clump();
1030 #endif
1031 
1032 	vm_page_set_colors();
1033 
1034 	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1035 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1036 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1037 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1038 
1039 	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1040 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1041 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1042 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1043 	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1044 	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1045 	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1046 #if CONFIG_SECLUDED_MEMORY
1047 	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1048 #endif /* CONFIG_SECLUDED_MEMORY */
1049 
1050 	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1051 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1052 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1053 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1054 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1055 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1056 #if CONFIG_SECLUDED_MEMORY
1057 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1058 #endif /* CONFIG_SECLUDED_MEMORY */
1059 
1060 	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1061 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1062 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1063 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1064 	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1065 #if CONFIG_SECLUDED_MEMORY
1066 	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1067 #endif /* CONFIG_SECLUDED_MEMORY */
1068 
1069 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1070 		vm_allocation_sites_static[t].refcount = 2;
1071 		vm_allocation_sites_static[t].tag = t;
1072 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1073 	}
1074 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1075 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1076 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1077 
1078 	/*
1079 	 *	Steal memory for the map and zone subsystems.
1080 	 */
1081 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1082 
1083 	/*
1084 	 *	Allocate (and initialize) the virtual-to-physical
1085 	 *	table hash buckets.
1086 	 *
1087 	 *	The number of buckets should be a power of two to
1088 	 *	get a good hash function.  The following computation
1089 	 *	chooses the first power of two that is greater
1090 	 *	than the number of physical pages in the system.
1091 	 */
1092 
1093 	if (vm_page_bucket_count == 0) {
1094 		unsigned int npages = pmap_free_pages();
1095 
1096 		vm_page_bucket_count = 1;
1097 		while (vm_page_bucket_count < npages) {
1098 			vm_page_bucket_count <<= 1;
1099 		}
1100 	}
1101 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1102 
1103 	vm_page_hash_mask = vm_page_bucket_count - 1;
1104 
1105 	/*
1106 	 *	Calculate object shift value for hashing algorithm:
1107 	 *		O = log2(sizeof(struct vm_object))
1108 	 *		B = log2(vm_page_bucket_count)
1109 	 *	        hash shifts the object left by
1110 	 *		B/2 - O
1111 	 */
1112 	size = vm_page_bucket_count;
1113 	for (log1 = 0; size > 1; log1++) {
1114 		size /= 2;
1115 	}
1116 	size = sizeof(struct vm_object);
1117 	for (log2 = 0; size > 1; log2++) {
1118 		size /= 2;
1119 	}
1120 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1121 
1122 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1123 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1124 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure unique series */
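	/*
	 * Worked example (illustrative): with vm_page_bucket_count == 1 << 20,
	 * log1 == 20, so vm_page_bucket_hash gets bits 10, 5 and 0 set (0x421).
	 * If, purely for illustration, sizeof(struct vm_object) fell in the
	 * 256..511 byte range (log2 == 8), vm_page_hash_shift above would be
	 * 20/2 - 8 + 1 == 3.
	 */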
1125 
1126 	if (vm_page_hash_mask & vm_page_bucket_count) {
1127 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1128 	}
1129 
1130 #if VM_PAGE_BUCKETS_CHECK
1131 #if VM_PAGE_FAKE_BUCKETS
1132 	/*
1133 	 * Allocate a decoy set of page buckets, to detect
1134 	 * any stomping there.
1135 	 */
1136 	vm_page_fake_buckets = (vm_page_bucket_t *)
1137 	    pmap_steal_memory(vm_page_bucket_count *
1138 	    sizeof(vm_page_bucket_t), 0);
1139 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1140 	vm_page_fake_buckets_end =
1141 	    vm_map_round_page((vm_page_fake_buckets_start +
1142 	    (vm_page_bucket_count *
1143 	    sizeof(vm_page_bucket_t))),
1144 	    PAGE_MASK);
1145 	char *cp;
1146 	for (cp = (char *)vm_page_fake_buckets_start;
1147 	    cp < (char *)vm_page_fake_buckets_end;
1148 	    cp++) {
1149 		*cp = 0x5a;
1150 	}
1151 #endif /* VM_PAGE_FAKE_BUCKETS */
1152 #endif /* VM_PAGE_BUCKETS_CHECK */
1153 
1154 	kernel_debug_string_early("vm_page_buckets");
1155 	vm_page_buckets = (vm_page_bucket_t *)
1156 	    pmap_steal_memory(vm_page_bucket_count *
1157 	    sizeof(vm_page_bucket_t), 0);
1158 
1159 	kernel_debug_string_early("vm_page_bucket_locks");
1160 	vm_page_bucket_locks = (lck_spin_t *)
1161 	    pmap_steal_memory(vm_page_bucket_lock_count *
1162 	    sizeof(lck_spin_t), 0);
1163 
1164 	for (i = 0; i < vm_page_bucket_count; i++) {
1165 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1166 
1167 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1168 #if     MACH_PAGE_HASH_STATS
1169 		bucket->cur_count = 0;
1170 		bucket->hi_count = 0;
1171 #endif /* MACH_PAGE_HASH_STATS */
1172 	}
1173 
1174 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1175 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1176 	}
1177 
1178 	vm_tag_init();
1179 
1180 #if VM_PAGE_BUCKETS_CHECK
1181 	vm_page_buckets_check_ready = TRUE;
1182 #endif /* VM_PAGE_BUCKETS_CHECK */
1183 
1184 	/*
1185 	 *	Machine-dependent code allocates the resident page table.
1186 	 *	It uses vm_page_init to initialize the page frames.
1187 	 *	The code also returns to us the virtual space available
1188 	 *	to the kernel.  We don't trust the pmap module
1189 	 *	to get the alignment right.
1190 	 */
1191 
1192 	kernel_debug_string_early("pmap_startup");
1193 	pmap_startup(&virtual_space_start, &virtual_space_end);
1194 	virtual_space_start = round_page(virtual_space_start);
1195 	virtual_space_end = trunc_page(virtual_space_end);
1196 
1197 	*startp = virtual_space_start;
1198 	*endp = virtual_space_end;
1199 
1200 	/*
1201 	 *	Compute the initial "wire" count.
1202 	 *	Up until now, the pages which have been set aside are not under
1203 	 *	the VM system's control, so although they aren't explicitly
1204 	 *	wired, they nonetheless can't be moved. At this moment,
1205 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1206 	 */
1207 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1208 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1209 	    vm_page_free_count - vm_lopage_free_count;
1210 #if CONFIG_SECLUDED_MEMORY
1211 	vm_page_wire_count -= vm_page_secluded_count;
1212 #endif
1213 	vm_page_wire_count_initial = vm_page_wire_count;
1214 
1215 	/* capture this for later use */
1216 	booter_size = ml_get_booter_memory_size();
1217 
1218 	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1219 	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1220 
1221 	kernel_debug_string_early("vm_page_bootstrap complete");
1222 }
1223 
1224 #ifndef MACHINE_PAGES
1225 /*
1226  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1227  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1228  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1229  */
1230 static void *
1231 pmap_steal_memory_internal(
1232 	vm_size_t size,
1233 	vm_size_t alignment,
1234 	boolean_t might_free,
1235 	unsigned int flags)
1236 {
1237 	kern_return_t kr;
1238 	vm_offset_t addr;
1239 	vm_offset_t map_addr;
1240 	ppnum_t phys_page;
1241 	unsigned int pmap_flags;
1242 
1243 	/*
1244 	 * Size needs to be aligned to word size.
1245 	 */
1246 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
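	/* e.g. on LP64 (sizeof(void *) == 8) a 13-byte request is rounded up to 16 bytes */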
1247 
1248 	/*
1249 	 * Alignment defaults to word size if not specified.
1250 	 */
1251 	if (alignment == 0) {
1252 		alignment = sizeof(void*);
1253 	}
1254 
1255 	/*
1256 	 * Alignment must be no greater than a page and must be a power of two.
1257 	 */
1258 	assert(alignment <= PAGE_SIZE);
1259 	assert((alignment & (alignment - 1)) == 0);
1260 
1261 	/*
1262 	 * On the first call, get the initial values for virtual address space
1263 	 * and page align them.
1264 	 */
1265 	if (virtual_space_start == virtual_space_end) {
1266 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1267 		virtual_space_start = round_page(virtual_space_start);
1268 		virtual_space_end = trunc_page(virtual_space_end);
1269 
1270 #if defined(__x86_64__)
1271 		/*
1272 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1273 		 * that map it. This makes the VA available for large page mappings.
1274 		 */
1275 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1276 #endif
1277 	}
1278 
1279 	/*
1280 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1281 	 * address if the size is big enough to back with at least 1 large page.
1282 	 */
1283 #if defined(__x86_64__)
1284 	if (size >= I386_LPGBYTES) {
1285 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1286 	}
1287 #endif
1288 	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
1289 	addr = virtual_space_start;
1290 	virtual_space_start += size;
1291 
1292 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1293 
1294 	/*
1295 	 * Allocate and map physical pages to back the new virtual space.
1296 	 */
1297 	map_addr = round_page(addr);
1298 	while (map_addr < addr + size) {
1299 #if defined(__x86_64__)
1300 		/*
1301 		 * Back with a large page if properly aligned on x86
1302 		 */
1303 		if ((map_addr & I386_LPGMASK) == 0 &&
1304 		    map_addr + I386_LPGBYTES <= addr + size &&
1305 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1306 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1307 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1308 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1309 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE);
1310 
1311 			if (kr != KERN_SUCCESS) {
1312 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1313 				    (unsigned long)map_addr, phys_page);
1314 			}
1315 			map_addr += I386_LPGBYTES;
1316 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1317 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1318 			vm_page_kern_lpage_count++;
1319 			continue;
1320 		}
1321 #endif
1322 
1323 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1324 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1325 		}
1326 
1327 #if defined(__x86_64__)
1328 		pmap_pre_expand(kernel_pmap, map_addr);
1329 #endif
1330 		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
1331 
1332 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1333 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1334 		    pmap_flags, FALSE);
1335 
1336 		if (kr != KERN_SUCCESS) {
1337 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1338 			    (unsigned long)map_addr, phys_page);
1339 		}
1340 		map_addr += PAGE_SIZE;
1341 
1342 		/*
1343 		 * Account for newly stolen memory
1344 		 */
1345 		vm_page_wire_count++;
1346 		vm_page_stolen_count++;
1347 	}
1348 
1349 #if defined(__x86_64__)
1350 	/*
1351 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1352 	 * Notify the pmap layer to record which high pages were allocated so far.
1353 	 */
1354 	if (might_free) {
1355 		pmap_hi_pages_done();
1356 	}
1357 #endif
1358 #if KASAN
1359 	kasan_notify_address(round_page(addr), size);
1360 #endif
1361 	return (void *) addr;
1362 }
1363 
1364 void *
1365 pmap_steal_memory(
1366 	vm_size_t size,
1367 	vm_size_t alignment)
1368 {
1369 	return pmap_steal_memory_internal(size, alignment, FALSE, 0);
1370 }
1371 
1372 void *
1373 pmap_steal_freeable_memory(
1374 	vm_size_t size)
1375 {
1376 	return pmap_steal_memory_internal(size, 0, TRUE, 0);
1377 }
1378 
1379 void *
1380 pmap_steal_zone_memory(
1381 	vm_size_t size,
1382 	vm_size_t alignment)
1383 {
1384 	unsigned int flags = 0;
1385 
1386 
1387 	return pmap_steal_memory_internal(size, alignment, FALSE, flags);
1388 }
1389 
1390 
1391 #if CONFIG_SECLUDED_MEMORY
1392 /* boot-args to control secluded memory */
1393 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
1394 /* IOKit can use secluded memory */
1395 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
1396 /* apps can use secluded memory */
1397 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
1398 /* filecache can use secluded memory */
1399 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
1400 uint64_t secluded_shutoff_trigger = 0;
1401 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1402 #endif /* CONFIG_SECLUDED_MEMORY */
1403 
1404 
1405 #if defined(__arm64__)
1406 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1407 unsigned int vm_first_phys_ppnum = 0;
1408 #endif
1409 
1410 void vm_page_release_startup(vm_page_t mem);
1411 void
1412 pmap_startup(
1413 	vm_offset_t     *startp,
1414 	vm_offset_t     *endp)
1415 {
1416 	unsigned int    i, npages;
1417 	ppnum_t         phys_page;
1418 	uint64_t        mem_sz;
1419 	uint64_t        start_ns;
1420 	uint64_t        now_ns;
1421 	uint_t          low_page_count = 0;
1422 
1423 #if    defined(__LP64__)
1424 	/*
1425 	 * make sure we are aligned on a 64 byte boundary
1426 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1427 	 * 6 bits of the pointer)
1428 	 */
1429 	if (virtual_space_start != virtual_space_end) {
1430 		virtual_space_start = round_page(virtual_space_start);
1431 	}
1432 #endif
1433 
1434 	/*
1435 	 * We calculate how many page frames we will have
1436 	 * and then allocate the page structures in one chunk.
1437 	 *
1438 	 * Note that the calculation here doesn't take into account
1439 	 * the memory needed to map what's being allocated, i.e. the page
1440 	 * table entries. So the actual number of pages we get will be
1441 	 * less than this. To do someday: include that in the computation.
1442 	 *
1443 	 * Also for ARM, we don't use the count of free_pages, but rather the
1444 	 * range from last page to first page (ignoring holes due to retired pages).
1445 	 */
1446 #if defined(__arm64__)
1447 	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1448 #else /* defined(__arm64__) */
1449 	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1450 #endif /* defined(__arm64__) */
1451 	mem_sz += round_page(virtual_space_start) - virtual_space_start;        /* Account for any slop */
1452 	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));    /* scaled to include the vm_page_ts */
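	/*
	 * Illustrative: dividing by (PAGE_SIZE + sizeof(struct vm_page)) solves
	 * npages * PAGE_SIZE + npages * sizeof(struct vm_page) <= mem_sz, so the
	 * vm_page structures themselves fit in the same budget as the pages
	 * they describe.
	 */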
1453 
1454 
1455 	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1456 
1457 	/*
1458 	 * Check if we want to initialize pages to a known value
1459 	 */
1460 	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1461 		fill = TRUE;
1462 	}
1463 #if     DEBUG
1464 	/* This slows down booting the DEBUG kernel, particularly on
1465 	 * large memory systems, but is worthwhile in deterministically
1466 	 * trapping uninitialized memory usage.
1467 	 */
1468 	if (!fill) {
1469 		fill = TRUE;
1470 		fillval = 0xDEB8F177;
1471 	}
1472 #endif
1473 	if (fill) {
1474 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1475 	}
1476 
1477 #if CONFIG_SECLUDED_MEMORY
1478 	/*
1479 	 * Figure out how much secluded memory to have before we start
1480 	 * releasing pages to the free lists.
1481 	 * The default, if specified nowhere else, is no secluded mem.
1482 	 */
1483 	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
1484 
1485 	/*
1486 	 * Allow a really large app to effectively use secluded memory until it exits.
1487 	 */
1488 	if (vm_page_secluded_target != 0) {
1489 		/*
1490 		 * Get an amount from boot-args, else use 1/2 of max_mem.
1491 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1492 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
1493 		 */
1494 		int secluded_shutoff_mb;
1495 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1496 		    sizeof(secluded_shutoff_mb))) {
1497 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1498 		} else {
1499 			secluded_shutoff_trigger = max_mem / 2;
1500 		}
1501 
1502 		/* ensure the headroom value is sensible and avoid underflows */
1503 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1504 	}
1505 
1506 #endif /* CONFIG_SECLUDED_MEMORY */
1507 
1508 #if defined(__x86_64__)
1509 
1510 	/*
1511 	 * Decide how much memory we delay freeing at boot time.
1512 	 */
1513 	uint32_t delay_above_gb;
1514 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1515 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1516 	}
1517 
1518 	if (delay_above_gb == 0) {
1519 		delay_above_pnum = PPNUM_MAX;
1520 	} else {
1521 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1522 	}
1523 
1524 	/* make sure we have sane breathing room: 1G above low memory */
1525 	if (delay_above_pnum <= max_valid_low_ppnum) {
1526 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1527 	}
1528 
1529 	if (delay_above_pnum < PPNUM_MAX) {
1530 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1531 	}
1532 
1533 #endif /* defined(__x86_64__) */
1534 
1535 	/*
1536 	 * Initialize and release the page frames.
1537 	 */
1538 	kernel_debug_string_early("page_frame_init");
1539 
1540 	vm_page_array_beginning_addr = &vm_pages[0];
1541 	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
1542 #if VM_PAGE_PACKED_FROM_ARRAY
1543 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1544 		panic("pmap_startup(): too many pages to support vm_page packing");
1545 	}
1546 #endif
1547 
1548 	vm_delayed_count = 0;
1549 
1550 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1551 	vm_pages_count = 0;
1552 	for (i = 0; i < npages; i++) {
1553 		/* Did we run out of pages? */
1554 		if (!pmap_next_page(&phys_page)) {
1555 			break;
1556 		}
1557 
1558 		if (phys_page < max_valid_low_ppnum) {
1559 			++low_page_count;
1560 		}
1561 
1562 		/* Are we at high enough pages to delay the rest? */
1563 		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1564 			vm_delayed_count = pmap_free_pages();
1565 			break;
1566 		}
1567 
1568 #if defined(__arm64__)
1569 		if (i == 0) {
1570 			vm_first_phys_ppnum = phys_page;
1571 			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1572 			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1573 		}
1574 #endif /* defined(__arm64__) */
1575 
1576 #if defined(__x86_64__)
1577 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
1578 		if (i > 0) {
1579 			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1580 		}
1581 #endif
1582 		++vm_pages_count;
1583 		vm_page_init(&vm_pages[i], phys_page, FALSE);
1584 		if (fill) {
1585 			fillPage(phys_page, fillval);
1586 		}
1587 		if (vm_himemory_mode) {
1588 			vm_page_release_startup(&vm_pages[i]);
1589 		}
1590 	}
1591 	vm_page_pages = vm_pages_count; /* used to report to user space */
1592 
1593 	if (!vm_himemory_mode) {
1594 		do {
1595 			if (!VMP_ERROR_GET(&vm_pages[--i])) {               /* skip retired pages */
1596 				vm_page_release_startup(&vm_pages[i]);
1597 			}
1598 		} while (i != 0);
1599 	}
1600 
1601 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1602 	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1603 	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1604 
1605 #if defined(__LP64__)
1606 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1607 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1608 	}
1609 
1610 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1611 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1612 	}
1613 #endif
1614 
1615 	VM_CHECK_MEMORYSTATUS;
1616 
1617 	/*
1618 	 * We have to re-align virtual_space_start,
1619 	 * because pmap_steal_memory has been using it.
1620 	 */
1621 	virtual_space_start = round_page(virtual_space_start);
1622 	*startp = virtual_space_start;
1623 	*endp = virtual_space_end;
1624 }
1625 #endif  /* MACHINE_PAGES */
1626 
1627 /*
1628  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1629  * from or frees to this zone. It's just here for reporting purposes via the zprint command.
1630  * This needs to be done after all initially delayed pages are put on the free lists.
1631  */
1632 static void
1633 vm_page_module_init_delayed(void)
1634 {
1635 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1636 	    ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1637 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1638 
1639 		zone_set_exhaustible(z, 0);
1640 		/*
1641 		 * Reflect size and usage information for vm_pages[].
1642 		 */
1643 
1644 		z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1645 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
1646 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1647 		vm_pages_count * sizeof(struct vm_page);
1648 		vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1649 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1650 		z->z_wired_cur += vm_page_zone_pages;
1651 		z->z_wired_hwm = z->z_wired_cur;
1652 		z->z_va_cur = z->z_wired_cur;
1653 		/* since zone accounts for these, take them out of stolen */
1654 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1655 	});
1656 }
1657 
1658 /*
1659  * Create the vm_pages zone. This is used for the vm_page structures for the pages
1660  * that are scavanged from other boot time usages by ml_static_mfree(). As such,
1661  * this needs to happen in early VM bootstrap.
1662  */
1663 
1664 __startup_func
1665 static void
1666 vm_page_module_init(void)
1667 {
1668 	vm_size_t vm_page_with_ppnum_size;
1669 
1670 	/*
1671 	 * Since the pointers to elements in this zone will be packed, they
1672 	 * must have an appropriate size, which is not necessarily what sizeof() reports.
1673 	 */
1674 	vm_page_with_ppnum_size =
1675 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1676 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
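	/*
	 * Worked example with hypothetical values: if the packed-pointer
	 * alignment were 64 bytes and sizeof(struct vm_page_with_ppnum)
	 * were 88, the round-up above gives (88 + 63) & ~63 = 128, so every
	 * zone element lands on a boundary the pointer-packing scheme can
	 * represent.
	 */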
1677 
1678 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1679 	    ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NOTBITAG,
1680 	    ZONE_ID_ANY, ^(zone_t z) {
1681 		/*
1682 		 * The number "10" is a small number that is larger than the number
1683 		 * of fictitious pages that any single caller will attempt to allocate
1684 		 * without blocking.
1685 		 *
1686 		 * The largest such number at the moment is kmem_alloc()
1687 	 * when 2 guard pages are asked for. 10 is simply a somewhat larger number,
1688 		 * taking into account the 50% hysteresis the zone allocator uses.
1689 		 *
1690 		 * Note: this works at all because the zone allocator
1691 		 *       doesn't ever allocate fictitious pages.
1692 		 */
1693 		zone_raise_reserve(z, 10);
1694 	});
1695 }
1696 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1697 
1698 /*
1699  *	Routine:	vm_page_create
1700  *	Purpose:
1701  *		After the VM system is up, machine-dependent code
1702  *		may stumble across more physical memory.  For example,
1703  *		memory that it was reserving for a frame buffer.
1704  *		vm_page_create turns this memory into available pages.
1705  */
1706 
1707 void
1708 vm_page_create(
1709 	ppnum_t start,
1710 	ppnum_t end)
1711 {
1712 	ppnum_t         phys_page;
1713 	vm_page_t       m;
1714 
1715 	for (phys_page = start;
1716 	    phys_page < end;
1717 	    phys_page++) {
1718 		m = vm_page_grab_fictitious_common(phys_page, TRUE);
1719 		m->vmp_fictitious = FALSE;
1720 		pmap_clear_noencrypt(phys_page);
1721 
1722 
1723 		vm_free_page_lock();
1724 		vm_page_pages++;
1725 		vm_free_page_unlock();
1726 		vm_page_release(m, FALSE);
1727 	}
1728 }
1729 
1730 
1731 /*
1732  *	vm_page_hash:
1733  *
1734  *	Distributes the object/offset key pair among hash buckets.
1735  *
1736  *	NOTE:	The bucket count must be a power of 2
1737  */
1738 #define vm_page_hash(object, offset) (\
1739 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1740 	 & vm_page_hash_mask)
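
/*
 * Illustrative sketch (kept under #if 0, not compiled): because the bucket
 * count is a power of 2, vm_page_hash_mask is (bucket count - 1), so the
 * multiplicative hash above can be reduced with a mask instead of a modulo.
 * The helper below is hypothetical and only spells out the same arithmetic
 * as the macro.
 */
#if 0
static natural_t
vm_page_hash_example(vm_object_t object, vm_object_offset_t offset)
{
	natural_t key;

	/* mix the object pointer and the page-aligned offset, as above */
	key = (natural_t)((uintptr_t)object * vm_page_bucket_hash) +
	    ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash);

	/* power-of-2 bucket count: masking is equivalent to key % bucket count */
	return key & vm_page_hash_mask;
}
#endif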
1741 
1742 
1743 /*
1744  *	vm_page_insert:		[ internal use only ]
1745  *
1746  *	Inserts the given mem entry into the object/object-page
1747  *	table and object list.
1748  *
1749  *	The object must be locked.
1750  */
1751 void
1752 vm_page_insert(
1753 	vm_page_t               mem,
1754 	vm_object_t             object,
1755 	vm_object_offset_t      offset)
1756 {
1757 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1758 }
1759 
1760 void
1761 vm_page_insert_wired(
1762 	vm_page_t               mem,
1763 	vm_object_t             object,
1764 	vm_object_offset_t      offset,
1765 	vm_tag_t                tag)
1766 {
1767 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1768 }
1769 
1770 void
1771 vm_page_insert_internal(
1772 	vm_page_t               mem,
1773 	vm_object_t             object,
1774 	vm_object_offset_t      offset,
1775 	vm_tag_t                tag,
1776 	boolean_t               queues_lock_held,
1777 	boolean_t               insert_in_hash,
1778 	boolean_t               batch_pmap_op,
1779 	boolean_t               batch_accounting,
1780 	uint64_t                *delayed_ledger_update)
1781 {
1782 	vm_page_bucket_t        *bucket;
1783 	lck_spin_t              *bucket_lock;
1784 	int                     hash_id;
1785 	task_t                  owner;
1786 	int                     ledger_idx_volatile;
1787 	int                     ledger_idx_nonvolatile;
1788 	int                     ledger_idx_volatile_compressed;
1789 	int                     ledger_idx_nonvolatile_compressed;
1790 	boolean_t               do_footprint;
1791 
1792 #if 0
1793 	/*
1794 	 * we may not hold the page queue lock
1795 	 * so this check isn't safe to make
1796 	 */
1797 	VM_PAGE_CHECK(mem);
1798 #endif
1799 
1800 	assertf(page_aligned(offset), "0x%llx\n", offset);
1801 
1802 	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1803 
1804 	vm_object_lock_assert_exclusive(object);
1805 	LCK_MTX_ASSERT(&vm_page_queue_lock,
1806 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
1807 	    : LCK_MTX_ASSERT_NOTOWNED);
1808 
1809 	if (queues_lock_held == FALSE) {
1810 		assert(!VM_PAGE_PAGEABLE(mem));
1811 	}
1812 
1813 	if (insert_in_hash == TRUE) {
1814 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1815 		if (mem->vmp_tabled || mem->vmp_object) {
1816 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1817 			    "already in (obj=%p,off=0x%llx)",
1818 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1819 		}
1820 #endif
1821 		if (object->internal && (offset >= object->vo_size)) {
1822 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1823 			    mem, object, offset, object->vo_size);
1824 		}
1825 
1826 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1827 
1828 		/*
1829 		 *	Record the object/offset pair in this page
1830 		 */
1831 
1832 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1833 		mem->vmp_offset = offset;
1834 
1835 #if CONFIG_SECLUDED_MEMORY
1836 		if (object->eligible_for_secluded) {
1837 			vm_page_secluded.eligible_for_secluded++;
1838 		}
1839 #endif /* CONFIG_SECLUDED_MEMORY */
1840 
1841 		/*
1842 		 *	Insert it into the object/offset hash table
1843 		 */
1844 		hash_id = vm_page_hash(object, offset);
1845 		bucket = &vm_page_buckets[hash_id];
1846 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1847 
1848 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1849 
1850 		mem->vmp_next_m = bucket->page_list;
1851 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
1852 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1853 
1854 #if     MACH_PAGE_HASH_STATS
1855 		if (++bucket->cur_count > bucket->hi_count) {
1856 			bucket->hi_count = bucket->cur_count;
1857 		}
1858 #endif /* MACH_PAGE_HASH_STATS */
1859 		mem->vmp_hashed = TRUE;
1860 		lck_spin_unlock(bucket_lock);
1861 	}
1862 
1863 	{
1864 		unsigned int    cache_attr;
1865 
1866 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
1867 
1868 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
1869 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1870 		}
1871 	}
1872 	/*
1873 	 *	Now link into the object's list of backed pages.
1874 	 */
1875 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
1876 	object->memq_hint = mem;
1877 	mem->vmp_tabled = TRUE;
1878 
1879 	/*
1880 	 *	Show that the object has one more resident page.
1881 	 */
1882 
1883 	object->resident_page_count++;
1884 	if (VM_PAGE_WIRED(mem)) {
1885 		assert(mem->vmp_wire_count > 0);
1886 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1887 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1888 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1889 	}
1890 	assert(object->resident_page_count >= object->wired_page_count);
1891 
1892 #if DEVELOPMENT || DEBUG
1893 	if (object->object_is_shared_cache &&
1894 	    object->pager != NULL &&
1895 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1896 		int new, old;
1897 		assert(!object->internal);
1898 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1899 		do {
1900 			old = shared_region_pagers_resident_peak;
1901 		} while (old < new &&
1902 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1903 	}
1904 #endif /* DEVELOPMENT || DEBUG */
1905 
1906 	if (batch_accounting == FALSE) {
1907 		if (object->internal) {
1908 			OSAddAtomic(1, &vm_page_internal_count);
1909 		} else {
1910 			OSAddAtomic(1, &vm_page_external_count);
1911 		}
1912 	}
1913 
1914 	/*
1915 	 * It wouldn't make sense to insert a "reusable" page in
1916 	 * an object (the page would have been marked "reusable" only
1917 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1918 	 * in the object at that time).
1919 	 * But a page could be inserted in an "all_reusable" object, if
1920 	 * something faults it in (a vm_read() from another task or a
1921 	 * "use-after-free" issue in user space, for example).  It can
1922 	 * also happen if we're relocating a page from that object to
1923 	 * a different physical page during a physically-contiguous
1924 	 * allocation.
1925 	 */
1926 	assert(!mem->vmp_reusable);
1927 	if (object->all_reusable) {
1928 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1929 	}
1930 
1931 	if (object->purgable == VM_PURGABLE_DENY &&
1932 	    !object->vo_ledger_tag) {
1933 		owner = TASK_NULL;
1934 	} else {
1935 		owner = VM_OBJECT_OWNER(object);
1936 		vm_object_ledger_tag_ledgers(object,
1937 		    &ledger_idx_volatile,
1938 		    &ledger_idx_nonvolatile,
1939 		    &ledger_idx_volatile_compressed,
1940 		    &ledger_idx_nonvolatile_compressed,
1941 		    &do_footprint);
1942 	}
1943 	if (owner &&
1944 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
1945 	    object->purgable == VM_PURGABLE_DENY ||
1946 	    VM_PAGE_WIRED(mem))) {
1947 		if (delayed_ledger_update) {
1948 			*delayed_ledger_update += PAGE_SIZE;
1949 		} else {
1950 			/* more non-volatile bytes */
1951 			ledger_credit(owner->ledger,
1952 			    ledger_idx_nonvolatile,
1953 			    PAGE_SIZE);
1954 			if (do_footprint) {
1955 				/* more footprint */
1956 				ledger_credit(owner->ledger,
1957 				    task_ledgers.phys_footprint,
1958 				    PAGE_SIZE);
1959 			}
1960 		}
1961 	} else if (owner &&
1962 	    (object->purgable == VM_PURGABLE_VOLATILE ||
1963 	    object->purgable == VM_PURGABLE_EMPTY)) {
1964 		assert(!VM_PAGE_WIRED(mem));
1965 		/* more volatile bytes */
1966 		ledger_credit(owner->ledger,
1967 		    ledger_idx_volatile,
1968 		    PAGE_SIZE);
1969 	}
1970 
1971 	if (object->purgable == VM_PURGABLE_VOLATILE) {
1972 		if (VM_PAGE_WIRED(mem)) {
1973 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1974 		} else {
1975 			OSAddAtomic(+1, &vm_page_purgeable_count);
1976 		}
1977 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
1978 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1979 		/*
1980 		 * This page belongs to a purged VM object but hasn't
1981 		 * been purged (because it was "busy").
1982 		 * It's in the "throttled" queue and hence not
1983 		 * visible to vm_pageout_scan().  Move it to a pageable
1984 		 * queue, so that it can eventually be reclaimed, instead
1985 		 * of lingering in the "empty" object.
1986 		 */
1987 		if (queues_lock_held == FALSE) {
1988 			vm_page_lockspin_queues();
1989 		}
1990 		vm_page_deactivate(mem);
1991 		if (queues_lock_held == FALSE) {
1992 			vm_page_unlock_queues();
1993 		}
1994 	}
1995 
1996 #if VM_OBJECT_TRACKING_OP_MODIFIED
1997 	if (vm_object_tracking_btlog &&
1998 	    object->internal &&
1999 	    object->resident_page_count == 0 &&
2000 	    object->pager == NULL &&
2001 	    object->shadow != NULL &&
2002 	    object->shadow->vo_copy == object) {
2003 		btlog_record(vm_object_tracking_btlog, object,
2004 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2005 		    btref_get(__builtin_frame_address(0), 0));
2006 	}
2007 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2008 }
2009 
2010 /*
2011  *	vm_page_replace:
2012  *
2013  *	Exactly like vm_page_insert, except that we first
2014  *	remove any existing page at the given offset in object.
2015  *
2016  *	The object must be locked.
2017  */
2018 void
2019 vm_page_replace(
2020 	vm_page_t               mem,
2021 	vm_object_t             object,
2022 	vm_object_offset_t      offset)
2023 {
2024 	vm_page_bucket_t *bucket;
2025 	vm_page_t        found_m = VM_PAGE_NULL;
2026 	lck_spin_t      *bucket_lock;
2027 	int             hash_id;
2028 
2029 #if 0
2030 	/*
2031 	 * we don't hold the page queue lock
2032 	 * so this check isn't safe to make
2033 	 */
2034 	VM_PAGE_CHECK(mem);
2035 #endif
2036 	vm_object_lock_assert_exclusive(object);
2037 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2038 	if (mem->vmp_tabled || mem->vmp_object) {
2039 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2040 		    "already in (obj=%p,off=0x%llx)",
2041 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2042 	}
2043 #endif
2044 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2045 
2046 	assert(!VM_PAGE_PAGEABLE(mem));
2047 
2048 	/*
2049 	 *	Record the object/offset pair in this page
2050 	 */
2051 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2052 	mem->vmp_offset = offset;
2053 
2054 	/*
2055 	 *	Insert it into the object/offset hash table,
2056 	 *	replacing any page that might have been there.
2057 	 */
2058 
2059 	hash_id = vm_page_hash(object, offset);
2060 	bucket = &vm_page_buckets[hash_id];
2061 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2062 
2063 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2064 
2065 	if (bucket->page_list) {
2066 		vm_page_packed_t *mp = &bucket->page_list;
2067 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2068 
2069 		do {
2070 			/*
2071 			 * compare packed object pointers
2072 			 */
2073 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2074 				/*
2075 				 * Remove old page from hash list
2076 				 */
2077 				*mp = m->vmp_next_m;
2078 				m->vmp_hashed = FALSE;
2079 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2080 
2081 				found_m = m;
2082 				break;
2083 			}
2084 			mp = &m->vmp_next_m;
2085 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2086 
2087 		mem->vmp_next_m = bucket->page_list;
2088 	} else {
2089 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2090 	}
2091 	/*
2092 	 * insert new page at head of hash list
2093 	 */
2094 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2095 	mem->vmp_hashed = TRUE;
2096 
2097 	lck_spin_unlock(bucket_lock);
2098 
2099 	if (found_m) {
2100 		/*
2101 		 * there was already a page at the specified
2102 		 * offset for this object... remove it from
2103 		 * the object and free it back to the free list
2104 		 */
2105 		vm_page_free_unlocked(found_m, FALSE);
2106 	}
2107 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2108 }
2109 
2110 /*
2111  *	vm_page_remove:		[ internal use only ]
2112  *
2113  *	Removes the given mem entry from the object/offset-page
2114  *	table and the object page list.
2115  *
2116  *	The object must be locked.
2117  */
2118 
2119 void
2120 vm_page_remove(
2121 	vm_page_t       mem,
2122 	boolean_t       remove_from_hash)
2123 {
2124 	vm_page_bucket_t *bucket;
2125 	vm_page_t       this;
2126 	lck_spin_t      *bucket_lock;
2127 	int             hash_id;
2128 	task_t          owner;
2129 	vm_object_t     m_object;
2130 	int             ledger_idx_volatile;
2131 	int             ledger_idx_nonvolatile;
2132 	int             ledger_idx_volatile_compressed;
2133 	int             ledger_idx_nonvolatile_compressed;
2134 	int             do_footprint;
2135 
2136 	m_object = VM_PAGE_OBJECT(mem);
2137 
2138 	vm_object_lock_assert_exclusive(m_object);
2139 	assert(mem->vmp_tabled);
2140 	assert(!mem->vmp_cleaning);
2141 	assert(!mem->vmp_laundry);
2142 
2143 	if (VM_PAGE_PAGEABLE(mem)) {
2144 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2145 	}
2146 #if 0
2147 	/*
2148 	 * we don't hold the page queue lock
2149 	 * so this check isn't safe to make
2150 	 */
2151 	VM_PAGE_CHECK(mem);
2152 #endif
2153 	if (remove_from_hash == TRUE) {
2154 		/*
2155 		 *	Remove from the object/offset hash table
2156 		 */
2157 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2158 		bucket = &vm_page_buckets[hash_id];
2159 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2160 
2161 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2162 
2163 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2164 			/* optimize for common case */
2165 
2166 			bucket->page_list = mem->vmp_next_m;
2167 		} else {
2168 			vm_page_packed_t        *prev;
2169 
2170 			for (prev = &this->vmp_next_m;
2171 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2172 			    prev = &this->vmp_next_m) {
2173 				continue;
2174 			}
2175 			*prev = this->vmp_next_m;
2176 		}
2177 #if     MACH_PAGE_HASH_STATS
2178 		bucket->cur_count--;
2179 #endif /* MACH_PAGE_HASH_STATS */
2180 		mem->vmp_hashed = FALSE;
2181 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2182 		lck_spin_unlock(bucket_lock);
2183 	}
2184 	/*
2185 	 *	Now remove from the object's list of backed pages.
2186 	 */
2187 
2188 	vm_page_remove_internal(mem);
2189 
2190 	/*
2191 	 *	And show that the object has one fewer resident
2192 	 *	page.
2193 	 */
2194 
2195 	assert(m_object->resident_page_count > 0);
2196 	m_object->resident_page_count--;
2197 
2198 #if DEVELOPMENT || DEBUG
2199 	if (m_object->object_is_shared_cache &&
2200 	    m_object->pager != NULL &&
2201 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2202 		assert(!m_object->internal);
2203 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
2204 	}
2205 #endif /* DEVELOPMENT || DEBUG */
2206 
2207 	if (m_object->internal) {
2208 #if DEBUG
2209 		assert(vm_page_internal_count);
2210 #endif /* DEBUG */
2211 
2212 		OSAddAtomic(-1, &vm_page_internal_count);
2213 	} else {
2214 		assert(vm_page_external_count);
2215 		OSAddAtomic(-1, &vm_page_external_count);
2216 
2217 		if (mem->vmp_xpmapped) {
2218 			assert(vm_page_xpmapped_external_count);
2219 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2220 		}
2221 	}
2222 	if (!m_object->internal &&
2223 	    m_object->cached_list.next &&
2224 	    m_object->cached_list.prev) {
2225 		if (m_object->resident_page_count == 0) {
2226 			vm_object_cache_remove(m_object);
2227 		}
2228 	}
2229 
2230 	if (VM_PAGE_WIRED(mem)) {
2231 		assert(mem->vmp_wire_count > 0);
2232 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2233 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2234 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2235 	}
2236 	assert(m_object->resident_page_count >=
2237 	    m_object->wired_page_count);
2238 	if (mem->vmp_reusable) {
2239 		assert(m_object->reusable_page_count > 0);
2240 		m_object->reusable_page_count--;
2241 		assert(m_object->reusable_page_count <=
2242 		    m_object->resident_page_count);
2243 		mem->vmp_reusable = FALSE;
2244 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2245 		vm_page_stats_reusable.reused_remove++;
2246 	} else if (m_object->all_reusable) {
2247 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2248 		vm_page_stats_reusable.reused_remove++;
2249 	}
2250 
2251 	if (m_object->purgable == VM_PURGABLE_DENY &&
2252 	    !m_object->vo_ledger_tag) {
2253 		owner = TASK_NULL;
2254 	} else {
2255 		owner = VM_OBJECT_OWNER(m_object);
2256 		vm_object_ledger_tag_ledgers(m_object,
2257 		    &ledger_idx_volatile,
2258 		    &ledger_idx_nonvolatile,
2259 		    &ledger_idx_volatile_compressed,
2260 		    &ledger_idx_nonvolatile_compressed,
2261 		    &do_footprint);
2262 	}
2263 	if (owner &&
2264 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2265 	    m_object->purgable == VM_PURGABLE_DENY ||
2266 	    VM_PAGE_WIRED(mem))) {
2267 		/* less non-volatile bytes */
2268 		ledger_debit(owner->ledger,
2269 		    ledger_idx_nonvolatile,
2270 		    PAGE_SIZE);
2271 		if (do_footprint) {
2272 			/* less footprint */
2273 			ledger_debit(owner->ledger,
2274 			    task_ledgers.phys_footprint,
2275 			    PAGE_SIZE);
2276 		}
2277 	} else if (owner &&
2278 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
2279 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
2280 		assert(!VM_PAGE_WIRED(mem));
2281 		/* less volatile bytes */
2282 		ledger_debit(owner->ledger,
2283 		    ledger_idx_volatile,
2284 		    PAGE_SIZE);
2285 	}
2286 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2287 		if (VM_PAGE_WIRED(mem)) {
2288 			assert(vm_page_purgeable_wired_count > 0);
2289 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2290 		} else {
2291 			assert(vm_page_purgeable_count > 0);
2292 			OSAddAtomic(-1, &vm_page_purgeable_count);
2293 		}
2294 	}
2295 
2296 	if (m_object->set_cache_attr == TRUE) {
2297 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2298 	}
2299 
2300 	mem->vmp_tabled = FALSE;
2301 	mem->vmp_object = 0;
2302 	mem->vmp_offset = (vm_object_offset_t) -1;
2303 }
2304 
2305 
2306 /*
2307  *	vm_page_lookup:
2308  *
2309  *	Returns the page associated with the object/offset
2310  *	pair specified; if none is found, VM_PAGE_NULL is returned.
2311  *
2312  *	The object must be locked.  No side effects.
2313  */
2314 
2315 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
2316 
2317 #if DEBUG_VM_PAGE_LOOKUP
2318 
2319 struct {
2320 	uint64_t        vpl_total;
2321 	uint64_t        vpl_empty_obj;
2322 	uint64_t        vpl_bucket_NULL;
2323 	uint64_t        vpl_hit_hint;
2324 	uint64_t        vpl_hit_hint_next;
2325 	uint64_t        vpl_hit_hint_prev;
2326 	uint64_t        vpl_fast;
2327 	uint64_t        vpl_slow;
2328 	uint64_t        vpl_hit;
2329 	uint64_t        vpl_miss;
2330 
2331 	uint64_t        vpl_fast_elapsed;
2332 	uint64_t        vpl_slow_elapsed;
2333 } vm_page_lookup_stats __attribute__((aligned(8)));
2334 
2335 #endif
2336 
2337 #define KDP_VM_PAGE_WALK_MAX    1000
2338 
2339 vm_page_t
2340 kdp_vm_page_lookup(
2341 	vm_object_t             object,
2342 	vm_object_offset_t      offset)
2343 {
2344 	vm_page_t cur_page;
2345 	int num_traversed = 0;
2346 
2347 	if (not_in_kdp) {
2348 		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2349 	}
2350 
2351 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2352 		if (cur_page->vmp_offset == offset) {
2353 			return cur_page;
2354 		}
2355 		num_traversed++;
2356 
2357 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2358 			return VM_PAGE_NULL;
2359 		}
2360 	}
2361 
2362 	return VM_PAGE_NULL;
2363 }
2364 
2365 vm_page_t
2366 vm_page_lookup(
2367 	vm_object_t             object,
2368 	vm_object_offset_t      offset)
2369 {
2370 	vm_page_t       mem;
2371 	vm_page_bucket_t *bucket;
2372 	vm_page_queue_entry_t   qe;
2373 	lck_spin_t      *bucket_lock = NULL;
2374 	int             hash_id;
2375 #if DEBUG_VM_PAGE_LOOKUP
2376 	uint64_t        start, elapsed;
2377 
2378 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2379 #endif
2380 
2381 	if (VM_KERNEL_ADDRESS(offset)) {
2382 		offset = VM_KERNEL_STRIP_UPTR(offset);
2383 	}
2384 
2385 	vm_object_lock_assert_held(object);
2386 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2387 
2388 	if (object->resident_page_count == 0) {
2389 #if DEBUG_VM_PAGE_LOOKUP
2390 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2391 #endif
2392 		return VM_PAGE_NULL;
2393 	}
2394 
2395 	mem = object->memq_hint;
2396 
2397 	if (mem != VM_PAGE_NULL) {
2398 		assert(VM_PAGE_OBJECT(mem) == object);
2399 
2400 		if (mem->vmp_offset == offset) {
2401 #if DEBUG_VM_PAGE_LOOKUP
2402 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2403 #endif
2404 			return mem;
2405 		}
2406 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2407 
2408 		if (!vm_page_queue_end(&object->memq, qe)) {
2409 			vm_page_t       next_page;
2410 
2411 			next_page = (vm_page_t)((uintptr_t)qe);
2412 			assert(VM_PAGE_OBJECT(next_page) == object);
2413 
2414 			if (next_page->vmp_offset == offset) {
2415 				object->memq_hint = next_page; /* new hint */
2416 #if DEBUG_VM_PAGE_LOOKUP
2417 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2418 #endif
2419 				return next_page;
2420 			}
2421 		}
2422 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2423 
2424 		if (!vm_page_queue_end(&object->memq, qe)) {
2425 			vm_page_t prev_page;
2426 
2427 			prev_page = (vm_page_t)((uintptr_t)qe);
2428 			assert(VM_PAGE_OBJECT(prev_page) == object);
2429 
2430 			if (prev_page->vmp_offset == offset) {
2431 				object->memq_hint = prev_page; /* new hint */
2432 #if DEBUG_VM_PAGE_LOOKUP
2433 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2434 #endif
2435 				return prev_page;
2436 			}
2437 		}
2438 	}
2439 	/*
2440 	 * Search the hash table for this object/offset pair
2441 	 */
2442 	hash_id = vm_page_hash(object, offset);
2443 	bucket = &vm_page_buckets[hash_id];
2444 
2445 	/*
2446 	 * since we hold the object lock, we are guaranteed that no
2447 	 * new pages can be inserted into this object... this in turn
2448 	 * guarantees that the page we're looking for can't exist
2449 	 * if the bucket it hashes to is currently NULL even when looked
2450 	 * at outside the scope of the hash bucket lock... this is a
2451 	 * really cheap optimization to avoid taking the lock
2452 	 */
2453 	if (!bucket->page_list) {
2454 #if DEBUG_VM_PAGE_LOOKUP
2455 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2456 #endif
2457 		return VM_PAGE_NULL;
2458 	}
2459 
2460 #if DEBUG_VM_PAGE_LOOKUP
2461 	start = mach_absolute_time();
2462 #endif
2463 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2464 		/*
2465 		 * on average, it's roughly 3 times faster to run a short memq list
2466 		 * than to take the spin lock and go through the hash list
2467 		 */
2468 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
2469 
2470 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2471 			if (mem->vmp_offset == offset) {
2472 				break;
2473 			}
2474 
2475 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2476 		}
2477 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2478 			mem = NULL;
2479 		}
2480 	} else {
2481 		vm_page_object_t        packed_object;
2482 
2483 		packed_object = VM_PAGE_PACK_OBJECT(object);
2484 
2485 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2486 
2487 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2488 
2489 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2490 		    mem != VM_PAGE_NULL;
2491 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2492 #if 0
2493 			/*
2494 			 * we don't hold the page queue lock
2495 			 * so this check isn't safe to make
2496 			 */
2497 			VM_PAGE_CHECK(mem);
2498 #endif
2499 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2500 				break;
2501 			}
2502 		}
2503 		lck_spin_unlock(bucket_lock);
2504 	}
2505 
2506 #if DEBUG_VM_PAGE_LOOKUP
2507 	elapsed = mach_absolute_time() - start;
2508 
2509 	if (bucket_lock) {
2510 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2511 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2512 	} else {
2513 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2514 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2515 	}
2516 	if (mem != VM_PAGE_NULL) {
2517 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2518 	} else {
2519 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2520 	}
2521 #endif
2522 	if (mem != VM_PAGE_NULL) {
2523 		assert(VM_PAGE_OBJECT(mem) == object);
2524 
2525 		object->memq_hint = mem;
2526 	}
2527 	return mem;
2528 }
2529 
2530 
2531 /*
2532  *	vm_page_rename:
2533  *
2534  *	Move the given memory entry from its
2535  *	current object to the specified target object/offset.
2536  *
2537  *	The object must be locked.
2538  */
2539 void
2540 vm_page_rename(
2541 	vm_page_t               mem,
2542 	vm_object_t             new_object,
2543 	vm_object_offset_t      new_offset)
2544 {
2545 	boolean_t       internal_to_external, external_to_internal;
2546 	vm_tag_t        tag;
2547 	vm_object_t     m_object;
2548 
2549 	m_object = VM_PAGE_OBJECT(mem);
2550 
2551 	assert(m_object != new_object);
2552 	assert(m_object);
2553 
2554 	/*
2555 	 *	Changes to mem->vmp_object require the page lock because
2556 	 *	the pageout daemon uses that lock to get the object.
2557 	 */
2558 	vm_page_lockspin_queues();
2559 
2560 	internal_to_external = FALSE;
2561 	external_to_internal = FALSE;
2562 
2563 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2564 		/*
2565 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
2566 		 * if we first move the page to the active queue... it's going to end
2567 		 * up there anyway, and we don't do vm_page_rename's frequently enough
2568 		 * for this to matter.
2569 		 */
2570 		vm_page_queues_remove(mem, FALSE);
2571 		vm_page_activate(mem);
2572 	}
2573 	if (VM_PAGE_PAGEABLE(mem)) {
2574 		if (m_object->internal && !new_object->internal) {
2575 			internal_to_external = TRUE;
2576 		}
2577 		if (!m_object->internal && new_object->internal) {
2578 			external_to_internal = TRUE;
2579 		}
2580 	}
2581 
2582 	tag = m_object->wire_tag;
2583 	vm_page_remove(mem, TRUE);
2584 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2585 
2586 	if (internal_to_external) {
2587 		vm_page_pageable_internal_count--;
2588 		vm_page_pageable_external_count++;
2589 	} else if (external_to_internal) {
2590 		vm_page_pageable_external_count--;
2591 		vm_page_pageable_internal_count++;
2592 	}
2593 
2594 	vm_page_unlock_queues();
2595 }
2596 
2597 /*
2598  *	vm_page_init:
2599  *
2600  *	Initialize the fields in a new page.
2601  *	This takes a structure with random values and initializes it
2602  *	so that it can be given to vm_page_release or vm_page_insert.
2603  */
2604 void
2605 vm_page_init(
2606 	vm_page_t mem,
2607 	ppnum_t   phys_page,
2608 	boolean_t lopage)
2609 {
2610 	uint_t    i;
2611 	uintptr_t *p;
2612 
2613 	assert(phys_page);
2614 
2615 #if DEBUG
2616 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2617 		if (!(pmap_valid_page(phys_page))) {
2618 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2619 		}
2620 	}
2621 #endif /* DEBUG */
2622 
2623 	/*
2624 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2625 	 * try to use initial values which match 0. This minimizes the number of writes
2626 	 * needed for boot-time initialization.
2627 	 *
2628 	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2629 	 */
2630 	assert(VM_PAGE_NOT_ON_Q == 0);
2631 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2632 	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2633 		*p++ = 0;
2634 	}
2635 	mem->vmp_offset = (vm_object_offset_t)-1;
2636 	mem->vmp_busy = TRUE;
2637 	mem->vmp_lopage = lopage;
2638 
2639 	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2640 #if 0
2641 	/*
2642 	 * we're leaving this turned off for now... currently pages
2643 	 * come off the free list and are either immediately dirtied/referenced
2644 	 * due to zero-fill or COW faults, or are used to read or write files...
2645 	 * in the file I/O case, the UPL mechanism takes care of clearing
2646 	 * the state of the HW ref/mod bits in a somewhat fragile way.
2647 	 * Since we may change the way this works in the future (to toughen it up),
2648 	 * I'm leaving this as a reminder of where these bits could get cleared
2649 	 */
2650 
2651 	/*
2652 	 * make sure both the h/w referenced and modified bits are
2653 	 * clear at this point... we are especially dependent on
2654 	 * not finding a 'stale' h/w modified in a number of spots
2655 	 * once this page goes back into use
2656 	 */
2657 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2658 #endif
2659 }
2660 
2661 /*
2662  *	vm_page_grab_fictitious:
2663  *
2664  *	Remove a fictitious page from the free list.
2665  *	Returns VM_PAGE_NULL if there are no free pages.
2666  */
2667 
2668 static vm_page_t
2669 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2670 {
2671 	vm_page_t m;
2672 
2673 	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2674 	if (m) {
2675 		vm_page_init(m, phys_addr, FALSE);
2676 		m->vmp_fictitious = TRUE;
2677 	}
2678 	return m;
2679 }
2680 
2681 vm_page_t
2682 vm_page_grab_fictitious(boolean_t canwait)
2683 {
2684 	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2685 }
2686 
2687 int vm_guard_count;
2688 
2689 
2690 vm_page_t
2691 vm_page_grab_guard(boolean_t canwait)
2692 {
2693 	vm_page_t page;
2694 	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2695 	if (page) {
2696 		OSAddAtomic(1, &vm_guard_count);
2697 	}
2698 	return page;
2699 }
2700 
2701 
2702 /*
2703  *	vm_page_release_fictitious:
2704  *
2705  *	Release a fictitious page to the zone pool
2706  */
2707 void
2708 vm_page_release_fictitious(
2709 	vm_page_t m)
2710 {
2711 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2712 	assert(m->vmp_fictitious);
2713 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2714 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2715 	assert(!m->vmp_realtime);
2716 
2717 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2718 		OSAddAtomic(-1, &vm_guard_count);
2719 	}
2720 
2721 	zfree(vm_page_zone, m);
2722 }
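
/*
 * Illustrative usage (hypothetical caller, kept under #if 0): fictitious
 * pages are placeholders drawn from vm_page_zone rather than from the
 * physical free lists, so a grab must always be balanced by
 * vm_page_release_fictitious() once the placeholder is no longer needed.
 */
#if 0
static void
vm_page_fictitious_usage_example(void)
{
	vm_page_t m;

	m = vm_page_grab_fictitious(TRUE);      /* TRUE => may block (Z_WAITOK) */
	if (m != VM_PAGE_NULL) {
		/* ... use the placeholder page ... */
		vm_page_release_fictitious(m);
	}
}
#endif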
2723 
2724 /*
2725  *	vm_pool_low():
2726  *
2727  *	Return true if it is not likely that a non-vm_privileged thread
2728  *	can get memory without blocking.  Advisory only, since the
2729  *	situation may change under us.
2730  */
2731 bool
2732 vm_pool_low(void)
2733 {
2734 	/* No locking, at worst we will fib. */
2735 	return vm_page_free_count <= vm_page_free_reserved;
2736 }
2737 
2738 boolean_t vm_darkwake_mode = FALSE;
2739 
2740 /*
2741  * vm_update_darkwake_mode():
2742  *
2743  * Tells the VM that the system is in / out of darkwake.
2744  *
2745  * Today, the VM only lowers/raises the background queue target
2746  * so as to favor consuming more/less background pages when
2747  * darkwake is ON/OFF.
2748  *
2749  * We might need to do more things in the future.
2750  */
2751 
2752 void
2753 vm_update_darkwake_mode(boolean_t darkwake_mode)
2754 {
2755 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2756 #pragma unused(darkwake_mode)
2757 	assert(vm_darkwake_mode == FALSE);
2758 	/*
2759 	 * Darkwake mode isn't supported for AS macOS.
2760 	 */
2761 	return;
2762 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2763 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2764 
2765 	vm_page_lockspin_queues();
2766 
2767 	if (vm_darkwake_mode == darkwake_mode) {
2768 		/*
2769 		 * No change.
2770 		 */
2771 		vm_page_unlock_queues();
2772 		return;
2773 	}
2774 
2775 	vm_darkwake_mode = darkwake_mode;
2776 
2777 	if (vm_darkwake_mode == TRUE) {
2778 		/* save background target to restore later */
2779 		vm_page_background_target_snapshot = vm_page_background_target;
2780 
2781 		/* target is set to 0...no protection for background pages */
2782 		vm_page_background_target = 0;
2783 	} else if (vm_darkwake_mode == FALSE) {
2784 		if (vm_page_background_target_snapshot) {
2785 			vm_page_background_target = vm_page_background_target_snapshot;
2786 		}
2787 	}
2788 	vm_page_unlock_queues();
2789 #endif
2790 }
2791 
2792 void
2793 vm_page_update_special_state(vm_page_t mem)
2794 {
2795 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2796 		return;
2797 	}
2798 
2799 	int mode = mem->vmp_on_specialq;
2800 
2801 	switch (mode) {
2802 	case VM_PAGE_SPECIAL_Q_BG:
2803 	{
2804 		task_t  my_task = current_task_early();
2805 
2806 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2807 			return;
2808 		}
2809 
2810 		if (my_task) {
2811 			if (task_get_darkwake_mode(my_task)) {
2812 				return;
2813 			}
2814 		}
2815 
2816 		if (my_task) {
2817 			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2818 				return;
2819 			}
2820 		}
2821 		vm_page_lockspin_queues();
2822 
2823 		vm_page_background_promoted_count++;
2824 
2825 		vm_page_remove_from_specialq(mem);
2826 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2827 
2828 		vm_page_unlock_queues();
2829 		break;
2830 	}
2831 
2832 	case VM_PAGE_SPECIAL_Q_DONATE:
2833 	{
2834 		task_t  my_task = current_task_early();
2835 
2836 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2837 			return;
2838 		}
2839 
2840 		if (my_task->donates_own_pages == false) {
2841 			vm_page_lockspin_queues();
2842 
2843 			vm_page_remove_from_specialq(mem);
2844 			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2845 
2846 			vm_page_unlock_queues();
2847 		}
2848 		break;
2849 	}
2850 
2851 	default:
2852 	{
2853 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2854 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2855 		break;
2856 	}
2857 	}
2858 }
2859 
2860 
2861 void
2862 vm_page_assign_special_state(vm_page_t mem, int mode)
2863 {
2864 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2865 		return;
2866 	}
2867 
2868 	switch (mode) {
2869 	case VM_PAGE_SPECIAL_Q_BG:
2870 	{
2871 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2872 			return;
2873 		}
2874 
2875 		task_t  my_task = current_task_early();
2876 
2877 		if (my_task) {
2878 			if (task_get_darkwake_mode(my_task)) {
2879 				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2880 				return;
2881 			}
2882 		}
2883 
2884 		if (my_task) {
2885 			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2886 		}
2887 		break;
2888 	}
2889 
2890 	case VM_PAGE_SPECIAL_Q_DONATE:
2891 	{
2892 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2893 			return;
2894 		}
2895 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2896 		break;
2897 	}
2898 
2899 	default:
2900 		break;
2901 	}
2902 }
2903 
2904 
2905 void
2906 vm_page_remove_from_specialq(
2907 	vm_page_t       mem)
2908 {
2909 	vm_object_t     m_object;
2910 	unsigned short  mode;
2911 
2912 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2913 
2914 	mode = mem->vmp_on_specialq;
2915 
2916 	switch (mode) {
2917 	case VM_PAGE_SPECIAL_Q_BG:
2918 	{
2919 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2920 			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2921 
2922 			mem->vmp_specialq.next = 0;
2923 			mem->vmp_specialq.prev = 0;
2924 
2925 			vm_page_background_count--;
2926 
2927 			m_object = VM_PAGE_OBJECT(mem);
2928 
2929 			if (m_object->internal) {
2930 				vm_page_background_internal_count--;
2931 			} else {
2932 				vm_page_background_external_count--;
2933 			}
2934 		}
2935 		break;
2936 	}
2937 
2938 	case VM_PAGE_SPECIAL_Q_DONATE:
2939 	{
2940 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2941 			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2942 			mem->vmp_specialq.next = 0;
2943 			mem->vmp_specialq.prev = 0;
2944 			vm_page_donate_count--;
2945 			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2946 				assert(vm_page_donate_target == vm_page_donate_target_low);
2947 				vm_page_donate_target = vm_page_donate_target_high;
2948 				vm_page_donate_queue_ripe = false;
2949 			}
2950 		}
2951 
2952 		break;
2953 	}
2954 
2955 	default:
2956 	{
2957 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2958 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2959 		break;
2960 	}
2961 	}
2962 }
2963 
2964 
2965 void
2966 vm_page_add_to_specialq(
2967 	vm_page_t       mem,
2968 	boolean_t       first)
2969 {
2970 	vm_object_t     m_object;
2971 
2972 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2973 
2974 	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2975 		return;
2976 	}
2977 
2978 	int mode = mem->vmp_on_specialq;
2979 
2980 	switch (mode) {
2981 	case VM_PAGE_SPECIAL_Q_BG:
2982 	{
2983 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2984 			return;
2985 		}
2986 
2987 		m_object = VM_PAGE_OBJECT(mem);
2988 
2989 		if (vm_page_background_exclude_external && !m_object->internal) {
2990 			return;
2991 		}
2992 
2993 		if (first == TRUE) {
2994 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
2995 		} else {
2996 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
2997 		}
2998 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2999 
3000 		vm_page_background_count++;
3001 
3002 		if (m_object->internal) {
3003 			vm_page_background_internal_count++;
3004 		} else {
3005 			vm_page_background_external_count++;
3006 		}
3007 		break;
3008 	}
3009 
3010 	case VM_PAGE_SPECIAL_Q_DONATE:
3011 	{
3012 		if (first == TRUE) {
3013 			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3014 		} else {
3015 			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3016 		}
3017 		vm_page_donate_count++;
3018 		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3019 			assert(vm_page_donate_target == vm_page_donate_target_high);
3020 			vm_page_donate_target = vm_page_donate_target_low;
3021 			vm_page_donate_queue_ripe = true;
3022 		}
3023 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3024 		break;
3025 	}
3026 
3027 	default:
3028 		break;
3029 	}
3030 }
3031 
3032 /*
3033  * This can be switched to FALSE to help debug drivers
3034  * that are having problems with memory > 4G.
3035  */
3036 boolean_t       vm_himemory_mode = TRUE;
3037 
3038 /*
3039  * this interface exists to support hardware controllers
3040  * incapable of generating DMAs with more than 32 bits
3041  * of address on platforms with physical memory > 4G...
3042  */
3043 unsigned int    vm_lopages_allocated_q = 0;
3044 unsigned int    vm_lopages_allocated_cpm_success = 0;
3045 unsigned int    vm_lopages_allocated_cpm_failed = 0;
3046 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3047 
3048 vm_page_t
3049 vm_page_grablo(void)
3050 {
3051 	vm_page_t       mem;
3052 
3053 	if (vm_lopage_needed == FALSE) {
3054 		return vm_page_grab();
3055 	}
3056 
3057 	vm_free_page_lock_spin();
3058 
3059 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3060 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3061 		assert(vm_lopage_free_count);
3062 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3063 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3064 
3065 		vm_lopage_free_count--;
3066 		vm_lopages_allocated_q++;
3067 
3068 		if (vm_lopage_free_count < vm_lopage_lowater) {
3069 			vm_lopage_refill = TRUE;
3070 		}
3071 
3072 		vm_free_page_unlock();
3073 
3074 		if (current_task()->donates_own_pages) {
3075 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3076 		} else {
3077 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3078 		}
3079 	} else {
3080 		vm_free_page_unlock();
3081 
3082 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3083 			vm_free_page_lock_spin();
3084 			vm_lopages_allocated_cpm_failed++;
3085 			vm_free_page_unlock();
3086 
3087 			return VM_PAGE_NULL;
3088 		}
3089 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3090 
3091 		mem->vmp_busy = TRUE;
3092 
3093 		vm_page_lockspin_queues();
3094 
3095 		mem->vmp_gobbled = FALSE;
3096 		vm_page_gobble_count--;
3097 		vm_page_wire_count--;
3098 
3099 		vm_lopages_allocated_cpm_success++;
3100 		vm_page_unlock_queues();
3101 	}
3102 	assert(mem->vmp_busy);
3103 	assert(!mem->vmp_pmapped);
3104 	assert(!mem->vmp_wpmapped);
3105 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3106 
3107 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3108 
3109 	counter_inc(&vm_page_grab_count);
3110 	VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3111 
3112 	return mem;
3113 }
3114 
3115 /*
3116  *	vm_page_grab:
3117  *
3118  *	first try to grab a page from the per-cpu free list...
3119  *	this must be done while pre-emption is disabled... if
3120  *      a page is available, we're done...
3121  *	if no page is available, grab the vm_page_queue_free_lock
3122  *	and see if current number of free pages would allow us
3123  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3124  *	if there are pages available, disable preemption and
3125  *      recheck the state of the per-cpu free list... we could
3126  *	have been preempted and moved to a different cpu, or
3127  *      some other thread could have re-filled it... if still
3128  *	empty, figure out how many pages we can steal from the
3129  *	global free queue and move to the per-cpu queue...
3130  *	return 1 of these pages when done... only wakeup the
3131  *      pageout_scan thread if we moved pages from the global
3132  *	list... no need for the wakeup if we've satisfied the
3133  *	request from the per-cpu queue.
3134  */
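
/*
 * Typical caller pattern (illustrative sketch, kept under #if 0): callers
 * that must have a page retry until vm_page_grab() succeeds, blocking in
 * VM_PAGE_WAIT() whenever the free pool is exhausted.  The function name
 * below is hypothetical; only the loop shape is the point.
 */
#if 0
static vm_page_t
vm_page_grab_blocking_example(void)
{
	vm_page_t mem;

	while ((mem = vm_page_grab()) == VM_PAGE_NULL) {
		/* sleep until the pageout daemon frees some memory */
		VM_PAGE_WAIT();
	}
	return mem;
}
#endif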
3135 
3136 #if CONFIG_SECLUDED_MEMORY
3137 vm_page_t vm_page_grab_secluded(void);
3138 #endif /* CONFIG_SECLUDED_MEMORY */
3139 
3140 static inline void
3141 vm_page_grab_diags(void);
3142 
3143 vm_page_t
3144 vm_page_grab(void)
3145 {
3146 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3147 }
3148 
3149 #if HIBERNATION
3150 boolean_t       hibernate_rebuild_needed = FALSE;
3151 #endif /* HIBERNATION */
3152 
3153 vm_page_t
3154 vm_page_grab_options(
3155 	int grab_options)
3156 {
3157 	vm_page_t       mem;
3158 
3159 restart:
3160 	disable_preemption();
3161 
3162 	if ((mem = *PERCPU_GET(free_pages))) {
3163 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3164 
3165 #if HIBERNATION
3166 		if (hibernate_rebuild_needed) {
3167 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3168 		}
3169 #endif /* HIBERNATION */
3170 
3171 		vm_page_grab_diags();
3172 
3173 		vm_offset_t pcpu_base = current_percpu_base();
3174 		counter_inc_preemption_disabled(&vm_page_grab_count);
3175 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3176 		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3177 
3178 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3179 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3180 		enable_preemption();
3181 
3182 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3183 		assert(mem->vmp_tabled == FALSE);
3184 		assert(mem->vmp_object == 0);
3185 		assert(!mem->vmp_laundry);
3186 		ASSERT_PMAP_FREE(mem);
3187 		assert(mem->vmp_busy);
3188 		assert(!mem->vmp_pmapped);
3189 		assert(!mem->vmp_wpmapped);
3190 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3191 		assert(!mem->vmp_realtime);
3192 
3193 		task_t  cur_task = current_task_early();
3194 		if (cur_task && cur_task != kernel_task) {
3195 			if (cur_task->donates_own_pages) {
3196 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3197 			} else {
3198 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3199 			}
3200 		}
3201 		return mem;
3202 	}
3203 	enable_preemption();
3204 
3205 
3206 	/*
3207 	 *	Optionally produce warnings if the wire or gobble
3208 	 *	counts exceed some threshold.
3209 	 */
3210 #if VM_PAGE_WIRE_COUNT_WARNING
3211 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3212 		printf("mk: vm_page_grab(): high wired page count of %d\n",
3213 		    vm_page_wire_count);
3214 	}
3215 #endif
3216 #if VM_PAGE_GOBBLE_COUNT_WARNING
3217 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3218 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3219 		    vm_page_gobble_count);
3220 	}
3221 #endif
3222 
3223 	/*
3224 	 * If free count is low and we have delayed pages from early boot,
3225 	 * get one of those instead.
3226 	 */
3227 	if (__improbable(vm_delayed_count > 0 &&
3228 	    vm_page_free_count <= vm_page_free_target &&
3229 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3230 		assert(!mem->vmp_realtime);
3231 		return mem;
3232 	}
3233 
3234 	vm_free_page_lock_spin();
3235 
3236 	/*
3237 	 *	Only let privileged threads (involved in pageout)
3238 	 *	dip into the reserved pool.
3239 	 */
3240 	if ((vm_page_free_count < vm_page_free_reserved) &&
3241 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
3242 		/* no page for us in the free queue... */
3243 		vm_free_page_unlock();
3244 		mem = VM_PAGE_NULL;
3245 
3246 #if CONFIG_SECLUDED_MEMORY
3247 		/* ... but can we try and grab from the secluded queue? */
3248 		if (vm_page_secluded_count > 0 &&
3249 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3250 		    task_can_use_secluded_mem(current_task(), TRUE))) {
3251 			mem = vm_page_grab_secluded();
3252 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3253 				vm_page_secluded.grab_for_iokit++;
3254 				if (mem) {
3255 					vm_page_secluded.grab_for_iokit_success++;
3256 				}
3257 			}
3258 			if (mem) {
3259 				VM_CHECK_MEMORYSTATUS;
3260 
3261 				vm_page_grab_diags();
3262 				counter_inc(&vm_page_grab_count);
3263 				VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3264 
3265 				assert(!mem->vmp_realtime);
3266 				return mem;
3267 			}
3268 		}
3269 #else /* CONFIG_SECLUDED_MEMORY */
3270 		(void) grab_options;
3271 #endif /* CONFIG_SECLUDED_MEMORY */
3272 	} else {
3273 		vm_page_t        head;
3274 		vm_page_t        tail;
3275 		unsigned int     pages_to_steal;
3276 		unsigned int     color;
3277 		unsigned int clump_end, sub_count;
3278 
3279 		while (vm_page_free_count == 0) {
3280 			vm_free_page_unlock();
3281 			/*
3282 			 * must be a privileged thread to be
3283 			 * in this state since a non-privileged
3284 			 * thread would have bailed if we were
3285 			 * under the vm_page_free_reserved mark
3286 			 */
3287 			VM_PAGE_WAIT();
3288 			vm_free_page_lock_spin();
3289 		}
3290 
3291 		/*
3292 		 * Need to repopulate the per-CPU free list from the global free list.
3293 		 * Note we don't do any processing of pending retirement pages here.
3294 		 * That'll happen in the code above when the page comes off the per-CPU list.
3295 		 */
3296 		disable_preemption();
3297 
3298 		/*
3299 		 * If we got preempted the cache might now have pages.
3300 		 */
3301 		if ((mem = *PERCPU_GET(free_pages))) {
3302 			vm_free_page_unlock();
3303 			enable_preemption();
3304 			goto restart;
3305 		}
3306 
3307 		if (vm_page_free_count <= vm_page_free_reserved) {
3308 			pages_to_steal = 1;
3309 		} else {
3310 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3311 				pages_to_steal = vm_free_magazine_refill_limit;
3312 			} else {
3313 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3314 			}
3315 		}
3316 		color = *PERCPU_GET(start_color);
3317 		head = tail = NULL;
3318 
3319 		vm_page_free_count -= pages_to_steal;
3320 		clump_end = sub_count = 0;
3321 
3322 		while (pages_to_steal--) {
3323 			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3324 				color = (color + 1) & vm_color_mask;
3325 			}
3326 #if defined(__x86_64__)
3327 			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3328 			    mem, clump_end);
3329 #else
3330 			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3331 			    mem, vmp_pageq);
3332 #endif
3333 
3334 			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3335 
3336 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3337 
3338 #if defined(__arm64__)
3339 			color = (color + 1) & vm_color_mask;
3340 #else
3341 
3342 #if DEVELOPMENT || DEBUG
3343 
3344 			sub_count++;
3345 			if (clump_end) {
3346 				vm_clump_update_stats(sub_count);
3347 				sub_count = 0;
3348 				color = (color + 1) & vm_color_mask;
3349 			}
3350 #else
3351 			if (clump_end) {
3352 				color = (color + 1) & vm_color_mask;
3353 			}
3354 
3355 #endif /* if DEVELOPMENT || DEBUG */
3356 
3357 #endif  /* if defined(__arm64__) */
3358 
3359 			if (head == NULL) {
3360 				head = mem;
3361 			} else {
3362 				tail->vmp_snext = mem;
3363 			}
3364 			tail = mem;
3365 
3366 			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3367 			assert(mem->vmp_tabled == FALSE);
3368 			assert(mem->vmp_object == 0);
3369 			assert(!mem->vmp_laundry);
3370 
3371 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3372 
3373 			ASSERT_PMAP_FREE(mem);
3374 			assert(mem->vmp_busy);
3375 			assert(!mem->vmp_pmapped);
3376 			assert(!mem->vmp_wpmapped);
3377 			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3378 			assert(!mem->vmp_realtime);
3379 		}
3380 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3381 		vm_clump_update_stats(sub_count);
3382 #endif
3383 
3384 #if HIBERNATION
3385 		if (hibernate_rebuild_needed) {
3386 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3387 		}
3388 #endif /* HIBERNATION */
3389 		vm_offset_t pcpu_base = current_percpu_base();
3390 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3391 		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3392 
3393 		vm_free_page_unlock();
3394 		enable_preemption();
3395 		goto restart;
3396 	}
3397 
3398 	/*
3399 	 *	Decide if we should poke the pageout daemon.
3400 	 *	We do this if the free count is less than the low
3401 	 *	water mark. VM Pageout Scan will keep running till
3402 	 *	the free_count > free_target (& hence above free_min).
3403 	 *	This wakeup is to catch the possibility of the counts
3404 	 *	dropping between VM Pageout Scan parking and this check.
3405 	 *
3406 	 *	We don't have the counts locked ... if they change a little,
3407 	 *	it doesn't really matter.
3408 	 */
3409 	if (vm_page_free_count < vm_page_free_min) {
3410 		vm_free_page_lock();
3411 		if (vm_pageout_running == FALSE) {
3412 			vm_free_page_unlock();
3413 			thread_wakeup((event_t) &vm_page_free_wanted);
3414 		} else {
3415 			vm_free_page_unlock();
3416 		}
3417 	}
3418 
3419 	VM_CHECK_MEMORYSTATUS;
3420 
3421 	if (mem) {
3422 		assert(!mem->vmp_realtime);
3423 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
3424 
3425 		task_t  cur_task = current_task_early();
3426 		if (cur_task && cur_task != kernel_task) {
3427 			if (cur_task->donates_own_pages) {
3428 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3429 			} else {
3430 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3431 			}
3432 		}
3433 	}
3434 	return mem;
3435 }
3436 
3437 #if CONFIG_SECLUDED_MEMORY
3438 vm_page_t
3439 vm_page_grab_secluded(void)
3440 {
3441 	vm_page_t       mem;
3442 	vm_object_t     object;
3443 	int             refmod_state;
3444 
3445 	if (vm_page_secluded_count == 0) {
3446 		/* no secluded pages to grab... */
3447 		return VM_PAGE_NULL;
3448 	}
3449 
3450 	/* secluded queue is protected by the VM page queue lock */
3451 	vm_page_lock_queues();
3452 
3453 	if (vm_page_secluded_count == 0) {
3454 		/* no secluded pages to grab... */
3455 		vm_page_unlock_queues();
3456 		return VM_PAGE_NULL;
3457 	}
3458 
3459 #if 00
3460 	/* can we grab from the secluded queue? */
3461 	if (vm_page_secluded_count > vm_page_secluded_target ||
3462 	    (vm_page_secluded_count > 0 &&
3463 	    task_can_use_secluded_mem(current_task(), TRUE))) {
3464 		/* OK */
3465 	} else {
3466 		/* can't grab from secluded queue... */
3467 		vm_page_unlock_queues();
3468 		return VM_PAGE_NULL;
3469 	}
3470 #endif
3471 
3472 	/* we can grab a page from secluded queue! */
3473 	assert((vm_page_secluded_count_free +
3474 	    vm_page_secluded_count_inuse) ==
3475 	    vm_page_secluded_count);
3476 	if (current_task()->task_can_use_secluded_mem) {
3477 		assert(num_tasks_can_use_secluded_mem > 0);
3478 	}
3479 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3480 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3481 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3482 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3483 	vm_page_queues_remove(mem, TRUE);
3484 
3485 	object = VM_PAGE_OBJECT(mem);
3486 
3487 	assert(!mem->vmp_fictitious);
3488 	assert(!VM_PAGE_WIRED(mem));
3489 	if (object == VM_OBJECT_NULL) {
3490 		/* free for grab! */
3491 		vm_page_unlock_queues();
3492 		vm_page_secluded.grab_success_free++;
3493 
3494 		assert(mem->vmp_busy);
3495 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3496 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3497 		assert(mem->vmp_pageq.next == 0);
3498 		assert(mem->vmp_pageq.prev == 0);
3499 		assert(mem->vmp_listq.next == 0);
3500 		assert(mem->vmp_listq.prev == 0);
3501 		assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3502 		assert(mem->vmp_specialq.next == 0);
3503 		assert(mem->vmp_specialq.prev == 0);
3504 		return mem;
3505 	}
3506 
3507 	assert(!object->internal);
3508 //	vm_page_pageable_external_count--;
3509 
3510 	if (!vm_object_lock_try(object)) {
3511 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
3512 		vm_page_secluded.grab_failure_locked++;
3513 reactivate_secluded_page:
3514 		vm_page_activate(mem);
3515 		vm_page_unlock_queues();
3516 		return VM_PAGE_NULL;
3517 	}
3518 	if (mem->vmp_busy ||
3519 	    mem->vmp_cleaning ||
3520 	    mem->vmp_laundry) {
3521 		/* can't steal page in this state... */
3522 		vm_object_unlock(object);
3523 		vm_page_secluded.grab_failure_state++;
3524 		goto reactivate_secluded_page;
3525 	}
3526 	if (mem->vmp_realtime) {
3527 		/* don't steal pages used by realtime threads... */
3528 		vm_object_unlock(object);
3529 		vm_page_secluded.grab_failure_realtime++;
3530 		goto reactivate_secluded_page;
3531 	}
3532 
3533 	mem->vmp_busy = TRUE;
3534 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3535 	if (refmod_state & VM_MEM_REFERENCED) {
3536 		mem->vmp_reference = TRUE;
3537 	}
3538 	if (refmod_state & VM_MEM_MODIFIED) {
3539 		SET_PAGE_DIRTY(mem, FALSE);
3540 	}
3541 	if (mem->vmp_dirty || mem->vmp_precious) {
3542 		/* can't grab a dirty page; re-activate */
3543 //		printf("SECLUDED: dirty page %p\n", mem);
3544 		PAGE_WAKEUP_DONE(mem);
3545 		vm_page_secluded.grab_failure_dirty++;
3546 		vm_object_unlock(object);
3547 		goto reactivate_secluded_page;
3548 	}
3549 	if (mem->vmp_reference) {
3550 		/* it's been used but we do need to grab a page... */
3551 	}
3552 
3553 	vm_page_unlock_queues();
3554 
3555 
3556 	/* finish what vm_page_free() would have done... */
3557 	vm_page_free_prepare_object(mem, TRUE);
3558 	vm_object_unlock(object);
3559 	object = VM_OBJECT_NULL;
3560 	if (vm_page_free_verify) {
3561 		ASSERT_PMAP_FREE(mem);
3562 	}
3563 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3564 	vm_page_secluded.grab_success_other++;
3565 
3566 	assert(mem->vmp_busy);
3567 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3568 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3569 	assert(mem->vmp_pageq.next == 0);
3570 	assert(mem->vmp_pageq.prev == 0);
3571 	assert(mem->vmp_listq.next == 0);
3572 	assert(mem->vmp_listq.prev == 0);
3573 	assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3574 	assert(mem->vmp_specialq.next == 0);
3575 	assert(mem->vmp_specialq.prev == 0);
3576 
3577 	return mem;
3578 }
3579 
3580 uint64_t
3581 vm_page_secluded_drain(void)
3582 {
3583 	vm_page_t local_freeq;
3584 	int local_freed;
3585 	uint64_t num_reclaimed;
3586 	unsigned int saved_secluded_count, saved_secluded_target;
3587 
3588 	num_reclaimed = 0;
3589 	local_freeq = NULL;
3590 	local_freed = 0;
3591 
3592 	vm_page_lock_queues();
3593 
3594 	saved_secluded_count = vm_page_secluded_count;
3595 	saved_secluded_target = vm_page_secluded_target;
3596 	vm_page_secluded_target = 0;
3597 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3598 	while (vm_page_secluded_count) {
3599 		vm_page_t secluded_page;
3600 
3601 		assert((vm_page_secluded_count_free +
3602 		    vm_page_secluded_count_inuse) ==
3603 		    vm_page_secluded_count);
3604 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3605 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3606 
3607 		vm_page_queues_remove(secluded_page, FALSE);
3608 		assert(!secluded_page->vmp_fictitious);
3609 		assert(!VM_PAGE_WIRED(secluded_page));
3610 
3611 		if (secluded_page->vmp_object == 0) {
3612 			/* transfer to free queue */
3613 			assert(secluded_page->vmp_busy);
3614 			secluded_page->vmp_snext = local_freeq;
3615 			local_freeq = secluded_page;
3616 			local_freed += 1;
3617 		} else {
3618 			/* transfer to head of active queue */
3619 			vm_page_enqueue_active(secluded_page, FALSE);
3620 			secluded_page = VM_PAGE_NULL;
3621 		}
3622 		num_reclaimed++;
3623 	}
3624 	vm_page_secluded_target = saved_secluded_target;
3625 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3626 
3627 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3628 
3629 	vm_page_unlock_queues();
3630 
3631 	if (local_freed) {
3632 		vm_page_free_list(local_freeq, TRUE);
3633 		local_freeq = NULL;
3634 		local_freed = 0;
3635 	}
3636 
3637 	return num_reclaimed;
3638 }
3639 #endif /* CONFIG_SECLUDED_MEMORY */
3640 
3641 
3642 static inline void
3643 vm_page_grab_diags()
3644 {
3645 #if DEVELOPMENT || DEBUG
3646 	task_t task = current_task_early();
3647 	if (task == NULL) {
3648 		return;
3649 	}
3650 
3651 	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3652 #endif /* DEVELOPMENT || DEBUG */
3653 }
3654 
3655 /*
3656  *	vm_page_release:
3657  *
3658  *	Return a page to the free list.
3659  */
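/*
 *	The page must be busy, must not be on any page queue, and must no
 *	longer be associated with a VM object (see the asserts below).
 *	page_queues_locked indicates whether the caller already holds the
 *	VM page queues lock.
 */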
3660 
3661 void
3662 vm_page_release(
3663 	vm_page_t       mem,
3664 	boolean_t       page_queues_locked)
3665 {
3666 	unsigned int    color;
3667 	int     need_wakeup = 0;
3668 	int     need_priv_wakeup = 0;
3669 #if CONFIG_SECLUDED_MEMORY
3670 	int     need_secluded_wakeup = 0;
3671 #endif /* CONFIG_SECLUDED_MEMORY */
3672 	event_t wakeup_event = NULL;
3673 
3674 	if (page_queues_locked) {
3675 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3676 	} else {
3677 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3678 	}
3679 
3680 	assert(!mem->vmp_private && !mem->vmp_fictitious);
3681 	if (vm_page_free_verify) {
3682 		ASSERT_PMAP_FREE(mem);
3683 	}
3684 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
3685 
3686 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3687 
3688 	if (__improbable(mem->vmp_realtime)) {
3689 		if (!page_queues_locked) {
3690 			vm_page_lock_queues();
3691 		}
3692 		if (mem->vmp_realtime) {
3693 			mem->vmp_realtime = false;
3694 			vm_page_realtime_count--;
3695 		}
3696 		if (!page_queues_locked) {
3697 			vm_page_unlock_queues();
3698 		}
3699 	}
3700 
3701 	vm_free_page_lock_spin();
3702 
3703 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3704 	assert(mem->vmp_busy);
3705 	assert(!mem->vmp_laundry);
3706 	assert(mem->vmp_object == 0);
3707 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3708 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3709 	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3710 
3711 	/* Clear any specialQ hints before releasing the page to the free pool */
3712 	mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3713 
3714 	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3715 	    vm_lopage_free_count < vm_lopage_free_limit &&
3716 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3717 		/*
3718 		 * this exists to support hardware controllers
3719 		 * incapable of generating DMAs with more than 32 bits
3720 		 * of address on platforms with physical memory > 4G...
3721 		 */
3722 		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3723 		vm_lopage_free_count++;
3724 
3725 		if (vm_lopage_free_count >= vm_lopage_free_limit) {
3726 			vm_lopage_refill = FALSE;
3727 		}
3728 
3729 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3730 		mem->vmp_lopage = TRUE;
3731 #if CONFIG_SECLUDED_MEMORY
3732 	} else if (vm_page_free_count > vm_page_free_reserved &&
3733 	    vm_page_secluded_count < vm_page_secluded_target &&
3734 	    num_tasks_can_use_secluded_mem == 0) {
3735 		/*
3736 		 * XXX FBDP TODO: also avoid refilling secluded queue
3737 		 * when some IOKit objects are already grabbing from it...
3738 		 */
3739 		if (!page_queues_locked) {
3740 			if (!vm_page_trylock_queues()) {
3741 				/* take locks in right order */
3742 				vm_free_page_unlock();
3743 				vm_page_lock_queues();
3744 				vm_free_page_lock_spin();
3745 			}
3746 		}
3747 		mem->vmp_lopage = FALSE;
3748 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3749 		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3750 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3751 		vm_page_secluded_count++;
3752 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3753 		vm_page_secluded_count_free++;
3754 		if (!page_queues_locked) {
3755 			vm_page_unlock_queues();
3756 		}
3757 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3758 		if (vm_page_free_wanted_secluded > 0) {
3759 			vm_page_free_wanted_secluded--;
3760 			need_secluded_wakeup = 1;
3761 		}
3762 #endif /* CONFIG_SECLUDED_MEMORY */
3763 	} else {
3764 		mem->vmp_lopage = FALSE;
3765 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3766 
3767 		color = VM_PAGE_GET_COLOR(mem);
3768 #if defined(__x86_64__)
3769 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3770 #else
3771 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3772 #endif
3773 		vm_page_free_count++;
3774 		/*
3775 		 *	Check if we should wake up someone waiting for page.
3776 		 *	But don't bother waking them unless they can allocate.
3777 		 *
3778 		 *	We wake up only one thread, to prevent starvation.
3779 		 *	Because the scheduling system handles wait queues FIFO,
3780 		 *	if we wake up all waiting threads, one greedy thread
3781 		 *	can starve multiple well-behaved threads.  When the threads
3782 		 *	all wake up, the greedy thread runs first, grabs the page,
3783 		 *	and waits for another page.  It will be the first to run
3784 		 *	when the next page is freed.
3785 		 *
3786 		 *	However, there is a slight danger here.
3787 		 *	The thread we wake might not use the free page.
3788 		 *	Then the other threads could wait indefinitely
3789 		 *	while the page goes unused.  To forestall this,
3790 		 *	the pageout daemon will keep making free pages
3791 		 *	as long as vm_page_free_wanted is non-zero.
3792 		 */
3793 
3794 		assert(vm_page_free_count > 0);
3795 		if (vm_page_free_wanted_privileged > 0) {
3796 			vm_page_free_wanted_privileged--;
3797 			need_priv_wakeup = 1;
3798 #if CONFIG_SECLUDED_MEMORY
3799 		} else if (vm_page_free_wanted_secluded > 0 &&
3800 		    vm_page_free_count > vm_page_free_reserved) {
3801 			vm_page_free_wanted_secluded--;
3802 			need_secluded_wakeup = 1;
3803 #endif /* CONFIG_SECLUDED_MEMORY */
3804 		} else if (vm_page_free_wanted > 0 &&
3805 		    vm_page_free_count > vm_page_free_reserved) {
3806 			vm_page_free_wanted--;
3807 			need_wakeup = 1;
3808 		}
3809 	}
3810 	vm_pageout_vminfo.vm_page_pages_freed++;
3811 
3812 	vm_free_page_unlock();
3813 
3814 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3815 
3816 	if (need_priv_wakeup) {
3817 		wakeup_event = &vm_page_free_wanted_privileged;
3818 	}
3819 #if CONFIG_SECLUDED_MEMORY
3820 	else if (need_secluded_wakeup) {
3821 		wakeup_event = &vm_page_free_wanted_secluded;
3822 	}
3823 #endif /* CONFIG_SECLUDED_MEMORY */
3824 	else if (need_wakeup) {
3825 		wakeup_event = &vm_page_free_count;
3826 	}
3827 
3828 	if (wakeup_event) {
3829 		if (vps_dynamic_priority_enabled) {
3830 			wakeup_one_with_inheritor((event_t) wakeup_event,
3831 			    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
3832 			    NULL);
3833 		} else {
3834 			thread_wakeup_one((event_t) wakeup_event);
3835 		}
3836 	}
3837 
3838 	VM_CHECK_MEMORYSTATUS;
3839 }
3840 
3841 /*
3842  * This version of vm_page_release() is used only at startup
3843  * when we are single-threaded and pages are being released
3844  * for the first time. Hence, no locking or unnecessary checks are made.
3845  * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
3846  */
3847 void
3848 vm_page_release_startup(
3849 	vm_page_t       mem)
3850 {
3851 	vm_page_queue_t queue_free;
3852 
3853 	if (vm_lopage_free_count < vm_lopage_free_limit &&
3854 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3855 		mem->vmp_lopage = TRUE;
3856 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3857 		vm_lopage_free_count++;
3858 		queue_free = &vm_lopage_queue_free;
3859 #if CONFIG_SECLUDED_MEMORY
3860 	} else if (vm_page_secluded_count < vm_page_secluded_target) {
3861 		mem->vmp_lopage = FALSE;
3862 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3863 		vm_page_secluded_count++;
3864 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3865 		vm_page_secluded_count_free++;
3866 		queue_free = &vm_page_queue_secluded;
3867 #endif /* CONFIG_SECLUDED_MEMORY */
3868 	} else {
3869 		mem->vmp_lopage = FALSE;
3870 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3871 		vm_page_free_count++;
3872 		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3873 	}
3874 	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3875 #if defined(__x86_64__)
3876 		vm_page_queue_enter_clump(queue_free, mem);
3877 #else
3878 		vm_page_queue_enter(queue_free, mem, vmp_pageq);
3879 #endif
3880 	} else {
3881 		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3882 	}
3883 }
3884 
3885 /*
3886  *	vm_page_wait:
3887  *
3888  *	Wait for a page to become available.
3889  *	If there are plenty of free pages, then we don't sleep.
3890  *
3891  *	Returns:
3892  *		TRUE:  There may be another page, try again
3893  *		FALSE: We were interrupted out of our wait, don't try again
3894  */
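/*
 *	Illustrative sketch of a typical caller (hypothetical, for clarity
 *	only): loop on vm_page_grab() and block here until a page becomes
 *	available; a FALSE return means the wait was interrupted and the
 *	caller should give up.
 *
 *		vm_page_t m;
 *
 *		while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *			if (!vm_page_wait(THREAD_INTERRUPTIBLE)) {
 *				return KERN_ABORTED;
 *			}
 *		}
 */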
3895 
3896 boolean_t
3897 vm_page_wait(
3898 	int     interruptible )
3899 {
3900 	/*
3901 	 *	We can't use vm_page_free_reserved to make this
3902 	 *	determination.  Consider: some thread might
3903 	 *	need to allocate two pages.  The first allocation
3904 	 *	succeeds, the second fails.  After the first page is freed,
3905 	 *	a call to vm_page_wait must really block.
3906 	 */
3907 	kern_return_t   wait_result;
3908 	int             need_wakeup = 0;
3909 	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3910 	event_t         wait_event = NULL;
3911 
3912 	vm_free_page_lock_spin();
3913 
3914 	if (is_privileged && vm_page_free_count) {
3915 		vm_free_page_unlock();
3916 		return TRUE;
3917 	}
3918 
3919 	if (vm_page_free_count >= vm_page_free_target) {
3920 		vm_free_page_unlock();
3921 		return TRUE;
3922 	}
3923 
3924 	if (is_privileged) {
3925 		if (vm_page_free_wanted_privileged++ == 0) {
3926 			need_wakeup = 1;
3927 		}
3928 		wait_event = (event_t)&vm_page_free_wanted_privileged;
3929 #if CONFIG_SECLUDED_MEMORY
3930 	} else if (secluded_for_apps &&
3931 	    task_can_use_secluded_mem(current_task(), FALSE)) {
3932 #if 00
3933 		/* XXX FBDP: need pageq lock for this... */
3934 		/* XXX FBDP: might wait even if pages available, */
3935 		/* XXX FBDP: hopefully not for too long... */
3936 		if (vm_page_secluded_count > 0) {
3937 			vm_free_page_unlock();
3938 			return TRUE;
3939 		}
3940 #endif
3941 		if (vm_page_free_wanted_secluded++ == 0) {
3942 			need_wakeup = 1;
3943 		}
3944 		wait_event = (event_t)&vm_page_free_wanted_secluded;
3945 #endif /* CONFIG_SECLUDED_MEMORY */
3946 	} else {
3947 		if (vm_page_free_wanted++ == 0) {
3948 			need_wakeup = 1;
3949 		}
3950 		wait_event = (event_t)&vm_page_free_count;
3951 	}
3952 
3953 	/*
3954 	 * We don't do a vm_pageout_scan wakeup if we already have
3955 	 * some waiters because vm_pageout_scan checks for waiters
3956 	 * before it returns and does so behind the vm_page_queue_free_lock,
3957 	 * which we own when we bump the waiter counts.
3958 	 */
3959 
3960 	if (vps_dynamic_priority_enabled) {
3961 		/*
3962 		 * We are waking up vm_pageout_scan here. If it needs
3963 		 * the vm_page_queue_free_lock before we unlock it
3964 		 * we'll end up just blocking and incur an extra
3965 		 * context switch. Could be a perf. issue.
3966 		 */
3967 
3968 		if (need_wakeup) {
3969 			thread_wakeup((event_t)&vm_page_free_wanted);
3970 		}
3971 
3972 		/*
3973 		 * LD: This event is going to get recorded every time because
3974 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
3975 		 * We just block in that routine.
3976 		 */
3977 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3978 		    vm_page_free_wanted_privileged,
3979 		    vm_page_free_wanted,
3980 #if CONFIG_SECLUDED_MEMORY
3981 		    vm_page_free_wanted_secluded,
3982 #else /* CONFIG_SECLUDED_MEMORY */
3983 		    0,
3984 #endif /* CONFIG_SECLUDED_MEMORY */
3985 		    0);
3986 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
3987 		    LCK_SLEEP_UNLOCK,
3988 		    wait_event,
3989 		    vm_pageout_scan_thread,
3990 		    interruptible,
3991 		    0);
3992 	} else {
3993 		wait_result = assert_wait(wait_event, interruptible);
3994 
3995 		vm_free_page_unlock();
3996 
3997 		if (need_wakeup) {
3998 			thread_wakeup((event_t)&vm_page_free_wanted);
3999 		}
4000 
4001 		if (wait_result == THREAD_WAITING) {
4002 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4003 			    vm_page_free_wanted_privileged,
4004 			    vm_page_free_wanted,
4005 #if CONFIG_SECLUDED_MEMORY
4006 			    vm_page_free_wanted_secluded,
4007 #else /* CONFIG_SECLUDED_MEMORY */
4008 			    0,
4009 #endif /* CONFIG_SECLUDED_MEMORY */
4010 			    0);
4011 			wait_result = thread_block(THREAD_CONTINUE_NULL);
4012 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4013 			    VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4014 		}
4015 	}
4016 
4017 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4018 }
4019 
4020 /*
4021  *	vm_page_alloc:
4022  *
4023  *	Allocate and return a memory cell associated
4024  *	with this VM object/offset pair.
4025  *
4026  *	Object must be locked.
4027  */
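/*
 *	Illustrative sketch of a caller (hypothetical, for clarity only):
 *	the object must be locked exclusively, and a VM_PAGE_NULL return
 *	just means no page was available right now.
 *
 *		vm_object_lock(object);
 *		m = vm_page_alloc(object, offset);
 *		if (m == VM_PAGE_NULL) {
 *			vm_object_unlock(object);
 *			VM_PAGE_WAIT();
 *			... retake the lock and retry ...
 *		}
 */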
4028 
4029 vm_page_t
4030 vm_page_alloc(
4031 	vm_object_t             object,
4032 	vm_object_offset_t      offset)
4033 {
4034 	vm_page_t       mem;
4035 	int             grab_options;
4036 
4037 	vm_object_lock_assert_exclusive(object);
4038 	grab_options = 0;
4039 #if CONFIG_SECLUDED_MEMORY
4040 	if (object->can_grab_secluded) {
4041 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4042 	}
4043 #endif /* CONFIG_SECLUDED_MEMORY */
4044 	mem = vm_page_grab_options(grab_options);
4045 	if (mem == VM_PAGE_NULL) {
4046 		return VM_PAGE_NULL;
4047 	}
4048 
4049 	vm_page_insert(mem, object, offset);
4050 
4051 	return mem;
4052 }
4053 
4054 /*
4055  *	vm_page_free_prepare:
4056  *
4057  *	Removes page from any queue it may be on
4058  *	and disassociates it from its VM object.
4059  *
4060  *	Object and page queues must be locked prior to entry.
4061  */
4062 static void
4063 vm_page_free_prepare(
4064 	vm_page_t       mem)
4065 {
4066 
4067 	vm_page_free_prepare_queues(mem);
4068 	vm_page_free_prepare_object(mem, TRUE);
4069 }
4070 
4071 
4072 void
4073 vm_page_free_prepare_queues(
4074 	vm_page_t       mem)
4075 {
4076 	vm_object_t     m_object;
4077 
4078 	VM_PAGE_CHECK(mem);
4079 
4080 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4081 	assert(!mem->vmp_cleaning);
4082 	m_object = VM_PAGE_OBJECT(mem);
4083 
4084 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4085 	if (m_object) {
4086 		vm_object_lock_assert_exclusive(m_object);
4087 	}
4088 	if (mem->vmp_laundry) {
4089 		/*
4090 		 * We may have to free a page while it's being laundered
4091 		 * if we lost its pager (due to a forced unmount, for example).
4092 		 * We need to call vm_pageout_steal_laundry() before removing
4093 		 * the page from its VM object, so that we can remove it
4094 		 * from its pageout queue and adjust the laundry accounting
4095 		 */
4096 		vm_pageout_steal_laundry(mem, TRUE);
4097 	}
4098 
4099 	vm_page_queues_remove(mem, TRUE);
4100 
4101 	if (__improbable(mem->vmp_realtime)) {
4102 		mem->vmp_realtime = false;
4103 		vm_page_realtime_count--;
4104 	}
4105 
4106 	if (VM_PAGE_WIRED(mem)) {
4107 		assert(mem->vmp_wire_count > 0);
4108 
4109 		if (m_object) {
4110 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4111 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4112 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4113 
4114 			assert(m_object->resident_page_count >=
4115 			    m_object->wired_page_count);
4116 
4117 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4118 				OSAddAtomic(+1, &vm_page_purgeable_count);
4119 				assert(vm_page_purgeable_wired_count > 0);
4120 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4121 			}
4122 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4123 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4124 			    m_object->vo_owner != TASK_NULL) {
4125 				task_t          owner;
4126 				int             ledger_idx_volatile;
4127 				int             ledger_idx_nonvolatile;
4128 				int             ledger_idx_volatile_compressed;
4129 				int             ledger_idx_nonvolatile_compressed;
4130 				boolean_t       do_footprint;
4131 
4132 				owner = VM_OBJECT_OWNER(m_object);
4133 				vm_object_ledger_tag_ledgers(
4134 					m_object,
4135 					&ledger_idx_volatile,
4136 					&ledger_idx_nonvolatile,
4137 					&ledger_idx_volatile_compressed,
4138 					&ledger_idx_nonvolatile_compressed,
4139 					&do_footprint);
4140 				/*
4141 				 * While wired, this page was accounted
4142 				 * as "non-volatile" but it should now
4143 				 * be accounted as "volatile".
4144 				 */
4145 				/* one less "non-volatile"... */
4146 				ledger_debit(owner->ledger,
4147 				    ledger_idx_nonvolatile,
4148 				    PAGE_SIZE);
4149 				if (do_footprint) {
4150 					/* ... and "phys_footprint" */
4151 					ledger_debit(owner->ledger,
4152 					    task_ledgers.phys_footprint,
4153 					    PAGE_SIZE);
4154 				}
4155 				/* one more "volatile" */
4156 				ledger_credit(owner->ledger,
4157 				    ledger_idx_volatile,
4158 				    PAGE_SIZE);
4159 			}
4160 		}
4161 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4162 			vm_page_wire_count--;
4163 		}
4164 
4165 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4166 		mem->vmp_wire_count = 0;
4167 		assert(!mem->vmp_gobbled);
4168 	} else if (mem->vmp_gobbled) {
4169 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4170 			vm_page_wire_count--;
4171 		}
4172 		vm_page_gobble_count--;
4173 	}
4174 }
4175 
4176 
4177 void
4178 vm_page_free_prepare_object(
4179 	vm_page_t       mem,
4180 	boolean_t       remove_from_hash)
4181 {
4182 	assert(!mem->vmp_realtime);
4183 	if (mem->vmp_tabled) {
4184 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4185 	}
4186 	PAGE_WAKEUP(mem);               /* clears wanted */
4187 
4188 	if (mem->vmp_private) {
4189 		mem->vmp_private = FALSE;
4190 		mem->vmp_fictitious = TRUE;
4191 		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4192 	}
4193 	if (!mem->vmp_fictitious) {
4194 		assert(mem->vmp_pageq.next == 0);
4195 		assert(mem->vmp_pageq.prev == 0);
4196 		assert(mem->vmp_listq.next == 0);
4197 		assert(mem->vmp_listq.prev == 0);
4198 		assert(mem->vmp_specialq.next == 0);
4199 		assert(mem->vmp_specialq.prev == 0);
4200 		assert(mem->vmp_next_m == 0);
4201 		ASSERT_PMAP_FREE(mem);
4202 		{
4203 			vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4204 		}
4205 	}
4206 }
4207 
4208 
4209 /*
4210  *	vm_page_free:
4211  *
4212  *	Returns the given page to the free list,
4213  *	disassociating it from any VM object.
4214  *
4215  *	Object and page queues must be locked prior to entry.
4216  */
4217 void
4218 vm_page_free(
4219 	vm_page_t       mem)
4220 {
4221 	vm_page_free_prepare(mem);
4222 
4223 	if (mem->vmp_fictitious) {
4224 		vm_page_release_fictitious(mem);
4225 	} else {
4226 		vm_page_release(mem, TRUE);  /* page queues are locked */
4227 	}
4228 }
4229 
4230 
4231 void
4232 vm_page_free_unlocked(
4233 	vm_page_t       mem,
4234 	boolean_t       remove_from_hash)
4235 {
4236 	vm_page_lockspin_queues();
4237 	vm_page_free_prepare_queues(mem);
4238 	vm_page_unlock_queues();
4239 
4240 	vm_page_free_prepare_object(mem, remove_from_hash);
4241 
4242 	if (mem->vmp_fictitious) {
4243 		vm_page_release_fictitious(mem);
4244 	} else {
4245 		vm_page_release(mem, FALSE); /* page queues are not locked */
4246 	}
4247 }
4248 
4249 
4250 /*
4251  * Free a list of pages.  The list can be up to several hundred pages,
4252  * as blocked up by vm_pageout_scan().
4253  * The big win is not having to take the free list lock once
4254  * per page.
4255  *
4256  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4257  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4258  */
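/*
 * Illustrative sketch (hypothetical, for clarity only): callers build the
 * list by pushing pages onto a local queue linked through vmp_snext, then
 * hand the head to this routine, e.g.
 *
 *	vm_page_t local_freeq = VM_PAGE_NULL;
 *
 *	mem->vmp_snext = local_freeq;
 *	local_freeq = mem;
 *	...
 *	vm_page_free_list(local_freeq, TRUE);
 */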
4259 void
4260 vm_page_free_list(
4261 	vm_page_t       freeq,
4262 	boolean_t       prepare_object)
4263 {
4264 	vm_page_t       mem;
4265 	vm_page_t       nxt;
4266 	vm_page_t       local_freeq;
4267 	int             pg_count;
4268 
4269 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4270 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4271 
4272 	while (freeq) {
4273 		pg_count = 0;
4274 		local_freeq = VM_PAGE_NULL;
4275 		mem = freeq;
4276 
4277 		/*
4278 		 * break up the processing into smaller chunks so
4279 		 * that we can 'pipeline' the pages onto the
4280 		 * free list w/o introducing too much
4281 		 * contention on the global free queue lock
4282 		 */
4283 		while (mem && pg_count < 64) {
4284 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4285 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4286 			assert(mem->vmp_specialq.next == 0 &&
4287 			    mem->vmp_specialq.prev == 0);
4288 			/*
4289 			 * &&
4290 			 *   mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4291 			 */
4292 			nxt = mem->vmp_snext;
4293 			mem->vmp_snext = NULL;
4294 			assert(mem->vmp_pageq.prev == 0);
4295 
4296 			if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) {
4297 				ASSERT_PMAP_FREE(mem);
4298 			}
4299 
4300 			if (__improbable(mem->vmp_realtime)) {
4301 				vm_page_lock_queues();
4302 				if (mem->vmp_realtime) {
4303 					mem->vmp_realtime = false;
4304 					vm_page_realtime_count--;
4305 				}
4306 				vm_page_unlock_queues();
4307 			}
4308 
4309 			if (prepare_object == TRUE) {
4310 				vm_page_free_prepare_object(mem, TRUE);
4311 			}
4312 
4313 			if (!mem->vmp_fictitious) {
4314 				assert(mem->vmp_busy);
4315 
4316 				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4317 				    vm_lopage_free_count < vm_lopage_free_limit &&
4318 				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4319 					vm_page_release(mem, FALSE); /* page queues are not locked */
4320 #if CONFIG_SECLUDED_MEMORY
4321 				} else if (vm_page_secluded_count < vm_page_secluded_target &&
4322 				    num_tasks_can_use_secluded_mem == 0) {
4323 					vm_page_release(mem,
4324 					    FALSE);             /* page queues are not locked */
4325 #endif /* CONFIG_SECLUDED_MEMORY */
4326 				} else {
4327 					/*
4328 					 * IMPORTANT: we can't set the page "free" here
4329 					 * because that would make the page eligible for
4330 					 * a physically-contiguous allocation (see
4331 					 * vm_page_find_contiguous()) right away (we don't
4332 					 * hold the vm_page_queue_free lock).  That would
4333 					 * cause trouble because the page is not actually
4334 					 * in the free queue yet...
4335 					 */
4336 					mem->vmp_snext = local_freeq;
4337 					local_freeq = mem;
4338 					pg_count++;
4339 
4340 					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4341 				}
4342 			} else {
4343 				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4344 				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4345 				vm_page_release_fictitious(mem);
4346 			}
4347 			mem = nxt;
4348 		}
4349 		freeq = mem;
4350 
4351 		if ((mem = local_freeq)) {
4352 			unsigned int    avail_free_count;
4353 			unsigned int    need_wakeup = 0;
4354 			unsigned int    need_priv_wakeup = 0;
4355 #if CONFIG_SECLUDED_MEMORY
4356 			unsigned int    need_wakeup_secluded = 0;
4357 #endif /* CONFIG_SECLUDED_MEMORY */
4358 			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4359 			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4360 
4361 			vm_free_page_lock_spin();
4362 
4363 			while (mem) {
4364 				int     color;
4365 
4366 				nxt = mem->vmp_snext;
4367 
4368 				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4369 				assert(mem->vmp_busy);
4370 				assert(!mem->vmp_realtime);
4371 				mem->vmp_lopage = FALSE;
4372 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4373 
4374 				color = VM_PAGE_GET_COLOR(mem);
4375 #if defined(__x86_64__)
4376 				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4377 #else
4378 				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4379 				    mem, vmp_pageq);
4380 #endif
4381 				mem = nxt;
4382 			}
4383 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4384 			vm_page_free_count += pg_count;
4385 			avail_free_count = vm_page_free_count;
4386 
4387 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4388 
4389 			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4390 				if (avail_free_count < vm_page_free_wanted_privileged) {
4391 					need_priv_wakeup = avail_free_count;
4392 					vm_page_free_wanted_privileged -= avail_free_count;
4393 					avail_free_count = 0;
4394 				} else {
4395 					need_priv_wakeup = vm_page_free_wanted_privileged;
4396 					avail_free_count -= vm_page_free_wanted_privileged;
4397 					vm_page_free_wanted_privileged = 0;
4398 				}
4399 			}
4400 #if CONFIG_SECLUDED_MEMORY
4401 			if (vm_page_free_wanted_secluded > 0 &&
4402 			    avail_free_count > vm_page_free_reserved) {
4403 				unsigned int available_pages;
4404 				available_pages = (avail_free_count -
4405 				    vm_page_free_reserved);
4406 				if (available_pages <
4407 				    vm_page_free_wanted_secluded) {
4408 					need_wakeup_secluded = available_pages;
4409 					vm_page_free_wanted_secluded -=
4410 					    available_pages;
4411 					avail_free_count -= available_pages;
4412 				} else {
4413 					need_wakeup_secluded =
4414 					    vm_page_free_wanted_secluded;
4415 					avail_free_count -=
4416 					    vm_page_free_wanted_secluded;
4417 					vm_page_free_wanted_secluded = 0;
4418 				}
4419 			}
4420 #endif /* CONFIG_SECLUDED_MEMORY */
4421 			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4422 				unsigned int  available_pages;
4423 
4424 				available_pages = avail_free_count - vm_page_free_reserved;
4425 
4426 				if (available_pages >= vm_page_free_wanted) {
4427 					need_wakeup = vm_page_free_wanted;
4428 					vm_page_free_wanted = 0;
4429 				} else {
4430 					need_wakeup = available_pages;
4431 					vm_page_free_wanted -= available_pages;
4432 				}
4433 			}
4434 			vm_free_page_unlock();
4435 
4436 			priv_wakeup_event = NULL;
4437 			secluded_wakeup_event = NULL;
4438 			normal_wakeup_event = NULL;
4439 
4440 			priv_wakeup_all = FALSE;
4441 			secluded_wakeup_all = FALSE;
4442 			normal_wakeup_all = FALSE;
4443 
4444 
4445 			if (need_priv_wakeup != 0) {
4446 				/*
4447 				 * There shouldn't be that many VM-privileged threads,
4448 				 * so let's wake them all up, even if we don't quite
4449 				 * have enough pages to satisfy them all.
4450 				 */
4451 				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4452 				priv_wakeup_all = TRUE;
4453 			}
4454 #if CONFIG_SECLUDED_MEMORY
4455 			if (need_wakeup_secluded != 0 &&
4456 			    vm_page_free_wanted_secluded == 0) {
4457 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4458 				secluded_wakeup_all = TRUE;
4459 				need_wakeup_secluded = 0;
4460 			} else {
4461 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4462 			}
4463 #endif /* CONFIG_SECLUDED_MEMORY */
4464 			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4465 				/*
4466 				 * We don't expect to have any more waiters
4467 				 * after this, so let's wake them all up at
4468 				 * once.
4469 				 */
4470 				normal_wakeup_event = (event_t) &vm_page_free_count;
4471 				normal_wakeup_all = TRUE;
4472 				need_wakeup = 0;
4473 			} else {
4474 				normal_wakeup_event = (event_t) &vm_page_free_count;
4475 			}
4476 
4477 			if (priv_wakeup_event ||
4478 #if CONFIG_SECLUDED_MEMORY
4479 			    secluded_wakeup_event ||
4480 #endif /* CONFIG_SECLUDED_MEMORY */
4481 			    normal_wakeup_event) {
4482 				if (vps_dynamic_priority_enabled) {
4483 					if (priv_wakeup_all == TRUE) {
4484 						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4485 					}
4486 
4487 #if CONFIG_SECLUDED_MEMORY
4488 					if (secluded_wakeup_all == TRUE) {
4489 						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4490 					}
4491 
4492 					while (need_wakeup_secluded-- != 0) {
4493 						/*
4494 						 * Wake up one waiter per page we just released.
4495 						 */
4496 						wakeup_one_with_inheritor(secluded_wakeup_event,
4497 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
4498 					}
4499 #endif /* CONFIG_SECLUDED_MEMORY */
4500 
4501 					if (normal_wakeup_all == TRUE) {
4502 						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4503 					}
4504 
4505 					while (need_wakeup-- != 0) {
4506 						/*
4507 						 * Wake up one waiter per page we just released.
4508 						 */
4509 						wakeup_one_with_inheritor(normal_wakeup_event,
4510 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
4511 						    NULL);
4512 					}
4513 				} else {
4514 					/*
4515 					 * Non-priority-aware wakeups.
4516 					 */
4517 
4518 					if (priv_wakeup_all == TRUE) {
4519 						thread_wakeup(priv_wakeup_event);
4520 					}
4521 
4522 #if CONFIG_SECLUDED_MEMORY
4523 					if (secluded_wakeup_all == TRUE) {
4524 						thread_wakeup(secluded_wakeup_event);
4525 					}
4526 
4527 					while (need_wakeup_secluded-- != 0) {
4528 						/*
4529 						 * Wake up one waiter per page we just released.
4530 						 */
4531 						thread_wakeup_one(secluded_wakeup_event);
4532 					}
4533 
4534 #endif /* CONFIG_SECLUDED_MEMORY */
4535 					if (normal_wakeup_all == TRUE) {
4536 						thread_wakeup(normal_wakeup_event);
4537 					}
4538 
4539 					while (need_wakeup-- != 0) {
4540 						/*
4541 						 * Wake up one waiter per page we just released.
4542 						 */
4543 						thread_wakeup_one(normal_wakeup_event);
4544 					}
4545 				}
4546 			}
4547 
4548 			VM_CHECK_MEMORYSTATUS;
4549 		}
4550 	}
4551 }
4552 
4553 
4554 /*
4555  *	vm_page_wire:
4556  *
4557  *	Mark this page as wired down by yet
4558  *	another map, removing it from paging queues
4559  *	as necessary.
4560  *
4561  *	The page's object and the page queues must be locked.
4562  */
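/*
 *	Each call adds one wiring: the page stays off the paging queues
 *	until vm_page_unwire() drops vmp_wire_count back to zero.  The tag
 *	attributes the wired memory for accounting (e.g. a VM_KERN_MEMORY_*
 *	tag).
 */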
4563 
4564 
4565 void
4566 vm_page_wire(
4567 	vm_page_t mem,
4568 	vm_tag_t           tag,
4569 	boolean_t          check_memorystatus)
4570 {
4571 	vm_object_t     m_object;
4572 
4573 	m_object = VM_PAGE_OBJECT(mem);
4574 
4575 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
4576 
4577 	VM_PAGE_CHECK(mem);
4578 	if (m_object) {
4579 		vm_object_lock_assert_exclusive(m_object);
4580 	} else {
4581 		/*
4582 		 * In theory, the page should be in an object before it
4583 		 * gets wired, since we need to hold the object lock
4584 		 * to update some fields in the page structure.
4585 		 * However, some code (i386 pmap, for example) might want
4586 		 * to wire a page before it gets inserted into an object.
4587 		 * That's somewhat OK, as long as nobody else can get to
4588 		 * that page and update it at the same time.
4589 		 */
4590 	}
4591 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4592 	if (!VM_PAGE_WIRED(mem)) {
4593 		if (mem->vmp_laundry) {
4594 			vm_pageout_steal_laundry(mem, TRUE);
4595 		}
4596 
4597 		vm_page_queues_remove(mem, TRUE);
4598 
4599 		assert(mem->vmp_wire_count == 0);
4600 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
4601 
4602 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4603 		if (mem->vmp_unmodified_ro == true) {
4604 			/* Object and PageQ locks are held*/
4605 			mem->vmp_unmodified_ro = false;
4606 			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4607 			VM_COMPRESSOR_PAGER_STATE_CLR(VM_PAGE_OBJECT(mem), mem->vmp_offset);
4608 		}
4609 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4610 
4611 		if (m_object) {
4612 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4613 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4614 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4615 
4616 			assert(m_object->resident_page_count >=
4617 			    m_object->wired_page_count);
4618 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4619 				assert(vm_page_purgeable_count > 0);
4620 				OSAddAtomic(-1, &vm_page_purgeable_count);
4621 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
4622 			}
4623 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4624 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4625 			    m_object->vo_owner != TASK_NULL) {
4626 				task_t          owner;
4627 				int             ledger_idx_volatile;
4628 				int             ledger_idx_nonvolatile;
4629 				int             ledger_idx_volatile_compressed;
4630 				int             ledger_idx_nonvolatile_compressed;
4631 				boolean_t       do_footprint;
4632 
4633 				owner = VM_OBJECT_OWNER(m_object);
4634 				vm_object_ledger_tag_ledgers(
4635 					m_object,
4636 					&ledger_idx_volatile,
4637 					&ledger_idx_nonvolatile,
4638 					&ledger_idx_volatile_compressed,
4639 					&ledger_idx_nonvolatile_compressed,
4640 					&do_footprint);
4641 				/* less volatile bytes */
4642 				ledger_debit(owner->ledger,
4643 				    ledger_idx_volatile,
4644 				    PAGE_SIZE);
4645 				/* more not-quite-volatile bytes */
4646 				ledger_credit(owner->ledger,
4647 				    ledger_idx_nonvolatile,
4648 				    PAGE_SIZE);
4649 				if (do_footprint) {
4650 					/* more footprint */
4651 					ledger_credit(owner->ledger,
4652 					    task_ledgers.phys_footprint,
4653 					    PAGE_SIZE);
4654 				}
4655 			}
4656 			if (m_object->all_reusable) {
4657 				/*
4658 				 * Wired pages are not counted as "re-usable"
4659 				 * in "all_reusable" VM objects, so nothing
4660 				 * to do here.
4661 				 */
4662 			} else if (mem->vmp_reusable) {
4663 				/*
4664 				 * This page is not "re-usable" when it's
4665 				 * wired, so adjust its state and the
4666 				 * accounting.
4667 				 */
4668 				vm_object_reuse_pages(m_object,
4669 				    mem->vmp_offset,
4670 				    mem->vmp_offset + PAGE_SIZE_64,
4671 				    FALSE);
4672 			}
4673 		}
4674 		assert(!mem->vmp_reusable);
4675 
4676 		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4677 			vm_page_wire_count++;
4678 		}
4679 		if (mem->vmp_gobbled) {
4680 			vm_page_gobble_count--;
4681 		}
4682 		mem->vmp_gobbled = FALSE;
4683 
4684 		if (check_memorystatus == TRUE) {
4685 			VM_CHECK_MEMORYSTATUS;
4686 		}
4687 	}
4688 	assert(!mem->vmp_gobbled);
4689 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4690 	mem->vmp_wire_count++;
4691 	if (__improbable(mem->vmp_wire_count == 0)) {
4692 		panic("vm_page_wire(%p): wire_count overflow", mem);
4693 	}
4694 	VM_PAGE_CHECK(mem);
4695 }
4696 
4697 /*
4698  *	vm_page_unwire:
4699  *
4700  *	Release one wiring of this page, potentially
4701  *	enabling it to be paged again.
4702  *
4703  *	The page's object and the page queues must be locked.
4704  */
4705 void
4706 vm_page_unwire(
4707 	vm_page_t       mem,
4708 	boolean_t       queueit)
4709 {
4710 	vm_object_t     m_object;
4711 
4712 	m_object = VM_PAGE_OBJECT(mem);
4713 
4714 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
4715 
4716 	VM_PAGE_CHECK(mem);
4717 	assert(VM_PAGE_WIRED(mem));
4718 	assert(mem->vmp_wire_count > 0);
4719 	assert(!mem->vmp_gobbled);
4720 	assert(m_object != VM_OBJECT_NULL);
4721 	vm_object_lock_assert_exclusive(m_object);
4722 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4723 	if (--mem->vmp_wire_count == 0) {
4724 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4725 
4726 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4727 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4728 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4729 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4730 			vm_page_wire_count--;
4731 		}
4732 
4733 		assert(m_object->resident_page_count >=
4734 		    m_object->wired_page_count);
4735 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4736 			OSAddAtomic(+1, &vm_page_purgeable_count);
4737 			assert(vm_page_purgeable_wired_count > 0);
4738 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4739 		}
4740 		if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4741 		    m_object->purgable == VM_PURGABLE_EMPTY) &&
4742 		    m_object->vo_owner != TASK_NULL) {
4743 			task_t          owner;
4744 			int             ledger_idx_volatile;
4745 			int             ledger_idx_nonvolatile;
4746 			int             ledger_idx_volatile_compressed;
4747 			int             ledger_idx_nonvolatile_compressed;
4748 			boolean_t       do_footprint;
4749 
4750 			owner = VM_OBJECT_OWNER(m_object);
4751 			vm_object_ledger_tag_ledgers(
4752 				m_object,
4753 				&ledger_idx_volatile,
4754 				&ledger_idx_nonvolatile,
4755 				&ledger_idx_volatile_compressed,
4756 				&ledger_idx_nonvolatile_compressed,
4757 				&do_footprint);
4758 			/* more volatile bytes */
4759 			ledger_credit(owner->ledger,
4760 			    ledger_idx_volatile,
4761 			    PAGE_SIZE);
4762 			/* less not-quite-volatile bytes */
4763 			ledger_debit(owner->ledger,
4764 			    ledger_idx_nonvolatile,
4765 			    PAGE_SIZE);
4766 			if (do_footprint) {
4767 				/* less footprint */
4768 				ledger_debit(owner->ledger,
4769 				    task_ledgers.phys_footprint,
4770 				    PAGE_SIZE);
4771 			}
4772 		}
4773 		assert(!is_kernel_object(m_object));
4774 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4775 
4776 		if (queueit == TRUE) {
4777 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
4778 				vm_page_deactivate(mem);
4779 			} else {
4780 				vm_page_activate(mem);
4781 			}
4782 		}
4783 
4784 		VM_CHECK_MEMORYSTATUS;
4785 	}
4786 	VM_PAGE_CHECK(mem);
4787 }
4788 
4789 /*
4790  *	vm_page_deactivate:
4791  *
4792  *	Returns the given page to the inactive list,
4793  *	indicating that no physical maps have access
4794  *	to this page.  [Used by the physical mapping system.]
4795  *
4796  *	The page queues must be locked.
4797  */
4798 void
4799 vm_page_deactivate(
4800 	vm_page_t       m)
4801 {
4802 	vm_page_deactivate_internal(m, TRUE);
4803 }
4804 
4805 
4806 void
4807 vm_page_deactivate_internal(
4808 	vm_page_t       m,
4809 	boolean_t       clear_hw_reference)
4810 {
4811 	vm_object_t     m_object;
4812 
4813 	m_object = VM_PAGE_OBJECT(m);
4814 
4815 	VM_PAGE_CHECK(m);
4816 	assert(!is_kernel_object(m_object));
4817 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4818 
4819 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
4820 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4821 	/*
4822 	 *	This page is no longer very interesting.  If it was
4823 	 *	interesting (active or inactive/referenced), then we
4824 	 *	clear the reference bit and (re)enter it in the
4825 	 *	inactive queue.  Note wired pages should not have
4826 	 *	their reference bit cleared.
4827 	 */
4828 	assert( !(m->vmp_absent && !m->vmp_unusual));
4829 
4830 	if (m->vmp_gobbled) {           /* can this happen? */
4831 		assert( !VM_PAGE_WIRED(m));
4832 
4833 		if (!m->vmp_private && !m->vmp_fictitious) {
4834 			vm_page_wire_count--;
4835 		}
4836 		vm_page_gobble_count--;
4837 		m->vmp_gobbled = FALSE;
4838 	}
4839 	/*
4840 	 * if this page is currently on the pageout queue, we can't do the
4841 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4842 	 * and we can't remove it manually since we would need the object lock
4843 	 * (which is not required here) to decrement the activity_in_progress
4844 	 * reference which is held on the object while the page is in the pageout queue...
4845 	 * just let the normal laundry processing proceed
4846 	 */
4847 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4848 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4849 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4850 	    VM_PAGE_WIRED(m)) {
4851 		return;
4852 	}
4853 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
4854 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4855 	}
4856 
4857 	m->vmp_reference = FALSE;
4858 	m->vmp_no_cache = FALSE;
4859 
4860 	if (!VM_PAGE_INACTIVE(m)) {
4861 		vm_page_queues_remove(m, FALSE);
4862 
4863 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
4864 		    m->vmp_dirty && m_object->internal &&
4865 		    (m_object->purgable == VM_PURGABLE_DENY ||
4866 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4867 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
4868 			vm_page_check_pageable_safe(m);
4869 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4870 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4871 			vm_page_throttled_count++;
4872 		} else {
4873 			if (m_object->named && m_object->ref_count == 1) {
4874 				vm_page_speculate(m, FALSE);
4875 #if DEVELOPMENT || DEBUG
4876 				vm_page_speculative_recreated++;
4877 #endif
4878 			} else {
4879 				vm_page_enqueue_inactive(m, FALSE);
4880 			}
4881 		}
4882 	}
4883 }
4884 
4885 /*
4886  * vm_page_enqueue_cleaned
4887  *
4888  * Put the page on the cleaned queue, mark it cleaned, etc.
4889  * Being on the cleaned queue (and having m->clean_queue set)
4890  * does ** NOT ** guarantee that the page is clean!
4891  *
4892  * Call with the queues lock held.
4893  */
4894 
4895 void
4896 vm_page_enqueue_cleaned(vm_page_t m)
4897 {
4898 	vm_object_t     m_object;
4899 
4900 	m_object = VM_PAGE_OBJECT(m);
4901 
4902 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4903 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4904 	assert( !(m->vmp_absent && !m->vmp_unusual));
4905 
4906 	if (VM_PAGE_WIRED(m)) {
4907 		return;
4908 	}
4909 
4910 	if (m->vmp_gobbled) {
4911 		if (!m->vmp_private && !m->vmp_fictitious) {
4912 			vm_page_wire_count--;
4913 		}
4914 		vm_page_gobble_count--;
4915 		m->vmp_gobbled = FALSE;
4916 	}
4917 	/*
4918 	 * if this page is currently on the pageout queue, we can't do the
4919 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4920 	 * and we can't remove it manually since we would need the object lock
4921 	 * (which is not required here) to decrement the activity_in_progress
4922 	 * reference which is held on the object while the page is in the pageout queue...
4923 	 * just let the normal laundry processing proceed
4924 	 */
4925 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4926 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
4927 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4928 		return;
4929 	}
4930 	vm_page_queues_remove(m, FALSE);
4931 
4932 	vm_page_check_pageable_safe(m);
4933 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
4934 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
4935 	vm_page_cleaned_count++;
4936 
4937 	vm_page_inactive_count++;
4938 	if (m_object->internal) {
4939 		vm_page_pageable_internal_count++;
4940 	} else {
4941 		vm_page_pageable_external_count++;
4942 	}
4943 	vm_page_add_to_specialq(m, TRUE);
4944 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
4945 }
4946 
4947 /*
4948  *	vm_page_activate:
4949  *
4950  *	Put the specified page on the active list (if appropriate).
4951  *
4952  *	The page queues must be locked.
4953  */
4954 
4955 void
4956 vm_page_activate(
4957 	vm_page_t       m)
4958 {
4959 	vm_object_t     m_object;
4960 
4961 	m_object = VM_PAGE_OBJECT(m);
4962 
4963 	VM_PAGE_CHECK(m);
4964 #ifdef  FIXME_4778297
4965 	assert(!is_kernel_object(m_object));
4966 #endif
4967 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4968 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4969 	assert( !(m->vmp_absent && !m->vmp_unusual));
4970 
4971 	if (m->vmp_gobbled) {
4972 		assert( !VM_PAGE_WIRED(m));
4973 		if (!m->vmp_private && !m->vmp_fictitious) {
4974 			vm_page_wire_count--;
4975 		}
4976 		vm_page_gobble_count--;
4977 		m->vmp_gobbled = FALSE;
4978 	}
4979 	/*
4980 	 * if this page is currently on the pageout queue, we can't do the
4981 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4982 	 * and we can't remove it manually since we would need the object lock
4983 	 * (which is not required here) to decrement the activity_in_progress
4984 	 * reference which is held on the object while the page is in the pageout queue...
4985 	 * just let the normal laundry processing proceed
4986 	 */
4987 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4988 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4989 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4990 		return;
4991 	}
4992 
4993 #if DEBUG
4994 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
4995 		panic("vm_page_activate: already active");
4996 	}
4997 #endif
4998 
4999 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5000 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5001 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5002 	}
5003 
5004 	/*
5005 	 * A freshly activated page should be promoted in the donation queue.
5006 	 * So we remove it here while preserving its hint and we will enqueue
5007 	 * it again in vm_page_enqueue_active.
5008 	 */
5009 	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5010 
5011 	if (!VM_PAGE_WIRED(m)) {
5012 		vm_page_check_pageable_safe(m);
5013 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5014 		    m->vmp_dirty && m_object->internal &&
5015 		    (m_object->purgable == VM_PURGABLE_DENY ||
5016 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5017 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5018 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5019 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5020 			vm_page_throttled_count++;
5021 		} else {
5022 #if CONFIG_SECLUDED_MEMORY
5023 			if (secluded_for_filecache &&
5024 			    vm_page_secluded_target != 0 &&
5025 			    num_tasks_can_use_secluded_mem == 0 &&
5026 			    m_object->eligible_for_secluded &&
5027 			    !m->vmp_realtime) {
5028 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5029 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5030 				vm_page_secluded_count++;
5031 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5032 				vm_page_secluded_count_inuse++;
5033 				assert(!m_object->internal);
5034 //				vm_page_pageable_external_count++;
5035 			} else
5036 #endif /* CONFIG_SECLUDED_MEMORY */
5037 			vm_page_enqueue_active(m, FALSE);
5038 		}
5039 		m->vmp_reference = TRUE;
5040 		m->vmp_no_cache = FALSE;
5041 	}
5042 	VM_PAGE_CHECK(m);
5043 }
5044 
5045 
5046 /*
5047  *      vm_page_speculate:
5048  *
5049  *      Put the specified page on the speculative list (if appropriate).
5050  *
5051  *      The page queues must be locked.
5052  */
5053 void
5054 vm_page_speculate(
5055 	vm_page_t       m,
5056 	boolean_t       new)
5057 {
5058 	struct vm_speculative_age_q     *aq;
5059 	vm_object_t     m_object;
5060 
5061 	m_object = VM_PAGE_OBJECT(m);
5062 
5063 	VM_PAGE_CHECK(m);
5064 	vm_page_check_pageable_safe(m);
5065 
5066 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5067 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5068 	assert( !(m->vmp_absent && !m->vmp_unusual));
5069 	assert(m_object->internal == FALSE);
5070 
5071 	/*
5072 	 * if this page is currently on the pageout queue, we can't do the
5073 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5074 	 * and we can't remove it manually since we would need the object lock
5075 	 * (which is not required here) to decrement the activity_in_progress
5076 	 * reference which is held on the object while the page is in the pageout queue...
5077 	 * just let the normal laundry processing proceed
5078 	 */
5079 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5080 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5081 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5082 		return;
5083 	}
5084 
5085 	vm_page_queues_remove(m, FALSE);
5086 
5087 	if (!VM_PAGE_WIRED(m)) {
5088 		mach_timespec_t         ts;
5089 		clock_sec_t sec;
5090 		clock_nsec_t nsec;
5091 
5092 		clock_get_system_nanotime(&sec, &nsec);
5093 		ts.tv_sec = (unsigned int) sec;
5094 		ts.tv_nsec = nsec;
5095 
5096 		if (vm_page_speculative_count == 0) {
5097 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5098 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5099 
5100 			aq = &vm_page_queue_speculative[speculative_age_index];
5101 
5102 			/*
5103 			 * set the timer to begin a new group
5104 			 */
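			/*
			 * Worked example (informal): with a hypothetical
			 * vm_page_speculative_q_age_ms of 1500, tv_sec becomes 1 and
			 * tv_nsec becomes 500 * 1000 * NSEC_PER_USEC == 500000000,
			 * i.e. 1.5s is added to "now" to form this bin's expiry deadline.
			 */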
5105 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5106 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5107 
5108 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5109 		} else {
5110 			aq = &vm_page_queue_speculative[speculative_age_index];
5111 
5112 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5113 				speculative_age_index++;
5114 
5115 				if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5116 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5117 				}
5118 				if (speculative_age_index == speculative_steal_index) {
5119 					speculative_steal_index = speculative_age_index + 1;
5120 
5121 					if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5122 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5123 					}
5124 				}
5125 				aq = &vm_page_queue_speculative[speculative_age_index];
5126 
5127 				if (!vm_page_queue_empty(&aq->age_q)) {
5128 					vm_page_speculate_ageit(aq);
5129 				}
5130 
5131 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5132 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5133 
5134 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5135 			}
5136 		}
5137 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5138 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5139 		vm_page_speculative_count++;
5140 		vm_page_pageable_external_count++;
5141 
5142 		if (new == TRUE) {
5143 			vm_object_lock_assert_exclusive(m_object);
5144 
5145 			m_object->pages_created++;
5146 #if DEVELOPMENT || DEBUG
5147 			vm_page_speculative_created++;
5148 #endif
5149 		}
5150 	}
5151 	VM_PAGE_CHECK(m);
5152 }
5153 
5154 
5155 /*
5156  * move pages from the specified aging bin to
5157  * the speculative bin that pageout_scan claims from
5158  *
5159  *      The page queues must be locked.
5160  */
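/*
 *      A rough sketch of the pointer surgery below (the queue links are
 *      packed pointer values, hence the VM_PAGE_PACK_PTR/UNPACK_PTR dance):
 *      the whole aging bin 'aq' is spliced onto the tail of the aged bin 'sq'
 *      without visiting each page, and 'aq' is then re-initialized empty.
 */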
5161 void
5162 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5163 {
5164 	struct vm_speculative_age_q     *sq;
5165 	vm_page_t       t;
5166 
5167 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5168 
5169 	if (vm_page_queue_empty(&sq->age_q)) {
5170 		sq->age_q.next = aq->age_q.next;
5171 		sq->age_q.prev = aq->age_q.prev;
5172 
5173 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5174 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5175 
5176 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5177 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5178 	} else {
5179 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5180 		t->vmp_pageq.next = aq->age_q.next;
5181 
5182 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5183 		t->vmp_pageq.prev = sq->age_q.prev;
5184 
5185 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5186 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5187 
5188 		sq->age_q.prev = aq->age_q.prev;
5189 	}
5190 	vm_page_queue_init(&aq->age_q);
5191 }
5192 
5193 
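/*
 *      vm_page_lru:
 *
 *      Requeue the page at the end of the inactive queue (roughly: give it a
 *      fresh trip through the LRU rather than leaving it next in line for
 *      reclaim).  Wired pages, laundry pages and pages headed to the pageout
 *      queue are left alone.
 *
 *      The page queues must be locked.
 */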
5194 void
5195 vm_page_lru(
5196 	vm_page_t       m)
5197 {
5198 	VM_PAGE_CHECK(m);
5199 	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5200 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5201 
5202 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5203 
5204 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5205 		/*
5206 		 * we don't need to do all the other work that
5207 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5208 		 * bring along for the ride
5209 		 */
5210 		assert(!m->vmp_laundry);
5211 		assert(!m->vmp_private);
5212 
5213 		m->vmp_no_cache = FALSE;
5214 
5215 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5216 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5217 
5218 		return;
5219 	}
5220 	/*
5221 	 * if this page is currently on the pageout queue, we can't do the
5222 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5223 	 * and we can't remove it manually since we would need the object lock
5224 	 * (which is not required here) to decrement the activity_in_progress
5225 	 * reference which is held on the object while the page is in the pageout queue...
5226 	 * just let the normal laundry processing proceed
5227 	 */
5228 	if (m->vmp_laundry || m->vmp_private ||
5229 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5230 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5231 	    VM_PAGE_WIRED(m)) {
5232 		return;
5233 	}
5234 
5235 	m->vmp_no_cache = FALSE;
5236 
5237 	vm_page_queues_remove(m, FALSE);
5238 
5239 	vm_page_enqueue_inactive(m, FALSE);
5240 }
5241 
5242 
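/*
 *      vm_page_reactivate_all_throttled:
 *
 *      Move every page on the throttled queue back onto the head of the
 *      active queue (only meaningful once dynamic paging is enabled;
 *      otherwise this is a no-op).  Takes the page queues lock itself.
 */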
5243 void
5244 vm_page_reactivate_all_throttled(void)
5245 {
5246 	vm_page_t       first_throttled, last_throttled;
5247 	vm_page_t       first_active;
5248 	vm_page_t       m;
5249 	int             extra_active_count;
5250 	int             extra_internal_count, extra_external_count;
5251 	vm_object_t     m_object;
5252 
5253 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5254 		return;
5255 	}
5256 
5257 	extra_active_count = 0;
5258 	extra_internal_count = 0;
5259 	extra_external_count = 0;
5260 	vm_page_lock_queues();
5261 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5262 		/*
5263 		 * Switch "throttled" pages to "active".
5264 		 */
5265 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5266 			VM_PAGE_CHECK(m);
5267 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5268 
5269 			m_object = VM_PAGE_OBJECT(m);
5270 
5271 			extra_active_count++;
5272 			if (m_object->internal) {
5273 				extra_internal_count++;
5274 			} else {
5275 				extra_external_count++;
5276 			}
5277 
5278 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5279 			VM_PAGE_CHECK(m);
5280 			vm_page_add_to_specialq(m, FALSE);
5281 		}
5282 
5283 		/*
5284 		 * Transfer the entire throttled queue to the regular LRU page queue.
5285 		 * We insert it at the head of the active queue, so that these pages
5286 		 * get re-evaluated by the LRU algorithm first, since they've been
5287 		 * completely out of it until now.
5288 		 */
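		/*
		 * (The transfer below is done by editing the packed prev/next
		 * pointers directly, so the whole chain moves in O(1) rather
		 * than re-queueing each page.)
		 */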
5289 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5290 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5291 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5292 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5293 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5294 		} else {
5295 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5296 		}
5297 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5298 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5299 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5300 
5301 #if DEBUG
5302 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5303 #endif
5304 		vm_page_queue_init(&vm_page_queue_throttled);
5305 		/*
5306 		 * Adjust the global page counts.
5307 		 */
5308 		vm_page_active_count += extra_active_count;
5309 		vm_page_pageable_internal_count += extra_internal_count;
5310 		vm_page_pageable_external_count += extra_external_count;
5311 		vm_page_throttled_count = 0;
5312 	}
5313 	assert(vm_page_throttled_count == 0);
5314 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5315 	vm_page_unlock_queues();
5316 }
5317 
5318 
5319 /*
5320  * move pages from the indicated local queue to the global active queue
5321  * it's ok to fail if we're below the hard limit and force == FALSE
5322  * the nolocks == TRUE case is to allow this function to be run on
5323  * the hibernate path
5324  */
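/*
 * (When nolocks == TRUE neither the page-queue lock nor the per-cpu vpl lock
 * is taken below; the hibernate caller is presumed to guarantee that nothing
 * else can be manipulating these queues at that point.)
 */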
5325 
5326 void
5327 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5328 {
5329 	struct vpl      *lq;
5330 	vm_page_t       first_local, last_local;
5331 	vm_page_t       first_active;
5332 	vm_page_t       m;
5333 	uint32_t        count = 0;
5334 
5335 	if (vm_page_local_q == NULL) {
5336 		return;
5337 	}
5338 
5339 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5340 
5341 	if (nolocks == FALSE) {
5342 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5343 			if (!vm_page_trylockspin_queues()) {
5344 				return;
5345 			}
5346 		} else {
5347 			vm_page_lockspin_queues();
5348 		}
5349 
5350 		VPL_LOCK(&lq->vpl_lock);
5351 	}
5352 	if (lq->vpl_count) {
5353 		/*
5354 		 * Switch "local" pages to "active".
5355 		 */
5356 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5357 
5358 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5359 			VM_PAGE_CHECK(m);
5360 			vm_page_check_pageable_safe(m);
5361 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5362 			assert(!m->vmp_fictitious);
5363 
5364 			if (m->vmp_local_id != lid) {
5365 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5366 			}
5367 
5368 			m->vmp_local_id = 0;
5369 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5370 			VM_PAGE_CHECK(m);
5371 			vm_page_add_to_specialq(m, FALSE);
5372 			count++;
5373 		}
5374 		if (count != lq->vpl_count) {
5375 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5376 		}
5377 
5378 		/*
5379 		 * Transfer the entire local queue to the regular LRU page queue.
5380 		 */
5381 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5382 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5383 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5384 
5385 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5386 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5387 		} else {
5388 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5389 		}
5390 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5391 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5392 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5393 
5394 		vm_page_queue_init(&lq->vpl_queue);
5395 		/*
5396 		 * Adjust the global page counts.
5397 		 */
5398 		vm_page_active_count += lq->vpl_count;
5399 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5400 		vm_page_pageable_external_count += lq->vpl_external_count;
5401 		lq->vpl_count = 0;
5402 		lq->vpl_internal_count = 0;
5403 		lq->vpl_external_count = 0;
5404 	}
5405 	assert(vm_page_queue_empty(&lq->vpl_queue));
5406 
5407 	if (nolocks == FALSE) {
5408 		VPL_UNLOCK(&lq->vpl_lock);
5409 
5410 		vm_page_balance_inactive(count / 4);
5411 		vm_page_unlock_queues();
5412 	}
5413 }
5414 
5415 /*
5416  *	vm_page_part_zero_fill:
5417  *
5418  *	Zero-fill a part of the page.
5419  */
5420 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
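/*
 * (Since PMAP_ZERO_PART_PAGE_IMPLEMENTED is defined above, the pmap does the
 * partial zero directly; the #else path below, which builds the result in a
 * scratch page and copies it over, is currently compiled out.)
 */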
5421 void
5422 vm_page_part_zero_fill(
5423 	vm_page_t       m,
5424 	vm_offset_t     m_pa,
5425 	vm_size_t       len)
5426 {
5427 #if 0
5428 	/*
5429 	 * we don't hold the page queue lock
5430 	 * so this check isn't safe to make
5431 	 */
5432 	VM_PAGE_CHECK(m);
5433 #endif
5434 
5435 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5436 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5437 #else
5438 	vm_page_t       tmp;
5439 	while (1) {
5440 		tmp = vm_page_grab();
5441 		if (tmp == VM_PAGE_NULL) {
5442 			vm_page_wait(THREAD_UNINT);
5443 			continue;
5444 		}
5445 		break;
5446 	}
5447 	vm_page_zero_fill(tmp);
5448 	if (m_pa != 0) {
5449 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
5450 	}
5451 	if ((m_pa + len) < PAGE_SIZE) {
5452 		vm_page_part_copy(m, m_pa + len, tmp,
5453 		    m_pa + len, PAGE_SIZE - (m_pa + len));
5454 	}
5455 	vm_page_copy(tmp, m);
5456 	VM_PAGE_FREE(tmp);
5457 #endif
5458 }
5459 
5460 /*
5461  *	vm_page_zero_fill:
5462  *
5463  *	Zero-fill the specified page.
5464  */
5465 void
5466 vm_page_zero_fill(
5467 	vm_page_t       m)
5468 {
5469 #if 0
5470 	/*
5471 	 * we don't hold the page queue lock
5472 	 * so this check isn't safe to make
5473 	 */
5474 	VM_PAGE_CHECK(m);
5475 #endif
5476 
5477 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
5478 	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5479 }
5480 
5481 /*
5482  *	vm_page_part_copy:
5483  *
5484  *	copy part of one page to another
5485  */
5486 
5487 void
5488 vm_page_part_copy(
5489 	vm_page_t       src_m,
5490 	vm_offset_t     src_pa,
5491 	vm_page_t       dst_m,
5492 	vm_offset_t     dst_pa,
5493 	vm_size_t       len)
5494 {
5495 #if 0
5496 	/*
5497 	 * we don't hold the page queue lock
5498 	 * so this check isn't safe to make
5499 	 */
5500 	VM_PAGE_CHECK(src_m);
5501 	VM_PAGE_CHECK(dst_m);
5502 #endif
5503 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5504 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5505 }
5506 
5507 /*
5508  *	vm_page_copy:
5509  *
5510  *	Copy one page to another
5511  */
5512 
5513 int vm_page_copy_cs_validations = 0;
5514 int vm_page_copy_cs_tainted = 0;
5515 
5516 void
5517 vm_page_copy(
5518 	vm_page_t       src_m,
5519 	vm_page_t       dest_m)
5520 {
5521 	vm_object_t     src_m_object;
5522 
5523 	src_m_object = VM_PAGE_OBJECT(src_m);
5524 
5525 #if 0
5526 	/*
5527 	 * we don't hold the page queue lock
5528 	 * so this check isn't safe to make
5529 	 */
5530 	VM_PAGE_CHECK(src_m);
5531 	VM_PAGE_CHECK(dest_m);
5532 #endif
5533 	vm_object_lock_assert_held(src_m_object);
5534 
5535 	if (src_m_object != VM_OBJECT_NULL &&
5536 	    src_m_object->code_signed) {
5537 		/*
5538 		 * We're copying a page from a code-signed object.
5539 		 * Whoever ends up mapping the copy page might care about
5540 		 * the original page's integrity, so let's validate the
5541 		 * source page now.
5542 		 */
5543 		vm_page_copy_cs_validations++;
5544 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5545 #if DEVELOPMENT || DEBUG
5546 		DTRACE_VM4(codesigned_copy,
5547 		    vm_object_t, src_m_object,
5548 		    vm_object_offset_t, src_m->vmp_offset,
5549 		    int, src_m->vmp_cs_validated,
5550 		    int, src_m->vmp_cs_tainted);
5551 #endif /* DEVELOPMENT || DEBUG */
5552 	}
5553 
5554 	/*
5555 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
5556 	 * the cs_validated bit.
5557 	 */
5558 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5559 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5560 	if (dest_m->vmp_cs_tainted) {
5561 		vm_page_copy_cs_tainted++;
5562 	}
5563 	dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5564 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5565 }
5566 
5567 #if MACH_ASSERT
5568 static void
5569 _vm_page_print(
5570 	vm_page_t       p)
5571 {
5572 	printf("vm_page %p: \n", p);
5573 	printf("  pageq: next=%p prev=%p\n",
5574 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5575 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5576 	printf("  listq: next=%p prev=%p\n",
5577 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5578 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5579 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5580 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5581 	printf("  wire_count=%u\n", p->vmp_wire_count);
5582 	printf("  q_state=%u\n", p->vmp_q_state);
5583 
5584 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
5585 	    (p->vmp_laundry ? "" : "!"),
5586 	    (p->vmp_reference ? "" : "!"),
5587 	    (p->vmp_gobbled ? "" : "!"),
5588 	    (p->vmp_private ? "" : "!"));
5589 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5590 	    (p->vmp_busy ? "" : "!"),
5591 	    (p->vmp_wanted ? "" : "!"),
5592 	    (p->vmp_tabled ? "" : "!"),
5593 	    (p->vmp_fictitious ? "" : "!"),
5594 	    (p->vmp_pmapped ? "" : "!"),
5595 	    (p->vmp_wpmapped ? "" : "!"));
5596 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5597 	    (p->vmp_free_when_done ? "" : "!"),
5598 	    (p->vmp_absent ? "" : "!"),
5599 	    (VMP_ERROR_GET(p) ? "" : "!"),
5600 	    (p->vmp_dirty ? "" : "!"),
5601 	    (p->vmp_cleaning ? "" : "!"),
5602 	    (p->vmp_precious ? "" : "!"),
5603 	    (p->vmp_clustered ? "" : "!"));
5604 	printf("  %soverwriting, %srestart, %sunusual\n",
5605 	    (p->vmp_overwriting ? "" : "!"),
5606 	    (p->vmp_restart ? "" : "!"),
5607 	    (p->vmp_unusual ? "" : "!"));
5608 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5609 	    p->vmp_cs_validated,
5610 	    p->vmp_cs_tainted,
5611 	    p->vmp_cs_nx,
5612 	    (p->vmp_no_cache ? "" : "!"));
5613 
5614 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5615 }
5616 
5617 /*
5618  *	Check that the list of pages is ordered by
5619  *	ascending physical address and has no holes.
5620  */
5621 static int
5622 vm_page_verify_contiguous(
5623 	vm_page_t       pages,
5624 	unsigned int    npages)
5625 {
5626 	vm_page_t               m;
5627 	unsigned int            page_count;
5628 	vm_offset_t             prev_addr;
5629 
5630 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5631 	page_count = 1;
5632 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5633 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5634 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5635 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5636 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5637 			panic("vm_page_verify_contiguous:  not contiguous!");
5638 		}
5639 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5640 		++page_count;
5641 	}
5642 	if (page_count != npages) {
5643 		printf("pages %p actual count 0x%x but requested 0x%x\n",
5644 		    pages, page_count, npages);
5645 		panic("vm_page_verify_contiguous:  count error");
5646 	}
5647 	return 1;
5648 }
5649 
5650 
5651 /*
5652  *	Check the free lists for proper length etc.
5653  */
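/*
 *	(In brief: each free page's packed prev pointer, busy bit, color and
 *	q_state are checked, and the totals are compared against
 *	vm_page_free_count / vm_lopage_free_count.  Both checks are gated by
 *	the *_enabled booleans below, which default to FALSE.)
 */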
5654 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5655 static unsigned int
5656 vm_page_verify_free_list(
5657 	vm_page_queue_head_t    *vm_page_queue,
5658 	unsigned int    color,
5659 	vm_page_t       look_for_page,
5660 	boolean_t       expect_page)
5661 {
5662 	unsigned int    npages;
5663 	vm_page_t       m;
5664 	vm_page_t       prev_m;
5665 	boolean_t       found_page;
5666 
5667 	if (!vm_page_verify_this_free_list_enabled) {
5668 		return 0;
5669 	}
5670 
5671 	found_page = FALSE;
5672 	npages = 0;
5673 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5674 
5675 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5676 		if (m == look_for_page) {
5677 			found_page = TRUE;
5678 		}
5679 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5680 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5681 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5682 		}
5683 		if (!m->vmp_busy) {
5684 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5685 			    color, npages, m);
5686 		}
5687 		if (color != (unsigned int) -1) {
5688 			if (VM_PAGE_GET_COLOR(m) != color) {
5689 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5690 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
5691 			}
5692 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5693 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5694 				    color, npages, m, m->vmp_q_state);
5695 			}
5696 		} else {
5697 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5698 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5699 				    npages, m, m->vmp_q_state);
5700 			}
5701 		}
5702 		++npages;
5703 		prev_m = m;
5704 	}
5705 	if (look_for_page != VM_PAGE_NULL) {
5706 		unsigned int other_color;
5707 
5708 		if (expect_page && !found_page) {
5709 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5710 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5711 			_vm_page_print(look_for_page);
5712 			for (other_color = 0;
5713 			    other_color < vm_colors;
5714 			    other_color++) {
5715 				if (other_color == color) {
5716 					continue;
5717 				}
5718 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5719 				    other_color, look_for_page, FALSE);
5720 			}
5721 			if (color == (unsigned int) -1) {
5722 				vm_page_verify_free_list(&vm_lopage_queue_free,
5723 				    (unsigned int) -1, look_for_page, FALSE);
5724 			}
5725 			panic("vm_page_verify_free_list(color=%u)", color);
5726 		}
5727 		if (!expect_page && found_page) {
5728 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5729 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5730 		}
5731 	}
5732 	return npages;
5733 }
5734 
5735 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5736 static void
5737 vm_page_verify_free_lists( void )
5738 {
5739 	unsigned int    color, npages, nlopages;
5740 	boolean_t       toggle = TRUE;
5741 
5742 	if (!vm_page_verify_all_free_lists_enabled) {
5743 		return;
5744 	}
5745 
5746 	npages = 0;
5747 
5748 	vm_free_page_lock();
5749 
5750 	if (vm_page_verify_this_free_list_enabled == TRUE) {
5751 		/*
5752 		 * This variable has been set globally for extra checking of
5753 		 * each free list Q. Since we didn't set it, we don't own it
5754 		 * and we shouldn't toggle it.
5755 		 */
5756 		toggle = FALSE;
5757 	}
5758 
5759 	if (toggle == TRUE) {
5760 		vm_page_verify_this_free_list_enabled = TRUE;
5761 	}
5762 
5763 	for (color = 0; color < vm_colors; color++) {
5764 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5765 		    color, VM_PAGE_NULL, FALSE);
5766 	}
5767 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5768 	    (unsigned int) -1,
5769 	    VM_PAGE_NULL, FALSE);
5770 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5771 		panic("vm_page_verify_free_lists:  "
5772 		    "npages %u free_count %d nlopages %u lo_free_count %u",
5773 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5774 	}
5775 
5776 	if (toggle == TRUE) {
5777 		vm_page_verify_this_free_list_enabled = FALSE;
5778 	}
5779 
5780 	vm_free_page_unlock();
5781 }
5782 
5783 #endif  /* MACH_ASSERT */
5784 
5785 /*
5786  * wrapper for pmap_enter()
5787  */
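/*
 * (Beyond a plain pmap_enter(), this refuses pages already marked with an
 * error and forwards the object's internal / reusable hints to the pmap via
 * PMAP_OPTIONS_INTERNAL and PMAP_OPTIONS_REUSABLE.)
 */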
5788 kern_return_t
5789 pmap_enter_check(
5790 	pmap_t           pmap,
5791 	vm_map_address_t virtual_address,
5792 	vm_page_t        page,
5793 	vm_prot_t        protection,
5794 	vm_prot_t        fault_type,
5795 	unsigned int     flags,
5796 	boolean_t        wired)
5797 {
5798 	int             options = 0;
5799 	vm_object_t     obj;
5800 
5801 	if (VMP_ERROR_GET(page)) {
5802 		return KERN_MEMORY_FAILURE;
5803 	}
5804 	obj = VM_PAGE_OBJECT(page);
5805 	if (obj->internal) {
5806 		options |= PMAP_OPTIONS_INTERNAL;
5807 	}
5808 	if (page->vmp_reusable || obj->all_reusable) {
5809 		options |= PMAP_OPTIONS_REUSABLE;
5810 	}
5811 	return pmap_enter_options(pmap,
5812 	           virtual_address,
5813 	           VM_PAGE_GET_PHYS_PAGE(page),
5814 	           protection,
5815 	           fault_type,
5816 	           flags,
5817 	           wired,
5818 	           options,
5819 	           NULL);
5820 }
5821 
5822 
5823 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5824 
5825 /*
5826  *	CONTIGUOUS PAGE ALLOCATION
5827  *
5828  *	Find a region large enough to contain at least n pages
5829  *	of contiguous physical memory.
5830  *
5831  *	This is done by traversing the vm_page_t array in a linear fashion
5832  *	we assume that the vm_page_t array has the available physical pages in an
5833  *	ordered, ascending list... this is currently true of all our implementations
5834  *      and must remain so... there can be 'holes' in the array...  we also can
5835  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5836  *      which used to happen via 'vm_page_convert'... that function was no longer
5837  *      being called and was removed...
5838  *
5839  *	The basic flow consists of stabilizing some of the interesting state of
5840  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5841  *	sweep at the beginning of the array looking for pages that meet our criteria
5842  *	for a 'stealable' page... currently we are pretty conservative... if the page
5843  *	meets these criteria and is physically contiguous to the previous page in the 'run'
5844  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
5845  *	and start to develop a new run... if at this point we've already considered
5846  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5847  *	and mutex_pause (which will yield the processor), to keep the latency low with respect
5848  *	to other threads trying to acquire free pages (or move pages from q to q),
5849  *	and then continue from the spot we left off... we only make 1 pass through the
5850  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
5851  *      which steals the pages from the queues they're currently on... pages on the free
5852  *	queue can be stolen directly... pages that are on any of the other queues
5853  *	must be removed from the object they are tabled on... this requires taking the
5854  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5855  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
5856  *	dump the pages we've currently stolen back to the free list, and pick up our
5857  *	scan from the point where we aborted the 'current' run.
5858  *
5859  *
5860  *	Requirements:
5861  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
5862  *
5863  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5864  *
5865  * Algorithm:
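 *	(informal outline of the description above, not a normative spec)
 *
 *	1) scan vm_pages[] under the page-queue and free-list locks, growing
 *	   a run of physically contiguous pages that look 'stealable'
 *	2) reset the run whenever a page doesn't qualify or isn't contiguous,
 *	   dropping both locks and calling mutex_pause() after
 *	   MAX_CONSIDERED_BEFORE_YIELD pages to bound lock latency
 *	3) once the run is long enough, pull its free pages off the free
 *	   queues, relocate the contents of any in-use pages into substitute
 *	   pages, and hand the run back as a wired (or gobbled) page list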
5866  */
5867 
5868 #define MAX_CONSIDERED_BEFORE_YIELD     1000
5869 
5870 
5871 #define RESET_STATE_OF_RUN()    \
5872 	MACRO_BEGIN             \
5873 	prevcontaddr = -2;      \
5874 	start_pnum = -1;        \
5875 	free_considered = 0;    \
5876 	substitute_needed = 0;  \
5877 	npages = 0;             \
5878 	MACRO_END
5879 
5880 /*
5881  * Can we steal in-use (i.e. not free) pages when searching for
5882  * physically-contiguous pages ?
5883  */
5884 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
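/*
 * (If this were 0, only pages already sitting on a free queue would be
 * accepted into a run; see the #else branch further down in
 * vm_page_find_contiguous.)
 */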
5885 
5886 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
5887 #if DEBUG
5888 int vm_page_find_contig_debug = 0;
5889 #endif
5890 
5891 static vm_page_t
5892 vm_page_find_contiguous(
5893 	unsigned int    contig_pages,
5894 	ppnum_t         max_pnum,
5895 	ppnum_t         pnum_mask,
5896 	boolean_t       wire,
5897 	int             flags)
5898 {
5899 	vm_page_t       m = NULL;
5900 	ppnum_t         prevcontaddr = 0;
5901 	ppnum_t         start_pnum = 0;
5902 	unsigned int    npages = 0, considered = 0, scanned = 0;
5903 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
5904 	unsigned int    idx_last_contig_page_found = 0;
5905 	int             free_considered = 0, free_available = 0;
5906 	int             substitute_needed = 0;
5907 	int             zone_gc_called = 0;
5908 	boolean_t       wrapped;
5909 	kern_return_t   kr;
5910 #if DEBUG
5911 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
5912 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
5913 #endif
5914 
5915 	int             yielded = 0;
5916 	int             dumped_run = 0;
5917 	int             stolen_pages = 0;
5918 	int             compressed_pages = 0;
5919 
5920 
5921 	if (contig_pages == 0) {
5922 		return VM_PAGE_NULL;
5923 	}
5924 
5925 full_scan_again:
5926 
5927 #if MACH_ASSERT
5928 	vm_page_verify_free_lists();
5929 #endif
5930 #if DEBUG
5931 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
5932 #endif
5933 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5934 
5935 	/*
5936 	 * If there are still delayed pages, try to free up some that match.
5937 	 */
5938 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
5939 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
5940 	}
5941 
5942 	vm_page_lock_queues();
5943 	vm_free_page_lock();
5944 
5945 	RESET_STATE_OF_RUN();
5946 
5947 	scanned = 0;
5948 	considered = 0;
5949 	free_available = vm_page_free_count - vm_page_free_reserved;
5950 
5951 	wrapped = FALSE;
5952 
5953 	if (flags & KMA_LOMEM) {
5954 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
5955 	} else {
5956 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
5957 	}
5958 
5959 	orig_last_idx = idx_last_contig_page_found;
5960 	last_idx = orig_last_idx;
5961 
5962 	for (page_idx = last_idx, start_idx = last_idx;
5963 	    npages < contig_pages && page_idx < vm_pages_count;
5964 	    page_idx++) {
5965 retry:
5966 		if (wrapped &&
5967 		    npages == 0 &&
5968 		    page_idx >= orig_last_idx) {
5969 			/*
5970 			 * We're back where we started and we haven't
5971 			 * found any suitable contiguous range.  Let's
5972 			 * give up.
5973 			 */
5974 			break;
5975 		}
5976 		scanned++;
5977 		m = &vm_pages[page_idx];
5978 
5979 		assert(!m->vmp_fictitious);
5980 		assert(!m->vmp_private);
5981 
5982 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
5983 			/* no more low pages... */
5984 			break;
5985 		}
5986 		if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
5987 			/*
5988 			 * not aligned
5989 			 */
5990 			RESET_STATE_OF_RUN();
5991 		} else if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
5992 		    m->vmp_laundry || m->vmp_wanted ||
5993 		    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5994 			/*
5995 			 * page is in a transient state
5996 			 * or a state we don't want to deal
5997 			 * with, so don't consider it which
5998 			 * means starting a new run
5999 			 */
6000 			RESET_STATE_OF_RUN();
6001 		} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
6002 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
6003 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
6004 		    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6005 			/*
6006 			 * page needs to be on one of our queues (other than the pageout or special free queues)
6007 			 * or it needs to belong to the compressor pool (which is now indicated
6008 			 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
6009 			 * from the check for VM_PAGE_NOT_ON_Q)
6010 			 * in order for it to be stable behind the
6011 			 * locks we hold at this point...
6012 			 * if not, don't consider it which
6013 			 * means starting a new run
6014 			 */
6015 			RESET_STATE_OF_RUN();
6016 		} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) {
6017 			/*
6018 			 * pages on the free list are always 'busy'
6019 			 * so we couldn't test for 'busy' in the check
6020 			 * for the transient states... pages that are
6021 			 * 'free' are never 'tabled', so we also couldn't
6022 			 * test for 'tabled'.  So we check here to make
6023 			 * sure that a non-free page is not busy and is
6024 			 * tabled on an object...
6025 			 * if not, don't consider it which
6026 			 * means starting a new run
6027 			 */
6028 			RESET_STATE_OF_RUN();
6029 		} else {
6030 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6031 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6032 					RESET_STATE_OF_RUN();
6033 					goto did_consider;
6034 				} else {
6035 					npages = 1;
6036 					start_idx = page_idx;
6037 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6038 				}
6039 			} else {
6040 				npages++;
6041 			}
6042 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6043 
6044 			VM_PAGE_CHECK(m);
6045 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6046 				free_considered++;
6047 			} else {
6048 				/*
6049 				 * This page is not free.
6050 				 * If we can't steal used pages,
6051 				 * we have to give up this run
6052 				 * and keep looking.
6053 				 * Otherwise, we might need to
6054 				 * move the contents of this page
6055 				 * into a substitute page.
6056 				 */
6057 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6058 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6059 					substitute_needed++;
6060 				}
6061 #else
6062 				RESET_STATE_OF_RUN();
6063 #endif
6064 			}
6065 
6066 			if ((free_considered + substitute_needed) > free_available) {
6067 				/*
6068 				 * if we let this run continue
6069 				 * we will end up dropping the vm_page_free_count
6070 				 * below the reserve limit... we need to abort
6071 				 * this run, but we can at least re-consider this
6072 				 * page... thus the jump back to 'retry'
6073 				 */
6074 				RESET_STATE_OF_RUN();
6075 
6076 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6077 					considered++;
6078 					goto retry;
6079 				}
6080 				/*
6081 				 * free_available == 0
6082 				 * so can't consider any free pages... if
6083 				 * we went to retry in this case, we'd
6084 				 * get stuck looking at the same page
6085 				 * w/o making any forward progress
6086 				 * we also want to take this path if we've already
6087 				 * reached our limit that controls the lock latency
6088 				 */
6089 			}
6090 		}
6091 did_consider:
6092 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6093 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6094 
6095 			vm_free_page_unlock();
6096 			vm_page_unlock_queues();
6097 
6098 			mutex_pause(0);
6099 
6100 			PAGE_REPLACEMENT_ALLOWED(TRUE);
6101 
6102 			vm_page_lock_queues();
6103 			vm_free_page_lock();
6104 
6105 			RESET_STATE_OF_RUN();
6106 			/*
6107 			 * reset our free page limit since we
6108 			 * dropped the lock protecting the vm_page_free_queue
6109 			 */
6110 			free_available = vm_page_free_count - vm_page_free_reserved;
6111 			considered = 0;
6112 
6113 			yielded++;
6114 
6115 			goto retry;
6116 		}
6117 		considered++;
6118 	}
6119 	m = VM_PAGE_NULL;
6120 
6121 	if (npages != contig_pages) {
6122 		if (!wrapped) {
6123 			/*
6124 			 * We didn't find a contiguous range but we didn't
6125 			 * start from the very first page.
6126 			 * Start again from the very first page.
6127 			 */
6128 			RESET_STATE_OF_RUN();
6129 			if (flags & KMA_LOMEM) {
6130 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6131 			} else {
6132 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6133 			}
6134 			last_idx = 0;
6135 			page_idx = last_idx;
6136 			wrapped = TRUE;
6137 			goto retry;
6138 		}
6139 		vm_free_page_unlock();
6140 	} else {
6141 		vm_page_t       m1;
6142 		vm_page_t       m2;
6143 		unsigned int    cur_idx;
6144 		unsigned int    tmp_start_idx;
6145 		vm_object_t     locked_object = VM_OBJECT_NULL;
6146 		boolean_t       abort_run = FALSE;
6147 
6148 		assert(page_idx - start_idx == contig_pages);
6149 
6150 		tmp_start_idx = start_idx;
6151 
6152 		/*
6153 		 * first pass through to pull the free pages
6154 		 * off of the free queue so that in case we
6155 		 * need substitute pages, we won't grab any
6156 		 * of the free pages in the run... we'll clear
6157 		 * the 'free' bit in the 2nd pass, and even in
6158 		 * an abort_run case, we'll collect all of the
6159 		 * free pages in this run and return them to the free list
6160 		 */
6161 		while (start_idx < page_idx) {
6162 			m1 = &vm_pages[start_idx++];
6163 
6164 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6165 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6166 #endif
6167 
6168 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6169 				unsigned int color;
6170 
6171 				color = VM_PAGE_GET_COLOR(m1);
6172 #if MACH_ASSERT
6173 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6174 #endif
6175 				vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6176 
6177 				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6178 #if MACH_ASSERT
6179 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6180 #endif
6181 				/*
6182 				 * Clear the "free" bit so that this page
6183 				 * does not get considered for another
6184 				 * concurrent physically-contiguous allocation.
6185 				 */
6186 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6187 				assert(m1->vmp_busy);
6188 
6189 				vm_page_free_count--;
6190 			}
6191 		}
6192 		if (flags & KMA_LOMEM) {
6193 			vm_page_lomem_find_contiguous_last_idx = page_idx;
6194 		} else {
6195 			vm_page_find_contiguous_last_idx = page_idx;
6196 		}
6197 
6198 		/*
6199 		 * we can drop the free queue lock at this point since
6200 		 * we've pulled any 'free' candidates off of the list
6201 		 * we need it dropped so that we can do a vm_page_grab
6202 		 * when substituting for pmapped/dirty pages
6203 		 */
6204 		vm_free_page_unlock();
6205 
6206 		start_idx = tmp_start_idx;
6207 		cur_idx = page_idx - 1;
6208 
6209 		while (start_idx++ < page_idx) {
6210 			/*
6211 			 * must go through the list from back to front
6212 			 * so that the page list is created in the
6213 			 * correct order - low -> high phys addresses
6214 			 */
6215 			m1 = &vm_pages[cur_idx--];
6216 
6217 			if (m1->vmp_object == 0) {
6218 				/*
6219 				 * page has already been removed from
6220 				 * the free list in the 1st pass
6221 				 */
6222 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6223 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
6224 				assert(m1->vmp_busy);
6225 				assert(!m1->vmp_wanted);
6226 				assert(!m1->vmp_laundry);
6227 			} else {
6228 				vm_object_t object;
6229 				int refmod;
6230 				boolean_t disconnected, reusable;
6231 
6232 				if (abort_run == TRUE) {
6233 					continue;
6234 				}
6235 
6236 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6237 
6238 				object = VM_PAGE_OBJECT(m1);
6239 
6240 				if (object != locked_object) {
6241 					if (locked_object) {
6242 						vm_object_unlock(locked_object);
6243 						locked_object = VM_OBJECT_NULL;
6244 					}
6245 					if (vm_object_lock_try(object)) {
6246 						locked_object = object;
6247 					}
6248 				}
6249 				if (locked_object == VM_OBJECT_NULL ||
6250 				    (VM_PAGE_WIRED(m1) || m1->vmp_gobbled ||
6251 				    m1->vmp_laundry || m1->vmp_wanted ||
6252 				    m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) ||
6253 				    (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6254 					if (locked_object) {
6255 						vm_object_unlock(locked_object);
6256 						locked_object = VM_OBJECT_NULL;
6257 					}
6258 					tmp_start_idx = cur_idx;
6259 					abort_run = TRUE;
6260 					continue;
6261 				}
6262 
6263 				disconnected = FALSE;
6264 				reusable = FALSE;
6265 
6266 				if ((m1->vmp_reusable ||
6267 				    object->all_reusable) &&
6268 				    (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
6269 				    !m1->vmp_dirty &&
6270 				    !m1->vmp_reference) {
6271 					/* reusable page... */
6272 					refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6273 					disconnected = TRUE;
6274 					if (refmod == 0) {
6275 						/*
6276 						 * ... not reused: can steal
6277 						 * without relocating contents.
6278 						 */
6279 						reusable = TRUE;
6280 					}
6281 				}
6282 
6283 				if ((m1->vmp_pmapped &&
6284 				    !reusable) ||
6285 				    m1->vmp_dirty ||
6286 				    m1->vmp_precious) {
6287 					vm_object_offset_t offset;
6288 
6289 					m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6290 
6291 					if (m2 == VM_PAGE_NULL) {
6292 						if (locked_object) {
6293 							vm_object_unlock(locked_object);
6294 							locked_object = VM_OBJECT_NULL;
6295 						}
6296 						tmp_start_idx = cur_idx;
6297 						abort_run = TRUE;
6298 						continue;
6299 					}
6300 					if (!disconnected) {
6301 						if (m1->vmp_pmapped) {
6302 							refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6303 						} else {
6304 							refmod = 0;
6305 						}
6306 					}
6307 
6308 					/* copy the page's contents */
6309 					pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6310 					/* copy the page's state */
6311 					assert(!VM_PAGE_WIRED(m1));
6312 					assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6313 					assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6314 					assert(!m1->vmp_laundry);
6315 					m2->vmp_reference       = m1->vmp_reference;
6316 					assert(!m1->vmp_gobbled);
6317 					assert(!m1->vmp_private);
6318 					m2->vmp_no_cache        = m1->vmp_no_cache;
6319 					m2->vmp_xpmapped        = 0;
6320 					assert(!m1->vmp_busy);
6321 					assert(!m1->vmp_wanted);
6322 					assert(!m1->vmp_fictitious);
6323 					m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6324 					m2->vmp_wpmapped        = m1->vmp_wpmapped;
6325 					assert(!m1->vmp_free_when_done);
6326 					m2->vmp_absent  = m1->vmp_absent;
6327 					m2->vmp_error   = VMP_ERROR_GET(m1);
6328 					m2->vmp_dirty   = m1->vmp_dirty;
6329 					assert(!m1->vmp_cleaning);
6330 					m2->vmp_precious        = m1->vmp_precious;
6331 					m2->vmp_clustered       = m1->vmp_clustered;
6332 					assert(!m1->vmp_overwriting);
6333 					m2->vmp_restart = m1->vmp_restart;
6334 					m2->vmp_unusual = m1->vmp_unusual;
6335 					m2->vmp_cs_validated = m1->vmp_cs_validated;
6336 					m2->vmp_cs_tainted      = m1->vmp_cs_tainted;
6337 					m2->vmp_cs_nx   = m1->vmp_cs_nx;
6338 
6339 					m2->vmp_realtime = m1->vmp_realtime;
6340 					m1->vmp_realtime = false;
6341 
6342 					/*
6343 					 * If m1 had really been reusable,
6344 					 * we would have just stolen it, so
6345 					 * let's not propagate its "reusable"
6346 					 * bit and assert that m2 is not
6347 					 * marked as "reusable".
6348 					 */
6349 					// m2->vmp_reusable	= m1->vmp_reusable;
6350 					assert(!m2->vmp_reusable);
6351 
6352 					// assert(!m1->vmp_lopage);
6353 
6354 					if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6355 						m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6356 						/*
6357 						 * We just grabbed m2 up above and so it isn't
6358 						 * going to be on any special Q as yet and so
6359 						 * we don't need to 'remove' it from the special
6360 						 * queues. Just resetting the state should be enough.
6361 						 */
6362 						m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6363 					}
6364 
6365 					/*
6366 					 * page may need to be flushed if
6367 					 * it is marshalled into a UPL
6368 					 * that is going to be used by a device
6369 					 * that doesn't support coherency
6370 					 */
6371 					m2->vmp_written_by_kernel = TRUE;
6372 
6373 					/*
6374 					 * make sure we clear the ref/mod state
6375 					 * from the pmap layer... else we risk
6376 					 * inheriting state from the last time
6377 					 * this page was used...
6378 					 */
6379 					pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6380 
6381 					if (refmod & VM_MEM_REFERENCED) {
6382 						m2->vmp_reference = TRUE;
6383 					}
6384 					if (refmod & VM_MEM_MODIFIED) {
6385 						SET_PAGE_DIRTY(m2, TRUE);
6386 					}
6387 					offset = m1->vmp_offset;
6388 
6389 					/*
6390 					 * completely cleans up the state
6391 					 * of the page so that it is ready
6392 					 * to be put onto the free list, or
6393 					 * for this purpose it looks like it
6394 					 * just came off of the free list
6395 					 */
6396 					vm_page_free_prepare(m1);
6397 
6398 					/*
6399 					 * now put the substitute page
6400 					 * on the object
6401 					 */
6402 					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
6403 
6404 					if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6405 						m2->vmp_pmapped = TRUE;
6406 						m2->vmp_wpmapped = TRUE;
6407 
6408 						kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6409 						    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6410 
6411 						assert(kr == KERN_SUCCESS);
6412 
6413 						compressed_pages++;
6414 					} else {
6415 						if (m2->vmp_reference) {
6416 							vm_page_activate(m2);
6417 						} else {
6418 							vm_page_deactivate(m2);
6419 						}
6420 					}
6421 					PAGE_WAKEUP_DONE(m2);
6422 				} else {
6423 					assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6424 
6425 					/*
6426 					 * completely cleans up the state
6427 					 * of the page so that it is ready
6428 					 * to be put onto the free list, or
6429 					 * for this purpose it looks like it
6430 					 * just came off of the free list
6431 					 */
6432 					vm_page_free_prepare(m1);
6433 				}
6434 
6435 				stolen_pages++;
6436 			}
6437 			if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6438 				/*
6439 				 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6440 				 * change it for pages marked as used-by-compressor.
6441 				 */
6442 				vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6443 			}
6444 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6445 			m1->vmp_snext = m;
6446 			m = m1;
6447 		}
6448 		if (locked_object) {
6449 			vm_object_unlock(locked_object);
6450 			locked_object = VM_OBJECT_NULL;
6451 		}
6452 
6453 		if (abort_run == TRUE) {
6454 			/*
6455 			 * want the index of the last
6456 			 * page in this run that was
6457 			 * successfully 'stolen', so back
6458 			 * it up 1 for the auto-decrement on use
6459 			 * and 1 more to bump back over this page
6460 			 */
6461 			page_idx = tmp_start_idx + 2;
6462 			if (page_idx >= vm_pages_count) {
6463 				if (wrapped) {
6464 					if (m != VM_PAGE_NULL) {
6465 						vm_page_unlock_queues();
6466 						vm_page_free_list(m, FALSE);
6467 						vm_page_lock_queues();
6468 						m = VM_PAGE_NULL;
6469 					}
6470 					dumped_run++;
6471 					goto done_scanning;
6472 				}
6473 				page_idx = last_idx = 0;
6474 				wrapped = TRUE;
6475 			}
6476 			abort_run = FALSE;
6477 
6478 			/*
6479 			 * We didn't find a contiguous range but we didn't
6480 			 * start from the very first page.
6481 			 * Start again from the very first page.
6482 			 */
6483 			RESET_STATE_OF_RUN();
6484 
6485 			if (flags & KMA_LOMEM) {
6486 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
6487 			} else {
6488 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6489 			}
6490 
6491 			last_idx = page_idx;
6492 
6493 			if (m != VM_PAGE_NULL) {
6494 				vm_page_unlock_queues();
6495 				vm_page_free_list(m, FALSE);
6496 				vm_page_lock_queues();
6497 				m = VM_PAGE_NULL;
6498 			}
6499 			dumped_run++;
6500 
6501 			vm_free_page_lock();
6502 			/*
6503 			 * reset our free page limit since we
6504 			 * dropped the lock protecting the vm_page_free_queue
6505 			 */
6506 			free_available = vm_page_free_count - vm_page_free_reserved;
6507 			goto retry;
6508 		}
6509 
6510 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6511 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6512 			assert(m1->vmp_wire_count == 0);
6513 
6514 			if (wire == TRUE) {
6515 				m1->vmp_wire_count++;
6516 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
6517 			} else {
6518 				m1->vmp_gobbled = TRUE;
6519 			}
6520 		}
6521 		if (wire == FALSE) {
6522 			vm_page_gobble_count += npages;
6523 		}
6524 
6525 		/*
6526 		 * gobbled pages are also counted as wired pages
6527 		 */
6528 		vm_page_wire_count += npages;
6529 
6530 		assert(vm_page_verify_contiguous(m, npages));
6531 	}
6532 done_scanning:
6533 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6534 
6535 	vm_page_unlock_queues();
6536 
6537 #if DEBUG
6538 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6539 
6540 	tv_end_sec -= tv_start_sec;
6541 	if (tv_end_usec < tv_start_usec) {
6542 		tv_end_sec--;
6543 		tv_end_usec += 1000000;
6544 	}
6545 	tv_end_usec -= tv_start_usec;
6546 	if (tv_end_usec >= 1000000) {
6547 		tv_end_sec++;
6548 		tv_end_usec -= 1000000;
6549 	}
6550 	if (vm_page_find_contig_debug) {
6551 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
6552 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6553 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
6554 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6555 	}
6556 
6557 #endif
6558 #if MACH_ASSERT
6559 	vm_page_verify_free_lists();
6560 #endif
6561 	if (m == NULL && zone_gc_called < 2) {
6562 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6563 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6564 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6565 
6566 		if (consider_buffer_cache_collect != NULL) {
6567 			(void)(*consider_buffer_cache_collect)(1);
6568 		}
6569 
6570 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6571 
6572 		zone_gc_called++;
6573 
6574 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6575 		goto full_scan_again;
6576 	}
6577 
6578 	return m;
6579 }
6580 
6581 /*
6582  *	Allocate a list of contiguous, wired pages.
6583  */
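/*
 *	(Usage note: size must be a whole number of pages; on success *list is
 *	a chain ordered by ascending physical address, wired when wire is TRUE
 *	and gobbled otherwise, optionally zero-filled with KMA_ZERO, and
 *	KERN_NO_SPACE is returned when no suitable run can be found.)
 */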
6584 kern_return_t
6585 cpm_allocate(
6586 	vm_size_t       size,
6587 	vm_page_t       *list,
6588 	ppnum_t         max_pnum,
6589 	ppnum_t         pnum_mask,
6590 	boolean_t       wire,
6591 	int             flags)
6592 {
6593 	vm_page_t               pages;
6594 	unsigned int            npages;
6595 
6596 	if (size % PAGE_SIZE != 0) {
6597 		return KERN_INVALID_ARGUMENT;
6598 	}
6599 
6600 	npages = (unsigned int) (size / PAGE_SIZE);
6601 	if (npages != size / PAGE_SIZE) {
6602 		/* 32-bit overflow */
6603 		return KERN_INVALID_ARGUMENT;
6604 	}
6605 
6606 	/*
6607 	 *	Obtain a pointer to a subset of the free
6608 	 *	list large enough to satisfy the request;
6609 	 *	the region will be physically contiguous.
6610 	 */
6611 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6612 
6613 	if (pages == VM_PAGE_NULL) {
6614 		return KERN_NO_SPACE;
6615 	}
6616 	/*
6617 	 * determine need for wakeups
6618 	 */
6619 	if (vm_page_free_count < vm_page_free_min) {
6620 		vm_free_page_lock();
6621 		if (vm_pageout_running == FALSE) {
6622 			vm_free_page_unlock();
6623 			thread_wakeup((event_t) &vm_page_free_wanted);
6624 		} else {
6625 			vm_free_page_unlock();
6626 		}
6627 	}
6628 
6629 	VM_CHECK_MEMORYSTATUS;
6630 
6631 	/*
6632 	 *	The CPM pages should now be available and
6633 	 *	ordered by ascending physical address.
6634 	 */
6635 	assert(vm_page_verify_contiguous(pages, npages));
6636 
6637 	if (flags & KMA_ZERO) {
6638 		for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6639 			vm_page_zero_fill(m);
6640 		}
6641 	}
6642 
6643 	*list = pages;
6644 	return KERN_SUCCESS;
6645 }
6646 
6647 
6648 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6649 
6650 /*
6651  * when working on a 'run' of pages, it is necessary to hold
6652  * the vm_page_queue_lock (a hot global lock) for certain operations
6653  * on the page... however, the majority of the work can be done
6654  * while merely holding the object lock... in fact there are certain
6655  * collections of pages that don't require any work brokered by the
6656  * vm_page_queue_lock... to mitigate the time spent behind the global
6657  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6658  * while doing all of the work that doesn't require the vm_page_queue_lock...
6659  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6660  * necessary work for each page... we will grab the busy bit on the page
6661  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6662  * if it can't immediately take the vm_page_queue_lock in order to compete
6663  * for the locks in the same order that vm_pageout_scan takes them.
6664  * the operation names are modeled after the names of the routines that
6665  * need to be called in order to make the changes very obvious in the
6666  * original loop
6667  */
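/*
 * Rough usage sketch (illustrative only -- the batching macros callers
 * actually use live elsewhere, and the array size here is made up):
 *
 *	struct vm_page_delayed_work	dw_array[16], *dwp = &dw_array[0];
 *	int				dw_count = 0;
 *
 *	// with the object locked, for each page of interest:
 *	dwp->dw_m = m;
 *	dwp->dw_mask = DW_vm_page_activate | DW_clear_busy | DW_PAGE_WAKEUP;
 *	dwp++, dw_count++;
 *
 *	// once the batch is full (or we run out of pages):
 *	vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 */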
6668 
6669 void
6670 vm_page_do_delayed_work(
6671 	vm_object_t     object,
6672 	vm_tag_t        tag,
6673 	struct vm_page_delayed_work *dwp,
6674 	int             dw_count)
6675 {
6676 	int             j;
6677 	vm_page_t       m;
6678 	vm_page_t       local_free_q = VM_PAGE_NULL;
6679 
6680 	/*
6681 	 * pageout_scan takes the vm_page_lock_queues first
6682 	 * then tries for the object lock... to avoid what
6683 	 * is effectively a lock inversion, we'll go to the
6684 	 * trouble of taking them in that same order... otherwise
6685 	 * if this object contains the majority of the pages resident
6686 	 * in the UBC (or a small set of large objects actively being
6687 	 * worked on contains the majority of the pages), we could
6688 	 * cause the pageout_scan thread to 'starve' in its attempt
6689 	 * to find pages to move to the free queue, since it has to
6690 	 * successfully acquire the object lock of any candidate page
6691 	 * before it can steal/clean it.
6692 	 */
6693 	if (!vm_page_trylockspin_queues()) {
6694 		vm_object_unlock(object);
6695 
6696 		/*
6697 		 * "Turnstile enabled vm_pageout_scan" can be runnable
6698 		 * for a very long time without getting on a core.
6699 		 * If this is a higher priority thread it could be
6700 		 * waiting here for a very long time respecting the fact
6701 		 * that pageout_scan would like its object after VPS does
6702 		 * a mutex_pause(0).
6703 		 * So we cap the number of yields in the vm_object_lock_avoid()
6704 		 * case to a single mutex_pause(0) which will give vm_pageout_scan
6705 		 * 10us to run and grab the object if needed.
6706 		 */
6707 		vm_page_lockspin_queues();
6708 
6709 		for (j = 0;; j++) {
6710 			if ((!vm_object_lock_avoid(object) ||
6711 			    (vps_dynamic_priority_enabled && (j > 0))) &&
6712 			    _vm_object_lock_try(object)) {
6713 				break;
6714 			}
6715 			vm_page_unlock_queues();
6716 			mutex_pause(j);
6717 			vm_page_lockspin_queues();
6718 		}
6719 	}
6720 	for (j = 0; j < dw_count; j++, dwp++) {
6721 		m = dwp->dw_m;
6722 
6723 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6724 			vm_pageout_throttle_up(m);
6725 		}
6726 #if CONFIG_PHANTOM_CACHE
6727 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6728 			vm_phantom_cache_update(m);
6729 		}
6730 #endif
6731 		if (dwp->dw_mask & DW_vm_page_wire) {
6732 			vm_page_wire(m, tag, FALSE);
6733 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
6734 			boolean_t       queueit;
6735 
6736 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6737 
6738 			vm_page_unwire(m, queueit);
6739 		}
6740 		if (dwp->dw_mask & DW_vm_page_free) {
6741 			vm_page_free_prepare_queues(m);
6742 
6743 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6744 			/*
6745 			 * Add this page to our list of reclaimed pages,
6746 			 * to be freed later.
6747 			 */
6748 			m->vmp_snext = local_free_q;
6749 			local_free_q = m;
6750 		} else {
6751 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6752 				vm_page_deactivate_internal(m, FALSE);
6753 			} else if (dwp->dw_mask & DW_vm_page_activate) {
6754 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6755 					vm_page_activate(m);
6756 				}
6757 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
6758 				vm_page_speculate(m, TRUE);
6759 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
6760 				/*
6761 				 * if we didn't hold the object lock and did this,
6762 				 * we might disconnect the page, then someone might
6763 				 * soft fault it back in, then we would put it on the
6764 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
6765 				 * page on that queue, which we don't want
6766 				 */
6767 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6768 
6769 				if ((refmod_state & VM_MEM_REFERENCED)) {
6770 					/*
6771 					 * this page has been touched since it got cleaned; let's activate it
6772 					 * if it hasn't already been
6773 					 */
6774 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6775 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6776 
6777 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6778 						vm_page_activate(m);
6779 					}
6780 				} else {
6781 					m->vmp_reference = FALSE;
6782 					vm_page_enqueue_cleaned(m);
6783 				}
6784 			} else if (dwp->dw_mask & DW_vm_page_lru) {
6785 				vm_page_lru(m);
6786 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6787 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6788 					vm_page_queues_remove(m, TRUE);
6789 				}
6790 			}
6791 			if (dwp->dw_mask & DW_set_reference) {
6792 				m->vmp_reference = TRUE;
6793 			} else if (dwp->dw_mask & DW_clear_reference) {
6794 				m->vmp_reference = FALSE;
6795 			}
6796 
6797 			if (dwp->dw_mask & DW_move_page) {
6798 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6799 					vm_page_queues_remove(m, FALSE);
6800 
6801 					assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
6802 
6803 					vm_page_enqueue_inactive(m, FALSE);
6804 				}
6805 			}
6806 			if (dwp->dw_mask & DW_clear_busy) {
6807 				m->vmp_busy = FALSE;
6808 			}
6809 
6810 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6811 				PAGE_WAKEUP(m);
6812 			}
6813 		}
6814 	}
6815 	vm_page_unlock_queues();
6816 
6817 	if (local_free_q) {
6818 		vm_page_free_list(local_free_q, TRUE);
6819 	}
6820 
6821 	VM_CHECK_MEMORYSTATUS;
6822 }
6823 
6824 __abortlike
6825 static void
6826 __vm_page_alloc_list_failed_panic(
6827 	vm_size_t       page_count,
6828 	kma_flags_t     flags,
6829 	kern_return_t   kr)
6830 {
6831 	panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
6832 	    (size_t)page_count, flags, kr);
6833 }
6834 
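/*
 * Allocate "page_count" pages and return them as a singly linked list
 * chained through vmp_snext, honoring KMA_LOMEM, KMA_NOPAGEWAIT,
 * KMA_NOFAIL and KMA_ZERO/KMA_NOENCRYPT.
 *
 * Hypothetical usage sketch (the local names are illustrative only):
 *
 *	vm_page_t pages;
 *	if (vm_page_alloc_list(npages, KMA_ZERO, &pages) == KERN_SUCCESS) {
 *		for (vm_page_t p = pages; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *			ppnum_t pn = vm_page_get_phys_page(p);
 *			...
 *		}
 *	}
 */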
6835 kern_return_t
6836 vm_page_alloc_list(
6837 	vm_size_t   page_count,
6838 	kma_flags_t flags,
6839 	vm_page_t  *list)
6840 {
6841 	vm_page_t       page_list = VM_PAGE_NULL;
6842 	vm_page_t       mem;
6843 	kern_return_t   kr = KERN_SUCCESS;
6844 	int             page_grab_count = 0;
6845 #if DEVELOPMENT || DEBUG
6846 	task_t          task;
6847 #endif /* DEVELOPMENT || DEBUG */
6848 
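	/* Grab one page per iteration; if none is available, wait in VM_PAGE_WAIT() unless the caller asked not to. */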
6849 	for (vm_size_t i = 0; i < page_count; i++) {
6850 		for (;;) {
6851 			if (flags & KMA_LOMEM) {
6852 				mem = vm_page_grablo();
6853 			} else {
6854 				mem = vm_page_grab();
6855 			}
6856 
6857 			if (mem != VM_PAGE_NULL) {
6858 				break;
6859 			}
6860 
6861 			if (flags & KMA_NOPAGEWAIT) {
6862 				kr = KERN_RESOURCE_SHORTAGE;
6863 				goto out;
6864 			}
6865 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
6866 				kr = KERN_RESOURCE_SHORTAGE;
6867 				goto out;
6868 			}
6869 
6870 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
6871 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
6872 
6873 			if ((flags & KMA_NOFAIL) == 0) {
6874 				uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
6875 				if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
6876 					kr = KERN_RESOURCE_SHORTAGE;
6877 					goto out;
6878 				}
6879 			}
6880 			VM_PAGE_WAIT();
6881 		}
6882 
6883 		page_grab_count++;
6884 		mem->vmp_snext = page_list;
6885 		page_list = mem;
6886 	}
6887 
6888 	if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
6889 		for (mem = page_list; mem; mem = mem->vmp_snext) {
6890 			vm_page_zero_fill(mem);
6891 		}
6892 	}
6893 
6894 out:
6895 #if DEBUG || DEVELOPMENT
6896 	task = current_task_early();
6897 	if (task != NULL) {
6898 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
6899 	}
6900 #endif
6901 
6902 	if (kr == KERN_SUCCESS) {
6903 		*list = page_list;
6904 	} else if (flags & KMA_NOFAIL) {
6905 		__vm_page_alloc_list_failed_panic(page_count, flags, kr);
6906 	} else {
6907 		vm_page_free_list(page_list, FALSE);
6908 	}
6909 
6910 	return kr;
6911 }
6912 
6913 void
6914 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
6915 {
6916 	page->vmp_offset = offset;
6917 }
6918 
6919 vm_page_t
6920 vm_page_get_next(vm_page_t page)
6921 {
6922 	return page->vmp_snext;
6923 }
6924 
6925 vm_object_offset_t
6926 vm_page_get_offset(vm_page_t page)
6927 {
6928 	return page->vmp_offset;
6929 }
6930 
6931 ppnum_t
6932 vm_page_get_phys_page(vm_page_t page)
6933 {
6934 	return VM_PAGE_GET_PHYS_PAGE(page);
6935 }
6936 
6937 
6938 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6939 
6940 #if HIBERNATION
6941 
6942 static vm_page_t hibernate_gobble_queue;
6943 
6944 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
6945 static int  hibernate_flush_dirty_pages(int);
6946 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
6947 
6948 void hibernate_flush_wait(void);
6949 void hibernate_mark_in_progress(void);
6950 void hibernate_clear_in_progress(void);
6951 
6952 void            hibernate_free_range(int, int);
6953 void            hibernate_hash_insert_page(vm_page_t);
6954 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
6955 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
6956 ppnum_t         hibernate_lookup_paddr(unsigned int);
6957 
6958 struct hibernate_statistics {
6959 	int hibernate_considered;
6960 	int hibernate_reentered_on_q;
6961 	int hibernate_found_dirty;
6962 	int hibernate_skipped_cleaning;
6963 	int hibernate_skipped_transient;
6964 	int hibernate_skipped_precious;
6965 	int hibernate_skipped_external;
6966 	int hibernate_queue_nolock;
6967 	int hibernate_queue_paused;
6968 	int hibernate_throttled;
6969 	int hibernate_throttle_timeout;
6970 	int hibernate_drained;
6971 	int hibernate_drain_timeout;
6972 	int cd_lock_failed;
6973 	int cd_found_precious;
6974 	int cd_found_wired;
6975 	int cd_found_busy;
6976 	int cd_found_unusual;
6977 	int cd_found_cleaning;
6978 	int cd_found_laundry;
6979 	int cd_found_dirty;
6980 	int cd_found_xpmapped;
6981 	int cd_skipped_xpmapped;
6982 	int cd_local_free;
6983 	int cd_total_free;
6984 	int cd_vm_page_wire_count;
6985 	int cd_vm_struct_pages_unneeded;
6986 	int cd_pages;
6987 	int cd_discarded;
6988 	int cd_count_wire;
6989 } hibernate_stats;
6990 
6991 
6992 /*
6993  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
6994  * so that we don't overrun the estimated image size, which would
6995  * result in a hibernation failure.
6996  *
6997  * We use a size value instead of pages because we don't want to take up more space
6998  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
6999  * to have that additional space available.
7000  *
7001  * Since this was set at 40000 pages on X86, we are going to use 160MB as our
7002  * xpmapped size.
7003  */
7004 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
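/* e.g. 40960 pages with a 4K page size, 10240 pages with a 16K page size */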
7005 
7006 
7007 static int
7008 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7009 {
7010 	wait_result_t   wait_result;
7011 
7012 	vm_page_lock_queues();
7013 
7014 	while (!vm_page_queue_empty(&q->pgo_pending)) {
7015 		q->pgo_draining = TRUE;
7016 
7017 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7018 
7019 		vm_page_unlock_queues();
7020 
7021 		wait_result = thread_block(THREAD_CONTINUE_NULL);
7022 
7023 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7024 			hibernate_stats.hibernate_drain_timeout++;
7025 
7026 			if (q == &vm_pageout_queue_external) {
7027 				return 0;
7028 			}
7029 
7030 			return 1;
7031 		}
7032 		vm_page_lock_queues();
7033 
7034 		hibernate_stats.hibernate_drained++;
7035 	}
7036 	vm_page_unlock_queues();
7037 
7038 	return 0;
7039 }
7040 
7041 
7042 boolean_t hibernate_skip_external = FALSE;
7043 
7044 static int
7045 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7046 {
7047 	vm_page_t       m;
7048 	vm_object_t     l_object = NULL;
7049 	vm_object_t     m_object = NULL;
7050 	int             refmod_state = 0;
7051 	int             try_failed_count = 0;
7052 	int             retval = 0;
7053 	int             current_run = 0;
7054 	struct  vm_pageout_queue *iq;
7055 	struct  vm_pageout_queue *eq;
7056 	struct  vm_pageout_queue *tq;
7057 
7058 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7059 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7060 
7061 	iq = &vm_pageout_queue_internal;
7062 	eq = &vm_pageout_queue_external;
7063 
7064 	vm_page_lock_queues();
7065 
7066 	while (qcount && !vm_page_queue_empty(q)) {
7067 		if (current_run++ == 1000) {
7068 			if (hibernate_should_abort()) {
7069 				retval = 1;
7070 				break;
7071 			}
7072 			current_run = 0;
7073 		}
7074 
7075 		m = (vm_page_t) vm_page_queue_first(q);
7076 		m_object = VM_PAGE_OBJECT(m);
7077 
7078 		/*
7079 		 * check to see if we currently are working
7080 		 * with the same object... if so, we've
7081 		 * already got the lock
7082 		 */
7083 		if (m_object != l_object) {
7084 			/*
7085 			 * the object associated with candidate page is
7086 			 * different from the one we were just working
7087 			 * with... dump the lock if we still own it
7088 			 */
7089 			if (l_object != NULL) {
7090 				vm_object_unlock(l_object);
7091 				l_object = NULL;
7092 			}
7093 			/*
7094 			 * Try to lock the object; since we've already got the
7095 			 * page queues lock, we can only 'try' for this one.
7096 			 * if the 'try' fails, we need to do a mutex_pause
7097 			 * to allow the owner of the object lock a chance to
7098 			 * run...
7099 			 */
7100 			if (!vm_object_lock_try_scan(m_object)) {
7101 				if (try_failed_count > 20) {
7102 					hibernate_stats.hibernate_queue_nolock++;
7103 
7104 					goto reenter_pg_on_q;
7105 				}
7106 
7107 				vm_page_unlock_queues();
7108 				mutex_pause(try_failed_count++);
7109 				vm_page_lock_queues();
7110 
7111 				hibernate_stats.hibernate_queue_paused++;
7112 				continue;
7113 			} else {
7114 				l_object = m_object;
7115 			}
7116 		}
7117 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7118 			/*
7119 			 * page is not to be cleaned
7120 			 * put it back on its queue
7121 			 */
7122 			if (m->vmp_cleaning) {
7123 				hibernate_stats.hibernate_skipped_cleaning++;
7124 			} else {
7125 				hibernate_stats.hibernate_skipped_transient++;
7126 			}
7127 
7128 			goto reenter_pg_on_q;
7129 		}
7130 		if (m_object->vo_copy == VM_OBJECT_NULL) {
7131 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7132 				/*
7133 				 * let the normal hibernate image path
7134 				 * deal with these
7135 				 */
7136 				goto reenter_pg_on_q;
7137 			}
7138 		}
7139 		if (!m->vmp_dirty && m->vmp_pmapped) {
7140 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7141 
7142 			if ((refmod_state & VM_MEM_MODIFIED)) {
7143 				SET_PAGE_DIRTY(m, FALSE);
7144 			}
7145 		} else {
7146 			refmod_state = 0;
7147 		}
7148 
7149 		if (!m->vmp_dirty) {
7150 			/*
7151 			 * page is not to be cleaned
7152 			 * put it back on its queue
7153 			 */
7154 			if (m->vmp_precious) {
7155 				hibernate_stats.hibernate_skipped_precious++;
7156 			}
7157 
7158 			goto reenter_pg_on_q;
7159 		}
7160 
7161 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7162 			hibernate_stats.hibernate_skipped_external++;
7163 
7164 			goto reenter_pg_on_q;
7165 		}
7166 		tq = NULL;
7167 
7168 		if (m_object->internal) {
7169 			if (VM_PAGE_Q_THROTTLED(iq)) {
7170 				tq = iq;
7171 			}
7172 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7173 			tq = eq;
7174 		}
7175 
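		/*
		 * The target pageout queue is throttled: drop the object lock and
		 * wait (up to 5 one-second timeouts) for the laundry to drain.  If
		 * the external queue stays throttled, skip external pages for the
		 * rest of this hibernation attempt rather than stalling it.
		 */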
7176 		if (tq != NULL) {
7177 			wait_result_t   wait_result;
7178 			int             wait_count = 5;
7179 
7180 			if (l_object != NULL) {
7181 				vm_object_unlock(l_object);
7182 				l_object = NULL;
7183 			}
7184 
7185 			while (retval == 0) {
7186 				tq->pgo_throttled = TRUE;
7187 
7188 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7189 
7190 				vm_page_unlock_queues();
7191 
7192 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7193 
7194 				vm_page_lock_queues();
7195 
7196 				if (wait_result != THREAD_TIMED_OUT) {
7197 					break;
7198 				}
7199 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7200 					break;
7201 				}
7202 
7203 				if (hibernate_should_abort()) {
7204 					retval = 1;
7205 				}
7206 
7207 				if (--wait_count == 0) {
7208 					hibernate_stats.hibernate_throttle_timeout++;
7209 
7210 					if (tq == eq) {
7211 						hibernate_skip_external = TRUE;
7212 						break;
7213 					}
7214 					retval = 1;
7215 				}
7216 			}
7217 			if (retval) {
7218 				break;
7219 			}
7220 
7221 			hibernate_stats.hibernate_throttled++;
7222 
7223 			continue;
7224 		}
7225 		/*
7226 		 * we've already factored out pages in the laundry which
7227 		 * means this page can't be on the pageout queue so it's
7228 		 * safe to do the vm_page_queues_remove
7229 		 */
7230 		vm_page_queues_remove(m, TRUE);
7231 
7232 		if (m_object->internal == TRUE) {
7233 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7234 		}
7235 
7236 		vm_pageout_cluster(m);
7237 
7238 		hibernate_stats.hibernate_found_dirty++;
7239 
7240 		goto next_pg;
7241 
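		/*
		 * Requeue the page: removing and re-entering places it at the tail
		 * of its queue, so the scan keeps making forward progress instead
		 * of immediately revisiting the same page.
		 */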
7242 reenter_pg_on_q:
7243 		vm_page_queue_remove(q, m, vmp_pageq);
7244 		vm_page_queue_enter(q, m, vmp_pageq);
7245 
7246 		hibernate_stats.hibernate_reentered_on_q++;
7247 next_pg:
7248 		hibernate_stats.hibernate_considered++;
7249 
7250 		qcount--;
7251 		try_failed_count = 0;
7252 	}
7253 	if (l_object != NULL) {
7254 		vm_object_unlock(l_object);
7255 		l_object = NULL;
7256 	}
7257 
7258 	vm_page_unlock_queues();
7259 
7260 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7261 
7262 	return retval;
7263 }
7264 
7265 
7266 static int
7267 hibernate_flush_dirty_pages(int pass)
7268 {
7269 	struct vm_speculative_age_q     *aq;
7270 	uint32_t        i;
7271 
7272 	if (vm_page_local_q) {
7273 		zpercpu_foreach_cpu(lid) {
7274 			vm_page_reactivate_local(lid, TRUE, FALSE);
7275 		}
7276 	}
7277 
7278 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7279 		int             qcount;
7280 		vm_page_t       m;
7281 
7282 		aq = &vm_page_queue_speculative[i];
7283 
7284 		if (vm_page_queue_empty(&aq->age_q)) {
7285 			continue;
7286 		}
7287 		qcount = 0;
7288 
7289 		vm_page_lockspin_queues();
7290 
7291 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7292 			qcount++;
7293 		}
7294 		vm_page_unlock_queues();
7295 
7296 		if (qcount) {
7297 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7298 				return 1;
7299 			}
7300 		}
7301 	}
7302 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7303 		return 1;
7304 	}
7305 	/* XXX FBDP TODO: flush secluded queue */
7306 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7307 		return 1;
7308 	}
7309 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7310 		return 1;
7311 	}
7312 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7313 		return 1;
7314 	}
7315 
7316 	if (pass == 1) {
7317 		vm_compressor_record_warmup_start();
7318 	}
7319 
7320 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7321 		if (pass == 1) {
7322 			vm_compressor_record_warmup_end();
7323 		}
7324 		return 1;
7325 	}
7326 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7327 		if (pass == 1) {
7328 			vm_compressor_record_warmup_end();
7329 		}
7330 		return 1;
7331 	}
7332 	if (pass == 1) {
7333 		vm_compressor_record_warmup_end();
7334 	}
7335 
7336 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7337 		return 1;
7338 	}
7339 
7340 	return 0;
7341 }
7342 
7343 
7344 void
7345 hibernate_reset_stats()
7346 {
7347 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7348 }
7349 
7350 
7351 int
7352 hibernate_flush_memory()
7353 {
7354 	int     retval;
7355 
7356 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7357 
7358 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7359 
7360 	hibernate_cleaning_in_progress = TRUE;
7361 	hibernate_skip_external = FALSE;
7362 
7363 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7364 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7365 
7366 		vm_compressor_flush();
7367 
7368 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7369 
7370 		if (consider_buffer_cache_collect != NULL) {
7371 			unsigned int orig_wire_count;
7372 
7373 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7374 			orig_wire_count = vm_page_wire_count;
7375 
7376 			(void)(*consider_buffer_cache_collect)(1);
7377 			zone_gc(ZONE_GC_DRAIN);
7378 
7379 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7380 
7381 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7382 		}
7383 	}
7384 	hibernate_cleaning_in_progress = FALSE;
7385 
7386 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7387 
7388 	if (retval) {
7389 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7390 	}
7391 
7392 
7393 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7394 	    hibernate_stats.hibernate_considered,
7395 	    hibernate_stats.hibernate_reentered_on_q,
7396 	    hibernate_stats.hibernate_found_dirty);
7397 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7398 	    hibernate_stats.hibernate_skipped_cleaning,
7399 	    hibernate_stats.hibernate_skipped_transient,
7400 	    hibernate_stats.hibernate_skipped_precious,
7401 	    hibernate_stats.hibernate_skipped_external,
7402 	    hibernate_stats.hibernate_queue_nolock);
7403 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7404 	    hibernate_stats.hibernate_queue_paused,
7405 	    hibernate_stats.hibernate_throttled,
7406 	    hibernate_stats.hibernate_throttle_timeout,
7407 	    hibernate_stats.hibernate_drained,
7408 	    hibernate_stats.hibernate_drain_timeout);
7409 
7410 	return retval;
7411 }
7412 
7413 
7414 static void
7415 hibernate_page_list_zero(hibernate_page_list_t *list)
7416 {
7417 	uint32_t             bank;
7418 	hibernate_bitmap_t * bitmap;
7419 
7420 	bitmap = &list->bank_bitmap[0];
7421 	for (bank = 0; bank < list->bank_count; bank++) {
7422 		uint32_t last_bit;
7423 
7424 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7425 		// set out-of-bound bits at end of bitmap.
7426 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7427 		if (last_bit) {
7428 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7429 		}
7430 
7431 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7432 	}
7433 }
7434 
7435 void
7436 hibernate_free_gobble_pages(void)
7437 {
7438 	vm_page_t m, next;
7439 	uint32_t  count = 0;
7440 
7441 	m = (vm_page_t) hibernate_gobble_queue;
7442 	while (m) {
7443 		next = m->vmp_snext;
7444 		vm_page_free(m);
7445 		count++;
7446 		m = next;
7447 	}
7448 	hibernate_gobble_queue = VM_PAGE_NULL;
7449 
7450 	if (count) {
7451 		HIBLOG("Freed %d pages\n", count);
7452 	}
7453 }
7454 
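/*
 * Decide whether this page can be omitted from the hibernation image and
 * discarded on wakeup: clean pages and pages of volatile/empty purgeable
 * objects qualify, unless the page is wired, busy, precious, being
 * cleaned/laundered, or is a recently referenced xpmapped page still within
 * the HIBERNATE_XPMAPPED_LIMIT budget.
 */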
7455 static boolean_t
7456 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7457 {
7458 	vm_object_t object = NULL;
7459 	int                  refmod_state;
7460 	boolean_t            discard = FALSE;
7461 
7462 	do {
7463 		if (m->vmp_private) {
7464 			panic("hibernate_consider_discard: private");
7465 		}
7466 
7467 		object = VM_PAGE_OBJECT(m);
7468 
7469 		if (!vm_object_lock_try(object)) {
7470 			object = NULL;
7471 			if (!preflight) {
7472 				hibernate_stats.cd_lock_failed++;
7473 			}
7474 			break;
7475 		}
7476 		if (VM_PAGE_WIRED(m)) {
7477 			if (!preflight) {
7478 				hibernate_stats.cd_found_wired++;
7479 			}
7480 			break;
7481 		}
7482 		if (m->vmp_precious) {
7483 			if (!preflight) {
7484 				hibernate_stats.cd_found_precious++;
7485 			}
7486 			break;
7487 		}
7488 		if (m->vmp_busy || !object->alive) {
7489 			/*
7490 			 *	Somebody is playing with this page.
7491 			 */
7492 			if (!preflight) {
7493 				hibernate_stats.cd_found_busy++;
7494 			}
7495 			break;
7496 		}
7497 		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7498 			/*
7499 			 * If it's unusual in any way, ignore it
7500 			 */
7501 			if (!preflight) {
7502 				hibernate_stats.cd_found_unusual++;
7503 			}
7504 			break;
7505 		}
7506 		if (m->vmp_cleaning) {
7507 			if (!preflight) {
7508 				hibernate_stats.cd_found_cleaning++;
7509 			}
7510 			break;
7511 		}
7512 		if (m->vmp_laundry) {
7513 			if (!preflight) {
7514 				hibernate_stats.cd_found_laundry++;
7515 			}
7516 			break;
7517 		}
7518 		if (!m->vmp_dirty) {
7519 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7520 
7521 			if (refmod_state & VM_MEM_REFERENCED) {
7522 				m->vmp_reference = TRUE;
7523 			}
7524 			if (refmod_state & VM_MEM_MODIFIED) {
7525 				SET_PAGE_DIRTY(m, FALSE);
7526 			}
7527 		}
7528 
7529 		/*
7530 		 * If it's clean or purgeable we can discard the page on wakeup.
7531 		 */
7532 		discard = (!m->vmp_dirty)
7533 		    || (VM_PURGABLE_VOLATILE == object->purgable)
7534 		    || (VM_PURGABLE_EMPTY == object->purgable);
7535 
7536 
7537 		if (discard == FALSE) {
7538 			if (!preflight) {
7539 				hibernate_stats.cd_found_dirty++;
7540 			}
7541 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7542 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7543 				if (!preflight) {
7544 					hibernate_stats.cd_found_xpmapped++;
7545 				}
7546 				discard = FALSE;
7547 			} else {
7548 				if (!preflight) {
7549 					hibernate_stats.cd_skipped_xpmapped++;
7550 				}
7551 			}
7552 		}
7553 	} while (FALSE);
7554 
7555 	if (object) {
7556 		vm_object_unlock(object);
7557 	}
7558 
7559 	return discard;
7560 }
7561 
7562 
7563 static void
7564 hibernate_discard_page(vm_page_t m)
7565 {
7566 	vm_object_t m_object;
7567 
7568 	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7569 		/*
7570 		 * If it's unusual in any way, ignore it
7571 		 */
7572 		return;
7573 	}
7574 
7575 	m_object = VM_PAGE_OBJECT(m);
7576 
7577 #if MACH_ASSERT || DEBUG
7578 	if (!vm_object_lock_try(m_object)) {
7579 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7580 	}
7581 #else
7582 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
7583 	 *  makes sure these locks are uncontended before sleep */
7584 #endif /* MACH_ASSERT || DEBUG */
7585 
7586 	if (m->vmp_pmapped == TRUE) {
7587 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7588 	}
7589 
7590 	if (m->vmp_laundry) {
7591 		panic("hibernate_discard_page(%p) laundry", m);
7592 	}
7593 	if (m->vmp_private) {
7594 		panic("hibernate_discard_page(%p) private", m);
7595 	}
7596 	if (m->vmp_fictitious) {
7597 		panic("hibernate_discard_page(%p) fictitious", m);
7598 	}
7599 
7600 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7601 		/* object should be on a queue */
7602 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7603 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7604 		assert(old_queue);
7605 		if (m_object->purgeable_when_ripe) {
7606 			vm_purgeable_token_delete_first(old_queue);
7607 		}
7608 		vm_object_lock_assert_exclusive(m_object);
7609 		m_object->purgable = VM_PURGABLE_EMPTY;
7610 
7611 		/*
7612 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
7613 		 * accounted in the "volatile" ledger, so no change here.
7614 		 * We have to update vm_page_purgeable_count, though, since we're
7615 		 * effectively purging this object.
7616 		 */
7617 		unsigned int delta;
7618 		assert(m_object->resident_page_count >= m_object->wired_page_count);
7619 		delta = (m_object->resident_page_count - m_object->wired_page_count);
7620 		assert(vm_page_purgeable_count >= delta);
7621 		assert(delta > 0);
7622 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7623 	}
7624 
7625 	vm_page_free(m);
7626 
7627 #if MACH_ASSERT || DEBUG
7628 	vm_object_unlock(m_object);
7629 #endif  /* MACH_ASSERT || DEBUG */
7630 }
7631 
7632 /*
7633  *  Grab locks for hibernate_page_list_setall()
7634  */
7635 void
7636 hibernate_vm_lock_queues(void)
7637 {
7638 	vm_object_lock(compressor_object);
7639 	vm_page_lock_queues();
7640 	vm_free_page_lock();
7641 	lck_mtx_lock(&vm_purgeable_queue_lock);
7642 
7643 	if (vm_page_local_q) {
7644 		zpercpu_foreach(lq, vm_page_local_q) {
7645 			VPL_LOCK(&lq->vpl_lock);
7646 		}
7647 	}
7648 }
7649 
7650 void
7651 hibernate_vm_unlock_queues(void)
7652 {
7653 	if (vm_page_local_q) {
7654 		zpercpu_foreach(lq, vm_page_local_q) {
7655 			VPL_UNLOCK(&lq->vpl_lock);
7656 		}
7657 	}
7658 	lck_mtx_unlock(&vm_purgeable_queue_lock);
7659 	vm_free_page_unlock();
7660 	vm_page_unlock_queues();
7661 	vm_object_unlock(compressor_object);
7662 }
7663 
7664 /*
7665  *  Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7666  *  pages known to VM to not need saving are subtracted.
7667  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
7668  */
7669 
7670 void
7671 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7672     hibernate_page_list_t * page_list_wired,
7673     hibernate_page_list_t * page_list_pal,
7674     boolean_t preflight,
7675     boolean_t will_discard,
7676     uint32_t * pagesOut)
7677 {
7678 	uint64_t start, end, nsec;
7679 	vm_page_t m;
7680 	vm_page_t next;
7681 	uint32_t pages = page_list->page_count;
7682 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7683 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7684 	uint32_t count_wire = pages;
7685 	uint32_t count_discard_active    = 0;
7686 	uint32_t count_discard_inactive  = 0;
7687 	uint32_t count_retired = 0;
7688 	uint32_t count_discard_cleaned   = 0;
7689 	uint32_t count_discard_purgeable = 0;
7690 	uint32_t count_discard_speculative = 0;
7691 	uint32_t count_discard_vm_struct_pages = 0;
7692 	uint32_t i;
7693 	uint32_t             bank;
7694 	hibernate_bitmap_t * bitmap;
7695 	hibernate_bitmap_t * bitmap_wired;
7696 	boolean_t                    discard_all;
7697 	boolean_t            discard = FALSE;
7698 
7699 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7700 
7701 	if (preflight) {
7702 		page_list       = NULL;
7703 		page_list_wired = NULL;
7704 		page_list_pal   = NULL;
7705 		discard_all     = FALSE;
7706 	} else {
7707 		discard_all     = will_discard;
7708 	}
7709 
7710 #if MACH_ASSERT || DEBUG
7711 	if (!preflight) {
7712 		assert(hibernate_vm_locks_are_safe());
7713 		vm_page_lock_queues();
7714 		if (vm_page_local_q) {
7715 			zpercpu_foreach(lq, vm_page_local_q) {
7716 				VPL_LOCK(&lq->vpl_lock);
7717 			}
7718 		}
7719 	}
7720 #endif  /* MACH_ASSERT || DEBUG */
7721 
7722 
7723 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7724 
7725 	clock_get_uptime(&start);
7726 
7727 	if (!preflight) {
7728 		hibernate_page_list_zero(page_list);
7729 		hibernate_page_list_zero(page_list_wired);
7730 		hibernate_page_list_zero(page_list_pal);
7731 
7732 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7733 		hibernate_stats.cd_pages = pages;
7734 	}
7735 
7736 	if (vm_page_local_q) {
7737 		zpercpu_foreach_cpu(lid) {
7738 			vm_page_reactivate_local(lid, TRUE, !preflight);
7739 		}
7740 	}
7741 
7742 	if (preflight) {
7743 		vm_object_lock(compressor_object);
7744 		vm_page_lock_queues();
7745 		vm_free_page_lock();
7746 	}
7747 
7748 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7749 
7750 	hibernation_vmqueues_inspection = TRUE;
7751 
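	/*
	 * Walk every page queue.  Pages on the gobble and free queues never
	 * need saving; pages on the pageable queues may be discardable when
	 * the corresponding kIOHibernateModeDiscardClean* mode is set; any
	 * page not visited on a queue remains counted in count_wire as wired.
	 */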
7752 	m = (vm_page_t) hibernate_gobble_queue;
7753 	while (m) {
7754 		pages--;
7755 		count_wire--;
7756 		if (!preflight) {
7757 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7758 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7759 		}
7760 		m = m->vmp_snext;
7761 	}
7762 
7763 	if (!preflight) {
7764 		percpu_foreach(free_pages_head, free_pages) {
7765 			for (m = *free_pages_head; m; m = m->vmp_snext) {
7766 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7767 
7768 				pages--;
7769 				count_wire--;
7770 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7771 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7772 
7773 				hibernate_stats.cd_local_free++;
7774 				hibernate_stats.cd_total_free++;
7775 			}
7776 		}
7777 	}
7778 
7779 	for (i = 0; i < vm_colors; i++) {
7780 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7781 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7782 
7783 			pages--;
7784 			count_wire--;
7785 			if (!preflight) {
7786 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7787 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7788 
7789 				hibernate_stats.cd_total_free++;
7790 			}
7791 		}
7792 	}
7793 
7794 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7795 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7796 
7797 		pages--;
7798 		count_wire--;
7799 		if (!preflight) {
7800 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7801 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7802 
7803 			hibernate_stats.cd_total_free++;
7804 		}
7805 	}
7806 
7807 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
7808 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
7809 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
7810 
7811 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7812 		discard = FALSE;
7813 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
7814 		    && hibernate_consider_discard(m, preflight)) {
7815 			if (!preflight) {
7816 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7817 			}
7818 			count_discard_inactive++;
7819 			discard = discard_all;
7820 		} else {
7821 			count_throttled++;
7822 		}
7823 		count_wire--;
7824 		if (!preflight) {
7825 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7826 		}
7827 
7828 		if (discard) {
7829 			hibernate_discard_page(m);
7830 		}
7831 		m = next;
7832 	}
7833 
7834 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
7835 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7836 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7837 
7838 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7839 		discard = FALSE;
7840 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7841 		    hibernate_consider_discard(m, preflight)) {
7842 			if (!preflight) {
7843 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7844 			}
7845 			if (m->vmp_dirty) {
7846 				count_discard_purgeable++;
7847 			} else {
7848 				count_discard_inactive++;
7849 			}
7850 			discard = discard_all;
7851 		} else {
7852 			count_anonymous++;
7853 		}
7854 		count_wire--;
7855 		if (!preflight) {
7856 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7857 		}
7858 		if (discard) {
7859 			hibernate_discard_page(m);
7860 		}
7861 		m = next;
7862 	}
7863 
7864 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7865 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7866 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7867 
7868 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7869 		discard = FALSE;
7870 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7871 		    hibernate_consider_discard(m, preflight)) {
7872 			if (!preflight) {
7873 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7874 			}
7875 			if (m->vmp_dirty) {
7876 				count_discard_purgeable++;
7877 			} else {
7878 				count_discard_cleaned++;
7879 			}
7880 			discard = discard_all;
7881 		} else {
7882 			count_cleaned++;
7883 		}
7884 		count_wire--;
7885 		if (!preflight) {
7886 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7887 		}
7888 		if (discard) {
7889 			hibernate_discard_page(m);
7890 		}
7891 		m = next;
7892 	}
7893 
7894 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7895 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7896 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7897 
7898 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7899 		discard = FALSE;
7900 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
7901 		    hibernate_consider_discard(m, preflight)) {
7902 			if (!preflight) {
7903 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7904 			}
7905 			if (m->vmp_dirty) {
7906 				count_discard_purgeable++;
7907 			} else {
7908 				count_discard_active++;
7909 			}
7910 			discard = discard_all;
7911 		} else {
7912 			count_active++;
7913 		}
7914 		count_wire--;
7915 		if (!preflight) {
7916 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7917 		}
7918 		if (discard) {
7919 			hibernate_discard_page(m);
7920 		}
7921 		m = next;
7922 	}
7923 
7924 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7925 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7926 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7927 
7928 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7929 		discard = FALSE;
7930 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7931 		    hibernate_consider_discard(m, preflight)) {
7932 			if (!preflight) {
7933 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7934 			}
7935 			if (m->vmp_dirty) {
7936 				count_discard_purgeable++;
7937 			} else {
7938 				count_discard_inactive++;
7939 			}
7940 			discard = discard_all;
7941 		} else {
7942 			count_inactive++;
7943 		}
7944 		count_wire--;
7945 		if (!preflight) {
7946 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7947 		}
7948 		if (discard) {
7949 			hibernate_discard_page(m);
7950 		}
7951 		m = next;
7952 	}
7953 	/* XXX FBDP TODO: secluded queue */
7954 
7955 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7956 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7957 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7958 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
7959 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7960 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
7961 
7962 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7963 			discard = FALSE;
7964 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7965 			    hibernate_consider_discard(m, preflight)) {
7966 				if (!preflight) {
7967 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7968 				}
7969 				count_discard_speculative++;
7970 				discard = discard_all;
7971 			} else {
7972 				count_speculative++;
7973 			}
7974 			count_wire--;
7975 			if (!preflight) {
7976 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7977 			}
7978 			if (discard) {
7979 				hibernate_discard_page(m);
7980 			}
7981 			m = next;
7982 		}
7983 	}
7984 
7985 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
7986 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
7987 
7988 		count_compressor++;
7989 		count_wire--;
7990 		if (!preflight) {
7991 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7992 		}
7993 	}
7994 
7995 
7996 	if (preflight == FALSE && discard_all == TRUE) {
7997 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
7998 
7999 		HIBLOG("hibernate_teardown started\n");
8000 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8001 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8002 
8003 		pages -= count_discard_vm_struct_pages;
8004 		count_wire -= count_discard_vm_struct_pages;
8005 
8006 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8007 
8008 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8009 	}
8010 
8011 	if (!preflight) {
8012 		// pull wired from hibernate_bitmap: mark pages that will be saved
8012 		// via page_list_wired as not needing to be saved in page_list
8013 		bitmap = &page_list->bank_bitmap[0];
8014 		bitmap_wired = &page_list_wired->bank_bitmap[0];
8015 		for (bank = 0; bank < page_list->bank_count; bank++) {
8016 			for (i = 0; i < bitmap->bitmapwords; i++) {
8017 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8018 			}
8019 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8020 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8021 		}
8022 	}
8023 
8024 	// machine dependent adjustments
8025 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8026 
8027 	if (!preflight) {
8028 		hibernate_stats.cd_count_wire = count_wire;
8029 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8030 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8031 	}
8032 
8033 	clock_get_uptime(&end);
8034 	absolutetime_to_nanoseconds(end - start, &nsec);
8035 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8036 
8037 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8038 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8039 	    discard_all ? "did" : "could",
8040 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8041 
8042 	if (hibernate_stats.cd_skipped_xpmapped) {
8043 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8044 	}
8045 
8046 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8047 
8048 	if (preflight && will_discard) {
8049 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8050 		/*
8051 		 * We try to keep max HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8052 		 * even if these are clean and so we need to size the hibernation image accordingly.
8053 		 *
8054 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8055 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8056 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8057 		 * clean xpmapped pages.
8058 		 *
8059 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8060 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8061 		 */
8062 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
8063 	}
8064 
8065 	hibernation_vmqueues_inspection = FALSE;
8066 
8067 #if MACH_ASSERT || DEBUG
8068 	if (!preflight) {
8069 		if (vm_page_local_q) {
8070 			zpercpu_foreach(lq, vm_page_local_q) {
8071 				VPL_UNLOCK(&lq->vpl_lock);
8072 			}
8073 		}
8074 		vm_page_unlock_queues();
8075 	}
8076 #endif  /* MACH_ASSERT || DEBUG */
8077 
8078 	if (preflight) {
8079 		vm_free_page_unlock();
8080 		vm_page_unlock_queues();
8081 		vm_object_unlock(compressor_object);
8082 	}
8083 
8084 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8085 }
8086 
8087 void
8088 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8089 {
8090 	uint64_t  start, end, nsec;
8091 	vm_page_t m;
8092 	vm_page_t next;
8093 	uint32_t  i;
8094 	uint32_t  count_discard_active    = 0;
8095 	uint32_t  count_discard_inactive  = 0;
8096 	uint32_t  count_discard_purgeable = 0;
8097 	uint32_t  count_discard_cleaned   = 0;
8098 	uint32_t  count_discard_speculative = 0;
8099 
8100 
8101 #if MACH_ASSERT || DEBUG
8102 	vm_page_lock_queues();
8103 	if (vm_page_local_q) {
8104 		zpercpu_foreach(lq, vm_page_local_q) {
8105 			VPL_LOCK(&lq->vpl_lock);
8106 		}
8107 	}
8108 #endif  /* MACH_ASSERT || DEBUG */
8109 
8110 	clock_get_uptime(&start);
8111 
8112 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8113 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8114 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8115 
8116 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8117 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8118 			if (m->vmp_dirty) {
8119 				count_discard_purgeable++;
8120 			} else {
8121 				count_discard_inactive++;
8122 			}
8123 			hibernate_discard_page(m);
8124 		}
8125 		m = next;
8126 	}
8127 
8128 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
8129 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8130 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8131 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8132 
8133 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8134 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8135 				count_discard_speculative++;
8136 				hibernate_discard_page(m);
8137 			}
8138 			m = next;
8139 		}
8140 	}
8141 
8142 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8143 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8144 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8145 
8146 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8147 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8148 			if (m->vmp_dirty) {
8149 				count_discard_purgeable++;
8150 			} else {
8151 				count_discard_inactive++;
8152 			}
8153 			hibernate_discard_page(m);
8154 		}
8155 		m = next;
8156 	}
8157 	/* XXX FBDP TODO: secluded queue */
8158 
8159 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8160 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8161 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8162 
8163 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8164 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8165 			if (m->vmp_dirty) {
8166 				count_discard_purgeable++;
8167 			} else {
8168 				count_discard_active++;
8169 			}
8170 			hibernate_discard_page(m);
8171 		}
8172 		m = next;
8173 	}
8174 
8175 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8176 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8177 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8178 
8179 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8180 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8181 			if (m->vmp_dirty) {
8182 				count_discard_purgeable++;
8183 			} else {
8184 				count_discard_cleaned++;
8185 			}
8186 			hibernate_discard_page(m);
8187 		}
8188 		m = next;
8189 	}
8190 
8191 #if MACH_ASSERT || DEBUG
8192 	if (vm_page_local_q) {
8193 		zpercpu_foreach(lq, vm_page_local_q) {
8194 			VPL_UNLOCK(&lq->vpl_lock);
8195 		}
8196 	}
8197 	vm_page_unlock_queues();
8198 #endif  /* MACH_ASSERT || DEBUG */
8199 
8200 	clock_get_uptime(&end);
8201 	absolutetime_to_nanoseconds(end - start, &nsec);
8202 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8203 	    nsec / 1000000ULL,
8204 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8205 }
8206 
8207 boolean_t       hibernate_paddr_map_inited = FALSE;
8208 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8209 vm_page_t       hibernate_rebuild_hash_list = NULL;
8210 
8211 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8212 unsigned int    hibernate_teardown_found_created_pages = 0;
8213 unsigned int    hibernate_teardown_found_free_pages = 0;
8214 unsigned int    hibernate_teardown_vm_page_free_count;
8215 
8216 
8217 struct ppnum_mapping {
8218 	struct ppnum_mapping    *ppnm_next;
8219 	ppnum_t                 ppnm_base_paddr;
8220 	unsigned int            ppnm_sindx;
8221 	unsigned int            ppnm_eindx;
8222 };
8223 
8224 struct ppnum_mapping    *ppnm_head;
8225 struct ppnum_mapping    *ppnm_last_found = NULL;
8226 
8227 
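/*
 * Build a list of ppnum_mapping runs describing the physically contiguous
 * stretches covered by vm_pages[], so an index into that array can later be
 * translated back to a physical page number (see hibernate_lookup_paddr).
 */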
8228 void
8229 hibernate_create_paddr_map(void)
8230 {
8231 	unsigned int    i;
8232 	ppnum_t         next_ppnum_in_run = 0;
8233 	struct ppnum_mapping *ppnm = NULL;
8234 
8235 	if (hibernate_paddr_map_inited == FALSE) {
8236 		for (i = 0; i < vm_pages_count; i++) {
8237 			if (ppnm) {
8238 				ppnm->ppnm_eindx = i;
8239 			}
8240 
8241 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8242 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
8243 
8244 				ppnm->ppnm_next = ppnm_head;
8245 				ppnm_head = ppnm;
8246 
8247 				ppnm->ppnm_sindx = i;
8248 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8249 			}
8250 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8251 		}
8252 		ppnm->ppnm_eindx = vm_pages_count;
8253 
8254 		hibernate_paddr_map_inited = TRUE;
8255 	}
8256 }
8257 
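/*
 * Translate a vm_pages[] index into its physical page number using the run
 * table built by hibernate_create_paddr_map; the last matching run is cached
 * in ppnm_last_found to speed up sequential lookups.
 */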
8258 ppnum_t
8259 hibernate_lookup_paddr(unsigned int indx)
8260 {
8261 	struct ppnum_mapping *ppnm = NULL;
8262 
8263 	ppnm = ppnm_last_found;
8264 
8265 	if (ppnm) {
8266 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8267 			goto done;
8268 		}
8269 	}
8270 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8271 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8272 			ppnm_last_found = ppnm;
8273 			break;
8274 		}
8275 	}
8276 	if (ppnm == NULL) {
8277 		panic("hibernate_lookup_paddr of %d failed", indx);
8278 	}
8279 done:
8280 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8281 }
8282 
8283 
8284 uint32_t
8285 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8286 {
8287 	addr64_t        saddr_aligned;
8288 	addr64_t        eaddr_aligned;
8289 	addr64_t        addr;
8290 	ppnum_t         paddr;
8291 	unsigned int    mark_as_unneeded_pages = 0;
8292 
8293 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8294 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
8295 
8296 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8297 		paddr = pmap_find_phys(kernel_pmap, addr);
8298 
8299 		assert(paddr);
8300 
8301 		hibernate_page_bitset(page_list, TRUE, paddr);
8302 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
8303 
8304 		mark_as_unneeded_pages++;
8305 	}
8306 	return mark_as_unneeded_pages;
8307 }
8308 
8309 
8310 void
8311 hibernate_hash_insert_page(vm_page_t mem)
8312 {
8313 	vm_page_bucket_t *bucket;
8314 	int             hash_id;
8315 	vm_object_t     m_object;
8316 
8317 	m_object = VM_PAGE_OBJECT(mem);
8318 
8319 	assert(mem->vmp_hashed);
8320 	assert(m_object);
8321 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
8322 
8323 	/*
8324 	 *	Insert it into the object_object/offset hash table
8325 	 */
8326 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
8327 	bucket = &vm_page_buckets[hash_id];
8328 
8329 	mem->vmp_next_m = bucket->page_list;
8330 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
8331 }
8332 
8333 
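/*
 * Reinitialize vm_pages[sindx..eindx) as free pages and put them back on the
 * per-color free queues; hibernate_rebuild_vm_structs uses this to repopulate
 * the holes left by the teardown-time compaction.
 */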
8334 void
8335 hibernate_free_range(int sindx, int eindx)
8336 {
8337 	vm_page_t       mem;
8338 	unsigned int    color;
8339 
8340 	while (sindx < eindx) {
8341 		mem = &vm_pages[sindx];
8342 
8343 		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8344 
8345 		mem->vmp_lopage = FALSE;
8346 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8347 
8348 		color = VM_PAGE_GET_COLOR(mem);
8349 #if defined(__x86_64__)
8350 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8351 #else
8352 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8353 #endif
8354 		vm_page_free_count++;
8355 
8356 		sindx++;
8357 	}
8358 }
8359 
8360 void
8361 hibernate_rebuild_vm_structs(void)
8362 {
8363 	int             i, cindx, sindx, eindx;
8364 	vm_page_t       mem, tmem, mem_next;
8365 	AbsoluteTime    startTime, endTime;
8366 	uint64_t        nsec;
8367 
8368 	if (hibernate_rebuild_needed == FALSE) {
8369 		return;
8370 	}
8371 
8372 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8373 	HIBLOG("hibernate_rebuild started\n");
8374 
8375 	clock_get_uptime(&startTime);
8376 
8377 	pal_hib_rebuild_pmap_structs();
8378 
8379 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8380 	eindx = vm_pages_count;
8381 
8382 	/*
8383 	 * Mark all the vm_pages[] that have not been initialized yet as being
8384 	 * transient. This is needed to ensure that the buddy page search is correct.
8385 	 * Without this, random data in these vm_pages[] can trip up the buddy search.
8386 	 */
8387 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8388 		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8389 	}
8390 
8391 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8392 		mem = &vm_pages[cindx];
8393 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8394 		/*
8395 		 * hibernate_teardown_vm_structs leaves the location where
8396 		 * this vm_page_t must be located in "vmp_next_m".
8397 		 */
8398 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8399 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8400 
8401 		sindx = (int)(tmem - &vm_pages[0]);
8402 
8403 		if (mem != tmem) {
8404 			/*
8405 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8406 			 * so move it back to its real location
8407 			 */
8408 			*tmem = *mem;
8409 			mem = tmem;
8410 		}
8411 		if (mem->vmp_hashed) {
8412 			hibernate_hash_insert_page(mem);
8413 		}
8414 		/*
8415 		 * the 'hole' between this vm_page_t and the previous
8416 		 * vm_page_t we moved needs to be initialized as
8417 		 * a range of free vm_page_t's
8418 		 */
8419 		hibernate_free_range(sindx + 1, eindx);
8420 
8421 		eindx = sindx;
8422 	}
8423 	if (sindx) {
8424 		hibernate_free_range(0, sindx);
8425 	}
8426 
8427 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8428 
8429 	/*
8430 	 * process the list of vm_page_t's that were entered in the hash,
8431 	 * but were not located in the vm_pages array... these are
8432 	 * vm_page_t's that were created on the fly (i.e. fictitious)
8433 	 */
8434 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8435 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8436 
8437 		mem->vmp_next_m = 0;
8438 		hibernate_hash_insert_page(mem);
8439 	}
8440 	hibernate_rebuild_hash_list = NULL;
8441 
8442 	clock_get_uptime(&endTime);
8443 	SUB_ABSOLUTETIME(&endTime, &startTime);
8444 	absolutetime_to_nanoseconds(endTime, &nsec);
8445 
8446 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8447 
8448 	hibernate_rebuild_needed = FALSE;
8449 
8450 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8451 }
8452 
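/*
 * Compact vm_pages[] by copying in-use entries down over free ones so that
 * the tail of the array, the page hash buckets and some pmap data can be
 * excluded from the image; hibernate_rebuild_vm_structs reverses this after
 * wake.  Returns the number of pages marked as unneeded.
 */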
8453 uint32_t
8454 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8455 {
8456 	unsigned int    i;
8457 	unsigned int    compact_target_indx;
8458 	vm_page_t       mem, mem_next;
8459 	vm_page_bucket_t *bucket;
8460 	unsigned int    mark_as_unneeded_pages = 0;
8461 	unsigned int    unneeded_vm_page_bucket_pages = 0;
8462 	unsigned int    unneeded_vm_pages_pages = 0;
8463 	unsigned int    unneeded_pmap_pages = 0;
8464 	addr64_t        start_of_unneeded = 0;
8465 	addr64_t        end_of_unneeded = 0;
8466 
8467 
8468 	if (hibernate_should_abort()) {
8469 		return 0;
8470 	}
8471 
8472 	hibernate_rebuild_needed = TRUE;
8473 
8474 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8475 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8476 	    vm_page_cleaned_count, compressor_object->resident_page_count);
8477 
8478 	for (i = 0; i < vm_page_bucket_count; i++) {
8479 		bucket = &vm_page_buckets[i];
8480 
8481 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8482 			assert(mem->vmp_hashed);
8483 
8484 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8485 
8486 			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8487 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8488 				hibernate_rebuild_hash_list = mem;
8489 			}
8490 		}
8491 	}
8492 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8493 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8494 
8495 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
8496 
8497 	compact_target_indx = 0;
8498 
8499 	for (i = 0; i < vm_pages_count; i++) {
8500 		mem = &vm_pages[i];
8501 
8502 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8503 			unsigned int color;
8504 
8505 			assert(mem->vmp_busy);
8506 			assert(!mem->vmp_lopage);
8507 
8508 			color = VM_PAGE_GET_COLOR(mem);
8509 
8510 			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8511 
8512 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8513 
8514 			vm_page_free_count--;
8515 
8516 			hibernate_teardown_found_free_pages++;
8517 
8518 			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8519 				compact_target_indx = i;
8520 			}
8521 		} else {
8522 			/*
8523 			 * record this vm_page_t's original location
8524 			 * we need this even if it doesn't get moved
8525 			 * as an indicator to the rebuild function that
8526 			 * we don't have to move it
8527 			 */
8528 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8529 
8530 			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8531 				/*
8532 				 * we've got a hole to fill, so
8533 				 * move this vm_page_t to its new home
8534 				 */
8535 				vm_pages[compact_target_indx] = *mem;
8536 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8537 
8538 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8539 				compact_target_indx++;
8540 			} else {
8541 				hibernate_teardown_last_valid_compact_indx = i;
8542 			}
8543 		}
8544 	}
8545 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8546 	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8547 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
8548 
8549 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8550 
8551 	if (start_of_unneeded) {
8552 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8553 		mark_as_unneeded_pages += unneeded_pmap_pages;
8554 	}
8555 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8556 
8557 	return mark_as_unneeded_pages;
8558 }
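/*
 * Illustrative sketch (not compiled): the compaction loop above keeps a trailing
 * "compact_target_indx" that always points at the lowest-indexed free slot seen
 * so far; occupied entries found later are copied down into that slot and their
 * old slot becomes free.  A minimal stand-alone version of the same two-pointer
 * idea, on a toy array of ints where 0 means "free" (all names hypothetical):
 */
#if 0
static unsigned int
toy_compact(int slots[], unsigned int count)
{
	unsigned int target = 0;        /* lowest free slot seen so far */
	unsigned int last_valid = 0;    /* analogous to hibernate_teardown_last_valid_compact_indx */

	for (unsigned int i = 0; i < count; i++) {
		if (slots[i] == 0) {
			/* free slot: start tracking it if we aren't already tracking one */
			if (slots[target] != 0) {
				target = i;
			}
		} else if (slots[target] == 0) {
			/* occupied entry with a hole below it: move it down */
			slots[target] = slots[i];
			slots[i] = 0;
			last_valid = target++;
		} else {
			last_valid = i;
		}
	}
	return last_valid;
}
#endif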
8559 
8560 
8561 #endif /* HIBERNATION */
8562 
8563 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8564 
8565 #include <mach_vm_debug.h>
8566 #if     MACH_VM_DEBUG
8567 
8568 #include <mach_debug/hash_info.h>
8569 #include <vm/vm_debug.h>
8570 
8571 /*
8572  *	Routine:	vm_page_info
8573  *	Purpose:
8574  *		Return information about the global VP table.
8575  *		Fills the buffer with as much information as possible
8576  *		and returns the desired size of the buffer.
8577  *	Conditions:
8578  *		Nothing locked.  The caller should provide
8579  *		possibly-pageable memory.
8580  */
8581 
8582 unsigned int
8583 vm_page_info(
8584 	hash_info_bucket_t *info,
8585 	unsigned int count)
8586 {
8587 	unsigned int i;
8588 	lck_spin_t      *bucket_lock;
8589 
8590 	if (vm_page_bucket_count < count) {
8591 		count = vm_page_bucket_count;
8592 	}
8593 
8594 	for (i = 0; i < count; i++) {
8595 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
8596 		unsigned int bucket_count = 0;
8597 		vm_page_t m;
8598 
8599 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8600 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8601 
8602 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8603 		    m != VM_PAGE_NULL;
8604 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8605 			bucket_count++;
8606 		}
8607 
8608 		lck_spin_unlock(bucket_lock);
8609 
8610 		/* don't touch pageable memory while holding locks */
8611 		info[i].hib_count = bucket_count;
8612 	}
8613 
8614 	return vm_page_bucket_count;
8615 }
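/*
 * Usage sketch (not compiled): vm_page_info follows the usual Mach
 * "fill what fits, report what was wanted" convention, so a hypothetical
 * caller can size its buffer from the return value and then call again.
 * The helper name and the threshold below are made up for illustration.
 */
#if 0
static void
example_dump_bucket_histogram(void)
{
	unsigned int want = vm_page_info(NULL, 0);        /* fills nothing, returns bucket count */
	hash_info_bucket_t *buf = kalloc_data(want * sizeof(*buf), Z_WAITOK | Z_ZERO);
	unsigned int have = vm_page_info(buf, want);      /* fills min(want, have) entries */

	for (unsigned int i = 0; i < MIN(want, have); i++) {
		if (buf[i].hib_count > 16) {
			printf("bucket %u holds %u pages\n", i, buf[i].hib_count);
		}
	}
	kfree_data(buf, want * sizeof(*buf));
}
#endif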
8616 #endif  /* MACH_VM_DEBUG */
8617 
8618 #if VM_PAGE_BUCKETS_CHECK
8619 void
8620 vm_page_buckets_check(void)
8621 {
8622 	unsigned int i;
8623 	vm_page_t p;
8624 	unsigned int p_hash;
8625 	vm_page_bucket_t *bucket;
8626 	lck_spin_t      *bucket_lock;
8627 
8628 	if (!vm_page_buckets_check_ready) {
8629 		return;
8630 	}
8631 
8632 #if HIBERNATION
8633 	if (hibernate_rebuild_needed ||
8634 	    hibernate_rebuild_hash_list) {
8635 		panic("BUCKET_CHECK: hibernation in progress: "
8636 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
8637 		    hibernate_rebuild_needed,
8638 		    hibernate_rebuild_hash_list);
8639 	}
8640 #endif /* HIBERNATION */
8641 
8642 #if VM_PAGE_FAKE_BUCKETS
8643 	char *cp;
8644 	for (cp = (char *) vm_page_fake_buckets_start;
8645 	    cp < (char *) vm_page_fake_buckets_end;
8646 	    cp++) {
8647 		if (*cp != 0x5a) {
8648 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
8649 			    "[0x%llx:0x%llx]\n",
8650 			    cp,
8651 			    (uint64_t) vm_page_fake_buckets_start,
8652 			    (uint64_t) vm_page_fake_buckets_end);
8653 		}
8654 	}
8655 #endif /* VM_PAGE_FAKE_BUCKETS */
8656 
8657 	for (i = 0; i < vm_page_bucket_count; i++) {
8658 		vm_object_t     p_object;
8659 
8660 		bucket = &vm_page_buckets[i];
8661 		if (!bucket->page_list) {
8662 			continue;
8663 		}
8664 
8665 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8666 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8667 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8668 
8669 		while (p != VM_PAGE_NULL) {
8670 			p_object = VM_PAGE_OBJECT(p);
8671 
8672 			/* compute the hash first so the panic below reports a valid value */
8673 			p_hash = vm_page_hash(p_object, p->vmp_offset);
8674 			if (!p->vmp_hashed) {
8675 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8676 				    "hash %d in bucket %d at %p "
8677 				    "is not hashed\n",
8678 				    p, p_object, p->vmp_offset,
8679 				    p_hash, i, bucket);
8680 			}
8680 			if (p_hash != i) {
8681 				panic("BUCKET_CHECK: corruption in bucket %d "
8682 				    "at %p: page %p object %p offset 0x%llx "
8683 				    "hash %d\n",
8684 				    i, bucket, p, p_object, p->vmp_offset,
8685 				    p_hash);
8686 			}
8687 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8688 		}
8689 		lck_spin_unlock(bucket_lock);
8690 	}
8691 
8692 //	printf("BUCKET_CHECK: checked buckets\n");
8693 }
8694 #endif /* VM_PAGE_BUCKETS_CHECK */
8695 
8696 /*
8697  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8698  * local queues if they exist... it's the only spot in the system where we add pages
8699  * to those queues...  once on those queues, those pages can only move to one of the
8700  * global page queues or the free queues... they NEVER move from local q to local q.
8701  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8702  * the global vm_page_queue_lock at this point...  we still need to take the local lock
8703  * in case this operation is being run on a different CPU than the local queue's identity,
8704  * but we don't have to worry about the page moving to a global queue or becoming wired
8705  * while we're grabbing the local lock since those operations would require the global
8706  * vm_page_queue_lock to be held, and we already own it.
8707  *
8708  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8709  * 'wired' and local are ALWAYS mutually exclusive conditions.
8710  */
8711 
8712 void
8713 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8714 {
8715 	boolean_t       was_pageable = TRUE;
8716 	vm_object_t     m_object;
8717 
8718 	m_object = VM_PAGE_OBJECT(mem);
8719 
8720 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8721 
8722 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8723 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8724 		if (remove_from_specialq == TRUE) {
8725 			vm_page_remove_from_specialq(mem);
8726 		}
8727 		/*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
8728 		 *       assert(mem->vmp_specialq.next != 0);
8729 		 *       assert(mem->vmp_specialq.prev != 0);
8730 		 *  } else {*/
8731 		if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
8732 			assert(mem->vmp_specialq.next == 0);
8733 			assert(mem->vmp_specialq.prev == 0);
8734 		}
8735 		return;
8736 	}
8737 
8738 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8739 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8740 		assert(mem->vmp_specialq.next == 0 &&
8741 		    mem->vmp_specialq.prev == 0 &&
8742 		    mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8743 		return;
8744 	}
8745 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
8746 		/*
8747 		 * might put these guys on a list for debugging purposes
8748 		 * if we do, we'll need to remove this assert
8749 		 */
8750 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8751 		assert(mem->vmp_specialq.next == 0 &&
8752 		    mem->vmp_specialq.prev == 0);
8753 		/*
8754 		 * Recall that vmp_on_specialq also means a request to put
8755 		 * it on the special Q. So we don't want to reset that bit
8756 		 * just because a wiring request came in. We might want to
8757 		 * put it on the special queue post-unwiring.
8758 		 *
8759 		 * &&
8760 		 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8761 		 */
8762 		return;
8763 	}
8764 
8765 	assert(m_object != compressor_object);
8766 	assert(!is_kernel_object(m_object));
8767 	assert(!mem->vmp_fictitious);
8768 
8769 	switch (mem->vmp_q_state) {
8770 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
8771 	{
8772 		struct vpl      *lq;
8773 
8774 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
8775 		VPL_LOCK(&lq->vpl_lock);
8776 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
8777 		mem->vmp_local_id = 0;
8778 		lq->vpl_count--;
8779 		if (m_object->internal) {
8780 			lq->vpl_internal_count--;
8781 		} else {
8782 			lq->vpl_external_count--;
8783 		}
8784 		VPL_UNLOCK(&lq->vpl_lock);
8785 		was_pageable = FALSE;
8786 		break;
8787 	}
8788 	case VM_PAGE_ON_ACTIVE_Q:
8789 	{
8790 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
8791 		vm_page_active_count--;
8792 		break;
8793 	}
8794 
8795 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
8796 	{
8797 		assert(m_object->internal == TRUE);
8798 
8799 		vm_page_inactive_count--;
8800 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
8801 		vm_page_anonymous_count--;
8802 
8803 		vm_purgeable_q_advance_all();
8804 		vm_page_balance_inactive(3);
8805 		break;
8806 	}
8807 
8808 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
8809 	{
8810 		assert(m_object->internal == FALSE);
8811 
8812 		vm_page_inactive_count--;
8813 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
8814 		vm_purgeable_q_advance_all();
8815 		vm_page_balance_inactive(3);
8816 		break;
8817 	}
8818 
8819 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
8820 	{
8821 		assert(m_object->internal == FALSE);
8822 
8823 		vm_page_inactive_count--;
8824 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
8825 		vm_page_cleaned_count--;
8826 		vm_page_balance_inactive(3);
8827 		break;
8828 	}
8829 
8830 	case VM_PAGE_ON_THROTTLED_Q:
8831 	{
8832 		assert(m_object->internal == TRUE);
8833 
8834 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
8835 		vm_page_throttled_count--;
8836 		was_pageable = FALSE;
8837 		break;
8838 	}
8839 
8840 	case VM_PAGE_ON_SPECULATIVE_Q:
8841 	{
8842 		assert(m_object->internal == FALSE);
8843 
8844 		vm_page_remque(&mem->vmp_pageq);
8845 		vm_page_speculative_count--;
8846 		vm_page_balance_inactive(3);
8847 		break;
8848 	}
8849 
8850 #if CONFIG_SECLUDED_MEMORY
8851 	case VM_PAGE_ON_SECLUDED_Q:
8852 	{
8853 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
8854 		vm_page_secluded_count--;
8855 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
8856 		if (m_object == VM_OBJECT_NULL) {
8857 			vm_page_secluded_count_free--;
8858 			was_pageable = FALSE;
8859 		} else {
8860 			assert(!m_object->internal);
8861 			vm_page_secluded_count_inuse--;
8862 			was_pageable = FALSE;
8863 //			was_pageable = TRUE;
8864 		}
8865 		break;
8866 	}
8867 #endif /* CONFIG_SECLUDED_MEMORY */
8868 
8869 	default:
8870 	{
8871 		/*
8872 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
8873 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
8874 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
8875 		 *              either first remove it (it needs both the page queues lock and the object lock to do
8876 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
8877 		 *
8878 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
8879 		 *	or any of the undefined states
8880 		 */
8881 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
8882 		break;
8883 	}
8884 	}
8885 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8886 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
8887 
8888 	if (remove_from_specialq == TRUE) {
8889 		vm_page_remove_from_specialq(mem);
8890 	}
8891 	if (was_pageable) {
8892 		if (m_object->internal) {
8893 			vm_page_pageable_internal_count--;
8894 		} else {
8895 			vm_page_pageable_external_count--;
8896 		}
8897 	}
8898 }
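/*
 * Illustrative sketch (not compiled): the block comment above describes the
 * locking discipline callers are expected to follow.  A hypothetical caller
 * that pulls a page off whatever pageable queue it is on would look roughly
 * like this; the helper name is made up for illustration only.
 */
#if 0
static void
example_isolate_page(vm_page_t m)
{
	vm_page_lockspin_queues();              /* the global vm_page_queue_lock */
	/*
	 * safe even if the page sits on a local queue owned by another CPU:
	 * vm_page_queues_remove takes that queue's vpl_lock itself, and the
	 * page cannot move to a global queue or become wired while we hold
	 * the global queue lock.
	 */
	vm_page_queues_remove(m, TRUE);
	assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
	vm_page_unlock_queues();
}
#endif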
8899 
8900 void
8901 vm_page_remove_internal(vm_page_t page)
8902 {
8903 	vm_object_t __object = VM_PAGE_OBJECT(page);
8904 	if (page == __object->memq_hint) {
8905 		vm_page_t       __new_hint;
8906 		vm_page_queue_entry_t   __qe;
8907 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
8908 		if (vm_page_queue_end(&__object->memq, __qe)) {
8909 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
8910 			if (vm_page_queue_end(&__object->memq, __qe)) {
8911 				__qe = NULL;
8912 			}
8913 		}
8914 		__new_hint = (vm_page_t)((uintptr_t) __qe);
8915 		__object->memq_hint = __new_hint;
8916 	}
8917 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
8918 #if CONFIG_SECLUDED_MEMORY
8919 	if (__object->eligible_for_secluded) {
8920 		vm_page_secluded.eligible_for_secluded--;
8921 	}
8922 #endif /* CONFIG_SECLUDED_MEMORY */
8923 }
8924 
8925 void
8926 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
8927 {
8928 	vm_object_t     m_object;
8929 
8930 	m_object = VM_PAGE_OBJECT(mem);
8931 
8932 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8933 	assert(!mem->vmp_fictitious);
8934 	assert(!mem->vmp_laundry);
8935 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8936 	vm_page_check_pageable_safe(mem);
8937 
8938 	if (m_object->internal) {
8939 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
8940 
8941 		if (first == TRUE) {
8942 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
8943 		} else {
8944 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
8945 		}
8946 
8947 		vm_page_anonymous_count++;
8948 		vm_page_pageable_internal_count++;
8949 	} else {
8950 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
8951 
8952 		if (first == TRUE) {
8953 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
8954 		} else {
8955 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
8956 		}
8957 
8958 		vm_page_pageable_external_count++;
8959 	}
8960 	vm_page_inactive_count++;
8961 	token_new_pagecount++;
8962 
8963 	vm_page_add_to_specialq(mem, FALSE);
8964 }
8965 
8966 void
8967 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
8968 {
8969 	vm_object_t     m_object;
8970 
8971 	m_object = VM_PAGE_OBJECT(mem);
8972 
8973 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8974 	assert(!mem->vmp_fictitious);
8975 	assert(!mem->vmp_laundry);
8976 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8977 	vm_page_check_pageable_safe(mem);
8978 
8979 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
8980 	if (first == TRUE) {
8981 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
8982 	} else {
8983 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
8984 	}
8985 	vm_page_active_count++;
8986 
8987 	if (m_object->internal) {
8988 		vm_page_pageable_internal_count++;
8989 	} else {
8990 		vm_page_pageable_external_count++;
8991 	}
8992 
8993 	vm_page_add_to_specialq(mem, FALSE);
8994 	vm_page_balance_inactive(3);
8995 }
8996 
8997 /*
8998  * Pages from special kernel objects shouldn't
8999  * be placed on pageable queues.
9000  */
9001 void
9002 vm_page_check_pageable_safe(vm_page_t page)
9003 {
9004 	vm_object_t     page_object;
9005 
9006 	page_object = VM_PAGE_OBJECT(page);
9007 
9008 	if (is_kernel_object(page_object)) {
9009 		panic("vm_page_check_pageable_safe: trying to add page "
9010 		    "from kernel object (%p) to pageable queue", page_object);
9011 	}
9012 
9013 	if (page_object == compressor_object) {
9014 		panic("vm_page_check_pageable_safe: trying to add page "
9015 		    "from compressor object (%p) to pageable queue", compressor_object);
9016 	}
9017 }
9018 
9019 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9020 * wired page diagnose
9021 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9022 
9023 #include <libkern/OSKextLibPrivate.h>
9024 
9025 #define KA_SIZE(namelen, subtotalscount)        \
9026 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9027 
9028 #define KA_NAME(alloc)  \
9029 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9030 
9031 #define KA_NAME_LEN(alloc)      \
9032     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
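/*
 * Layout sketch (not compiled): KA_SIZE/KA_NAME/KA_NAME_LEN assume one
 * contiguous allocation shaped like
 *
 *     [ struct vm_allocation_site ][ subtotals[subtotalscount] ][ name bytes + NUL ]
 *
 * This mirrors kern_allocation_name_allocate() below; the "mbuf" name and
 * two-subtotal shape here are purely illustrative.
 */
#if 0
static kern_allocation_name_t
example_make_site(void)
{
	size_t sz = KA_SIZE(4, 2);      /* struct + 2 subtotal slots + "mbuf" + NUL */
	kern_allocation_name_t site = kalloc_data(sz, Z_WAITOK | Z_ZERO);

	site->subtotalscount = 2;
	site->flags = (uint16_t)(4 << VM_TAG_NAME_LEN_SHIFT);   /* name length packed in flags */
	strlcpy(KA_NAME(site), "mbuf", 4 + 1);                   /* name starts just past subtotals[2] */
	assert(KA_NAME_LEN(site) == 4);
	return site;
}
#endif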
9033 
9034 vm_tag_t
9035 vm_tag_bt(void)
9036 {
9037 	uintptr_t* frameptr;
9038 	uintptr_t* frameptr_next;
9039 	uintptr_t retaddr;
9040 	uintptr_t kstackb, kstackt;
9041 	const vm_allocation_site_t * site;
9042 	thread_t cthread;
9043 	kern_allocation_name_t name;
9044 
9045 	cthread = current_thread();
9046 	if (__improbable(cthread == NULL)) {
9047 		return VM_KERN_MEMORY_OSFMK;
9048 	}
9049 
9050 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9051 		if (!name->tag) {
9052 			vm_tag_alloc(name);
9053 		}
9054 		return name->tag;
9055 	}
9056 
9057 	kstackb = cthread->kernel_stack;
9058 	kstackt = kstackb + kernel_stack_size;
9059 
9060 	/* Load stack frame pointer (EBP on x86) into frameptr */
9061 	frameptr = __builtin_frame_address(0);
9062 	site = NULL;
9063 	while (frameptr != NULL) {
9064 		/* Verify thread stack bounds */
9065 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9066 			break;
9067 		}
9068 
9069 		/* Next frame pointer is pointed to by the previous one */
9070 		frameptr_next = (uintptr_t*) *frameptr;
9071 #if defined(HAS_APPLE_PAC)
9072 		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
9073 #endif
9074 
9075 		/* Pull return address from one spot above the frame pointer */
9076 		retaddr = *(frameptr + 1);
9077 
9078 #if defined(HAS_APPLE_PAC)
9079 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9080 #endif
9081 
9082 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9083 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9084 			site = OSKextGetAllocationSiteForCaller(retaddr);
9085 			break;
9086 		}
9087 		frameptr = frameptr_next;
9088 	}
9089 
9090 	return site ? site->tag : VM_KERN_MEMORY_NONE;
9091 }
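/*
 * Usage sketch (not compiled): allocation paths that are handed
 * VM_KERN_MEMORY_NONE can fall back to vm_tag_bt() to derive a tag from
 * the kernel backtrace, attributing the memory to the kext whose return
 * address was found on the stack.  The helper name and the final fallback
 * tag chosen here are arbitrary and only for illustration.
 */
#if 0
static vm_tag_t
example_resolve_tag(vm_tag_t tag)
{
	if (tag == VM_KERN_MEMORY_NONE) {
		/* walks saved frame pointers within the current kernel stack */
		tag = vm_tag_bt();
	}
	if (tag == VM_KERN_MEMORY_NONE) {
		/* nothing recognizable on the stack; charge a generic bucket */
		tag = VM_KERN_MEMORY_KALLOC;
	}
	return tag;
}
#endif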
9092 
9093 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9094 
9095 void
9096 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9097 {
9098 	vm_tag_t tag;
9099 	uint64_t avail;
9100 	uint32_t idx;
9101 	vm_allocation_site_t * prev;
9102 
9103 	if (site->tag) {
9104 		return;
9105 	}
9106 
9107 	idx = 0;
9108 	while (TRUE) {
9109 		avail = free_tag_bits[idx];
9110 		if (avail) {
9111 			tag = (vm_tag_t)__builtin_clzll(avail);
9112 			avail &= ~(1ULL << (63 - tag));
9113 			free_tag_bits[idx] = avail;
9114 			tag += (idx << 6);
9115 			break;
9116 		}
9117 		idx++;
9118 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
9119 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9120 				prev = vm_allocation_sites[idx];
9121 				if (!prev) {
9122 					continue;
9123 				}
9124 				if (!KA_NAME_LEN(prev)) {
9125 					continue;
9126 				}
9127 				if (!prev->tag) {
9128 					continue;
9129 				}
9130 				if (prev->total) {
9131 					continue;
9132 				}
9133 				if (1 != prev->refcount) {
9134 					continue;
9135 				}
9136 
9137 				assert(idx == prev->tag);
9138 				tag = (vm_tag_t)idx;
9139 				prev->tag = VM_KERN_MEMORY_NONE;
9140 				*releasesiteP = prev;
9141 				break;
9142 			}
9143 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9144 				tag = VM_KERN_MEMORY_ANY;
9145 			}
9146 			break;
9147 		}
9148 	}
9149 	site->tag = tag;
9150 
9151 	OSAddAtomic16(1, &site->refcount);
9152 
9153 	if (VM_KERN_MEMORY_ANY != tag) {
9154 		vm_allocation_sites[tag] = site;
9155 	}
9156 
9157 	if (tag > vm_allocation_tag_highest) {
9158 		vm_allocation_tag_highest = tag;
9159 	}
9160 }
9161 
9162 static void
9163 vm_tag_free_locked(vm_tag_t tag)
9164 {
9165 	uint64_t avail;
9166 	uint32_t idx;
9167 	uint64_t bit;
9168 
9169 	if (VM_KERN_MEMORY_ANY == tag) {
9170 		return;
9171 	}
9172 
9173 	idx = (tag >> 6);
9174 	avail = free_tag_bits[idx];
9175 	tag &= 63;
9176 	bit = (1ULL << (63 - tag));
9177 	assert(!(avail & bit));
9178 	free_tag_bits[idx] = (avail | bit);
9179 }
9180 
9181 static void
9182 vm_tag_init(void)
9183 {
9184 	vm_tag_t tag;
9185 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9186 		vm_tag_free_locked(tag);
9187 	}
9188 
9189 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9190 		vm_tag_free_locked(tag);
9191 	}
9192 }
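/*
 * Illustrative sketch (not compiled): free_tag_bits stores tag t in word
 * (t >> 6) at bit position (63 - (t & 63)), so __builtin_clzll() on a word
 * yields the smallest free tag number encoded in it.  The mapping in both
 * directions, as used by vm_tag_alloc_locked()/vm_tag_free_locked() above:
 */
#if 0
static inline void
toy_mark_tag_free(uint64_t bits[], vm_tag_t t)
{
	bits[t >> 6] |= (1ULL << (63 - (t & 63)));            /* same encoding as vm_tag_free_locked */
}

static inline vm_tag_t
toy_pick_free_tag(const uint64_t bits[], uint32_t idx)
{
	/* caller has already checked bits[idx] != 0 */
	vm_tag_t t = (vm_tag_t)__builtin_clzll(bits[idx]);    /* leading zeros == smallest tag in word */
	return (vm_tag_t)(t + (idx << 6));                    /* same decode as vm_tag_alloc_locked */
}
#endif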
9193 
9194 vm_tag_t
9195 vm_tag_alloc(vm_allocation_site_t * site)
9196 {
9197 	vm_allocation_site_t * releasesite;
9198 
9199 	if (!site->tag) {
9200 		releasesite = NULL;
9201 		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9202 		vm_tag_alloc_locked(site, &releasesite);
9203 		lck_ticket_unlock(&vm_allocation_sites_lock);
9204 		if (releasesite) {
9205 			kern_allocation_name_release(releasesite);
9206 		}
9207 	}
9208 
9209 	return site->tag;
9210 }
9211 
9212 #if VM_BTLOG_TAGS
9213 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
9214 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
9215 #define VM_TAG_BTLOG_SIZE (16u << 10)
9216 
9217 btlog_t vmtaglog_btlog;
9218 vm_tag_t vmtaglog_tag;
9219 
9220 static void
9221 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
9222 {
9223 	if (is_kernel_object(object)) {
9224 		/* kernel object backtraces are tracked in vm entries */
9225 		return;
9226 	}
9227 	if (delta > 0) {
9228 		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
9229 		btlog_record(vmtaglog_btlog, object, 0, ref);
9230 	} else if (object->wired_page_count == 0) {
9231 		btlog_erase(vmtaglog_btlog, object);
9232 	}
9233 }
9234 
9235 #ifndef ARRAY_SIZE
9236 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
9237 #endif /* ARRAY_SIZE */
9238 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
9239 const char *vm_kern_memory_strs[] = {
9240 	VM_KERN_MEMORY_ELEM(OSFMK),
9241 	VM_KERN_MEMORY_ELEM(BSD),
9242 	VM_KERN_MEMORY_ELEM(IOKIT),
9243 	VM_KERN_MEMORY_ELEM(LIBKERN),
9244 	VM_KERN_MEMORY_ELEM(OSKEXT),
9245 	VM_KERN_MEMORY_ELEM(KEXT),
9246 	VM_KERN_MEMORY_ELEM(IPC),
9247 	VM_KERN_MEMORY_ELEM(STACK),
9248 	VM_KERN_MEMORY_ELEM(CPU),
9249 	VM_KERN_MEMORY_ELEM(PMAP),
9250 	VM_KERN_MEMORY_ELEM(PTE),
9251 	VM_KERN_MEMORY_ELEM(ZONE),
9252 	VM_KERN_MEMORY_ELEM(KALLOC),
9253 	VM_KERN_MEMORY_ELEM(COMPRESSOR),
9254 	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
9255 	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
9256 	VM_KERN_MEMORY_ELEM(WAITQ),
9257 	VM_KERN_MEMORY_ELEM(DIAG),
9258 	VM_KERN_MEMORY_ELEM(LOG),
9259 	VM_KERN_MEMORY_ELEM(FILE),
9260 	VM_KERN_MEMORY_ELEM(MBUF),
9261 	VM_KERN_MEMORY_ELEM(UBC),
9262 	VM_KERN_MEMORY_ELEM(SECURITY),
9263 	VM_KERN_MEMORY_ELEM(MLOCK),
9264 	VM_KERN_MEMORY_ELEM(REASON),
9265 	VM_KERN_MEMORY_ELEM(SKYWALK),
9266 	VM_KERN_MEMORY_ELEM(LTABLE),
9267 	VM_KERN_MEMORY_ELEM(HV),
9268 	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
9269 	VM_KERN_MEMORY_ELEM(RETIRED),
9270 	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
9271 	VM_KERN_MEMORY_ELEM(TRIAGE),
9272 	VM_KERN_MEMORY_ELEM(RECOUNT),
9273 };
9274 
9275 static vm_tag_t
9276 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
9277 {
9278 	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
9279 		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
9280 			return i;
9281 		}
9282 	}
9283 
9284 	printf("Unable to find vm tag %s for btlog\n", tagstr);
9285 	return VM_KERN_MEMORY_NONE;
9286 }
9287 
9288 __startup_func
9289 static void
9290 vm_btlog_init(void)
9291 {
9292 	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
9293 
9294 	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
9295 		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
9296 	}
9297 }
9298 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
9299 #endif /* VM_BTLOG_TAGS */
9300 
9301 void
9302 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
9303 {
9304 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9305 
9306 	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
9307 }
9308 
9309 uint64_t
9310 vm_tag_get_size(vm_tag_t tag)
9311 {
9312 	vm_allocation_site_t *allocation;
9313 
9314 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9315 
9316 	allocation = vm_allocation_sites[tag];
9317 	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
9318 }
9319 
9320 void
9321 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
9322 {
9323 	uint64_t value;
9324 
9325 	value = os_atomic_add(&allocation->total, delta, relaxed);
9326 	if (delta < 0) {
9327 		assertf(value + (uint64_t)-delta > value,
9328 		    "tag %d, site %p", allocation->tag, allocation);
9329 	}
9330 
9331 #if DEBUG || DEVELOPMENT
9332 	if (value > allocation->peak) {
9333 		os_atomic_max(&allocation->peak, value, relaxed);
9334 	}
9335 #endif /* DEBUG || DEVELOPMENT */
9336 
9337 	if (value == (uint64_t)delta && !allocation->tag) {
9338 		vm_tag_alloc(allocation);
9339 	}
9340 
9341 #if VM_BTLOG_TAGS
9342 	if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
9343 		vm_tag_log(object, delta, __builtin_frame_address(0));
9344 	}
9345 #endif /* VM_BTLOG_TAGS */
9346 }
9347 
9348 #if VM_TAG_SIZECLASSES
9349 
9350 void
9351 vm_allocation_zones_init(void)
9352 {
9353 	vm_offset_t   addr;
9354 	vm_size_t     size;
9355 
9356 	const vm_tag_t early_tags[] = {
9357 		VM_KERN_MEMORY_DIAG,
9358 		VM_KERN_MEMORY_KALLOC,
9359 		VM_KERN_MEMORY_KALLOC_DATA,
9360 		VM_KERN_MEMORY_KALLOC_TYPE,
9361 		VM_KERN_MEMORY_LIBKERN,
9362 		VM_KERN_MEMORY_OSFMK,
9363 		VM_KERN_MEMORY_RECOUNT,
9364 	};
9365 
9366 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9367 	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9368 
9369 	kmem_alloc(kernel_map, &addr, round_page(size),
9370 	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
9371 	    VM_KERN_MEMORY_DIAG);
9372 
9373 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9374 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9375 
9376 	// prepopulate early tag ranges so allocations
9377 	// in vm_tag_update_zone_size() and early boot won't recurse
9378 	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
9379 		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
9380 		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9381 	}
9382 }
9383 
9384 __attribute__((noinline))
9385 static vm_tag_t
9386 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9387 {
9388 	vm_allocation_zone_total_t *stats;
9389 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9390 
9391 	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
9392 	stats = kalloc_data(size, flags);
9393 	if (!stats) {
9394 		return VM_KERN_MEMORY_NONE;
9395 	}
9396 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9397 		kfree_data(stats, size);
9398 	}
9399 	return tag;
9400 }
9401 
9402 vm_tag_t
9403 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags)
9404 {
9405 	assert(VM_KERN_MEMORY_NONE != tag);
9406 	assert(tag < VM_MAX_TAG_VALUE);
9407 
9408 	if (zidx >= VM_TAG_SIZECLASSES) {
9409 		return VM_KERN_MEMORY_NONE;
9410 	}
9411 
9412 	if (__probable(vm_allocation_zone_totals[tag])) {
9413 		return tag;
9414 	}
9415 	return vm_tag_zone_stats_alloc(tag, zflags);
9416 }
9417 
9418 void
9419 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9420 {
9421 	vm_allocation_zone_total_t *stats;
9422 	vm_size_t value;
9423 
9424 	assert(VM_KERN_MEMORY_NONE != tag);
9425 	assert(tag < VM_MAX_TAG_VALUE);
9426 
9427 	if (zidx >= VM_TAG_SIZECLASSES) {
9428 		return;
9429 	}
9430 
9431 	stats = vm_allocation_zone_totals[tag];
9432 	assert(stats);
9433 	stats += zidx;
9434 
9435 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9436 	if (delta < 0) {
9437 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9438 		return;
9439 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9440 		os_atomic_max(&stats->vazt_peak, value, relaxed);
9441 	}
9442 }
9443 
9444 #endif /* VM_TAG_SIZECLASSES */
9445 
9446 void
9447 kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
9448 {
9449 	kern_allocation_name_t other;
9450 	struct vm_allocation_total * total;
9451 	uint32_t subidx;
9452 
9453 	subidx = 0;
9454 	assert(VM_KERN_MEMORY_NONE != subtag);
9455 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9456 	for (; subidx < allocation->subtotalscount; subidx++) {
9457 		if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
9458 			allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
9459 			break;
9460 		}
9461 		if (subtag == allocation->subtotals[subidx].tag) {
9462 			break;
9463 		}
9464 	}
9465 	lck_ticket_unlock(&vm_allocation_sites_lock);
9466 	assert(subidx < allocation->subtotalscount);
9467 	if (subidx >= allocation->subtotalscount) {
9468 		return;
9469 	}
9470 
9471 	total = &allocation->subtotals[subidx];
9472 	other = vm_allocation_sites[subtag];
9473 	assert(other);
9474 
9475 	if (delta < 0) {
9476 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9477 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9478 	}
9479 	OSAddAtomic64(delta, &other->mapped);
9480 	OSAddAtomic64(delta, &total->total);
9481 }
9482 
9483 const char *
9484 kern_allocation_get_name(kern_allocation_name_t allocation)
9485 {
9486 	return KA_NAME(allocation);
9487 }
9488 
9489 kern_allocation_name_t
9490 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9491 {
9492 	kern_allocation_name_t allocation;
9493 	uint16_t namelen;
9494 
9495 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9496 
9497 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9498 	allocation->refcount       = 1;
9499 	allocation->subtotalscount = subtotalscount;
9500 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9501 	strlcpy(KA_NAME(allocation), name, namelen + 1);
9502 
9503 	vm_tag_alloc(allocation);
9504 	return allocation;
9505 }
9506 
9507 void
9508 kern_allocation_name_release(kern_allocation_name_t allocation)
9509 {
9510 	assert(allocation->refcount > 0);
9511 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9512 		kfree_data(allocation,
9513 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9514 	}
9515 }
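/*
 * Usage sketch (not compiled): a subsystem that wants its wired memory
 * reported under its own name can create a named counter once, charge and
 * uncharge it as memory comes and goes, and drop it when done.  The name
 * string, byte counts, and helper name below are purely illustrative.
 */
#if 0
static void
example_named_accounting(void)
{
	kern_allocation_name_t name;

	name = kern_allocation_name_allocate("com.example.cache", 0);

	/* attribute 16KB to this site (object may be VM_OBJECT_NULL for map-level charges) */
	kern_allocation_update_size(name, 16 * 1024, VM_OBJECT_NULL);

	/* ... later, give the memory back ... */
	kern_allocation_update_size(name, -16 * 1024, VM_OBJECT_NULL);

	kern_allocation_name_release(name);
}
#endif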
9516 
9517 vm_tag_t
9518 kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
9519 {
9520 	return vm_tag_alloc(allocation);
9521 }
9522 
9523 #if !VM_TAG_ACTIVE_UPDATE
9524 static void
9525 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9526 {
9527 	if (!object->wired_page_count) {
9528 		return;
9529 	}
9530 	if (!is_kernel_object(object)) {
9531 		assert(object->wire_tag < num_info);
9532 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9533 	}
9534 }
9535 
9536 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9537     unsigned int num_info, vm_object_t object);
9538 
9539 static void
9540 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9541     vm_page_iterate_proc proc, purgeable_q_t queue,
9542     int group)
9543 {
9544 	vm_object_t object;
9545 
9546 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
9547 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
9548 	    object = (vm_object_t) queue_next(&object->objq)) {
9549 		proc(info, num_info, object);
9550 	}
9551 }
9552 
9553 static void
9554 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9555     vm_page_iterate_proc proc)
9556 {
9557 	vm_object_t     object;
9558 
9559 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9560 	queue_iterate(&vm_objects_wired,
9561 	    object,
9562 	    vm_object_t,
9563 	    wired_objq)
9564 	{
9565 		proc(info, num_info, object);
9566 	}
9567 	lck_spin_unlock(&vm_objects_wired_lock);
9568 }
9569 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9570 
9571 static uint64_t
9572 process_account(mach_memory_info_t * info, unsigned int num_info,
9573     uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
9574 {
9575 	size_t                 namelen;
9576 	unsigned int           idx, count, nextinfo;
9577 	vm_allocation_site_t * site;
9578 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9579 
9580 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9581 		site = vm_allocation_sites[idx];
9582 		if (!site) {
9583 			continue;
9584 		}
9585 		info[idx].mapped = site->mapped;
9586 		info[idx].tag    = site->tag;
9587 		if (!iterated) {
9588 			info[idx].size = site->total;
9589 #if DEBUG || DEVELOPMENT
9590 			info[idx].peak = site->peak;
9591 #endif /* DEBUG || DEVELOPMENT */
9592 		} else {
9593 			if (!site->subtotalscount && (site->total != info[idx].size)) {
9594 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9595 				info[idx].size = site->total;
9596 			}
9597 		}
9598 		info[idx].flags |= VM_KERN_SITE_WIRED;
9599 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9600 			info[idx].site   = idx;
9601 			info[idx].flags |= VM_KERN_SITE_TAG;
9602 			if (VM_KERN_MEMORY_ZONE == idx) {
9603 				info[idx].flags |= VM_KERN_SITE_HIDE;
9604 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
9605 				info[idx].collectable_bytes = zones_collectable_bytes;
9606 			}
9607 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9608 			info[idx].site   = 0;
9609 			info[idx].flags |= VM_KERN_SITE_NAMED;
9610 			if (namelen > sizeof(info[idx].name)) {
9611 				namelen = sizeof(info[idx].name);
9612 			}
9613 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9614 		} else if (VM_TAG_KMOD & site->flags) {
9615 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
9616 			info[idx].flags |= VM_KERN_SITE_KMOD;
9617 		} else {
9618 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
9619 			info[idx].flags |= VM_KERN_SITE_KERNEL;
9620 		}
9621 	}
9622 
9623 	nextinfo = (vm_allocation_tag_highest + 1);
9624 	count    = nextinfo;
9625 	if (count >= num_info) {
9626 		count = num_info;
9627 	}
9628 
9629 	for (idx = 0; idx < count; idx++) {
9630 		site = vm_allocation_sites[idx];
9631 		if (!site) {
9632 			continue;
9633 		}
9634 #if VM_TAG_SIZECLASSES
9635 		vm_allocation_zone_total_t * zone;
9636 		unsigned int                 zidx;
9637 
9638 		if (!redact_info
9639 		    && vm_allocation_zone_totals
9640 		    && (zone = vm_allocation_zone_totals[idx])
9641 		    && (nextinfo < num_info)) {
9642 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9643 				if (!zone[zidx].vazt_peak) {
9644 					continue;
9645 				}
9646 				info[nextinfo]        = info[idx];
9647 				info[nextinfo].zone   = zone_index_from_tag_index(zidx);
9648 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
9649 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
9650 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
9651 				info[nextinfo].size   = zone[zidx].vazt_total;
9652 				info[nextinfo].peak   = zone[zidx].vazt_peak;
9653 				info[nextinfo].mapped = 0;
9654 				nextinfo++;
9655 			}
9656 		}
9657 #endif /* VM_TAG_SIZECLASSES */
9658 		if (site->subtotalscount) {
9659 			uint64_t mapped, mapcost, take;
9660 			uint32_t sub;
9661 			vm_tag_t alloctag;
9662 
9663 			info[idx].size = site->total;
9664 			mapped = info[idx].size;
9665 			info[idx].mapped = mapped;
9666 			mapcost = 0;
9667 			for (sub = 0; sub < site->subtotalscount; sub++) {
9668 				alloctag = site->subtotals[sub].tag;
9669 				assert(alloctag < num_info);
9670 				if (info[alloctag].name[0]) {
9671 					continue;
9672 				}
9673 				take = site->subtotals[sub].total;
9674 				if (take > info[alloctag].size) {
9675 					take = info[alloctag].size;
9676 				}
9677 				if (take > mapped) {
9678 					take = mapped;
9679 				}
9680 				info[alloctag].mapped  -= take;
9681 				info[alloctag].size    -= take;
9682 				mapped                 -= take;
9683 				mapcost                += take;
9684 			}
9685 			info[idx].size = mapcost;
9686 		}
9687 	}
9688 	lck_ticket_unlock(&vm_allocation_sites_lock);
9689 
9690 	return 0;
9691 }
9692 
9693 uint32_t
9694 vm_page_diagnose_estimate(void)
9695 {
9696 	vm_allocation_site_t * site;
9697 	uint32_t               count = zone_view_count;
9698 	uint32_t               idx;
9699 
9700 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9701 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9702 		site = vm_allocation_sites[idx];
9703 		if (!site) {
9704 			continue;
9705 		}
9706 		count++;
9707 #if VM_TAG_SIZECLASSES
9708 		if (vm_allocation_zone_totals) {
9709 			vm_allocation_zone_total_t * zone;
9710 			zone = vm_allocation_zone_totals[idx];
9711 			if (!zone) {
9712 				continue;
9713 			}
9714 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9715 				count += (zone[zidx].vazt_peak != 0);
9716 			}
9717 		}
9718 #endif
9719 	}
9720 	lck_ticket_unlock(&vm_allocation_sites_lock);
9721 
9722 	/* some slop for new tags created */
9723 	count += 8;
9724 	count += VM_KERN_COUNTER_COUNT;
9725 
9726 	return count;
9727 }
9728 
9729 static void
9730 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9731     bool percpu)
9732 {
9733 	zpercpu_foreach(zs, zstats) {
9734 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9735 	}
9736 	if (percpu) {
9737 		info->size *= zpercpu_count();
9738 	}
9739 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9740 }
9741 
9742 static void
9743 vm_page_add_info(
9744 	mach_memory_info_t     *info,
9745 	zone_stats_t            stats,
9746 	bool                    per_cpu,
9747 	const char             *parent_heap_name,
9748 	const char             *parent_zone_name,
9749 	const char             *view_name)
9750 {
9751 	vm_page_diagnose_zone_stats(info, stats, per_cpu);
9752 	snprintf(info->name, sizeof(info->name),
9753 	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
9754 }
9755 
9756 static void
9757 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
9758 {
9759 	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
9760 	    z->z_name, "raw");
9761 }
9762 
9763 static void
9764 vm_page_add_view(
9765 	mach_memory_info_t     *info,
9766 	zone_stats_t            stats,
9767 	const char             *parent_heap_name,
9768 	const char             *parent_zone_name,
9769 	const char             *view_name)
9770 {
9771 	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
9772 	    view_name);
9773 }
9774 
9775 static uint32_t
9776 vm_page_diagnose_heap_views(
9777 	mach_memory_info_t     *info,
9778 	kalloc_heap_t           kh,
9779 	const char             *parent_heap_name,
9780 	const char             *parent_zone_name)
9781 {
9782 	uint32_t i = 0;
9783 
9784 	while (kh) {
9785 		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
9786 		    parent_zone_name, kh->kh_name);
9787 		kh = kh->kh_views;
9788 		i++;
9789 	}
9790 	return i;
9791 }
9792 
9793 static uint32_t
9794 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
9795 {
9796 	uint32_t i = 0;
9797 
9798 	for (; i < KHEAP_NUM_ZONES; i++) {
9799 		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
9800 	}
9801 
9802 	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
9803 	    NULL);
9804 	return i;
9805 }
9806 
9807 static int
9808 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
9809 {
9810 	uint32_t idx = 0;
9811 	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
9812 	    "", "raw");
9813 	idx++;
9814 
9815 	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
9816 		struct kheap_info heap = kalloc_type_heap_array[i];
9817 		char heap_num_tmp[MAX_ZONE_NAME] = "";
9818 		const char *heap_num;
9819 
9820 		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
9821 		heap_num = &heap_num_tmp[0];
9822 
9823 		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
9824 		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
9825 			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
9826 				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
9827 				    heap_num, ktv->kt_name);
9828 				idx++;
9829 			}
9830 		}
9831 
9832 		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
9833 		    KHEAP_KT_VAR->kh_name, heap_num);
9834 	}
9835 
9836 	return idx;
9837 }
9838 
9839 kern_return_t
9840 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
9841 {
9842 	uint64_t                 wired_size;
9843 	uint64_t                 wired_managed_size;
9844 	uint64_t                 wired_reserved_size;
9845 	boolean_t                iterate;
9846 	mach_memory_info_t     * counts;
9847 	uint32_t                 i;
9848 
9849 	bzero(info, num_info * sizeof(mach_memory_info_t));
9850 
9851 	if (!vm_page_wire_count_initial) {
9852 		return KERN_ABORTED;
9853 	}
9854 
9855 #if !XNU_TARGET_OS_OSX
9856 	wired_size          = ptoa_64(vm_page_wire_count);
9857 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
9858 #else /* !XNU_TARGET_OS_OSX */
9859 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
9860 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
9861 #endif /* !XNU_TARGET_OS_OSX */
9862 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
9863 
9864 	wired_size += booter_size;
9865 
9866 	assert(num_info >= VM_KERN_COUNTER_COUNT);
9867 	num_info -= VM_KERN_COUNTER_COUNT;
9868 	counts = &info[num_info];
9869 
9870 #define SET_COUNT(xcount, xsize, xflags)                        \
9871     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
9872     counts[xcount].site  = (xcount);                            \
9873     counts[xcount].size  = (xsize);                                 \
9874     counts[xcount].mapped  = (xsize);                           \
9875     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
9876 
9877 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
9878 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
9879 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
9880 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
9881 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
9882 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
9883 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
9884 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
9885 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
9886 
9887 #define SET_MAP(xcount, xsize, xfree, xlargest) \
9888     counts[xcount].site    = (xcount);                  \
9889     counts[xcount].size    = (xsize);                   \
9890     counts[xcount].mapped  = (xsize);                   \
9891     counts[xcount].free    = (xfree);                   \
9892     counts[xcount].largest = (xlargest);                \
9893     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
9894 
9895 	vm_map_size_t map_size, map_free, map_largest;
9896 
9897 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
9898 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
9899 
9900 	zone_map_sizes(&map_size, &map_free, &map_largest);
9901 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
9902 
9903 	assert(num_info >= zone_view_count);
9904 	num_info -= zone_view_count;
9905 	counts = &info[num_info];
9906 	i = 0;
9907 
9908 	if (!redact_info) {
9909 		if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
9910 			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
9911 		}
9912 		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
9913 			i += vm_page_diagnose_kt_heaps(counts + i);
9914 		}
9915 		assert(i <= zone_view_count);
9916 
9917 		zone_index_foreach(zidx) {
9918 			zone_t z = &zone_array[zidx];
9919 			zone_security_flags_t zsflags = zone_security_array[zidx];
9920 			zone_view_t zv = z->z_views;
9921 
9922 			if (zv == NULL) {
9923 				continue;
9924 			}
9925 
9926 			zone_stats_t zv_stats_head = z->z_stats;
9927 			bool has_raw_view = false;
9928 
9929 			for (; zv; zv = zv->zv_next) {
9930 				/*
9931 				 * kalloc_types that allocate from the same zone are linked
9932 				 * as views. Only print the ones that have their own stats.
9933 				 */
9934 				if (zv->zv_stats == zv_stats_head) {
9935 					continue;
9936 				}
9937 				has_raw_view = true;
9938 				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
9939 				    z->z_percpu);
9940 				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
9941 				    zone_heap_name(z), z->z_name, zv->zv_name);
9942 				i++;
9943 				assert(i <= zone_view_count);
9944 			}
9945 
9946 			/*
9947 			 * Print raw views for non kalloc or kalloc_type zones
9948 			 */
9949 			bool kalloc_type = zsflags.z_kalloc_type;
9950 			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
9951 			    (kalloc_type && has_raw_view)) {
9952 				vm_page_diagnose_zone(counts + i, z);
9953 				i++;
9954 				assert(i <= zone_view_count);
9955 			}
9956 		}
9957 	}
9958 
9959 	iterate = !VM_TAG_ACTIVE_UPDATE;
9960 	if (iterate) {
9961 		enum                       { kMaxKernelDepth = 1 };
9962 		vm_map_t                     maps[kMaxKernelDepth];
9963 		vm_map_entry_t               entries[kMaxKernelDepth];
9964 		vm_map_t                     map;
9965 		vm_map_entry_t               entry;
9966 		vm_object_offset_t           offset;
9967 		vm_page_t                    page;
9968 		int                          stackIdx, count;
9969 
9970 #if !VM_TAG_ACTIVE_UPDATE
9971 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
9972 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9973 
9974 		map = kernel_map;
9975 		stackIdx = 0;
9976 		while (map) {
9977 			vm_map_lock(map);
9978 			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
9979 				if (entry->is_sub_map) {
9980 					assert(stackIdx < kMaxKernelDepth);
9981 					maps[stackIdx] = map;
9982 					entries[stackIdx] = entry;
9983 					stackIdx++;
9984 					map = VME_SUBMAP(entry);
9985 					entry = NULL;
9986 					break;
9987 				}
9988 				if (is_kernel_object(VME_OBJECT(entry))) {
9989 					count = 0;
9990 					vm_object_lock(VME_OBJECT(entry));
9991 					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
9992 						page = vm_page_lookup(VME_OBJECT(entry), offset);
9993 						if (page && VM_PAGE_WIRED(page)) {
9994 							count++;
9995 						}
9996 					}
9997 					vm_object_unlock(VME_OBJECT(entry));
9998 
9999 					if (count) {
10000 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
10001 						assert(VME_ALIAS(entry) < num_info);
10002 						info[VME_ALIAS(entry)].size += ptoa_64(count);
10003 					}
10004 				}
10005 				while (map && (entry == vm_map_last_entry(map))) {
10006 					vm_map_unlock(map);
10007 					if (!stackIdx) {
10008 						map = NULL;
10009 					} else {
10010 						--stackIdx;
10011 						map = maps[stackIdx];
10012 						entry = entries[stackIdx];
10013 					}
10014 				}
10015 			}
10016 		}
10017 	}
10018 
10019 	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);
10020 
10021 	return KERN_SUCCESS;
10022 }
10023 
10024 #if DEBUG || DEVELOPMENT
10025 
10026 kern_return_t
10027 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
10028 {
10029 	kern_return_t  ret;
10030 	vm_size_t      zsize;
10031 	vm_map_t       map;
10032 	vm_map_entry_t entry;
10033 
10034 	zsize = zone_element_info((void *) addr, tag);
10035 	if (zsize) {
10036 		*zone_size = *size = zsize;
10037 		return KERN_SUCCESS;
10038 	}
10039 
10040 	*zone_size = 0;
10041 	ret = KERN_INVALID_ADDRESS;
10042 	for (map = kernel_map; map;) {
10043 		vm_map_lock(map);
10044 		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
10045 			break;
10046 		}
10047 		if (entry->is_sub_map) {
10048 			if (map != kernel_map) {
10049 				break;
10050 			}
10051 			map = VME_SUBMAP(entry);
10052 			continue;
10053 		}
10054 		if (entry->vme_start != addr) {
10055 			break;
10056 		}
10057 		*tag = (vm_tag_t)VME_ALIAS(entry);
10058 		*size = (entry->vme_end - addr);
10059 		ret = KERN_SUCCESS;
10060 		break;
10061 	}
10062 	if (map != kernel_map) {
10063 		vm_map_unlock(map);
10064 	}
10065 	vm_map_unlock(kernel_map);
10066 
10067 	return ret;
10068 }
10069 
10070 #endif /* DEBUG || DEVELOPMENT */
10071 
10072 uint32_t
10073 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
10074 {
10075 	vm_allocation_site_t * site;
10076 	uint32_t               kmodId;
10077 
10078 	kmodId = 0;
10079 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10080 	if ((site = vm_allocation_sites[tag])) {
10081 		if (VM_TAG_KMOD & site->flags) {
10082 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
10083 		}
10084 	}
10085 	lck_ticket_unlock(&vm_allocation_sites_lock);
10086 
10087 	return kmodId;
10088 }
10089 
10090 
10091 #if CONFIG_SECLUDED_MEMORY
10092 /*
10093  * Note that there's no locking around other accesses to vm_page_secluded_target.
10094  * That should be OK, since these are the only places where it can be changed after
10095  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
10096  * but will eventually get the correct value. This brief mismatch is OK as pageout
10097  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
10098  * over time.
10099  */
10100 unsigned int vm_page_secluded_suppress_cnt = 0;
10101 unsigned int vm_page_secluded_save_target;
10102 
10103 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
10104 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
10105 
10106 void
10107 start_secluded_suppression(task_t task)
10108 {
10109 	if (task->task_suppressed_secluded) {
10110 		return;
10111 	}
10112 	lck_spin_lock(&secluded_suppress_slock);
10113 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
10114 		task->task_suppressed_secluded = TRUE;
10115 		vm_page_secluded_save_target = vm_page_secluded_target;
10116 		vm_page_secluded_target = 0;
10117 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10118 	}
10119 	lck_spin_unlock(&secluded_suppress_slock);
10120 }
10121 
10122 void
10123 stop_secluded_suppression(task_t task)
10124 {
10125 	lck_spin_lock(&secluded_suppress_slock);
10126 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
10127 		task->task_suppressed_secluded = FALSE;
10128 		vm_page_secluded_target = vm_page_secluded_save_target;
10129 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10130 	}
10131 	lck_spin_unlock(&secluded_suppress_slock);
10132 }
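/*
 * Usage sketch (not compiled): a task that temporarily needs the secluded
 * pool returned to general use brackets the work with the pair above; the
 * suppress count lets suppressors nest across tasks, and the saved target
 * is only restored when the last one stops.  The helper name is made up.
 */
#if 0
static void
example_suppress_secluded(task_t task)
{
	start_secluded_suppression(task);    /* first suppressor zeroes vm_page_secluded_target */
	/* ... memory-hungry work that should not be starved by the secluded pool ... */
	stop_secluded_suppression(task);     /* last suppressor restores the saved target */
}
#endif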
10133 
10134 #endif /* CONFIG_SECLUDED_MEMORY */
10135 
10136 /*
10137  * Move the list of retired pages from the vm_page_queue_retired to
10138  * their final resting place on retired_pages_object.
10139  */
10140 void
10141 vm_retire_boot_pages(void)
10142 {
10143 }
10144 
10145 /*
10146  * This holds the reported physical address if an ECC error leads to a panic.
10147  * SMC will store it in PMU SRAM under the 'sECC' key.
10148  */
10149 uint64_t ecc_panic_physical_address = 0;
10150 
10151