xref: /xnu-10002.61.3/osfmk/vm/vm_resident.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68 
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/ecc.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_init.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_kern.h>                 /* kmem_alloc() */
89 #include <kern/misc_protos.h>
90 #include <mach_debug/zone_info.h>
91 #include <vm/cpm.h>
92 #include <pexpert/pexpert.h>
93 #include <pexpert/device_tree.h>
94 #include <san/kasan.h>
95 
96 #include <vm/vm_protos.h>
97 #include <vm/memory_object.h>
98 #include <vm/vm_purgeable_internal.h>
99 #include <vm/vm_compressor.h>
100 #if defined (__x86_64__)
101 #include <i386/misc_protos.h>
102 #endif
103 
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache.h>
106 #endif
107 
108 #if HIBERNATION
109 #include <IOKit/IOHibernatePrivate.h>
110 #include <machine/pal_hibernate.h>
111 #endif /* HIBERNATION */
112 
113 #include <sys/kdebug.h>
114 
115 #if defined(HAS_APPLE_PAC)
116 #include <ptrauth.h>
117 #endif
118 #if defined(__arm64__)
119 #include <arm/cpu_internal.h>
120 #endif /* defined(__arm64__) */
121 
122 #if MACH_ASSERT
123 
124 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
125 
126 #else /* MACH_ASSERT */
127 
128 #define ASSERT_PMAP_FREE(mem) /* nothing */
129 
130 #endif /* MACH_ASSERT */
131 
132 extern boolean_t vm_pageout_running;
133 extern thread_t  vm_pageout_scan_thread;
134 extern bool vps_dynamic_priority_enabled;
135 
136 char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
137 char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
138 char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
139 char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
140 
141 #if CONFIG_SECLUDED_MEMORY
142 struct vm_page_secluded_data vm_page_secluded;
143 #endif /* CONFIG_SECLUDED_MEMORY */
144 
145 #if DEVELOPMENT || DEBUG
146 extern struct memory_object_pager_ops shared_region_pager_ops;
147 unsigned int shared_region_pagers_resident_count = 0;
148 unsigned int shared_region_pagers_resident_peak = 0;
149 #endif /* DEVELOPMENT || DEBUG */
150 
151 
152 
153 int             PERCPU_DATA(start_color);
154 vm_page_t       PERCPU_DATA(free_pages);
155 boolean_t       hibernate_cleaning_in_progress = FALSE;
156 boolean_t       vm_page_free_verify = TRUE;
157 
158 uint32_t        vm_lopage_free_count = 0;
159 uint32_t        vm_lopage_free_limit = 0;
160 uint32_t        vm_lopage_lowater    = 0;
161 boolean_t       vm_lopage_refill = FALSE;
162 boolean_t       vm_lopage_needed = FALSE;
163 
164 int             speculative_age_index = 0;
165 int             speculative_steal_index = 0;
166 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
167 
168 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
169                                                           * Updated and checked behind the vm_page_queues_lock. */
170 
171 static void             vm_page_free_prepare(vm_page_t  page);
172 static vm_page_t        vm_page_grab_fictitious_common(ppnum_t, boolean_t);
173 
174 static void vm_tag_init(void);
175 
176 /* for debugging purposes */
177 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
178     VM_PAGE_PACKED_FROM_ARRAY;
179 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
180     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
181 
182 /*
183  *	Associated with each page of user-allocatable memory is a
184  *	page structure.
185  */
186 
187 /*
188  *	These variables record the values returned by vm_page_bootstrap,
189  *	for debugging purposes.  The implementation of pmap_steal_memory
190  *	and pmap_startup here also uses them internally.
191  */
192 
193 vm_offset_t virtual_space_start;
194 vm_offset_t virtual_space_end;
195 uint32_t        vm_page_pages;
196 
197 /*
198  *	The vm_page_lookup() routine, which provides for fast
199  *	(virtual memory object, offset) to page lookup, employs
200  *	the following hash table.  The vm_page_{insert,remove}
201  *	routines install and remove associations in the table.
202  *	[This table is often called the virtual-to-physical,
203  *	or VP, table.]
204  */
205 typedef struct {
206 	vm_page_packed_t page_list;
207 #if     MACH_PAGE_HASH_STATS
208 	int             cur_count;              /* current count */
209 	int             hi_count;               /* high water mark */
210 #endif /* MACH_PAGE_HASH_STATS */
211 } vm_page_bucket_t;
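
/*
 * A minimal sketch (not the actual hash used by vm_page_lookup(), which
 * mixes vm_page_bucket_hash, vm_page_hash_shift and vm_page_hash_mask)
 * of how an (object, offset) pair can be folded into a bucket index
 * once the bucket count is a power of two:
 *
 *	unsigned int
 *	example_bucket_index(uintptr_t object, uint64_t offset)
 *	{
 *		// shifting the object pointer spreads objects across buckets;
 *		// masking with (bucket_count - 1) replaces an expensive modulo
 *		return (unsigned int)((object >> 6) + (offset >> PAGE_SHIFT)) &
 *		    vm_page_hash_mask;
 *	}
 */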
212 
213 
214 #define BUCKETS_PER_LOCK        16
215 
216 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
217 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
218 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
219 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
220 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
221 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
222 
223 #ifndef VM_TAG_ACTIVE_UPDATE
224 #error VM_TAG_ACTIVE_UPDATE
225 #endif
226 #ifndef VM_TAG_SIZECLASSES
227 #error VM_TAG_SIZECLASSES
228 #endif
229 
230 /* for debugging */
231 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
232 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
233 
234 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
235 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
236 #if VM_TAG_SIZECLASSES
237 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
238 #endif /* VM_TAG_SIZECLASSES */
239 
240 vm_tag_t vm_allocation_tag_highest;
241 
242 #if VM_PAGE_BUCKETS_CHECK
243 boolean_t vm_page_buckets_check_ready = FALSE;
244 #if VM_PAGE_FAKE_BUCKETS
245 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
246 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
247 #endif /* VM_PAGE_FAKE_BUCKETS */
248 #endif /* VM_PAGE_BUCKETS_CHECK */
249 
250 #if     MACH_PAGE_HASH_STATS
251 /* This routine is only for debug.  It is intended to be called by
252  * hand by a developer using a kernel debugger.  This routine prints
253  * out vm_page_hash table statistics to the kernel debug console.
254  */
255 void
256 hash_debug(void)
257 {
258 	int     i;
259 	int     numbuckets = 0;
260 	int     highsum = 0;
261 	int     maxdepth = 0;
262 
263 	for (i = 0; i < vm_page_bucket_count; i++) {
264 		if (vm_page_buckets[i].hi_count) {
265 			numbuckets++;
266 			highsum += vm_page_buckets[i].hi_count;
267 			if (vm_page_buckets[i].hi_count > maxdepth) {
268 				maxdepth = vm_page_buckets[i].hi_count;
269 			}
270 		}
271 	}
272 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
273 	printf("Number used buckets:     %d = %d%%\n",
274 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
275 	printf("Number unused buckets:   %d = %d%%\n",
276 	    vm_page_bucket_count - numbuckets,
277 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
278 	printf("Sum of bucket max depth: %d\n", highsum);
279 	printf("Average bucket depth:    %d.%2d\n",
280 	    highsum / vm_page_bucket_count,
281 	    highsum % vm_page_bucket_count);
282 	printf("Maximum bucket depth:    %d\n", maxdepth);
283 }
284 #endif /* MACH_PAGE_HASH_STATS */
285 
286 /*
287  *	The virtual page size is currently implemented as a runtime
288  *	variable, but is constant once initialized using vm_set_page_size.
289  *	This initialization must be done in the machine-dependent
290  *	bootstrap sequence, before calling other machine-independent
291  *	initializations.
292  *
293  *	All references to the virtual page size outside this
294  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
295  *	constants.
296  */
297 #if defined(__arm64__)
298 vm_size_t       page_size;
299 vm_size_t       page_mask;
300 int             page_shift;
301 #else
302 vm_size_t       page_size  = PAGE_SIZE;
303 vm_size_t       page_mask  = PAGE_MASK;
304 int             page_shift = PAGE_SHIFT;
305 #endif
306 
307 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
308 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
309 vm_page_t                          vm_page_array_ending_addr;
310 
311 unsigned int    vm_pages_count = 0;
312 
313 /*
314  *	Resident pages that represent real memory
315  *	are allocated from a set of free lists,
316  *	one per color.
317  */
318 unsigned int    vm_colors;
319 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
320 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
321 unsigned int    vm_free_magazine_refill_limit = 0;
322 
323 
324 struct vm_page_queue_free_head {
325 	vm_page_queue_head_t    qhead;
326 } VM_PAGE_PACKED_ALIGNED;
327 
328 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
329 
330 
331 unsigned int    vm_page_free_wanted;
332 unsigned int    vm_page_free_wanted_privileged;
333 #if CONFIG_SECLUDED_MEMORY
334 unsigned int    vm_page_free_wanted_secluded;
335 #endif /* CONFIG_SECLUDED_MEMORY */
336 unsigned int    vm_page_free_count;
337 
338 unsigned int    vm_page_realtime_count;
339 
340 /*
341  *	Occasionally, the virtual memory system uses
342  *	resident page structures that do not refer to
343  *	real pages, for example to leave a page with
344  *	important state information in the VP table.
345  *
346  *	These page structures are allocated the way
347  *	most other kernel structures are.
348  */
349 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
350 vm_locks_array_t vm_page_locks;
351 
352 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
353 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
354 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
355 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
356 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
357 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
358 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
359 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
360 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
361 
362 unsigned int    vm_page_local_q_soft_limit = 250;
363 unsigned int    vm_page_local_q_hard_limit = 500;
364 struct vpl     *__zpercpu vm_page_local_q;
365 
366 /* N.B. Guard and fictitious pages must not
367  * be assigned a zero phys_page value.
368  */
369 /*
370  *	Fictitious pages don't have a physical address,
371  *	but we must initialize phys_page to something.
372  *	For debugging, this should be a strange value
373  *	that the pmap module can recognize in assertions.
374  */
375 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
376 
377 /*
378  *	Guard pages are not accessible so they don't
379  *      need a physical address, but we need to enter
380  *	one in the pmap.
381  *	Let's make it recognizable and make sure that
382  *	we don't use a real physical page with that
383  *	physical address.
384  */
385 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
386 
387 /*
388  *	Resident page structures are also chained on
389  *	queues that are used by the page replacement
390  *	system (pageout daemon).  These queues are
391  *	defined here, but are shared by the pageout
392  *	module.  The inactive queue is broken into
393  *	file-backed and anonymous for convenience, as the
394  *	pageout daemon often assigns a higher
395  *	importance to anonymous pages (making them less likely to be picked)
396  */
397 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
398 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
399 #if CONFIG_SECLUDED_MEMORY
400 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
401 #endif /* CONFIG_SECLUDED_MEMORY */
402 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
403 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
404 
405 queue_head_t    vm_objects_wired;
406 
407 void vm_update_darkwake_mode(boolean_t);
408 
409 vm_page_queue_head_t    vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
410 uint32_t        vm_page_donate_mode;
411 uint32_t        vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
412 uint32_t        vm_page_donate_count;
413 bool            vm_page_donate_queue_ripe;
414 
415 
416 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
417 uint32_t        vm_page_background_target;
418 uint32_t        vm_page_background_target_snapshot;
419 uint32_t        vm_page_background_count;
420 uint64_t        vm_page_background_promoted_count;
421 
422 uint32_t        vm_page_background_internal_count;
423 uint32_t        vm_page_background_external_count;
424 
425 uint32_t        vm_page_background_mode;
426 uint32_t        vm_page_background_exclude_external;
427 
428 unsigned int    vm_page_active_count;
429 unsigned int    vm_page_inactive_count;
430 unsigned int    vm_page_kernelcache_count;
431 #if CONFIG_SECLUDED_MEMORY
432 unsigned int    vm_page_secluded_count;
433 unsigned int    vm_page_secluded_count_free;
434 unsigned int    vm_page_secluded_count_inuse;
435 unsigned int    vm_page_secluded_count_over_target;
436 #endif /* CONFIG_SECLUDED_MEMORY */
437 unsigned int    vm_page_anonymous_count;
438 unsigned int    vm_page_throttled_count;
439 unsigned int    vm_page_speculative_count;
440 
441 unsigned int    vm_page_wire_count;
442 unsigned int    vm_page_wire_count_on_boot = 0;
443 unsigned int    vm_page_stolen_count = 0;
444 unsigned int    vm_page_wire_count_initial;
445 unsigned int    vm_page_gobble_count = 0;
446 unsigned int    vm_page_kern_lpage_count = 0;
447 
448 uint64_t        booter_size;  /* external so it can be found in core dumps */
449 
450 #define VM_PAGE_WIRE_COUNT_WARNING      0
451 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
452 
453 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
454 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
455 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
456 
457 unsigned int    vm_page_xpmapped_external_count = 0;
458 unsigned int    vm_page_external_count = 0;
459 unsigned int    vm_page_internal_count = 0;
460 unsigned int    vm_page_pageable_external_count = 0;
461 unsigned int    vm_page_pageable_internal_count = 0;
462 
463 #if DEVELOPMENT || DEBUG
464 unsigned int    vm_page_speculative_recreated = 0;
465 unsigned int    vm_page_speculative_created = 0;
466 unsigned int    vm_page_speculative_used = 0;
467 #endif
468 
469 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
470 
471 unsigned int    vm_page_cleaned_count = 0;
472 
473 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
474 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
475 
476 
477 /*
478  *	Several page replacement parameters are also
479  *	shared with this module, so that page allocation
480  *	(done here in vm_page_alloc) can trigger the
481  *	pageout daemon.
482  */
483 unsigned int    vm_page_free_target = 0;
484 unsigned int    vm_page_free_min = 0;
485 unsigned int    vm_page_throttle_limit = 0;
486 unsigned int    vm_page_inactive_target = 0;
487 #if CONFIG_SECLUDED_MEMORY
488 unsigned int    vm_page_secluded_target = 0;
489 #endif /* CONFIG_SECLUDED_MEMORY */
490 unsigned int    vm_page_anonymous_min = 0;
491 unsigned int    vm_page_free_reserved = 0;
492 
493 
494 /*
495  *	The VM system has a couple of heuristics for deciding
496  *	that pages are "uninteresting" and should be placed
497  *	on the inactive queue as likely candidates for replacement.
498  *	These variables let the heuristics be controlled at run-time
499  *	to make experimentation easier.
500  */
501 
502 boolean_t vm_page_deactivate_hint = TRUE;
503 
504 struct vm_page_stats_reusable vm_page_stats_reusable;
505 
506 /*
507  *	vm_set_page_size:
508  *
509  *	Sets the page size, perhaps based upon the memory
510  *	size.  Must be called before any use of page-size
511  *	dependent functions.
512  *
513  *	Sets page_shift and page_mask from page_size.
514  */
515 void
516 vm_set_page_size(void)
517 {
518 	page_size  = PAGE_SIZE;
519 	page_mask  = PAGE_MASK;
520 	page_shift = PAGE_SHIFT;
521 
522 	if ((page_mask & page_size) != 0) {
523 		panic("vm_set_page_size: page size not a power of two");
524 	}
525 
526 	for (page_shift = 0;; page_shift++) {
527 		if ((1U << page_shift) == page_size) {
528 			break;
529 		}
530 	}
531 }
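
/*
 * Equivalent sketch of the shift computation above: because page_size
 * has just been verified to be a power of two, the loop amounts to
 * counting trailing zero bits. For a 4K page this yields 12:
 *
 *	page_shift = __builtin_ctz((unsigned int)page_size);
 *	// 0x1000 has 12 trailing zero bits, so page_shift == 12
 */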
532 
533 #if defined (__x86_64__)
534 
535 #define MAX_CLUMP_SIZE      16
536 #define DEFAULT_CLUMP_SIZE  4
537 
538 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
539 
540 #if DEVELOPMENT || DEBUG
541 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
542 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
543 
544 static inline void
545 vm_clump_update_stats(unsigned int c)
546 {
547 	assert(c <= vm_clump_size);
548 	if (c > 0 && c <= vm_clump_size) {
549 		vm_clump_stats[c] += c;
550 	}
551 	vm_clump_allocs += c;
552 }
553 #endif  /*  if DEVELOPMENT || DEBUG */
554 
555 /* Called once to setup the VM clump knobs */
556 static void
557 vm_page_setup_clump( void )
558 {
559 	unsigned int override, n;
560 
561 	vm_clump_size = DEFAULT_CLUMP_SIZE;
562 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
563 		vm_clump_size = override;
564 	}
565 
566 	if (vm_clump_size > MAX_CLUMP_SIZE) {
567 		panic("vm_page_setup_clump:: clump_size is too large!");
568 	}
569 	if (vm_clump_size < 1) {
570 		panic("vm_page_setup_clump:: clump_size must be >= 1");
571 	}
572 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
573 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
574 	}
575 
576 	vm_clump_promote_threshold = vm_clump_size;
577 	vm_clump_mask = vm_clump_size - 1;
578 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
579 		;
580 	}
581 
582 #if DEVELOPMENT || DEBUG
583 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
584 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
585 #endif  /*  if DEVELOPMENT || DEBUG */
586 }
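
/*
 * Worked example of the knobs computed above, assuming the default
 * clump_size of 4: vm_clump_mask == 3 and vm_clump_shift == 2, so a
 * physical page number decomposes into its clump as follows:
 *
 *	ppnum_t  clump_base = pnum & ~(ppnum_t)vm_clump_mask;  // first page of the clump
 *	unsigned clump_slot = pnum & vm_clump_mask;            // position within the clump
 *	unsigned clump_num  = pnum >> vm_clump_shift;          // which clump
 */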
587 
588 #endif  /* #if defined (__x86_64__) */
589 
590 #define COLOR_GROUPS_TO_STEAL   4
591 
592 /* Called once during startup, once the cache geometry is known.
593  */
594 static void
595 vm_page_set_colors( void )
596 {
597 	unsigned int    n, override;
598 
599 #if defined (__x86_64__)
600 	/* adjust #colors because we need to color outside the clump boundary */
601 	vm_cache_geometry_colors >>= vm_clump_shift;
602 #endif
603 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
604 		n = override;
605 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
606 		n = vm_cache_geometry_colors;
607 	} else {
608 		n = DEFAULT_COLORS;                             /* use default if all else fails */
609 	}
610 	if (n == 0) {
611 		n = 1;
612 	}
613 	if (n > MAX_COLORS) {
614 		n = MAX_COLORS;
615 	}
616 
617 	/* the count must be a power of 2  */
618 	if ((n & (n - 1)) != 0) {
619 		n = DEFAULT_COLORS;                             /* use default if all else fails */
620 	}
621 	vm_colors = n;
622 	vm_color_mask = n - 1;
623 
624 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
625 
626 #if defined (__x86_64__)
627 	/* adjust for reduction in colors due to clumping and multiple cores */
628 	if (real_ncpus) {
629 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
630 	}
631 #endif
632 }
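
/*
 * Illustrative use of the color mask computed above (the exact color
 * derivation differs on x86 due to clumping): free pages live on one
 * queue per color, and a page's color comes from its physical page
 * number, e.g.
 *
 *	unsigned int color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
 *	// the page would go on vm_page_queue_free[color].qhead
 */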
633 
634 /*
635  * During single-threaded early boot we don't initialize all pages.
636  * This avoids some delay during boot. The rest are initialized and
637  * added to the free list as needed, or once we are multithreaded, by
638  * what becomes the pageout thread.
639  */
640 static boolean_t fill = FALSE;
641 static unsigned int fillval;
642 uint_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
643 ppnum_t delay_above_pnum = PPNUM_MAX;
644 
645 /*
646  * On x86, the first 8GB initializes quickly and gives us lots of low memory, plus memory above it, to start off with.
647  * If ARM ever uses delayed page initialization, this value may need to be quite different.
648  */
649 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
650 
651 /*
652  * When we have to dip into more delayed pages due to low memory, free up
653  * a large chunk to get things back to normal. This avoids repeated
654  * contention in the delayed-page code from allocating page by page.
655  */
656 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
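
/*
 * With 4K pages this works out to (1GB / 4096) == 262144 pages
 * replenished per dip into the delayed pool.
 */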
657 
658 /*
659  * Get and initialize the next delayed page.
660  */
661 static vm_page_t
662 vm_get_delayed_page(int grab_options)
663 {
664 	vm_page_t p;
665 	ppnum_t   pnum;
666 
667 	/*
668 	 * Get a new page if we have one.
669 	 */
670 	vm_free_page_lock();
671 	if (vm_delayed_count == 0) {
672 		vm_free_page_unlock();
673 		return NULL;
674 	}
675 
676 	if (!pmap_next_page(&pnum)) {
677 		vm_delayed_count = 0;
678 		vm_free_page_unlock();
679 		return NULL;
680 	}
681 
682 
683 	assert(vm_delayed_count > 0);
684 	--vm_delayed_count;
685 
686 #if defined(__x86_64__)
687 	/* x86 cluster code requires increasing phys_page in vm_pages[] */
688 	if (vm_pages_count > 0) {
689 		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
690 	}
691 #endif
692 	p = &vm_pages[vm_pages_count];
693 	assert(p < vm_page_array_ending_addr);
694 	vm_page_init(p, pnum, FALSE);
695 	++vm_pages_count;
696 	++vm_page_pages;
697 	vm_free_page_unlock();
698 
699 	/*
700 	 * These pages were initially counted as wired, undo that now.
701 	 */
702 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
703 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
704 	} else {
705 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
706 		vm_page_lockspin_queues();
707 	}
708 	--vm_page_wire_count;
709 	--vm_page_wire_count_initial;
710 	if (vm_page_wire_count_on_boot != 0) {
711 		--vm_page_wire_count_on_boot;
712 	}
713 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
714 		vm_page_unlock_queues();
715 	}
716 
717 
718 	if (fill) {
719 		fillPage(pnum, fillval);
720 	}
721 	return p;
722 }
723 
724 static void vm_page_module_init_delayed(void);
725 
726 /*
727  * Free all remaining delayed pages to the free lists.
728  */
729 void
730 vm_free_delayed_pages(void)
731 {
732 	vm_page_t   p;
733 	vm_page_t   list = NULL;
734 	uint_t      cnt = 0;
735 	vm_offset_t start_free_va;
736 	int64_t     free_size;
737 
738 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
739 		if (vm_himemory_mode) {
740 			vm_page_release(p, FALSE);
741 		} else {
742 			p->vmp_snext = list;
743 			list = p;
744 		}
745 		++cnt;
746 	}
747 
748 	/*
749 	 * Free the pages in reverse order if not in himemory mode,
750 	 * so that the low memory pages end up first on the free lists (LIFO).
751 	 */
752 	while (list != NULL) {
753 		p = list;
754 		list = p->vmp_snext;
755 		p->vmp_snext = NULL;
756 		vm_page_release(p, FALSE);
757 	}
758 #if DEVELOPMENT || DEBUG
759 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
760 #endif
761 
762 	/*
763 	 * Free up any unused full pages at the end of the vm_pages[] array
764 	 */
765 	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
766 
767 #if defined(__x86_64__)
768 	/*
769 	 * Since x86 might have used large pages for vm_pages[], we can't
770 	 * free starting in the middle of a partially used large page.
771 	 */
772 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
773 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
774 	}
775 #endif
776 	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
777 		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
778 		if (free_size > 0) {
779 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
780 			vm_page_array_ending_addr = (void *)start_free_va;
781 
782 			/*
783 			 * Note there's no locking here, as only this thread will ever change this value.
784 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
785 			 */
786 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
787 
788 #if DEVELOPMENT || DEBUG
789 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
790 			    (long)free_size, (long)start_free_va);
791 #endif
792 		}
793 	}
794 
795 
796 	/*
797 	 * now we can create the VM page array zone
798 	 */
799 	vm_page_module_init_delayed();
800 }
801 
802 /*
803  * Try to free up enough delayed pages to satisfy a contiguous memory allocation.
804  */
805 static void
806 vm_free_delayed_pages_contig(
807 	uint_t    npages,
808 	ppnum_t   max_pnum,
809 	ppnum_t   pnum_mask)
810 {
811 	vm_page_t p;
812 	ppnum_t   pnum;
813 	uint_t    cnt = 0;
814 
815 	/*
816 	 * Treat 0 as the absolute max page number.
817 	 */
818 	if (max_pnum == 0) {
819 		max_pnum = PPNUM_MAX;
820 	}
821 
822 	/*
823 	 * Free until we get a properly aligned start page
824 	 */
825 	for (;;) {
826 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
827 		if (p == NULL) {
828 			return;
829 		}
830 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
831 		vm_page_release(p, FALSE);
832 		if (pnum >= max_pnum) {
833 			return;
834 		}
835 		if ((pnum & pnum_mask) == 0) {
836 			break;
837 		}
838 	}
839 
840 	/*
841 	 * Having a healthy pool of free pages will help performance. We don't
842 	 * want to fall back to the delayed code for every page allocation.
843 	 */
844 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
845 		npages += VM_DELAY_PAGE_CHUNK;
846 	}
847 
848 	/*
849 	 * Now free up the pages
850 	 */
851 	for (cnt = 1; cnt < npages; ++cnt) {
852 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
853 		if (p == NULL) {
854 			return;
855 		}
856 		vm_page_release(p, FALSE);
857 	}
858 }
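
/*
 * Note on pnum_mask above: callers pass a mask of low bits that must all
 * be zero in the first page number of the run. For example, a sketch of
 * requesting 2MB alignment with 4K pages:
 *
 *	ppnum_t pnum_mask = (2 * 1024 * 1024 / PAGE_SIZE) - 1;  // == 0x1ff
 *	// (pnum & pnum_mask) == 0 only for 2MB-aligned page numbers
 */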
859 
860 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
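
/*
 * ROUNDUP_NEXTP2 rounds up to the next power of two and requires X > 1,
 * since __builtin_clz(0) is undefined. For example:
 *
 *	ROUNDUP_NEXTP2(5);   // __builtin_clz(4) == 29, so 1U << 3 == 8
 *	ROUNDUP_NEXTP2(8);   // already a power of two: stays 8
 */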
861 
862 void
863 vm_page_init_local_q(unsigned int num_cpus)
864 {
865 	struct vpl *t_local_q;
866 
867 	/*
868 	 * no point in this for a uni-processor system
869 	 */
870 	if (num_cpus >= 2) {
871 		ml_cpu_info_t cpu_info;
872 
873 		/*
874 		 * Force the allocation alignment to a cacheline:
875 		 * the `vpl` struct contains a lock that is taken
876 		 * across CPUs, so we isolate the rest of the per-CPU
877 		 * data from it to avoid false sharing.
878 		 */
879 
880 		ml_cpu_get_info(&cpu_info);
881 
882 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
883 		    cpu_info.cache_line_size - 1);
884 
885 		zpercpu_foreach(lq, t_local_q) {
886 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
887 			vm_page_queue_init(&lq->vpl_queue);
888 		}
889 
890 		/* make the initialization visible to all cores */
891 		os_atomic_store(&vm_page_local_q, t_local_q, release);
892 	}
893 }
894 
895 /*
896  * vm_init_before_launchd
897  *
898  * This should be called right before launchd is loaded.
899  */
900 void
901 vm_init_before_launchd()
902 {
903 	vm_page_lockspin_queues();
904 	vm_page_wire_count_on_boot = vm_page_wire_count;
905 	vm_page_unlock_queues();
906 }
907 
908 
909 /*
910  *	vm_page_bootstrap:
911  *
912  *	Initializes the resident memory module.
913  *
914  *	Allocates memory for the page cells, and
915  *	for the object/offset-to-page hash table headers.
916  *	Each page cell is initialized and placed on the free list.
917  *	Returns the range of available kernel virtual memory.
918  */
919 __startup_func
920 void
921 vm_page_bootstrap(
922 	vm_offset_t             *startp,
923 	vm_offset_t             *endp)
924 {
925 	unsigned int            i;
926 	unsigned int            log1;
927 	unsigned int            log2;
928 	unsigned int            size;
929 
930 	/*
931 	 *	Initialize the page queues.
932 	 */
933 
934 	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
935 	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
936 	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
937 
938 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
939 		int group;
940 
941 		purgeable_queues[i].token_q_head = 0;
942 		purgeable_queues[i].token_q_tail = 0;
943 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
944 			queue_init(&purgeable_queues[i].objq[group]);
945 		}
946 
947 		purgeable_queues[i].type = i;
948 		purgeable_queues[i].new_pages = 0;
949 #if MACH_ASSERT
950 		purgeable_queues[i].debug_count_tokens = 0;
951 		purgeable_queues[i].debug_count_objects = 0;
952 #endif
953 	}
954 	;
955 	purgeable_nonvolatile_count = 0;
956 	queue_init(&purgeable_nonvolatile_queue);
957 
958 	for (i = 0; i < MAX_COLORS; i++) {
959 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
960 	}
961 
962 	vm_page_queue_init(&vm_lopage_queue_free);
963 	vm_page_queue_init(&vm_page_queue_active);
964 	vm_page_queue_init(&vm_page_queue_inactive);
965 #if CONFIG_SECLUDED_MEMORY
966 	vm_page_queue_init(&vm_page_queue_secluded);
967 #endif /* CONFIG_SECLUDED_MEMORY */
968 	vm_page_queue_init(&vm_page_queue_cleaned);
969 	vm_page_queue_init(&vm_page_queue_throttled);
970 	vm_page_queue_init(&vm_page_queue_anonymous);
971 	queue_init(&vm_objects_wired);
972 
973 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
974 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
975 
976 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
977 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
978 	}
979 
980 	vm_page_queue_init(&vm_page_queue_donate);
981 	vm_page_queue_init(&vm_page_queue_background);
982 
983 	vm_page_background_count = 0;
984 	vm_page_background_internal_count = 0;
985 	vm_page_background_external_count = 0;
986 	vm_page_background_promoted_count = 0;
987 
988 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
989 
990 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
991 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
992 	}
993 
994 #if    defined(__LP64__)
995 	vm_page_background_mode = VM_PAGE_BG_ENABLED;
996 	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
997 #else
998 	vm_page_background_mode = VM_PAGE_BG_DISABLED;
999 	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1000 #endif
1001 	vm_page_background_exclude_external = 0;
1002 
1003 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1004 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1005 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1006 
1007 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1008 		vm_page_background_mode = VM_PAGE_BG_DISABLED;
1009 	}
1010 
1011 	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1012 	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1013 		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1014 	}
1015 
1016 	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1017 	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1018 	vm_page_donate_target = vm_page_donate_target_high;
1019 	vm_page_donate_count = 0;
1020 
1021 	vm_page_free_wanted = 0;
1022 	vm_page_free_wanted_privileged = 0;
1023 #if CONFIG_SECLUDED_MEMORY
1024 	vm_page_free_wanted_secluded = 0;
1025 #endif /* CONFIG_SECLUDED_MEMORY */
1026 
1027 #if defined (__x86_64__)
1028 	/* this must be called before vm_page_set_colors() */
1029 	vm_page_setup_clump();
1030 #endif
1031 
1032 	vm_page_set_colors();
1033 
1034 	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1035 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1036 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1037 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1038 
1039 	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1040 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1041 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1042 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1043 	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1044 	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1045 	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1046 #if CONFIG_SECLUDED_MEMORY
1047 	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1048 #endif /* CONFIG_SECLUDED_MEMORY */
1049 
1050 	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1051 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1052 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1053 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1054 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1055 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1056 #if CONFIG_SECLUDED_MEMORY
1057 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1058 #endif /* CONFIG_SECLUDED_MEMORY */
1059 
1060 	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1061 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1062 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1063 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1064 	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1065 #if CONFIG_SECLUDED_MEMORY
1066 	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1067 #endif /* CONFIG_SECLUDED_MEMORY */
1068 
1069 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1070 		vm_allocation_sites_static[t].refcount = 2;
1071 		vm_allocation_sites_static[t].tag = t;
1072 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1073 	}
1074 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1075 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1076 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1077 
1078 	/*
1079 	 *	Steal memory for the map and zone subsystems.
1080 	 */
1081 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1082 
1083 	/*
1084 	 *	Allocate (and initialize) the virtual-to-physical
1085 	 *	table hash buckets.
1086 	 *
1087 	 *	The number of buckets should be a power of two to
1088 	 *	get a good hash function.  The following computation
1089 	 *	chooses the first power of two that is greater
1090 	 *	than the number of physical pages in the system.
1091 	 */
1092 
1093 	if (vm_page_bucket_count == 0) {
1094 		unsigned int npages = pmap_free_pages();
1095 
1096 		vm_page_bucket_count = 1;
1097 		while (vm_page_bucket_count < npages) {
1098 			vm_page_bucket_count <<= 1;
1099 		}
1100 	}
1101 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1102 
1103 	vm_page_hash_mask = vm_page_bucket_count - 1;
1104 
1105 	/*
1106 	 *	Calculate object shift value for hashing algorithm:
1107 	 *		O = log2(sizeof(struct vm_object))
1108 	 *		B = log2(vm_page_bucket_count)
1109 	 *	        hash shifts the object left by
1110 	 *		B/2 - O
1111 	 */
1112 	size = vm_page_bucket_count;
1113 	for (log1 = 0; size > 1; log1++) {
1114 		size /= 2;
1115 	}
1116 	size = sizeof(struct vm_object);
1117 	for (log2 = 0; size > 1; log2++) {
1118 		size /= 2;
1119 	}
1120 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1121 
1122 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1123 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1124 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure unique series */
1125 
1126 	if (vm_page_hash_mask & vm_page_bucket_count) {
1127 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1128 	}
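
	/*
	 * Worked example of the computation above, assuming one million
	 * physical pages and an illustrative 128-byte struct vm_object:
	 * vm_page_bucket_count rounds up to 2^20, so log1 == 20 and
	 * log2 == 7, giving vm_page_hash_shift == 20/2 - 7 + 1 == 4, and
	 *
	 *	vm_page_bucket_hash == (1 << 10) | (1 << 5) | 1 == 0x421
	 */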
1129 
1130 #if VM_PAGE_BUCKETS_CHECK
1131 #if VM_PAGE_FAKE_BUCKETS
1132 	/*
1133 	 * Allocate a decoy set of page buckets, to detect
1134 	 * any stomping there.
1135 	 */
1136 	vm_page_fake_buckets = (vm_page_bucket_t *)
1137 	    pmap_steal_memory(vm_page_bucket_count *
1138 	    sizeof(vm_page_bucket_t), 0);
1139 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1140 	vm_page_fake_buckets_end =
1141 	    vm_map_round_page((vm_page_fake_buckets_start +
1142 	    (vm_page_bucket_count *
1143 	    sizeof(vm_page_bucket_t))),
1144 	    PAGE_MASK);
1145 	char *cp;
1146 	for (cp = (char *)vm_page_fake_buckets_start;
1147 	    cp < (char *)vm_page_fake_buckets_end;
1148 	    cp++) {
1149 		*cp = 0x5a;
1150 	}
1151 #endif /* VM_PAGE_FAKE_BUCKETS */
1152 #endif /* VM_PAGE_BUCKETS_CHECK */
1153 
1154 	kernel_debug_string_early("vm_page_buckets");
1155 	vm_page_buckets = (vm_page_bucket_t *)
1156 	    pmap_steal_memory(vm_page_bucket_count *
1157 	    sizeof(vm_page_bucket_t), 0);
1158 
1159 	kernel_debug_string_early("vm_page_bucket_locks");
1160 	vm_page_bucket_locks = (lck_spin_t *)
1161 	    pmap_steal_memory(vm_page_bucket_lock_count *
1162 	    sizeof(lck_spin_t), 0);
1163 
1164 	for (i = 0; i < vm_page_bucket_count; i++) {
1165 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1166 
1167 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1168 #if     MACH_PAGE_HASH_STATS
1169 		bucket->cur_count = 0;
1170 		bucket->hi_count = 0;
1171 #endif /* MACH_PAGE_HASH_STATS */
1172 	}
1173 
1174 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1175 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1176 	}
1177 
1178 	vm_tag_init();
1179 
1180 #if VM_PAGE_BUCKETS_CHECK
1181 	vm_page_buckets_check_ready = TRUE;
1182 #endif /* VM_PAGE_BUCKETS_CHECK */
1183 
1184 	/*
1185 	 *	Machine-dependent code allocates the resident page table.
1186 	 *	It uses vm_page_init to initialize the page frames.
1187 	 *	The code also returns to us the virtual space available
1188 	 *	to the kernel.  We don't trust the pmap module
1189 	 *	to get the alignment right.
1190 	 */
1191 
1192 	kernel_debug_string_early("pmap_startup");
1193 	pmap_startup(&virtual_space_start, &virtual_space_end);
1194 	virtual_space_start = round_page(virtual_space_start);
1195 	virtual_space_end = trunc_page(virtual_space_end);
1196 
1197 	*startp = virtual_space_start;
1198 	*endp = virtual_space_end;
1199 
1200 	/*
1201 	 *	Compute the initial "wire" count.
1202 	 *	Up until now, the pages which have been set aside are not under
1203 	 *	the VM system's control, so although they aren't explicitly
1204 	 *	wired, they nonetheless can't be moved. At this moment,
1205 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1206 	 */
1207 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1208 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1209 	    vm_page_free_count - vm_lopage_free_count;
1210 #if CONFIG_SECLUDED_MEMORY
1211 	vm_page_wire_count -= vm_page_secluded_count;
1212 #endif
1213 	vm_page_wire_count_initial = vm_page_wire_count;
1214 
1215 	/* capture this for later use */
1216 	booter_size = ml_get_booter_memory_size();
1217 
1218 	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1219 	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1220 
1221 	kernel_debug_string_early("vm_page_bootstrap complete");
1222 }
1223 
1224 #ifndef MACHINE_PAGES
1225 /*
1226  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1227  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1228  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1229  */
1230 static void *
1231 pmap_steal_memory_internal(
1232 	vm_size_t size,
1233 	vm_size_t alignment,
1234 	boolean_t might_free,
1235 	unsigned int flags,
1236 	pmap_mapping_type_t mapping_type)
1237 {
1238 	kern_return_t kr;
1239 	vm_offset_t addr;
1240 	vm_offset_t map_addr;
1241 	ppnum_t phys_page;
1242 	unsigned int pmap_flags;
1243 
1244 	/*
1245 	 * Size needs to be aligned to word size.
1246 	 */
1247 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1248 
1249 	/*
1250 	 * Alignment defaults to word size if not specified.
1251 	 */
1252 	if (alignment == 0) {
1253 		alignment = sizeof(void*);
1254 	}
1255 
1256 	/*
1257 	 * Alignment must be no greater than a page and must be a power of two.
1258 	 */
1259 	assert(alignment <= PAGE_SIZE);
1260 	assert((alignment & (alignment - 1)) == 0);
1261 
1262 	/*
1263 	 * On the first call, get the initial values for virtual address space
1264 	 * and page align them.
1265 	 */
1266 	if (virtual_space_start == virtual_space_end) {
1267 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1268 		virtual_space_start = round_page(virtual_space_start);
1269 		virtual_space_end = trunc_page(virtual_space_end);
1270 
1271 #if defined(__x86_64__)
1272 		/*
1273 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1274 		 * that map it. This makes the VA available for large page mappings.
1275 		 */
1276 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1277 #endif
1278 	}
1279 
1280 	/*
1281 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1282 	 * address if the size is big enough to back with at least 1 large page.
1283 	 */
1284 #if defined(__x86_64__)
1285 	if (size >= I386_LPGBYTES) {
1286 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1287 	}
1288 #endif
1289 	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
1290 	addr = virtual_space_start;
1291 	virtual_space_start += size;
1292 
1293 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1294 
1295 	/*
1296 	 * Allocate and map physical pages to back the new virtual space.
1297 	 */
1298 	map_addr = round_page(addr);
1299 	while (map_addr < addr + size) {
1300 #if defined(__x86_64__)
1301 		/*
1302 		 * Back with a large page if properly aligned on x86
1303 		 */
1304 		if ((map_addr & I386_LPGMASK) == 0 &&
1305 		    map_addr + I386_LPGBYTES <= addr + size &&
1306 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1307 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1308 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1309 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1310 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);
1311 
1312 			if (kr != KERN_SUCCESS) {
1313 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1314 				    (unsigned long)map_addr, phys_page);
1315 			}
1316 			map_addr += I386_LPGBYTES;
1317 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1318 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1319 			vm_page_kern_lpage_count++;
1320 			continue;
1321 		}
1322 #endif
1323 
1324 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1325 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1326 		}
1327 
1328 #if defined(__x86_64__)
1329 		pmap_pre_expand(kernel_pmap, map_addr);
1330 #endif
1331 		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
1332 
1333 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1334 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1335 		    pmap_flags, FALSE, mapping_type);
1336 
1337 		if (kr != KERN_SUCCESS) {
1338 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1339 			    (unsigned long)map_addr, phys_page);
1340 		}
1341 		map_addr += PAGE_SIZE;
1342 
1343 		/*
1344 		 * Account for newly stolen memory
1345 		 */
1346 		vm_page_wire_count++;
1347 		vm_page_stolen_count++;
1348 	}
1349 
1350 #if defined(__x86_64__)
1351 	/*
1352 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1353 	 * Notify the pmap layer to record which high pages were allocated so far.
1354 	 */
1355 	if (might_free) {
1356 		pmap_hi_pages_done();
1357 	}
1358 #endif
1359 #if KASAN
1360 	kasan_notify_address(round_page(addr), size);
1361 #endif
1362 	return (void *) addr;
1363 }
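
/*
 * The size and alignment rounding in pmap_steal_memory_internal() uses the
 * standard power-of-two round-up idiom. A worked example with 8-byte words:
 *
 *	vm_size_t size = 13;
 *	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
 *	// (13 + 7) & ~7 == 20 & ~7 == 16
 */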
1364 
1365 void *
1366 pmap_steal_memory(
1367 	vm_size_t size,
1368 	vm_size_t alignment)
1369 {
1370 	return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1371 }
1372 
1373 void *
1374 pmap_steal_freeable_memory(
1375 	vm_size_t size)
1376 {
1377 	return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1378 }
1379 
1380 void *
1381 pmap_steal_zone_memory(
1382 	vm_size_t size,
1383 	vm_size_t alignment)
1384 {
1385 	unsigned int flags = 0;
1386 
1387 
1388 	return pmap_steal_memory_internal(size, alignment, FALSE, flags, PMAP_MAPPING_TYPE_RESTRICTED);
1389 }
1390 
1391 
1392 #if CONFIG_SECLUDED_MEMORY
1393 /* boot-args to control secluded memory */
1394 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
1395 /* IOKit can use secluded memory */
1396 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
1397 /* apps can use secluded memory */
1398 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
1399 /* filecache can use secluded memory */
1400 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
1401 uint64_t secluded_shutoff_trigger = 0;
1402 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1403 #endif /* CONFIG_SECLUDED_MEMORY */
1404 
1405 
1406 #if defined(__arm64__)
1407 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1408 unsigned int vm_first_phys_ppnum = 0;
1409 #endif
1410 
1411 void vm_page_release_startup(vm_page_t mem);
1412 void
1413 pmap_startup(
1414 	vm_offset_t     *startp,
1415 	vm_offset_t     *endp)
1416 {
1417 	unsigned int    i, npages;
1418 	ppnum_t         phys_page;
1419 	uint64_t        mem_sz;
1420 	uint64_t        start_ns;
1421 	uint64_t        now_ns;
1422 	uint_t          low_page_count = 0;
1423 
1424 #if    defined(__LP64__)
1425 	/*
1426 	 * make sure we are aligned on a 64 byte boundary
1427 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1428 	 * 6 bits of the pointer)
1429 	 */
1430 	if (virtual_space_start != virtual_space_end) {
1431 		virtual_space_start = round_page(virtual_space_start);
1432 	}
1433 #endif
1434 
1435 	/*
1436 	 * We calculate how many page frames we will have
1437 	 * and then allocate the page structures in one chunk.
1438 	 *
1439 	 * Note that the calculation here doesn't take into account
1440 	 * the memory needed to map what's being allocated, i.e. the page
1441 	 * table entries. So the actual number of pages we get will be
1442 	 * less than this. To do someday: include that in the computation.
1443 	 *
1444 	 * Also for ARM, we don't use the count of free_pages, but rather the
1445 	 * range from last page to first page (ignore holes due to retired pages).
1446 	 */
1447 #if defined(__arm64__)
1448 	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1449 #else /* defined(__arm64__) */
1450 	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1451 #endif /* defined(__arm64__) */
1452 	mem_sz += round_page(virtual_space_start) - virtual_space_start;        /* Account for any slop */
1453 	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));    /* scaled to include the vm_page_ts */
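
	/*
	 * Example of the scaling above, assuming 4K pages and an
	 * illustrative 80-byte struct vm_page: 8GB of free memory gives
	 * npages ~= 8G / (4096 + 80) ~= 2.06M entries, leaving room for
	 * the vm_pages[] array itself within the same budget.
	 */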
1454 
1455 
1456 	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1457 
1458 	/*
1459 	 * Check if we want to initialize pages to a known value
1460 	 */
1461 	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1462 		fill = TRUE;
1463 	}
1464 #if     DEBUG
1465 	/* This slows down booting the DEBUG kernel, particularly on
1466 	 * large memory systems, but is worthwhile in deterministically
1467 	 * trapping uninitialized memory usage.
1468 	 */
1469 	if (!fill) {
1470 		fill = TRUE;
1471 		fillval = 0xDEB8F177;
1472 	}
1473 #endif
1474 	if (fill) {
1475 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1476 	}
1477 
1478 #if CONFIG_SECLUDED_MEMORY
1479 	/*
1480 	 * Figure out how much secluded memory to have before we start
1481 	 * releasing pages to the free lists.
1482 	 * The default, if specified nowhere else, is no secluded mem.
1483 	 */
1484 	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
1485 
1486 	/*
1487 	 * Allow a really large app to effectively use secluded memory until it exits.
1488 	 */
1489 	if (vm_page_secluded_target != 0) {
1490 		/*
1491 		 * Get an amount from boot-args, else use 1/2 of max_mem.
1492 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1493 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
1494 		 */
1495 		int secluded_shutoff_mb;
1496 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1497 		    sizeof(secluded_shutoff_mb))) {
1498 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1499 		} else {
1500 			secluded_shutoff_trigger = max_mem / 2;
1501 		}
1502 
1503 		/* ensure the headroom value is sensible and avoid underflows */
1504 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1505 	}
1506 
1507 #endif /* CONFIG_SECLUDED_MEMORY */
1508 
1509 #if defined(__x86_64__)
1510 
1511 	/*
1512 	 * Decide how much memory we delay freeing at boot time.
1513 	 */
1514 	uint32_t delay_above_gb;
1515 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1516 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1517 	}
1518 
1519 	if (delay_above_gb == 0) {
1520 		delay_above_pnum = PPNUM_MAX;
1521 	} else {
1522 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1523 	}
1524 
1525 	/* make sure we have sane breathing room: 1G above low memory */
1526 	if (delay_above_pnum <= max_valid_low_ppnum) {
1527 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1528 	}
1529 
1530 	if (delay_above_pnum < PPNUM_MAX) {
1531 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1532 	}
1533 
1534 #endif /* defined(__x86_64__) */
1535 
1536 	/*
1537 	 * Initialize and release the page frames.
1538 	 */
1539 	kernel_debug_string_early("page_frame_init");
1540 
1541 	vm_page_array_beginning_addr = &vm_pages[0];
1542 	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
1543 #if VM_PAGE_PACKED_FROM_ARRAY
1544 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1545 		panic("pmap_startup(): too many pages to support vm_page packing");
1546 	}
1547 #endif
1548 
1549 	vm_delayed_count = 0;
1550 
1551 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1552 	vm_pages_count = 0;
1553 	for (i = 0; i < npages; i++) {
1554 		/* Did we run out of pages? */
1555 		if (!pmap_next_page(&phys_page)) {
1556 			break;
1557 		}
1558 
1559 		if (phys_page < max_valid_low_ppnum) {
1560 			++low_page_count;
1561 		}
1562 
1563 		/* Are we at high enough pages to delay the rest? */
1564 		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1565 			vm_delayed_count = pmap_free_pages();
1566 			break;
1567 		}
1568 
1569 #if defined(__arm64__)
1570 		if (i == 0) {
1571 			vm_first_phys_ppnum = phys_page;
1572 			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1573 			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1574 		}
1575 #endif /* defined(__arm64__) */
1576 
1577 #if defined(__x86_64__)
1578 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
1579 		if (i > 0) {
1580 			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1581 		}
1582 #endif
1583 		++vm_pages_count;
1584 		vm_page_init(&vm_pages[i], phys_page, FALSE);
1585 		if (fill) {
1586 			fillPage(phys_page, fillval);
1587 		}
1588 		if (vm_himemory_mode) {
1589 			vm_page_release_startup(&vm_pages[i]);
1590 		}
1591 	}
1592 	vm_page_pages = vm_pages_count; /* used to report to user space */
1593 
1594 	if (!vm_himemory_mode) {
1595 		do {
1596 			if (!VMP_ERROR_GET(&vm_pages[--i])) {               /* skip retired pages */
1597 				vm_page_release_startup(&vm_pages[i]);
1598 			}
1599 		} while (i != 0);
1600 	}
1601 
1602 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1603 	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1604 	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1605 
1606 #if defined(__LP64__)
1607 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1608 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1609 	}
1610 
1611 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1612 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1613 	}
1614 #endif
1615 
1616 	VM_CHECK_MEMORYSTATUS;
1617 
1618 	/*
1619 	 * We have to re-align virtual_space_start,
1620 	 * because pmap_steal_memory has been using it.
1621 	 */
1622 	virtual_space_start = round_page(virtual_space_start);
1623 	*startp = virtual_space_start;
1624 	*endp = virtual_space_end;
1625 }
1626 #endif  /* MACHINE_PAGES */
1627 
1628 /*
1629  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1630  * or frees to this zone. It's just here for reporting purposes via the zprint command.
1631  * This needs to be done after all initially delayed pages are put on the free lists.
1632  */
1633 static void
1634 vm_page_module_init_delayed(void)
1635 {
1636 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1637 	    ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1638 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1639 
1640 		zone_set_exhaustible(z, 0, true);
1641 		/*
1642 		 * Reflect size and usage information for vm_pages[].
1643 		 */
1644 
1645 		z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1646 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
1647 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1648 		vm_pages_count * sizeof(struct vm_page);
1649 		vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1650 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1651 		z->z_wired_cur += vm_page_zone_pages;
1652 		z->z_wired_hwm = z->z_wired_cur;
1653 		z->z_va_cur = z->z_wired_cur;
1654 		/* since zone accounts for these, take them out of stolen */
1655 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1656 	});
1657 }
1658 
1659 /*
1660  * Create the vm_pages zone. This is used for the vm_page structures for the pages
1661  * that are scavenged from other boot time usages by ml_static_mfree(). As such,
1662  * this needs to happen in early VM bootstrap.
1663  */
1664 
1665 __startup_func
1666 static void
1667 vm_page_module_init(void)
1668 {
1669 	vm_size_t vm_page_with_ppnum_size;
1670 
1671 	/*
1672 	 * Since the pointers to elements in this zone will be packed, they
1673 	 * must be rounded to an appropriate size, not strictly what sizeof() reports.
1674 	 */
1675 	vm_page_with_ppnum_size =
1676 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1677 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
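	/*
	 * Worked example (hypothetical sizes): if sizeof(struct vm_page_with_ppnum)
	 * were 100 and VM_PAGE_PACKED_PTR_ALIGNMENT were 64, the expression
	 * above would round the element size up to (100 + 63) & ~63 == 128.
	 */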
1678 
1679 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1680 	    ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NOTBITAG,
1681 	    ZONE_ID_ANY, ^(zone_t z) {
1682 		/*
1683 		 * The number "10" is a small number that is larger than the number
1684 		 * of fictitious pages that any single caller will attempt to allocate
1685 		 * without blocking.
1686 		 *
1687 		 * The largest such number at the moment is kmem_alloc()
1688 		 * when 2 guard pages are requested. 10 is simply a somewhat larger number,
1689 		 * taking into account the 50% hysteresis the zone allocator uses.
1690 		 *
1691 		 * Note: this works at all because the zone allocator
1692 		 *       doesn't ever allocate fictitious pages.
1693 		 */
1694 		zone_raise_reserve(z, 10);
1695 	});
1696 }
1697 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1698 
1699 /*
1700  *	Routine:	vm_page_create
1701  *	Purpose:
1702  *		After the VM system is up, machine-dependent code
1703  *		may stumble across more physical memory.  For example,
1704  *		memory that it was reserving for a frame buffer.
1705  *		vm_page_create turns this memory into available pages.
1706  */
1707 
1708 void
1709 vm_page_create(
1710 	ppnum_t start,
1711 	ppnum_t end)
1712 {
1713 	ppnum_t         phys_page;
1714 	vm_page_t       m;
1715 
1716 	for (phys_page = start;
1717 	    phys_page < end;
1718 	    phys_page++) {
1719 		m = vm_page_grab_fictitious_common(phys_page, TRUE);
1720 		m->vmp_fictitious = FALSE;
1721 		pmap_clear_noencrypt(phys_page);
1722 
1723 
1724 		vm_free_page_lock();
1725 		vm_page_pages++;
1726 		vm_free_page_unlock();
1727 		vm_page_release(m, FALSE);
1728 	}
1729 }
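/*
 * Hedged usage sketch (hypothetical symbols): machine-dependent code that
 * reclaims a boot-time framebuffer spanning physical pages
 * [fb_first_ppn, fb_last_ppn) could donate that range to the VM with:
 *
 *	vm_page_create(fb_first_ppn, fb_last_ppn);
 */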
1730 
1731 
1732 /*
1733  *	vm_page_hash:
1734  *
1735  *	Distributes the object/offset key pair among hash buckets.
1736  *
1737  *	NOTE:	The bucket count must be a power of 2
1738  */
1739 #define vm_page_hash(object, offset) (\
1740 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1741 	 & vm_page_hash_mask)
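/*
 * Illustrative expansion (hypothetical bucket count): with 1024 buckets,
 * vm_page_hash_mask == 0x3ff and the final "& vm_page_hash_mask" is a cheap
 * power-of-2 modulo.  Written out, the macro computes:
 *
 *	natural_t h = (natural_t)((uintptr_t)object * vm_page_bucket_hash);
 *	h += (uint32_t)atop_64(offset) ^ vm_page_bucket_hash;
 *	h &= vm_page_hash_mask;		... the bucket index ...
 */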
1742 
1743 
1744 /*
1745  *	vm_page_insert:		[ internal use only ]
1746  *
1747  *	Inserts the given mem entry into the object/object-page
1748  *	table and object list.
1749  *
1750  *	The object must be locked.
1751  */
1752 void
1753 vm_page_insert(
1754 	vm_page_t               mem,
1755 	vm_object_t             object,
1756 	vm_object_offset_t      offset)
1757 {
1758 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1759 }
1760 
1761 void
1762 vm_page_insert_wired(
1763 	vm_page_t               mem,
1764 	vm_object_t             object,
1765 	vm_object_offset_t      offset,
1766 	vm_tag_t                tag)
1767 {
1768 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1769 }
1770 
1771 void
1772 vm_page_insert_internal(
1773 	vm_page_t               mem,
1774 	vm_object_t             object,
1775 	vm_object_offset_t      offset,
1776 	vm_tag_t                tag,
1777 	boolean_t               queues_lock_held,
1778 	boolean_t               insert_in_hash,
1779 	boolean_t               batch_pmap_op,
1780 	boolean_t               batch_accounting,
1781 	uint64_t                *delayed_ledger_update)
1782 {
1783 	vm_page_bucket_t        *bucket;
1784 	lck_spin_t              *bucket_lock;
1785 	int                     hash_id;
1786 	task_t                  owner;
1787 	int                     ledger_idx_volatile;
1788 	int                     ledger_idx_nonvolatile;
1789 	int                     ledger_idx_volatile_compressed;
1790 	int                     ledger_idx_nonvolatile_compressed;
1791 	boolean_t               do_footprint;
1792 
1793 #if 0
1794 	/*
1795 	 * we may not hold the page queue lock
1796 	 * so this check isn't safe to make
1797 	 */
1798 	VM_PAGE_CHECK(mem);
1799 #endif
1800 
1801 	assertf(page_aligned(offset), "0x%llx\n", offset);
1802 
1803 	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1804 
1805 	vm_object_lock_assert_exclusive(object);
1806 	LCK_MTX_ASSERT(&vm_page_queue_lock,
1807 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
1808 	    : LCK_MTX_ASSERT_NOTOWNED);
1809 
1810 	if (queues_lock_held == FALSE) {
1811 		assert(!VM_PAGE_PAGEABLE(mem));
1812 	}
1813 
1814 	if (insert_in_hash == TRUE) {
1815 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1816 		if (mem->vmp_tabled || mem->vmp_object) {
1817 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1818 			    "already in (obj=%p,off=0x%llx)",
1819 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1820 		}
1821 #endif
1822 		if (object->internal && (offset >= object->vo_size)) {
1823 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1824 			    mem, object, offset, object->vo_size);
1825 		}
1826 
1827 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1828 
1829 		/*
1830 		 *	Record the object/offset pair in this page
1831 		 */
1832 
1833 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1834 		mem->vmp_offset = offset;
1835 
1836 #if CONFIG_SECLUDED_MEMORY
1837 		if (object->eligible_for_secluded) {
1838 			vm_page_secluded.eligible_for_secluded++;
1839 		}
1840 #endif /* CONFIG_SECLUDED_MEMORY */
1841 
1842 		/*
1843 		 *	Insert it into the object/offset hash table
1844 		 */
1845 		hash_id = vm_page_hash(object, offset);
1846 		bucket = &vm_page_buckets[hash_id];
1847 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1848 
1849 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1850 
1851 		mem->vmp_next_m = bucket->page_list;
1852 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
1853 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1854 
1855 #if     MACH_PAGE_HASH_STATS
1856 		if (++bucket->cur_count > bucket->hi_count) {
1857 			bucket->hi_count = bucket->cur_count;
1858 		}
1859 #endif /* MACH_PAGE_HASH_STATS */
1860 		mem->vmp_hashed = TRUE;
1861 		lck_spin_unlock(bucket_lock);
1862 	}
1863 
1864 	{
1865 		unsigned int    cache_attr;
1866 
1867 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
1868 
1869 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
1870 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1871 		}
1872 	}
1873 	/*
1874 	 *	Now link into the object's list of backed pages.
1875 	 */
1876 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
1877 	object->memq_hint = mem;
1878 	mem->vmp_tabled = TRUE;
1879 
1880 	/*
1881 	 *	Show that the object has one more resident page.
1882 	 */
1883 
1884 	object->resident_page_count++;
1885 	if (VM_PAGE_WIRED(mem)) {
1886 		assert(mem->vmp_wire_count > 0);
1887 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1888 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1889 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1890 	}
1891 	assert(object->resident_page_count >= object->wired_page_count);
1892 
1893 #if DEVELOPMENT || DEBUG
1894 	if (object->object_is_shared_cache &&
1895 	    object->pager != NULL &&
1896 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1897 		int new, old;
1898 		assert(!object->internal);
1899 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1900 		do {
1901 			old = shared_region_pagers_resident_peak;
1902 		} while (old < new &&
1903 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1904 	}
1905 #endif /* DEVELOPMENT || DEBUG */
1906 
1907 	if (batch_accounting == FALSE) {
1908 		if (object->internal) {
1909 			OSAddAtomic(1, &vm_page_internal_count);
1910 		} else {
1911 			OSAddAtomic(1, &vm_page_external_count);
1912 		}
1913 	}
1914 
1915 	/*
1916 	 * It wouldn't make sense to insert a "reusable" page in
1917 	 * an object (the page would have been marked "reusable" only
1918 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1919 	 * in the object at that time).
1920 	 * But a page could be inserted in an "all_reusable" object, if
1921 	 * something faults it in (a vm_read() from another task or a
1922 	 * "use-after-free" issue in user space, for example).  It can
1923 	 * also happen if we're relocating a page from that object to
1924 	 * a different physical page during a physically-contiguous
1925 	 * allocation.
1926 	 */
1927 	assert(!mem->vmp_reusable);
1928 	if (object->all_reusable) {
1929 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1930 	}
1931 
1932 	if (object->purgable == VM_PURGABLE_DENY &&
1933 	    !object->vo_ledger_tag) {
1934 		owner = TASK_NULL;
1935 	} else {
1936 		owner = VM_OBJECT_OWNER(object);
1937 		vm_object_ledger_tag_ledgers(object,
1938 		    &ledger_idx_volatile,
1939 		    &ledger_idx_nonvolatile,
1940 		    &ledger_idx_volatile_compressed,
1941 		    &ledger_idx_nonvolatile_compressed,
1942 		    &do_footprint);
1943 	}
1944 	if (owner &&
1945 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
1946 	    object->purgable == VM_PURGABLE_DENY ||
1947 	    VM_PAGE_WIRED(mem))) {
1948 		if (delayed_ledger_update) {
1949 			*delayed_ledger_update += PAGE_SIZE;
1950 		} else {
1951 			/* more non-volatile bytes */
1952 			ledger_credit(owner->ledger,
1953 			    ledger_idx_nonvolatile,
1954 			    PAGE_SIZE);
1955 			if (do_footprint) {
1956 				/* more footprint */
1957 				ledger_credit(owner->ledger,
1958 				    task_ledgers.phys_footprint,
1959 				    PAGE_SIZE);
1960 			}
1961 		}
1962 	} else if (owner &&
1963 	    (object->purgable == VM_PURGABLE_VOLATILE ||
1964 	    object->purgable == VM_PURGABLE_EMPTY)) {
1965 		assert(!VM_PAGE_WIRED(mem));
1966 		/* more volatile bytes */
1967 		ledger_credit(owner->ledger,
1968 		    ledger_idx_volatile,
1969 		    PAGE_SIZE);
1970 	}
1971 
1972 	if (object->purgable == VM_PURGABLE_VOLATILE) {
1973 		if (VM_PAGE_WIRED(mem)) {
1974 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1975 		} else {
1976 			OSAddAtomic(+1, &vm_page_purgeable_count);
1977 		}
1978 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
1979 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1980 		/*
1981 		 * This page belongs to a purged VM object but hasn't
1982 		 * been purged (because it was "busy").
1983 		 * It's in the "throttled" queue and hence not
1984 		 * visible to vm_pageout_scan().  Move it to a pageable
1985 		 * queue, so that it can eventually be reclaimed, instead
1986 		 * of lingering in the "empty" object.
1987 		 */
1988 		if (queues_lock_held == FALSE) {
1989 			vm_page_lockspin_queues();
1990 		}
1991 		vm_page_deactivate(mem);
1992 		if (queues_lock_held == FALSE) {
1993 			vm_page_unlock_queues();
1994 		}
1995 	}
1996 
1997 #if VM_OBJECT_TRACKING_OP_MODIFIED
1998 	if (vm_object_tracking_btlog &&
1999 	    object->internal &&
2000 	    object->resident_page_count == 0 &&
2001 	    object->pager == NULL &&
2002 	    object->shadow != NULL &&
2003 	    object->shadow->vo_copy == object) {
2004 		btlog_record(vm_object_tracking_btlog, object,
2005 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2006 		    btref_get(__builtin_frame_address(0), 0));
2007 	}
2008 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2009 }
2010 
2011 /*
2012  *	vm_page_replace:
2013  *
2014  *	Exactly like vm_page_insert, except that we first
2015  *	remove any existing page at the given offset in object.
2016  *
2017  *	The object must be locked.
2018  */
2019 void
2020 vm_page_replace(
2021 	vm_page_t               mem,
2022 	vm_object_t             object,
2023 	vm_object_offset_t      offset)
2024 {
2025 	vm_page_bucket_t *bucket;
2026 	vm_page_t        found_m = VM_PAGE_NULL;
2027 	lck_spin_t      *bucket_lock;
2028 	int             hash_id;
2029 
2030 #if 0
2031 	/*
2032 	 * we don't hold the page queue lock
2033 	 * so this check isn't safe to make
2034 	 */
2035 	VM_PAGE_CHECK(mem);
2036 #endif
2037 	vm_object_lock_assert_exclusive(object);
2038 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2039 	if (mem->vmp_tabled || mem->vmp_object) {
2040 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2041 		    "already in (obj=%p,off=0x%llx)",
2042 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2043 	}
2044 #endif
2045 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2046 
2047 	assert(!VM_PAGE_PAGEABLE(mem));
2048 
2049 	/*
2050 	 *	Record the object/offset pair in this page
2051 	 */
2052 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2053 	mem->vmp_offset = offset;
2054 
2055 	/*
2056 	 *	Insert it into the object/offset hash table,
2057 	 *	replacing any page that might have been there.
2058 	 */
2059 
2060 	hash_id = vm_page_hash(object, offset);
2061 	bucket = &vm_page_buckets[hash_id];
2062 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2063 
2064 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2065 
2066 	if (bucket->page_list) {
2067 		vm_page_packed_t *mp = &bucket->page_list;
2068 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2069 
2070 		do {
2071 			/*
2072 			 * compare packed object pointers
2073 			 */
2074 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2075 				/*
2076 				 * Remove old page from hash list
2077 				 */
2078 				*mp = m->vmp_next_m;
2079 				m->vmp_hashed = FALSE;
2080 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2081 
2082 				found_m = m;
2083 				break;
2084 			}
2085 			mp = &m->vmp_next_m;
2086 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2087 
2088 		mem->vmp_next_m = bucket->page_list;
2089 	} else {
2090 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2091 	}
2092 	/*
2093 	 * insert new page at head of hash list
2094 	 */
2095 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2096 	mem->vmp_hashed = TRUE;
2097 
2098 	lck_spin_unlock(bucket_lock);
2099 
2100 	if (found_m) {
2101 		/*
2102 		 * there was already a page at the specified
2103 		 * offset for this object... remove it from
2104 		 * the object and free it back to the free list
2105 		 */
2106 		vm_page_free_unlocked(found_m, FALSE);
2107 	}
2108 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2109 }
2110 
2111 /*
2112  *	vm_page_remove:		[ internal use only ]
2113  *
2114  *	Removes the given mem entry from the object/offset-page
2115  *	table and the object page list.
2116  *
2117  *	The object must be locked.
2118  */
2119 
2120 void
2121 vm_page_remove(
2122 	vm_page_t       mem,
2123 	boolean_t       remove_from_hash)
2124 {
2125 	vm_page_bucket_t *bucket;
2126 	vm_page_t       this;
2127 	lck_spin_t      *bucket_lock;
2128 	int             hash_id;
2129 	task_t          owner;
2130 	vm_object_t     m_object;
2131 	int             ledger_idx_volatile;
2132 	int             ledger_idx_nonvolatile;
2133 	int             ledger_idx_volatile_compressed;
2134 	int             ledger_idx_nonvolatile_compressed;
2135 	int             do_footprint;
2136 
2137 	m_object = VM_PAGE_OBJECT(mem);
2138 
2139 	vm_object_lock_assert_exclusive(m_object);
2140 	assert(mem->vmp_tabled);
2141 	assert(!mem->vmp_cleaning);
2142 	assert(!mem->vmp_laundry);
2143 
2144 	if (VM_PAGE_PAGEABLE(mem)) {
2145 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2146 	}
2147 #if 0
2148 	/*
2149 	 * we don't hold the page queue lock
2150 	 * so this check isn't safe to make
2151 	 */
2152 	VM_PAGE_CHECK(mem);
2153 #endif
2154 	if (remove_from_hash == TRUE) {
2155 		/*
2156 		 *	Remove from the object/offset hash table
2157 		 */
2158 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2159 		bucket = &vm_page_buckets[hash_id];
2160 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2161 
2162 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2163 
2164 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2165 			/* optimize for common case */
2166 
2167 			bucket->page_list = mem->vmp_next_m;
2168 		} else {
2169 			vm_page_packed_t        *prev;
2170 
2171 			for (prev = &this->vmp_next_m;
2172 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2173 			    prev = &this->vmp_next_m) {
2174 				continue;
2175 			}
2176 			*prev = this->vmp_next_m;
2177 		}
2178 #if     MACH_PAGE_HASH_STATS
2179 		bucket->cur_count--;
2180 #endif /* MACH_PAGE_HASH_STATS */
2181 		mem->vmp_hashed = FALSE;
2182 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2183 		lck_spin_unlock(bucket_lock);
2184 	}
2185 	/*
2186 	 *	Now remove from the object's list of backed pages.
2187 	 */
2188 
2189 	vm_page_remove_internal(mem);
2190 
2191 	/*
2192 	 *	And show that the object has one fewer resident
2193 	 *	page.
2194 	 */
2195 
2196 	assert(m_object->resident_page_count > 0);
2197 	m_object->resident_page_count--;
2198 
2199 #if DEVELOPMENT || DEBUG
2200 	if (m_object->object_is_shared_cache &&
2201 	    m_object->pager != NULL &&
2202 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2203 		assert(!m_object->internal);
2204 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
2205 	}
2206 #endif /* DEVELOPMENT || DEBUG */
2207 
2208 	if (m_object->internal) {
2209 #if DEBUG
2210 		assert(vm_page_internal_count);
2211 #endif /* DEBUG */
2212 
2213 		OSAddAtomic(-1, &vm_page_internal_count);
2214 	} else {
2215 		assert(vm_page_external_count);
2216 		OSAddAtomic(-1, &vm_page_external_count);
2217 
2218 		if (mem->vmp_xpmapped) {
2219 			assert(vm_page_xpmapped_external_count);
2220 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2221 		}
2222 	}
2223 	if (!m_object->internal &&
2224 	    m_object->cached_list.next &&
2225 	    m_object->cached_list.prev) {
2226 		if (m_object->resident_page_count == 0) {
2227 			vm_object_cache_remove(m_object);
2228 		}
2229 	}
2230 
2231 	if (VM_PAGE_WIRED(mem)) {
2232 		assert(mem->vmp_wire_count > 0);
2233 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2234 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2235 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2236 	}
2237 	assert(m_object->resident_page_count >=
2238 	    m_object->wired_page_count);
2239 	if (mem->vmp_reusable) {
2240 		assert(m_object->reusable_page_count > 0);
2241 		m_object->reusable_page_count--;
2242 		assert(m_object->reusable_page_count <=
2243 		    m_object->resident_page_count);
2244 		mem->vmp_reusable = FALSE;
2245 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2246 		vm_page_stats_reusable.reused_remove++;
2247 	} else if (m_object->all_reusable) {
2248 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2249 		vm_page_stats_reusable.reused_remove++;
2250 	}
2251 
2252 	if (m_object->purgable == VM_PURGABLE_DENY &&
2253 	    !m_object->vo_ledger_tag) {
2254 		owner = TASK_NULL;
2255 	} else {
2256 		owner = VM_OBJECT_OWNER(m_object);
2257 		vm_object_ledger_tag_ledgers(m_object,
2258 		    &ledger_idx_volatile,
2259 		    &ledger_idx_nonvolatile,
2260 		    &ledger_idx_volatile_compressed,
2261 		    &ledger_idx_nonvolatile_compressed,
2262 		    &do_footprint);
2263 	}
2264 	if (owner &&
2265 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2266 	    m_object->purgable == VM_PURGABLE_DENY ||
2267 	    VM_PAGE_WIRED(mem))) {
2268 		/* less non-volatile bytes */
2269 		ledger_debit(owner->ledger,
2270 		    ledger_idx_nonvolatile,
2271 		    PAGE_SIZE);
2272 		if (do_footprint) {
2273 			/* less footprint */
2274 			ledger_debit(owner->ledger,
2275 			    task_ledgers.phys_footprint,
2276 			    PAGE_SIZE);
2277 		}
2278 	} else if (owner &&
2279 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
2280 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
2281 		assert(!VM_PAGE_WIRED(mem));
2282 		/* less volatile bytes */
2283 		ledger_debit(owner->ledger,
2284 		    ledger_idx_volatile,
2285 		    PAGE_SIZE);
2286 	}
2287 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2288 		if (VM_PAGE_WIRED(mem)) {
2289 			assert(vm_page_purgeable_wired_count > 0);
2290 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2291 		} else {
2292 			assert(vm_page_purgeable_count > 0);
2293 			OSAddAtomic(-1, &vm_page_purgeable_count);
2294 		}
2295 	}
2296 
2297 	if (m_object->set_cache_attr == TRUE) {
2298 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2299 	}
2300 
2301 	mem->vmp_tabled = FALSE;
2302 	mem->vmp_object = 0;
2303 	mem->vmp_offset = (vm_object_offset_t) -1;
2304 }
2305 
2306 
2307 /*
2308  *	vm_page_lookup:
2309  *
2310  *	Returns the page associated with the object/offset
2311  *	pair specified; if none is found, VM_PAGE_NULL is returned.
2312  *
2313  *	The object must be locked.  No side effects.
2314  */
2315 
2316 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
2317 
2318 #if DEBUG_VM_PAGE_LOOKUP
2319 
2320 struct {
2321 	uint64_t        vpl_total;
2322 	uint64_t        vpl_empty_obj;
2323 	uint64_t        vpl_bucket_NULL;
2324 	uint64_t        vpl_hit_hint;
2325 	uint64_t        vpl_hit_hint_next;
2326 	uint64_t        vpl_hit_hint_prev;
2327 	uint64_t        vpl_fast;
2328 	uint64_t        vpl_slow;
2329 	uint64_t        vpl_hit;
2330 	uint64_t        vpl_miss;
2331 
2332 	uint64_t        vpl_fast_elapsed;
2333 	uint64_t        vpl_slow_elapsed;
2334 } vm_page_lookup_stats __attribute__((aligned(8)));
2335 
2336 #endif
2337 
2338 #define KDP_VM_PAGE_WALK_MAX    1000
2339 
2340 vm_page_t
2341 kdp_vm_page_lookup(
2342 	vm_object_t             object,
2343 	vm_object_offset_t      offset)
2344 {
2345 	vm_page_t cur_page;
2346 	int num_traversed = 0;
2347 
2348 	if (not_in_kdp) {
2349 		panic("kdp_vm_page_lookup done outside of kernel debugger");
2350 	}
2351 
2352 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2353 		if (cur_page->vmp_offset == offset) {
2354 			return cur_page;
2355 		}
2356 		num_traversed++;
2357 
2358 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2359 			return VM_PAGE_NULL;
2360 		}
2361 	}
2362 
2363 	return VM_PAGE_NULL;
2364 }
2365 
2366 vm_page_t
2367 vm_page_lookup(
2368 	vm_object_t             object,
2369 	vm_object_offset_t      offset)
2370 {
2371 	vm_page_t       mem;
2372 	vm_page_bucket_t *bucket;
2373 	vm_page_queue_entry_t   qe;
2374 	lck_spin_t      *bucket_lock = NULL;
2375 	int             hash_id;
2376 #if DEBUG_VM_PAGE_LOOKUP
2377 	uint64_t        start, elapsed;
2378 
2379 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2380 #endif
2381 
2382 	if (VM_KERNEL_ADDRESS(offset)) {
2383 		offset = VM_KERNEL_STRIP_UPTR(offset);
2384 	}
2385 
2386 	vm_object_lock_assert_held(object);
2387 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2388 
2389 	if (object->resident_page_count == 0) {
2390 #if DEBUG_VM_PAGE_LOOKUP
2391 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2392 #endif
2393 		return VM_PAGE_NULL;
2394 	}
2395 
2396 	mem = object->memq_hint;
2397 
2398 	if (mem != VM_PAGE_NULL) {
2399 		assert(VM_PAGE_OBJECT(mem) == object);
2400 
2401 		if (mem->vmp_offset == offset) {
2402 #if DEBUG_VM_PAGE_LOOKUP
2403 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2404 #endif
2405 			return mem;
2406 		}
2407 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2408 
2409 		if (!vm_page_queue_end(&object->memq, qe)) {
2410 			vm_page_t       next_page;
2411 
2412 			next_page = (vm_page_t)((uintptr_t)qe);
2413 			assert(VM_PAGE_OBJECT(next_page) == object);
2414 
2415 			if (next_page->vmp_offset == offset) {
2416 				object->memq_hint = next_page; /* new hint */
2417 #if DEBUG_VM_PAGE_LOOKUP
2418 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2419 #endif
2420 				return next_page;
2421 			}
2422 		}
2423 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2424 
2425 		if (!vm_page_queue_end(&object->memq, qe)) {
2426 			vm_page_t prev_page;
2427 
2428 			prev_page = (vm_page_t)((uintptr_t)qe);
2429 			assert(VM_PAGE_OBJECT(prev_page) == object);
2430 
2431 			if (prev_page->vmp_offset == offset) {
2432 				object->memq_hint = prev_page; /* new hint */
2433 #if DEBUG_VM_PAGE_LOOKUP
2434 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2435 #endif
2436 				return prev_page;
2437 			}
2438 		}
2439 	}
2440 	/*
2441 	 * Search the hash table for this object/offset pair
2442 	 */
2443 	hash_id = vm_page_hash(object, offset);
2444 	bucket = &vm_page_buckets[hash_id];
2445 
2446 	/*
2447 	 * since we hold the object lock, we are guaranteed that no
2448 	 * new pages can be inserted into this object... this in turn
2449 	 * guarantees that the page we're looking for can't exist
2450 	 * if the bucket it hashes to is currently NULL even when looked
2451 	 * at outside the scope of the hash bucket lock... this is a
2452 	 * really cheap optimization to avoid taking the lock
2453 	 */
2454 	if (!bucket->page_list) {
2455 #if DEBUG_VM_PAGE_LOOKUP
2456 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2457 #endif
2458 		return VM_PAGE_NULL;
2459 	}
2460 
2461 #if DEBUG_VM_PAGE_LOOKUP
2462 	start = mach_absolute_time();
2463 #endif
2464 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2465 		/*
2466 		 * on average, it's roughly 3 times faster to run a short memq list
2467 		 * than to take the spin lock and go through the hash list
2468 		 */
2469 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
2470 
2471 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2472 			if (mem->vmp_offset == offset) {
2473 				break;
2474 			}
2475 
2476 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2477 		}
2478 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2479 			mem = NULL;
2480 		}
2481 	} else {
2482 		vm_page_object_t        packed_object;
2483 
2484 		packed_object = VM_PAGE_PACK_OBJECT(object);
2485 
2486 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2487 
2488 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2489 
2490 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2491 		    mem != VM_PAGE_NULL;
2492 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2493 #if 0
2494 			/*
2495 			 * we don't hold the page queue lock
2496 			 * so this check isn't safe to make
2497 			 */
2498 			VM_PAGE_CHECK(mem);
2499 #endif
2500 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2501 				break;
2502 			}
2503 		}
2504 		lck_spin_unlock(bucket_lock);
2505 	}
2506 
2507 #if DEBUG_VM_PAGE_LOOKUP
2508 	elapsed = mach_absolute_time() - start;
2509 
2510 	if (bucket_lock) {
2511 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2512 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2513 	} else {
2514 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2515 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2516 	}
2517 	if (mem != VM_PAGE_NULL) {
2518 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2519 	} else {
2520 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2521 	}
2522 #endif
2523 	if (mem != VM_PAGE_NULL) {
2524 		assert(VM_PAGE_OBJECT(mem) == object);
2525 
2526 		object->memq_hint = mem;
2527 	}
2528 	return mem;
2529 }
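/*
 * Hedged usage sketch: callers must hold the object lock across the lookup
 * and across any use of the returned page, along the lines of:
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, offset);
 *	if (m != VM_PAGE_NULL) {
 *		... operate on m while the object lock is held ...
 *	}
 *	vm_object_unlock(object);
 */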
2530 
2531 
2532 /*
2533  *	vm_page_rename:
2534  *
2535  *	Move the given memory entry from its
2536  *	current object to the specified target object/offset.
2537  *
2538  *	The object must be locked.
2539  */
2540 void
2541 vm_page_rename(
2542 	vm_page_t               mem,
2543 	vm_object_t             new_object,
2544 	vm_object_offset_t      new_offset)
2545 {
2546 	boolean_t       internal_to_external, external_to_internal;
2547 	vm_tag_t        tag;
2548 	vm_object_t     m_object;
2549 
2550 	m_object = VM_PAGE_OBJECT(mem);
2551 
2552 	assert(m_object != new_object);
2553 	assert(m_object);
2554 
2555 	/*
2556 	 *	Changes to mem->vmp_object require the page lock because
2557 	 *	the pageout daemon uses that lock to get the object.
2558 	 */
2559 	vm_page_lockspin_queues();
2560 
2561 	internal_to_external = FALSE;
2562 	external_to_internal = FALSE;
2563 
2564 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2565 		/*
2566 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
2567 		 * if we first move the page to the active queue... it's going to end
2568 		 * up there anyway, and we don't do vm_page_rename's frequently enough
2569 		 * up there anyway, and we don't call vm_page_rename frequently enough
2570 		 */
2571 		vm_page_queues_remove(mem, FALSE);
2572 		vm_page_activate(mem);
2573 	}
2574 	if (VM_PAGE_PAGEABLE(mem)) {
2575 		if (m_object->internal && !new_object->internal) {
2576 			internal_to_external = TRUE;
2577 		}
2578 		if (!m_object->internal && new_object->internal) {
2579 			external_to_internal = TRUE;
2580 		}
2581 	}
2582 
2583 	tag = m_object->wire_tag;
2584 	vm_page_remove(mem, TRUE);
2585 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2586 
2587 	if (internal_to_external) {
2588 		vm_page_pageable_internal_count--;
2589 		vm_page_pageable_external_count++;
2590 	} else if (external_to_internal) {
2591 		vm_page_pageable_external_count--;
2592 		vm_page_pageable_internal_count++;
2593 	}
2594 
2595 	vm_page_unlock_queues();
2596 }
2597 
2598 /*
2599  *	vm_page_init:
2600  *
2601  *	Initialize the fields in a new page.
2602  *	This takes a structure with random values and initializes it
2603  *	so that it can be given to vm_page_release or vm_page_insert.
2604  */
2605 void
2606 vm_page_init(
2607 	vm_page_t mem,
2608 	ppnum_t   phys_page,
2609 	boolean_t lopage)
2610 {
2611 	uint_t    i;
2612 	uintptr_t *p;
2613 
2614 	assert(phys_page);
2615 
2616 #if DEBUG
2617 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2618 		if (!(pmap_valid_page(phys_page))) {
2619 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2620 		}
2621 	}
2622 #endif /* DEBUG */
2623 
2624 	/*
2625 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2626 	 * try to use initial values which match 0. This minimizes the number of writes
2627 	 * needed for boot-time initialization.
2628 	 *
2629 	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2630 	 */
2631 	assert(VM_PAGE_NOT_ON_Q == 0);
2632 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2633 	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2634 		*p++ = 0;
2635 	}
2636 	mem->vmp_offset = (vm_object_offset_t)-1;
2637 	mem->vmp_busy = TRUE;
2638 	mem->vmp_lopage = lopage;
2639 
2640 	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2641 #if 0
2642 	/*
2643 	 * we're leaving this turned off for now... currently pages
2644 	 * come off the free list and are either immediately dirtied/referenced
2645 	 * due to zero-fill or COW faults, or are used to read or write files...
2646 	 * in the file I/O case, the UPL mechanism takes care of clearing
2647 	 * the state of the HW ref/mod bits in a somewhat fragile way.
2648 	 * Since we may change the way this works in the future (to toughen it up),
2649 	 * I'm leaving this as a reminder of where these bits could get cleared
2650 	 */
2651 
2652 	/*
2653 	 * make sure both the h/w referenced and modified bits are
2654 	 * clear at this point... we are especially dependent on
2655 	 * not finding a 'stale' h/w modified in a number of spots
2656 	 * once this page goes back into use
2657 	 */
2658 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2659 #endif
2660 }
2661 
2662 /*
2663  *	vm_page_grab_fictitious:
2664  *
2665  *	Remove a fictitious page from the free list.
2666  *	Returns VM_PAGE_NULL if there are no free pages.
2667  */
2668 
2669 static vm_page_t
2670 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2671 {
2672 	vm_page_t m;
2673 
2674 	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2675 	if (m) {
2676 		vm_page_init(m, phys_addr, FALSE);
2677 		m->vmp_fictitious = TRUE;
2678 	}
2679 	return m;
2680 }
2681 
2682 vm_page_t
2683 vm_page_grab_fictitious(boolean_t canwait)
2684 {
2685 	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2686 }
2687 
2688 int vm_guard_count;
2689 
2690 
2691 vm_page_t
2692 vm_page_grab_guard(boolean_t canwait)
2693 {
2694 	vm_page_t page;
2695 	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2696 	if (page) {
2697 		OSAddAtomic(1, &vm_guard_count);
2698 	}
2699 	return page;
2700 }
2701 
2702 
2703 /*
2704  *	vm_page_release_fictitious:
2705  *
2706  *	Release a fictitious page to the zone pool
2707  */
2708 void
2709 vm_page_release_fictitious(
2710 	vm_page_t m)
2711 {
2712 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2713 	assert(m->vmp_fictitious);
2714 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2715 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2716 	assert(!m->vmp_realtime);
2717 
2718 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2719 		OSAddAtomic(-1, &vm_guard_count);
2720 	}
2721 
2722 	zfree(vm_page_zone, m);
2723 }
2724 
2725 /*
2726  *	vm_pool_low():
2727  *
2728  *	Return true if it is not likely that a non-vm_privileged thread
2729  *	can get memory without blocking.  Advisory only, since the
2730  *	situation may change under us.
2731  */
2732 bool
2733 vm_pool_low(void)
2734 {
2735 	/* No locking, at worst we will fib. */
2736 	return vm_page_free_count <= vm_page_free_reserved;
2737 }
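/*
 * Hedged usage sketch: since the answer is advisory, a best-effort,
 * non-vm_privileged allocation path might use it only as a hint to back off:
 *
 *	if (vm_pool_low()) {
 *		return KERN_RESOURCE_SHORTAGE;	... defer optional work ...
 *	}
 */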
2738 
2739 boolean_t vm_darkwake_mode = FALSE;
2740 
2741 /*
2742  * vm_update_darkwake_mode():
2743  *
2744  * Tells the VM that the system is in / out of darkwake.
2745  *
2746  * Today, the VM only lowers/raises the background queue target
2747  * so as to favor consuming more/less background pages when
2748  * darkwake is ON/OFF.
2749  *
2750  * We might need to do more things in the future.
2751  */
2752 
2753 void
2754 vm_update_darkwake_mode(boolean_t darkwake_mode)
2755 {
2756 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2757 #pragma unused(darkwake_mode)
2758 	assert(vm_darkwake_mode == FALSE);
2759 	/*
2760 	 * Darkwake mode isn't supported on Apple Silicon macOS.
2761 	 */
2762 	return;
2763 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2764 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2765 
2766 	vm_page_lockspin_queues();
2767 
2768 	if (vm_darkwake_mode == darkwake_mode) {
2769 		/*
2770 		 * No change.
2771 		 */
2772 		vm_page_unlock_queues();
2773 		return;
2774 	}
2775 
2776 	vm_darkwake_mode = darkwake_mode;
2777 
2778 	if (vm_darkwake_mode == TRUE) {
2779 		/* save background target to restore later */
2780 		vm_page_background_target_snapshot = vm_page_background_target;
2781 
2782 		/* target is set to 0...no protection for background pages */
2783 		vm_page_background_target = 0;
2784 	} else if (vm_darkwake_mode == FALSE) {
2785 		if (vm_page_background_target_snapshot) {
2786 			vm_page_background_target = vm_page_background_target_snapshot;
2787 		}
2788 	}
2789 	vm_page_unlock_queues();
2790 #endif
2791 }
2792 
2793 void
2794 vm_page_update_special_state(vm_page_t mem)
2795 {
2796 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2797 		return;
2798 	}
2799 
2800 	int mode = mem->vmp_on_specialq;
2801 
2802 	switch (mode) {
2803 	case VM_PAGE_SPECIAL_Q_BG:
2804 	{
2805 		task_t  my_task = current_task_early();
2806 
2807 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2808 			return;
2809 		}
2810 
2811 		if (my_task) {
2812 			if (task_get_darkwake_mode(my_task)) {
2813 				return;
2814 			}
2815 		}
2816 
2817 		if (my_task) {
2818 			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2819 				return;
2820 			}
2821 		}
2822 		vm_page_lockspin_queues();
2823 
2824 		vm_page_background_promoted_count++;
2825 
2826 		vm_page_remove_from_specialq(mem);
2827 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2828 
2829 		vm_page_unlock_queues();
2830 		break;
2831 	}
2832 
2833 	case VM_PAGE_SPECIAL_Q_DONATE:
2834 	{
2835 		task_t  my_task = current_task_early();
2836 
2837 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2838 			return;
2839 		}
2840 
2841 		if (my_task == NULL || my_task->donates_own_pages == false) { /* current_task_early() may return NULL */
2842 			vm_page_lockspin_queues();
2843 
2844 			vm_page_remove_from_specialq(mem);
2845 			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2846 
2847 			vm_page_unlock_queues();
2848 		}
2849 		break;
2850 	}
2851 
2852 	default:
2853 	{
2854 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2855 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2856 		break;
2857 	}
2858 	}
2859 }
2860 
2861 
2862 void
2863 vm_page_assign_special_state(vm_page_t mem, int mode)
2864 {
2865 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2866 		return;
2867 	}
2868 
2869 	switch (mode) {
2870 	case VM_PAGE_SPECIAL_Q_BG:
2871 	{
2872 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2873 			return;
2874 		}
2875 
2876 		task_t  my_task = current_task_early();
2877 
2878 		if (my_task) {
2879 			if (task_get_darkwake_mode(my_task)) {
2880 				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2881 				return;
2882 			}
2883 		}
2884 
2885 		if (my_task) {
2886 			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2887 		}
2888 		break;
2889 	}
2890 
2891 	case VM_PAGE_SPECIAL_Q_DONATE:
2892 	{
2893 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2894 			return;
2895 		}
2896 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2897 		break;
2898 	}
2899 
2900 	default:
2901 		break;
2902 	}
2903 }
2904 
2905 
2906 void
2907 vm_page_remove_from_specialq(
2908 	vm_page_t       mem)
2909 {
2910 	vm_object_t     m_object;
2911 	unsigned short  mode;
2912 
2913 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2914 
2915 	mode = mem->vmp_on_specialq;
2916 
2917 	switch (mode) {
2918 	case VM_PAGE_SPECIAL_Q_BG:
2919 	{
2920 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2921 			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2922 
2923 			mem->vmp_specialq.next = 0;
2924 			mem->vmp_specialq.prev = 0;
2925 
2926 			vm_page_background_count--;
2927 
2928 			m_object = VM_PAGE_OBJECT(mem);
2929 
2930 			if (m_object->internal) {
2931 				vm_page_background_internal_count--;
2932 			} else {
2933 				vm_page_background_external_count--;
2934 			}
2935 		}
2936 		break;
2937 	}
2938 
2939 	case VM_PAGE_SPECIAL_Q_DONATE:
2940 	{
2941 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2942 			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2943 			mem->vmp_specialq.next = 0;
2944 			mem->vmp_specialq.prev = 0;
2945 			vm_page_donate_count--;
2946 			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2947 				assert(vm_page_donate_target == vm_page_donate_target_low);
2948 				vm_page_donate_target = vm_page_donate_target_high;
2949 				vm_page_donate_queue_ripe = false;
2950 			}
2951 		}
2952 
2953 		break;
2954 	}
2955 
2956 	default:
2957 	{
2958 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2959 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2960 		break;
2961 	}
2962 	}
2963 }
2964 
2965 
2966 void
2967 vm_page_add_to_specialq(
2968 	vm_page_t       mem,
2969 	boolean_t       first)
2970 {
2971 	vm_object_t     m_object;
2972 
2973 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2974 
2975 	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2976 		return;
2977 	}
2978 
2979 	int mode = mem->vmp_on_specialq;
2980 
2981 	switch (mode) {
2982 	case VM_PAGE_SPECIAL_Q_BG:
2983 	{
2984 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2985 			return;
2986 		}
2987 
2988 		m_object = VM_PAGE_OBJECT(mem);
2989 
2990 		if (vm_page_background_exclude_external && !m_object->internal) {
2991 			return;
2992 		}
2993 
2994 		if (first == TRUE) {
2995 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
2996 		} else {
2997 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
2998 		}
2999 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3000 
3001 		vm_page_background_count++;
3002 
3003 		if (m_object->internal) {
3004 			vm_page_background_internal_count++;
3005 		} else {
3006 			vm_page_background_external_count++;
3007 		}
3008 		break;
3009 	}
3010 
3011 	case VM_PAGE_SPECIAL_Q_DONATE:
3012 	{
3013 		if (first == TRUE) {
3014 			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3015 		} else {
3016 			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3017 		}
3018 		vm_page_donate_count++;
3019 		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3020 			assert(vm_page_donate_target == vm_page_donate_target_high);
3021 			vm_page_donate_target = vm_page_donate_target_low;
3022 			vm_page_donate_queue_ripe = true;
3023 		}
3024 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3025 		break;
3026 	}
3027 
3028 	default:
3029 		break;
3030 	}
3031 }
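/*
 * Note on the donate-queue hysteresis above (illustrative numbers): if
 * vm_page_donate_target_high were 1000 and vm_page_donate_target_low were
 * 100, the queue turns "ripe" once it grows past 1000 pages and stays ripe
 * until vm_page_remove_from_specialq() drains it back below 100, preventing
 * rapid toggling around a single threshold.
 */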
3032 
3033 /*
3034  * This can be switched to FALSE to help debug drivers
3035  * that are having problems with memory > 4G.
3036  */
3037 boolean_t       vm_himemory_mode = TRUE;
3038 
3039 /*
3040  * this interface exists to support hardware controllers
3041  * incapable of generating DMAs with more than 32 bits
3042  * of address on platforms with physical memory > 4G...
3043  */
3044 unsigned int    vm_lopages_allocated_q = 0;
3045 unsigned int    vm_lopages_allocated_cpm_success = 0;
3046 unsigned int    vm_lopages_allocated_cpm_failed = 0;
3047 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3048 
3049 vm_page_t
3050 vm_page_grablo(void)
3051 {
3052 	vm_page_t       mem;
3053 
3054 	if (vm_lopage_needed == FALSE) {
3055 		return vm_page_grab();
3056 	}
3057 
3058 	vm_free_page_lock_spin();
3059 
3060 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3061 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3062 		assert(vm_lopage_free_count);
3063 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3064 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3065 
3066 		vm_lopage_free_count--;
3067 		vm_lopages_allocated_q++;
3068 
3069 		if (vm_lopage_free_count < vm_lopage_lowater) {
3070 			vm_lopage_refill = TRUE;
3071 		}
3072 
3073 		vm_free_page_unlock();
3074 
3075 		if (current_task()->donates_own_pages) {
3076 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3077 		} else {
3078 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3079 		}
3080 	} else {
3081 		vm_free_page_unlock();
3082 
3083 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3084 			vm_free_page_lock_spin();
3085 			vm_lopages_allocated_cpm_failed++;
3086 			vm_free_page_unlock();
3087 
3088 			return VM_PAGE_NULL;
3089 		}
3090 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3091 
3092 		mem->vmp_busy = TRUE;
3093 
3094 		vm_page_lockspin_queues();
3095 
3096 		mem->vmp_gobbled = FALSE;
3097 		vm_page_gobble_count--;
3098 		vm_page_wire_count--;
3099 
3100 		vm_lopages_allocated_cpm_success++;
3101 		vm_page_unlock_queues();
3102 	}
3103 	assert(mem->vmp_busy);
3104 	assert(!mem->vmp_pmapped);
3105 	assert(!mem->vmp_wpmapped);
3106 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3107 
3108 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3109 
3110 	counter_inc(&vm_page_grab_count);
3111 	VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3112 
3113 	return mem;
3114 }
3115 
3116 /*
3117  *	vm_page_grab:
3118  *
3119  *	first try to grab a page from the per-cpu free list...
3120  *	this must be done while pre-emption is disabled... if
3121  *      a page is available, we're done...
3122  *	if no page is available, grab the vm_page_queue_free_lock
3123  *	and see if current number of free pages would allow us
3124  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3125  *	if there are pages available, disable preemption and
3126  *      recheck the state of the per-cpu free list... we could
3127  *	have been preempted and moved to a different cpu, or
3128  *      some other thread could have re-filled it... if still
3129  *	empty, figure out how many pages we can steal from the
3130  *	global free queue and move to the per-cpu queue...
3131  *	return one of these pages when done... only wake up the
3132  *      pageout_scan thread if we moved pages from the global
3133  *	list... no need for the wakeup if we've satisfied the
3134  *	request from the per-cpu queue.
3135  */
3136 
3137 #if CONFIG_SECLUDED_MEMORY
3138 vm_page_t vm_page_grab_secluded(void);
3139 #endif /* CONFIG_SECLUDED_MEMORY */
3140 
3141 static inline void
3142 vm_page_grab_diags(void);
3143 
3144 vm_page_t
3145 vm_page_grab(void)
3146 {
3147 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3148 }
3149 
3150 #if HIBERNATION
3151 boolean_t       hibernate_rebuild_needed = FALSE;
3152 #endif /* HIBERNATION */
3153 
3154 vm_page_t
3155 vm_page_grab_options(
3156 	int grab_options)
3157 {
3158 	vm_page_t       mem;
3159 
3160 restart:
3161 	disable_preemption();
3162 
3163 	if ((mem = *PERCPU_GET(free_pages))) {
3164 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3165 
3166 #if HIBERNATION
3167 		if (hibernate_rebuild_needed) {
3168 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3169 		}
3170 #endif /* HIBERNATION */
3171 
3172 		vm_page_grab_diags();
3173 
3174 		vm_offset_t pcpu_base = current_percpu_base();
3175 		counter_inc_preemption_disabled(&vm_page_grab_count);
3176 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3177 		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3178 
3179 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3180 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3181 		enable_preemption();
3182 
3183 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3184 		assert(mem->vmp_tabled == FALSE);
3185 		assert(mem->vmp_object == 0);
3186 		assert(!mem->vmp_laundry);
3187 		ASSERT_PMAP_FREE(mem);
3188 		assert(mem->vmp_busy);
3189 		assert(!mem->vmp_pmapped);
3190 		assert(!mem->vmp_wpmapped);
3191 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3192 		assert(!mem->vmp_realtime);
3193 
3194 		task_t  cur_task = current_task_early();
3195 		if (cur_task && cur_task != kernel_task) {
3196 			if (cur_task->donates_own_pages) {
3197 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3198 			} else {
3199 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3200 			}
3201 		}
3202 		return mem;
3203 	}
3204 	enable_preemption();
3205 
3206 
3207 	/*
3208 	 *	Optionally produce warnings if the wire or gobble
3209 	 *	counts exceed some threshold.
3210 	 */
3211 #if VM_PAGE_WIRE_COUNT_WARNING
3212 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3213 		printf("mk: vm_page_grab(): high wired page count of %d\n",
3214 		    vm_page_wire_count);
3215 	}
3216 #endif
3217 #if VM_PAGE_GOBBLE_COUNT_WARNING
3218 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3219 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3220 		    vm_page_gobble_count);
3221 	}
3222 #endif
3223 
3224 	/*
3225 	 * If free count is low and we have delayed pages from early boot,
3226 	 * get one of those instead.
3227 	 */
3228 	if (__improbable(vm_delayed_count > 0 &&
3229 	    vm_page_free_count <= vm_page_free_target &&
3230 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3231 		assert(!mem->vmp_realtime);
3232 		return mem;
3233 	}
3234 
3235 	vm_free_page_lock_spin();
3236 
3237 	/*
3238 	 *	Only let privileged threads (involved in pageout)
3239 	 *	dip into the reserved pool.
3240 	 */
3241 	if ((vm_page_free_count < vm_page_free_reserved) &&
3242 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
3243 		/* no page for us in the free queue... */
3244 		vm_free_page_unlock();
3245 		mem = VM_PAGE_NULL;
3246 
3247 #if CONFIG_SECLUDED_MEMORY
3248 		/* ... but can we try and grab from the secluded queue? */
3249 		if (vm_page_secluded_count > 0 &&
3250 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3251 		    task_can_use_secluded_mem(current_task(), TRUE))) {
3252 			mem = vm_page_grab_secluded();
3253 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3254 				vm_page_secluded.grab_for_iokit++;
3255 				if (mem) {
3256 					vm_page_secluded.grab_for_iokit_success++;
3257 				}
3258 			}
3259 			if (mem) {
3260 				VM_CHECK_MEMORYSTATUS;
3261 
3262 				vm_page_grab_diags();
3263 				counter_inc(&vm_page_grab_count);
3264 				VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3265 
3266 				assert(!mem->vmp_realtime);
3267 				return mem;
3268 			}
3269 		}
3270 #else /* CONFIG_SECLUDED_MEMORY */
3271 		(void) grab_options;
3272 #endif /* CONFIG_SECLUDED_MEMORY */
3273 	} else {
3274 		vm_page_t        head;
3275 		vm_page_t        tail;
3276 		unsigned int     pages_to_steal;
3277 		unsigned int     color;
3278 		unsigned int clump_end, sub_count;
3279 
3280 		while (vm_page_free_count == 0) {
3281 			vm_free_page_unlock();
3282 			/*
3283 			 * must be a privileged thread to be
3284 			 * in this state since a non-privileged
3285 			 * thread would have bailed if we were
3286 			 * under the vm_page_free_reserved mark
3287 			 */
3288 			VM_PAGE_WAIT();
3289 			vm_free_page_lock_spin();
3290 		}
3291 
3292 		/*
3293 		 * Need to repopulate the per-CPU free list from the global free list.
3294 		 * Note we don't do any processing of pending retirement pages here.
3295 		 * That'll happen in the code above when the page comes off the per-CPU list.
3296 		 */
3297 		disable_preemption();
3298 
3299 		/*
3300 		 * If we got preempted the cache might now have pages.
3301 		 */
3302 		if ((mem = *PERCPU_GET(free_pages))) {
3303 			vm_free_page_unlock();
3304 			enable_preemption();
3305 			goto restart;
3306 		}
3307 
3308 		if (vm_page_free_count <= vm_page_free_reserved) {
3309 			pages_to_steal = 1;
3310 		} else {
3311 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3312 				pages_to_steal = vm_free_magazine_refill_limit;
3313 			} else {
3314 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3315 			}
3316 		}
3317 		color = *PERCPU_GET(start_color);
3318 		head = tail = NULL;
3319 
3320 		vm_page_free_count -= pages_to_steal;
3321 		clump_end = sub_count = 0;
3322 
3323 		while (pages_to_steal--) {
3324 			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3325 				color = (color + 1) & vm_color_mask;
3326 			}
3327 #if defined(__x86_64__)
3328 			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3329 			    mem, clump_end);
3330 #else
3331 			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3332 			    mem, vmp_pageq);
3333 #endif
3334 
3335 			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3336 
3337 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3338 
3339 #if defined(__arm64__)
3340 			color = (color + 1) & vm_color_mask;
3341 #else
3342 
3343 #if DEVELOPMENT || DEBUG
3344 
3345 			sub_count++;
3346 			if (clump_end) {
3347 				vm_clump_update_stats(sub_count);
3348 				sub_count = 0;
3349 				color = (color + 1) & vm_color_mask;
3350 			}
3351 #else
3352 			if (clump_end) {
3353 				color = (color + 1) & vm_color_mask;
3354 			}
3355 
3356 #endif /* if DEVELOPMENT || DEBUG */
3357 
3358 #endif  /* if defined(__arm64__) */
3359 
3360 			if (head == NULL) {
3361 				head = mem;
3362 			} else {
3363 				tail->vmp_snext = mem;
3364 			}
3365 			tail = mem;
3366 
3367 			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3368 			assert(mem->vmp_tabled == FALSE);
3369 			assert(mem->vmp_object == 0);
3370 			assert(!mem->vmp_laundry);
3371 
3372 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3373 
3374 			ASSERT_PMAP_FREE(mem);
3375 			assert(mem->vmp_busy);
3376 			assert(!mem->vmp_pmapped);
3377 			assert(!mem->vmp_wpmapped);
3378 			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3379 			assert(!mem->vmp_realtime);
3380 		}
3381 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3382 		vm_clump_update_stats(sub_count);
3383 #endif
3384 
3385 #if HIBERNATION
3386 		if (hibernate_rebuild_needed) {
3387 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3388 		}
3389 #endif /* HIBERNATION */
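		/*
		 * Publish the stolen pages as this CPU's local magazine.
		 * Preemption is still disabled, so the per-CPU base cannot
		 * change beneath us; the list is published before we drop
		 * the free-page lock.
		 */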
3390 		vm_offset_t pcpu_base = current_percpu_base();
3391 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3392 		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3393 
3394 		vm_free_page_unlock();
3395 		enable_preemption();
3396 		goto restart;
3397 	}
3398 
3399 	/*
3400 	 *	Decide if we should poke the pageout daemon.
3401 	 *	We do this if the free count is less than the low
3402 	 *	water mark. VM Pageout Scan will keep running until
3403 	 *	the free_count > free_target (and hence above free_min).
3404 	 *	This wakeup is to catch the possibility of the counts
3405 	 *	dropping between VM Pageout Scan parking and this check.
3406 	 *
3407 	 *	We don't have the counts locked ... if they change a little,
3408 	 *	it doesn't really matter.
3409 	 */
3410 	if (vm_page_free_count < vm_page_free_min) {
3411 		vm_free_page_lock();
3412 		if (vm_pageout_running == FALSE) {
3413 			vm_free_page_unlock();
3414 			thread_wakeup((event_t) &vm_page_free_wanted);
3415 		} else {
3416 			vm_free_page_unlock();
3417 		}
3418 	}
3419 
3420 	VM_CHECK_MEMORYSTATUS;
3421 
3422 	if (mem) {
3423 		assert(!mem->vmp_realtime);
3424 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
3425 
3426 		task_t  cur_task = current_task_early();
3427 		if (cur_task && cur_task != kernel_task) {
3428 			if (cur_task->donates_own_pages) {
3429 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3430 			} else {
3431 				vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3432 			}
3433 		}
3434 	}
3435 	return mem;
3436 }
3437 
3438 #if CONFIG_SECLUDED_MEMORY
3439 vm_page_t
3440 vm_page_grab_secluded(void)
3441 {
3442 	vm_page_t       mem;
3443 	vm_object_t     object;
3444 	int             refmod_state;
3445 
3446 	if (vm_page_secluded_count == 0) {
3447 		/* no secluded pages to grab... */
3448 		return VM_PAGE_NULL;
3449 	}
3450 
3451 	/* secluded queue is protected by the VM page queue lock */
3452 	vm_page_lock_queues();
3453 
3454 	if (vm_page_secluded_count == 0) {
3455 		/* no secluded pages to grab... */
3456 		vm_page_unlock_queues();
3457 		return VM_PAGE_NULL;
3458 	}
3459 
3460 #if 00
3461 	/* can we grab from the secluded queue? */
3462 	if (vm_page_secluded_count > vm_page_secluded_target ||
3463 	    (vm_page_secluded_count > 0 &&
3464 	    task_can_use_secluded_mem(current_task(), TRUE))) {
3465 		/* OK */
3466 	} else {
3467 		/* can't grab from secluded queue... */
3468 		vm_page_unlock_queues();
3469 		return VM_PAGE_NULL;
3470 	}
3471 #endif
3472 
3473 	/* we can grab a page from secluded queue! */
3474 	assert((vm_page_secluded_count_free +
3475 	    vm_page_secluded_count_inuse) ==
3476 	    vm_page_secluded_count);
3477 	if (current_task()->task_can_use_secluded_mem) {
3478 		assert(num_tasks_can_use_secluded_mem > 0);
3479 	}
3480 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3481 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3482 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3483 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3484 	vm_page_queues_remove(mem, TRUE);
3485 
3486 	object = VM_PAGE_OBJECT(mem);
3487 
3488 	assert(!mem->vmp_fictitious);
3489 	assert(!VM_PAGE_WIRED(mem));
3490 	if (object == VM_OBJECT_NULL) {
3491 		/* free for grab! */
3492 		vm_page_unlock_queues();
3493 		vm_page_secluded.grab_success_free++;
3494 
3495 		assert(mem->vmp_busy);
3496 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3497 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3498 		assert(mem->vmp_pageq.next == 0);
3499 		assert(mem->vmp_pageq.prev == 0);
3500 		assert(mem->vmp_listq.next == 0);
3501 		assert(mem->vmp_listq.prev == 0);
3502 		assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3503 		assert(mem->vmp_specialq.next == 0);
3504 		assert(mem->vmp_specialq.prev == 0);
3505 		return mem;
3506 	}
3507 
3508 	assert(!object->internal);
3509 //	vm_page_pageable_external_count--;
3510 
3511 	if (!vm_object_lock_try(object)) {
3512 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
3513 		vm_page_secluded.grab_failure_locked++;
3514 reactivate_secluded_page:
3515 		vm_page_activate(mem);
3516 		vm_page_unlock_queues();
3517 		return VM_PAGE_NULL;
3518 	}
3519 	if (mem->vmp_busy ||
3520 	    mem->vmp_cleaning ||
3521 	    mem->vmp_laundry) {
3522 		/* can't steal page in this state... */
3523 		vm_object_unlock(object);
3524 		vm_page_secluded.grab_failure_state++;
3525 		goto reactivate_secluded_page;
3526 	}
3527 	if (mem->vmp_realtime) {
3528 		/* don't steal pages used by realtime threads... */
3529 		vm_object_unlock(object);
3530 		vm_page_secluded.grab_failure_realtime++;
3531 		goto reactivate_secluded_page;
3532 	}
3533 
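	/*
	 * Mark the page busy, then sever all pmap mappings, folding the
	 * hardware referenced/modified bits into the vm_page so we can
	 * judge below whether the page is clean enough to steal.
	 */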
3534 	mem->vmp_busy = TRUE;
3535 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3536 	if (refmod_state & VM_MEM_REFERENCED) {
3537 		mem->vmp_reference = TRUE;
3538 	}
3539 	if (refmod_state & VM_MEM_MODIFIED) {
3540 		SET_PAGE_DIRTY(mem, FALSE);
3541 	}
3542 	if (mem->vmp_dirty || mem->vmp_precious) {
3543 		/* can't grab a dirty page; re-activate */
3544 //		printf("SECLUDED: dirty page %p\n", mem);
3545 		PAGE_WAKEUP_DONE(mem);
3546 		vm_page_secluded.grab_failure_dirty++;
3547 		vm_object_unlock(object);
3548 		goto reactivate_secluded_page;
3549 	}
3550 	if (mem->vmp_reference) {
3551 		/* it's been used but we do need to grab a page... */
3552 	}
3553 
3554 	vm_page_unlock_queues();
3555 
3556 
3557 	/* finish what vm_page_free() would have done... */
3558 	vm_page_free_prepare_object(mem, TRUE);
3559 	vm_object_unlock(object);
3560 	object = VM_OBJECT_NULL;
3561 	if (vm_page_free_verify) {
3562 		ASSERT_PMAP_FREE(mem);
3563 	}
3564 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3565 	vm_page_secluded.grab_success_other++;
3566 
3567 	assert(mem->vmp_busy);
3568 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3569 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3570 	assert(mem->vmp_pageq.next == 0);
3571 	assert(mem->vmp_pageq.prev == 0);
3572 	assert(mem->vmp_listq.next == 0);
3573 	assert(mem->vmp_listq.prev == 0);
3574 	assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3575 	assert(mem->vmp_specialq.next == 0);
3576 	assert(mem->vmp_specialq.prev == 0);
3577 
3578 	return mem;
3579 }
3580 
3581 uint64_t
3582 vm_page_secluded_drain(void)
3583 {
3584 	vm_page_t local_freeq;
3585 	int local_freed;
3586 	uint64_t num_reclaimed;
3587 	unsigned int saved_secluded_count, saved_secluded_target;
3588 
3589 	num_reclaimed = 0;
3590 	local_freeq = NULL;
3591 	local_freed = 0;
3592 
3593 	vm_page_lock_queues();
3594 
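	/*
	 * Zero the secluded target for the duration of the drain so that
	 * pages we move to the active or free lists are not immediately
	 * routed back onto the secluded queue; the target is restored
	 * once the queue is empty.
	 */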
3595 	saved_secluded_count = vm_page_secluded_count;
3596 	saved_secluded_target = vm_page_secluded_target;
3597 	vm_page_secluded_target = 0;
3598 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3599 	while (vm_page_secluded_count) {
3600 		vm_page_t secluded_page;
3601 
3602 		assert((vm_page_secluded_count_free +
3603 		    vm_page_secluded_count_inuse) ==
3604 		    vm_page_secluded_count);
3605 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3606 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3607 
3608 		vm_page_queues_remove(secluded_page, FALSE);
3609 		assert(!secluded_page->vmp_fictitious);
3610 		assert(!VM_PAGE_WIRED(secluded_page));
3611 
3612 		if (secluded_page->vmp_object == 0) {
3613 			/* transfer to free queue */
3614 			assert(secluded_page->vmp_busy);
3615 			secluded_page->vmp_snext = local_freeq;
3616 			local_freeq = secluded_page;
3617 			local_freed += 1;
3618 		} else {
3619 			/* transfer to head of active queue */
3620 			vm_page_enqueue_active(secluded_page, FALSE);
3621 			secluded_page = VM_PAGE_NULL;
3622 		}
3623 		num_reclaimed++;
3624 	}
3625 	vm_page_secluded_target = saved_secluded_target;
3626 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3627 
3628 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3629 
3630 	vm_page_unlock_queues();
3631 
3632 	if (local_freed) {
3633 		vm_page_free_list(local_freeq, TRUE);
3634 		local_freeq = NULL;
3635 		local_freed = 0;
3636 	}
3637 
3638 	return num_reclaimed;
3639 }
3640 #endif /* CONFIG_SECLUDED_MEMORY */
3641 
3642 
3643 static inline void
3644 vm_page_grab_diags()
3645 {
3646 #if DEVELOPMENT || DEBUG
3647 	task_t task = current_task_early();
3648 	if (task == NULL) {
3649 		return;
3650 	}
3651 
3652 	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3653 #endif /* DEVELOPMENT || DEBUG */
3654 }
3655 
3656 /*
3657  *	vm_page_release:
3658  *
3659  *	Return a page to the free list.
3660  */
3661 
3662 void
3663 vm_page_release(
3664 	vm_page_t       mem,
3665 	boolean_t       page_queues_locked)
3666 {
3667 	unsigned int    color;
3668 	int     need_wakeup = 0;
3669 	int     need_priv_wakeup = 0;
3670 #if CONFIG_SECLUDED_MEMORY
3671 	int     need_secluded_wakeup = 0;
3672 #endif /* CONFIG_SECLUDED_MEMORY */
3673 	event_t wakeup_event = NULL;
3674 
3675 	if (page_queues_locked) {
3676 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3677 	} else {
3678 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3679 	}
3680 
3681 	assert(!mem->vmp_private && !mem->vmp_fictitious);
3682 	if (vm_page_free_verify) {
3683 		ASSERT_PMAP_FREE(mem);
3684 	}
3685 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
3686 
3687 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3688 
3689 	if (__improbable(mem->vmp_realtime)) {
3690 		if (!page_queues_locked) {
3691 			vm_page_lock_queues();
3692 		}
3693 		if (mem->vmp_realtime) {
3694 			mem->vmp_realtime = false;
3695 			vm_page_realtime_count--;
3696 		}
3697 		if (!page_queues_locked) {
3698 			vm_page_unlock_queues();
3699 		}
3700 	}
3701 
3702 	vm_free_page_lock_spin();
3703 
3704 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3705 	assert(mem->vmp_busy);
3706 	assert(!mem->vmp_laundry);
3707 	assert(mem->vmp_object == 0);
3708 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3709 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3710 	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3711 
3712 	/* Clear any specialQ hints before releasing the page to the free pool. */
3713 	mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3714 
3715 	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3716 	    vm_lopage_free_count < vm_lopage_free_limit &&
3717 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3718 		/*
3719 		 * this exists to support hardware controllers
3720 		 * incapable of generating DMAs with more than 32 bits
3721 		 * of address on platforms with physical memory > 4G...
3722 		 */
3723 		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3724 		vm_lopage_free_count++;
3725 
3726 		if (vm_lopage_free_count >= vm_lopage_free_limit) {
3727 			vm_lopage_refill = FALSE;
3728 		}
3729 
3730 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3731 		mem->vmp_lopage = TRUE;
3732 #if CONFIG_SECLUDED_MEMORY
3733 	} else if (vm_page_free_count > vm_page_free_reserved &&
3734 	    vm_page_secluded_count < vm_page_secluded_target &&
3735 	    num_tasks_can_use_secluded_mem == 0) {
3736 		/*
3737 		 * XXX FBDP TODO: also avoid refilling secluded queue
3738 		 * when some IOKit objects are already grabbing from it...
3739 		 */
3740 		if (!page_queues_locked) {
3741 			if (!vm_page_trylock_queues()) {
3742 				/* take locks in right order */
3743 				vm_free_page_unlock();
3744 				vm_page_lock_queues();
3745 				vm_free_page_lock_spin();
3746 			}
3747 		}
3748 		mem->vmp_lopage = FALSE;
3749 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3750 		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3751 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3752 		vm_page_secluded_count++;
3753 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3754 		vm_page_secluded_count_free++;
3755 		if (!page_queues_locked) {
3756 			vm_page_unlock_queues();
3757 		}
3758 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3759 		if (vm_page_free_wanted_secluded > 0) {
3760 			vm_page_free_wanted_secluded--;
3761 			need_secluded_wakeup = 1;
3762 		}
3763 #endif /* CONFIG_SECLUDED_MEMORY */
3764 	} else {
3765 		mem->vmp_lopage = FALSE;
3766 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3767 
3768 		color = VM_PAGE_GET_COLOR(mem);
3769 #if defined(__x86_64__)
3770 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3771 #else
3772 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3773 #endif
3774 		vm_page_free_count++;
3775 		/*
3776 		 *	Check if we should wake up someone waiting for page.
3777 		 *	But don't bother waking them unless they can allocate.
3778 		 *
3779 		 *	We wakeup only one thread, to prevent starvation.
3780 		 *	Because the scheduling system handles wait queues FIFO,
3781 		 *	if we wakeup all waiting threads, one greedy thread
3782 		 *	can starve multiple niceguy threads.  When the threads
3783 	 *	all wakeup, the greedy thread runs first, grabs the page,
3784 		 *	and waits for another page.  It will be the first to run
3785 		 *	when the next page is freed.
3786 		 *
3787 		 *	However, there is a slight danger here.
3788 		 *	The thread we wake might not use the free page.
3789 		 *	Then the other threads could wait indefinitely
3790 		 *	while the page goes unused.  To forestall this,
3791 		 *	the pageout daemon will keep making free pages
3792 		 *	as long as vm_page_free_wanted is non-zero.
3793 		 */
3794 
3795 		assert(vm_page_free_count > 0);
3796 		if (vm_page_free_wanted_privileged > 0) {
3797 			vm_page_free_wanted_privileged--;
3798 			need_priv_wakeup = 1;
3799 #if CONFIG_SECLUDED_MEMORY
3800 		} else if (vm_page_free_wanted_secluded > 0 &&
3801 		    vm_page_free_count > vm_page_free_reserved) {
3802 			vm_page_free_wanted_secluded--;
3803 			need_secluded_wakeup = 1;
3804 #endif /* CONFIG_SECLUDED_MEMORY */
3805 		} else if (vm_page_free_wanted > 0 &&
3806 		    vm_page_free_count > vm_page_free_reserved) {
3807 			vm_page_free_wanted--;
3808 			need_wakeup = 1;
3809 		}
3810 	}
3811 	vm_pageout_vminfo.vm_page_pages_freed++;
3812 
3813 	vm_free_page_unlock();
3814 
3815 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3816 
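	/*
	 * Wake at most one waiter, preferring VM-privileged threads over
	 * secluded waiters, and secluded waiters over ordinary ones.
	 */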
3817 	if (need_priv_wakeup) {
3818 		wakeup_event = &vm_page_free_wanted_privileged;
3819 	}
3820 #if CONFIG_SECLUDED_MEMORY
3821 	else if (need_secluded_wakeup) {
3822 		wakeup_event = &vm_page_free_wanted_secluded;
3823 	}
3824 #endif /* CONFIG_SECLUDED_MEMORY */
3825 	else if (need_wakeup) {
3826 		wakeup_event = &vm_page_free_count;
3827 	}
3828 
3829 	if (wakeup_event) {
3830 		if (vps_dynamic_priority_enabled) {
3831 			wakeup_one_with_inheritor((event_t) wakeup_event,
3832 			    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
3833 			    NULL);
3834 		} else {
3835 			thread_wakeup_one((event_t) wakeup_event);
3836 		}
3837 	}
3838 
3839 	VM_CHECK_MEMORYSTATUS;
3840 }
3841 
3842 /*
3843  * This version of vm_page_release() is used only at startup
3844  * when we are single-threaded and pages are being released
3845  * for the first time. Hence, no locking is needed and unnecessary checks are skipped.
3846  * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
3847  */
3848 void
3849 vm_page_release_startup(
3850 	vm_page_t       mem)
3851 {
3852 	vm_page_queue_t queue_free;
3853 
3854 	if (vm_lopage_free_count < vm_lopage_free_limit &&
3855 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3856 		mem->vmp_lopage = TRUE;
3857 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3858 		vm_lopage_free_count++;
3859 		queue_free = &vm_lopage_queue_free;
3860 #if CONFIG_SECLUDED_MEMORY
3861 	} else if (vm_page_secluded_count < vm_page_secluded_target) {
3862 		mem->vmp_lopage = FALSE;
3863 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3864 		vm_page_secluded_count++;
3865 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3866 		vm_page_secluded_count_free++;
3867 		queue_free = &vm_page_queue_secluded;
3868 #endif /* CONFIG_SECLUDED_MEMORY */
3869 	} else {
3870 		mem->vmp_lopage = FALSE;
3871 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3872 		vm_page_free_count++;
3873 		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3874 	}
3875 	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3876 #if defined(__x86_64__)
3877 		vm_page_queue_enter_clump(queue_free, mem);
3878 #else
3879 		vm_page_queue_enter(queue_free, mem, vmp_pageq);
3880 #endif
3881 	} else {
3882 		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3883 	}
3884 }
3885 
3886 /*
3887  *	vm_page_wait:
3888  *
3889  *	Wait for a page to become available.
3890  *	If there are plenty of free pages, then we don't sleep.
3891  *
3892  *	Returns:
3893  *		TRUE:  There may be another page, try again
3894  *		FALSE: We were interrupted out of our wait, don't try again
3895  */
3896 
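/*
 *	A typical caller retries its allocation around this wait; an
 *	illustrative sketch (not a verbatim caller) looks like:
 *
 *		while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *			if (!vm_page_wait(THREAD_UNINT)) {
 *				break;
 *			}
 *		}
 */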
3897 boolean_t
3898 vm_page_wait(
3899 	int     interruptible )
3900 {
3901 	/*
3902 	 *	We can't use vm_page_free_reserved to make this
3903 	 *	determination.  Consider: some thread might
3904 	 *	need to allocate two pages.  The first allocation
3905 	 *	succeeds, the second fails.  After the first page is freed,
3906 	 *	a call to vm_page_wait must really block.
3907 	 */
3908 	kern_return_t   wait_result;
3909 	int             need_wakeup = 0;
3910 	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3911 	event_t         wait_event = NULL;
3912 
3913 	vm_free_page_lock_spin();
3914 
3915 	if (is_privileged && vm_page_free_count) {
3916 		vm_free_page_unlock();
3917 		return TRUE;
3918 	}
3919 
3920 	if (vm_page_free_count >= vm_page_free_target) {
3921 		vm_free_page_unlock();
3922 		return TRUE;
3923 	}
3924 
3925 	if (is_privileged) {
3926 		if (vm_page_free_wanted_privileged++ == 0) {
3927 			need_wakeup = 1;
3928 		}
3929 		wait_event = (event_t)&vm_page_free_wanted_privileged;
3930 #if CONFIG_SECLUDED_MEMORY
3931 	} else if (secluded_for_apps &&
3932 	    task_can_use_secluded_mem(current_task(), FALSE)) {
3933 #if 00
3934 		/* XXX FBDP: need pageq lock for this... */
3935 		/* XXX FBDP: might wait even if pages available, */
3936 		/* XXX FBDP: hopefully not for too long... */
3937 		if (vm_page_secluded_count > 0) {
3938 			vm_free_page_unlock();
3939 			return TRUE;
3940 		}
3941 #endif
3942 		if (vm_page_free_wanted_secluded++ == 0) {
3943 			need_wakeup = 1;
3944 		}
3945 		wait_event = (event_t)&vm_page_free_wanted_secluded;
3946 #endif /* CONFIG_SECLUDED_MEMORY */
3947 	} else {
3948 		if (vm_page_free_wanted++ == 0) {
3949 			need_wakeup = 1;
3950 		}
3951 		wait_event = (event_t)&vm_page_free_count;
3952 	}
3953 
3954 	/*
3955 	 * We don't do a vm_pageout_scan wakeup if we already have
3956 	 * some waiters because vm_pageout_scan checks for waiters
3957 	 * before it returns and does so behind the vm_page_queue_free_lock,
3958 	 * which we own when we bump the waiter counts.
3959 	 */
3960 
3961 	if (vps_dynamic_priority_enabled) {
3962 		/*
3963 		 * We are waking up vm_pageout_scan here. If it needs
3964 		 * the vm_page_queue_free_lock before we unlock it
3965 		 * we'll end up just blocking and incur an extra
3966 		 * context switch. Could be a perf. issue.
3967 		 */
3968 
3969 		if (need_wakeup) {
3970 			thread_wakeup((event_t)&vm_page_free_wanted);
3971 		}
3972 
3973 		/*
3974 		 * LD: This event is going to get recorded every time because
3975 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
3976 		 * We just block in that routine.
3977 		 */
3978 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3979 		    vm_page_free_wanted_privileged,
3980 		    vm_page_free_wanted,
3981 #if CONFIG_SECLUDED_MEMORY
3982 		    vm_page_free_wanted_secluded,
3983 #else /* CONFIG_SECLUDED_MEMORY */
3984 		    0,
3985 #endif /* CONFIG_SECLUDED_MEMORY */
3986 		    0);
3987 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
3988 		    LCK_SLEEP_UNLOCK,
3989 		    wait_event,
3990 		    vm_pageout_scan_thread,
3991 		    interruptible,
3992 		    0);
3993 	} else {
3994 		wait_result = assert_wait(wait_event, interruptible);
3995 
3996 		vm_free_page_unlock();
3997 
3998 		if (need_wakeup) {
3999 			thread_wakeup((event_t)&vm_page_free_wanted);
4000 		}
4001 
4002 		if (wait_result == THREAD_WAITING) {
4003 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4004 			    vm_page_free_wanted_privileged,
4005 			    vm_page_free_wanted,
4006 #if CONFIG_SECLUDED_MEMORY
4007 			    vm_page_free_wanted_secluded,
4008 #else /* CONFIG_SECLUDED_MEMORY */
4009 			    0,
4010 #endif /* CONFIG_SECLUDED_MEMORY */
4011 			    0);
4012 			wait_result = thread_block(THREAD_CONTINUE_NULL);
4013 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4014 			    VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4015 		}
4016 	}
4017 
4018 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4019 }
4020 
4021 /*
4022  *	vm_page_alloc:
4023  *
4024  *	Allocate and return a memory cell associated
4025  *	with this VM object/offset pair.
4026  *
4027  *	Object must be locked.
4028  */
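/*
 *	Illustrative use, assuming the caller already holds the object
 *	lock (a sketch, not a verbatim caller):
 *
 *		vm_object_lock(object);
 *		mem = vm_page_alloc(object, offset);
 *		if (mem != VM_PAGE_NULL) {
 *			...initialize the new, busy page...
 *		}
 *		vm_object_unlock(object);
 */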
4029 
4030 vm_page_t
4031 vm_page_alloc(
4032 	vm_object_t             object,
4033 	vm_object_offset_t      offset)
4034 {
4035 	vm_page_t       mem;
4036 	int             grab_options;
4037 
4038 	vm_object_lock_assert_exclusive(object);
4039 	grab_options = 0;
4040 #if CONFIG_SECLUDED_MEMORY
4041 	if (object->can_grab_secluded) {
4042 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4043 	}
4044 #endif /* CONFIG_SECLUDED_MEMORY */
4045 	mem = vm_page_grab_options(grab_options);
4046 	if (mem == VM_PAGE_NULL) {
4047 		return VM_PAGE_NULL;
4048 	}
4049 
4050 	vm_page_insert(mem, object, offset);
4051 
4052 	return mem;
4053 }
4054 
4055 /*
4056  *	vm_page_free_prepare:
4057  *
4058  *	Removes page from any queue it may be on
4059  *	and disassociates it from its VM object.
4060  *
4061  *	Object and page queues must be locked prior to entry.
4062  */
4063 static void
4064 vm_page_free_prepare(
4065 	vm_page_t       mem)
4066 {
4067 
4068 	vm_page_free_prepare_queues(mem);
4069 	vm_page_free_prepare_object(mem, TRUE);
4070 }
4071 
4072 
4073 void
4074 vm_page_free_prepare_queues(
4075 	vm_page_t       mem)
4076 {
4077 	vm_object_t     m_object;
4078 
4079 	VM_PAGE_CHECK(mem);
4080 
4081 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4082 	assert(!mem->vmp_cleaning);
4083 	m_object = VM_PAGE_OBJECT(mem);
4084 
4085 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4086 	if (m_object) {
4087 		vm_object_lock_assert_exclusive(m_object);
4088 	}
4089 	if (mem->vmp_laundry) {
4090 		/*
4091 		 * We may have to free a page while it's being laundered
4092 		 * if we lost its pager (due to a forced unmount, for example).
4093 		 * We need to call vm_pageout_steal_laundry() before removing
4094 		 * the page from its VM object, so that we can remove it
4095 		 * from its pageout queue and adjust the laundry accounting.
4096 		 */
4097 		vm_pageout_steal_laundry(mem, TRUE);
4098 	}
4099 
4100 	vm_page_queues_remove(mem, TRUE);
4101 
4102 	if (__improbable(mem->vmp_realtime)) {
4103 		mem->vmp_realtime = false;
4104 		vm_page_realtime_count--;
4105 	}
4106 
4107 	if (VM_PAGE_WIRED(mem)) {
4108 		assert(mem->vmp_wire_count > 0);
4109 
4110 		if (m_object) {
4111 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4112 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4113 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4114 
4115 			assert(m_object->resident_page_count >=
4116 			    m_object->wired_page_count);
4117 
4118 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4119 				OSAddAtomic(+1, &vm_page_purgeable_count);
4120 				assert(vm_page_purgeable_wired_count > 0);
4121 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4122 			}
4123 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4124 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4125 			    m_object->vo_owner != TASK_NULL) {
4126 				task_t          owner;
4127 				int             ledger_idx_volatile;
4128 				int             ledger_idx_nonvolatile;
4129 				int             ledger_idx_volatile_compressed;
4130 				int             ledger_idx_nonvolatile_compressed;
4131 				boolean_t       do_footprint;
4132 
4133 				owner = VM_OBJECT_OWNER(m_object);
4134 				vm_object_ledger_tag_ledgers(
4135 					m_object,
4136 					&ledger_idx_volatile,
4137 					&ledger_idx_nonvolatile,
4138 					&ledger_idx_volatile_compressed,
4139 					&ledger_idx_nonvolatile_compressed,
4140 					&do_footprint);
4141 				/*
4142 				 * While wired, this page was accounted
4143 				 * as "non-volatile" but it should now
4144 				 * be accounted as "volatile".
4145 				 */
4146 				/* one less "non-volatile"... */
4147 				ledger_debit(owner->ledger,
4148 				    ledger_idx_nonvolatile,
4149 				    PAGE_SIZE);
4150 				if (do_footprint) {
4151 					/* ... and "phys_footprint" */
4152 					ledger_debit(owner->ledger,
4153 					    task_ledgers.phys_footprint,
4154 					    PAGE_SIZE);
4155 				}
4156 				/* one more "volatile" */
4157 				ledger_credit(owner->ledger,
4158 				    ledger_idx_volatile,
4159 				    PAGE_SIZE);
4160 			}
4161 		}
4162 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4163 			vm_page_wire_count--;
4164 		}
4165 
4166 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4167 		mem->vmp_wire_count = 0;
4168 		assert(!mem->vmp_gobbled);
4169 	} else if (mem->vmp_gobbled) {
4170 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4171 			vm_page_wire_count--;
4172 		}
4173 		vm_page_gobble_count--;
4174 	}
4175 }
4176 
4177 
4178 void
4179 vm_page_free_prepare_object(
4180 	vm_page_t       mem,
4181 	boolean_t       remove_from_hash)
4182 {
4183 	assert(!mem->vmp_realtime);
4184 	if (mem->vmp_tabled) {
4185 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4186 	}
4187 	PAGE_WAKEUP(mem);               /* clears wanted */
4188 
4189 	if (mem->vmp_private) {
4190 		mem->vmp_private = FALSE;
4191 		mem->vmp_fictitious = TRUE;
4192 		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4193 	}
4194 	if (!mem->vmp_fictitious) {
4195 		assert(mem->vmp_pageq.next == 0);
4196 		assert(mem->vmp_pageq.prev == 0);
4197 		assert(mem->vmp_listq.next == 0);
4198 		assert(mem->vmp_listq.prev == 0);
4199 		assert(mem->vmp_specialq.next == 0);
4200 		assert(mem->vmp_specialq.prev == 0);
4201 		assert(mem->vmp_next_m == 0);
4202 		ASSERT_PMAP_FREE(mem);
4203 		{
4204 			vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4205 		}
4206 	}
4207 }
4208 
4209 
4210 /*
4211  *	vm_page_free:
4212  *
4213  *	Returns the given page to the free list,
4214  *	disassociating it with any VM object.
4215  *
4216  *	Object and page queues must be locked prior to entry.
4217  */
4218 void
4219 vm_page_free(
4220 	vm_page_t       mem)
4221 {
4222 	vm_page_free_prepare(mem);
4223 
4224 	if (mem->vmp_fictitious) {
4225 		vm_page_release_fictitious(mem);
4226 	} else {
4227 		vm_page_release(mem, TRUE);  /* page queues are locked */
4228 	}
4229 }
4230 
4231 
4232 void
4233 vm_page_free_unlocked(
4234 	vm_page_t       mem,
4235 	boolean_t       remove_from_hash)
4236 {
4237 	vm_page_lockspin_queues();
4238 	vm_page_free_prepare_queues(mem);
4239 	vm_page_unlock_queues();
4240 
4241 	vm_page_free_prepare_object(mem, remove_from_hash);
4242 
4243 	if (mem->vmp_fictitious) {
4244 		vm_page_release_fictitious(mem);
4245 	} else {
4246 		vm_page_release(mem, FALSE); /* page queues are not locked */
4247 	}
4248 }
4249 
4250 
4251 /*
4252  * Free a list of pages.  The list can be up to several hundred pages,
4253  * as blocked up by vm_pageout_scan().
4254  * The big win is not having to take the free list lock once
4255  * per page.
4256  *
4257  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4258  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4259  */
4260 void
4261 vm_page_free_list(
4262 	vm_page_t       freeq,
4263 	boolean_t       prepare_object)
4264 {
4265 	vm_page_t       mem;
4266 	vm_page_t       nxt;
4267 	vm_page_t       local_freeq;
4268 	int             pg_count;
4269 
4270 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4271 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4272 
4273 	while (freeq) {
4274 		pg_count = 0;
4275 		local_freeq = VM_PAGE_NULL;
4276 		mem = freeq;
4277 
4278 		/*
4279 		 * break up the processing into chunks of at most 64 pages
4280 		 * so that we can 'pipeline' the pages onto the
4281 		 * free list w/o introducing too much
4282 		 * contention on the global free queue lock
4283 		 */
4284 		while (mem && pg_count < 64) {
4285 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4286 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4287 			assert(mem->vmp_specialq.next == 0 &&
4288 			    mem->vmp_specialq.prev == 0);
4289 			/*
4290 			 * &&
4291 			 *   mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4292 			 */
4293 			nxt = mem->vmp_snext;
4294 			mem->vmp_snext = NULL;
4295 			assert(mem->vmp_pageq.prev == 0);
4296 
4297 			if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) {
4298 				ASSERT_PMAP_FREE(mem);
4299 			}
4300 
4301 			if (__improbable(mem->vmp_realtime)) {
4302 				vm_page_lock_queues();
4303 				if (mem->vmp_realtime) {
4304 					mem->vmp_realtime = false;
4305 					vm_page_realtime_count--;
4306 				}
4307 				vm_page_unlock_queues();
4308 			}
4309 
4310 			if (prepare_object == TRUE) {
4311 				vm_page_free_prepare_object(mem, TRUE);
4312 			}
4313 
4314 			if (!mem->vmp_fictitious) {
4315 				assert(mem->vmp_busy);
4316 
4317 				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4318 				    vm_lopage_free_count < vm_lopage_free_limit &&
4319 				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4320 					vm_page_release(mem, FALSE); /* page queues are not locked */
4321 #if CONFIG_SECLUDED_MEMORY
4322 				} else if (vm_page_secluded_count < vm_page_secluded_target &&
4323 				    num_tasks_can_use_secluded_mem == 0) {
4324 					vm_page_release(mem,
4325 					    FALSE);             /* page queues are not locked */
4326 #endif /* CONFIG_SECLUDED_MEMORY */
4327 				} else {
4328 					/*
4329 					 * IMPORTANT: we can't set the page "free" here
4330 					 * because that would make the page eligible for
4331 					 * a physically-contiguous allocation (see
4332 					 * vm_page_find_contiguous()) right away (we don't
4333 					 * hold the vm_page_queue_free lock).  That would
4334 					 * cause trouble because the page is not actually
4335 					 * in the free queue yet...
4336 					 */
4337 					mem->vmp_snext = local_freeq;
4338 					local_freeq = mem;
4339 					pg_count++;
4340 
4341 					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4342 				}
4343 			} else {
4344 				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4345 				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4346 				vm_page_release_fictitious(mem);
4347 			}
4348 			mem = nxt;
4349 		}
4350 		freeq = mem;
4351 
4352 		if ((mem = local_freeq)) {
4353 			unsigned int    avail_free_count;
4354 			unsigned int    need_wakeup = 0;
4355 			unsigned int    need_priv_wakeup = 0;
4356 #if CONFIG_SECLUDED_MEMORY
4357 			unsigned int    need_wakeup_secluded = 0;
4358 #endif /* CONFIG_SECLUDED_MEMORY */
4359 			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4360 			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4361 
4362 			vm_free_page_lock_spin();
4363 
4364 			while (mem) {
4365 				int     color;
4366 
4367 				nxt = mem->vmp_snext;
4368 
4369 				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4370 				assert(mem->vmp_busy);
4371 				assert(!mem->vmp_realtime);
4372 				mem->vmp_lopage = FALSE;
4373 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4374 
4375 				color = VM_PAGE_GET_COLOR(mem);
4376 #if defined(__x86_64__)
4377 				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4378 #else
4379 				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4380 				    mem, vmp_pageq);
4381 #endif
4382 				mem = nxt;
4383 			}
4384 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4385 			vm_page_free_count += pg_count;
4386 			avail_free_count = vm_page_free_count;
4387 
4388 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4389 
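			/*
			 * Compute how many waiters of each class the pages
			 * we just freed can satisfy: privileged waiters may
			 * consume the reserve, while secluded and ordinary
			 * waiters only see pages above vm_page_free_reserved.
			 */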
4390 			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4391 				if (avail_free_count < vm_page_free_wanted_privileged) {
4392 					need_priv_wakeup = avail_free_count;
4393 					vm_page_free_wanted_privileged -= avail_free_count;
4394 					avail_free_count = 0;
4395 				} else {
4396 					need_priv_wakeup = vm_page_free_wanted_privileged;
4397 					avail_free_count -= vm_page_free_wanted_privileged;
4398 					vm_page_free_wanted_privileged = 0;
4399 				}
4400 			}
4401 #if CONFIG_SECLUDED_MEMORY
4402 			if (vm_page_free_wanted_secluded > 0 &&
4403 			    avail_free_count > vm_page_free_reserved) {
4404 				unsigned int available_pages;
4405 				available_pages = (avail_free_count -
4406 				    vm_page_free_reserved);
4407 				if (available_pages <
4408 				    vm_page_free_wanted_secluded) {
4409 					need_wakeup_secluded = available_pages;
4410 					vm_page_free_wanted_secluded -=
4411 					    available_pages;
4412 					avail_free_count -= available_pages;
4413 				} else {
4414 					need_wakeup_secluded =
4415 					    vm_page_free_wanted_secluded;
4416 					avail_free_count -=
4417 					    vm_page_free_wanted_secluded;
4418 					vm_page_free_wanted_secluded = 0;
4419 				}
4420 			}
4421 #endif /* CONFIG_SECLUDED_MEMORY */
4422 			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4423 				unsigned int  available_pages;
4424 
4425 				available_pages = avail_free_count - vm_page_free_reserved;
4426 
4427 				if (available_pages >= vm_page_free_wanted) {
4428 					need_wakeup = vm_page_free_wanted;
4429 					vm_page_free_wanted = 0;
4430 				} else {
4431 					need_wakeup = available_pages;
4432 					vm_page_free_wanted -= available_pages;
4433 				}
4434 			}
4435 			vm_free_page_unlock();
4436 
4437 			priv_wakeup_event = NULL;
4438 			secluded_wakeup_event = NULL;
4439 			normal_wakeup_event = NULL;
4440 
4441 			priv_wakeup_all = FALSE;
4442 			secluded_wakeup_all = FALSE;
4443 			normal_wakeup_all = FALSE;
4444 
4445 
4446 			if (need_priv_wakeup != 0) {
4447 				/*
4448 				 * There shouldn't be that many VM-privileged threads,
4449 				 * so let's wake them all up, even if we don't quite
4450 				 * have enough pages to satisfy them all.
4451 				 */
4452 				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4453 				priv_wakeup_all = TRUE;
4454 			}
4455 #if CONFIG_SECLUDED_MEMORY
4456 			if (need_wakeup_secluded != 0 &&
4457 			    vm_page_free_wanted_secluded == 0) {
4458 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4459 				secluded_wakeup_all = TRUE;
4460 				need_wakeup_secluded = 0;
4461 			} else {
4462 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4463 			}
4464 #endif /* CONFIG_SECLUDED_MEMORY */
4465 			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4466 				/*
4467 				 * We don't expect to have any more waiters
4468 				 * after this, so let's wake them all up at
4469 				 * once.
4470 				 */
4471 				normal_wakeup_event = (event_t) &vm_page_free_count;
4472 				normal_wakeup_all = TRUE;
4473 				need_wakeup = 0;
4474 			} else {
4475 				normal_wakeup_event = (event_t) &vm_page_free_count;
4476 			}
4477 
4478 			if (priv_wakeup_event ||
4479 #if CONFIG_SECLUDED_MEMORY
4480 			    secluded_wakeup_event ||
4481 #endif /* CONFIG_SECLUDED_MEMORY */
4482 			    normal_wakeup_event) {
4483 				if (vps_dynamic_priority_enabled) {
4484 					if (priv_wakeup_all == TRUE) {
4485 						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4486 					}
4487 
4488 #if CONFIG_SECLUDED_MEMORY
4489 					if (secluded_wakeup_all == TRUE) {
4490 						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4491 					}
4492 
4493 					while (need_wakeup_secluded-- != 0) {
4494 						/*
4495 						 * Wake up one waiter per page we just released.
4496 						 */
4497 						wakeup_one_with_inheritor(secluded_wakeup_event,
4498 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
4499 					}
4500 #endif /* CONFIG_SECLUDED_MEMORY */
4501 
4502 					if (normal_wakeup_all == TRUE) {
4503 						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4504 					}
4505 
4506 					while (need_wakeup-- != 0) {
4507 						/*
4508 						 * Wake up one waiter per page we just released.
4509 						 */
4510 						wakeup_one_with_inheritor(normal_wakeup_event,
4511 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
4512 						    NULL);
4513 					}
4514 				} else {
4515 					/*
4516 					 * Non-priority-aware wakeups.
4517 					 */
4518 
4519 					if (priv_wakeup_all == TRUE) {
4520 						thread_wakeup(priv_wakeup_event);
4521 					}
4522 
4523 #if CONFIG_SECLUDED_MEMORY
4524 					if (secluded_wakeup_all == TRUE) {
4525 						thread_wakeup(secluded_wakeup_event);
4526 					}
4527 
4528 					while (need_wakeup_secluded-- != 0) {
4529 						/*
4530 						 * Wake up one waiter per page we just released.
4531 						 */
4532 						thread_wakeup_one(secluded_wakeup_event);
4533 					}
4534 
4535 #endif /* CONFIG_SECLUDED_MEMORY */
4536 					if (normal_wakeup_all == TRUE) {
4537 						thread_wakeup(normal_wakeup_event);
4538 					}
4539 
4540 					while (need_wakeup-- != 0) {
4541 						/*
4542 						 * Wake up one waiter per page we just released.
4543 						 */
4544 						thread_wakeup_one(normal_wakeup_event);
4545 					}
4546 				}
4547 			}
4548 
4549 			VM_CHECK_MEMORYSTATUS;
4550 		}
4551 	}
4552 }
4553 
4554 
4555 /*
4556  *	vm_page_wire:
4557  *
4558  *	Mark this page as wired down by yet
4559  *	another map, removing it from paging queues
4560  *	as necessary.
4561  *
4562  *	The page's object and the page queues must be locked.
4563  */
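/*
 *	Illustrative wire/unwire pairing under the required locks
 *	(a sketch, not a verbatim caller; the tag is an example):
 *
 *		vm_object_lock(object);
 *		vm_page_lockspin_queues();
 *		vm_page_wire(m, VM_KERN_MEMORY_NONE, TRUE);
 *		vm_page_unlock_queues();
 *		...operate on the pinned page...
 *		vm_page_lockspin_queues();
 *		vm_page_unwire(m, TRUE);
 *		vm_page_unlock_queues();
 *		vm_object_unlock(object);
 */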
4564 
4565 
4566 void
4567 vm_page_wire(
4568 	vm_page_t mem,
4569 	vm_tag_t           tag,
4570 	boolean_t          check_memorystatus)
4571 {
4572 	vm_object_t     m_object;
4573 
4574 	m_object = VM_PAGE_OBJECT(mem);
4575 
4576 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
4577 
4578 	VM_PAGE_CHECK(mem);
4579 	if (m_object) {
4580 		vm_object_lock_assert_exclusive(m_object);
4581 	} else {
4582 		/*
4583 		 * In theory, the page should be in an object before it
4584 		 * gets wired, since we need to hold the object lock
4585 		 * to update some fields in the page structure.
4586 		 * However, some code (i386 pmap, for example) might want
4587 		 * to wire a page before it gets inserted into an object.
4588 		 * That's somewhat OK, as long as nobody else can get to
4589 		 * that page and update it at the same time.
4590 		 */
4591 	}
4592 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4593 	if (!VM_PAGE_WIRED(mem)) {
4594 		if (mem->vmp_laundry) {
4595 			vm_pageout_steal_laundry(mem, TRUE);
4596 		}
4597 
4598 		vm_page_queues_remove(mem, TRUE);
4599 
4600 		assert(mem->vmp_wire_count == 0);
4601 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
4602 
4603 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4604 		if (mem->vmp_unmodified_ro == true) {
4605 			/* Object and PageQ locks are held*/
4606 			mem->vmp_unmodified_ro = false;
4607 			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4608 			VM_COMPRESSOR_PAGER_STATE_CLR(VM_PAGE_OBJECT(mem), mem->vmp_offset);
4609 		}
4610 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4611 
4612 		if (m_object) {
4613 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4614 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4615 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4616 
4617 			assert(m_object->resident_page_count >=
4618 			    m_object->wired_page_count);
4619 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4620 				assert(vm_page_purgeable_count > 0);
4621 				OSAddAtomic(-1, &vm_page_purgeable_count);
4622 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
4623 			}
4624 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4625 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4626 			    m_object->vo_owner != TASK_NULL) {
4627 				task_t          owner;
4628 				int             ledger_idx_volatile;
4629 				int             ledger_idx_nonvolatile;
4630 				int             ledger_idx_volatile_compressed;
4631 				int             ledger_idx_nonvolatile_compressed;
4632 				boolean_t       do_footprint;
4633 
4634 				owner = VM_OBJECT_OWNER(m_object);
4635 				vm_object_ledger_tag_ledgers(
4636 					m_object,
4637 					&ledger_idx_volatile,
4638 					&ledger_idx_nonvolatile,
4639 					&ledger_idx_volatile_compressed,
4640 					&ledger_idx_nonvolatile_compressed,
4641 					&do_footprint);
4642 				/* less volatile bytes */
4643 				ledger_debit(owner->ledger,
4644 				    ledger_idx_volatile,
4645 				    PAGE_SIZE);
4646 				/* more not-quite-volatile bytes */
4647 				ledger_credit(owner->ledger,
4648 				    ledger_idx_nonvolatile,
4649 				    PAGE_SIZE);
4650 				if (do_footprint) {
4651 					/* more footprint */
4652 					ledger_credit(owner->ledger,
4653 					    task_ledgers.phys_footprint,
4654 					    PAGE_SIZE);
4655 				}
4656 			}
4657 			if (m_object->all_reusable) {
4658 				/*
4659 				 * Wired pages are not counted as "re-usable"
4660 				 * in "all_reusable" VM objects, so nothing
4661 				 * to do here.
4662 				 */
4663 			} else if (mem->vmp_reusable) {
4664 				/*
4665 				 * This page is not "re-usable" when it's
4666 				 * wired, so adjust its state and the
4667 				 * accounting.
4668 				 */
4669 				vm_object_reuse_pages(m_object,
4670 				    mem->vmp_offset,
4671 				    mem->vmp_offset + PAGE_SIZE_64,
4672 				    FALSE);
4673 			}
4674 		}
4675 		assert(!mem->vmp_reusable);
4676 
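		/*
		 * Only a real (non-private, non-fictitious) page that was
		 * not already gobbled bumps the global wired count; a
		 * gobbled page was counted when it was gobbled.
		 */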
4677 		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4678 			vm_page_wire_count++;
4679 		}
4680 		if (mem->vmp_gobbled) {
4681 			vm_page_gobble_count--;
4682 		}
4683 		mem->vmp_gobbled = FALSE;
4684 
4685 		if (check_memorystatus == TRUE) {
4686 			VM_CHECK_MEMORYSTATUS;
4687 		}
4688 	}
4689 	assert(!mem->vmp_gobbled);
4690 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4691 	mem->vmp_wire_count++;
4692 	if (__improbable(mem->vmp_wire_count == 0)) {
4693 		panic("vm_page_wire(%p): wire_count overflow", mem);
4694 	}
4695 	VM_PAGE_CHECK(mem);
4696 }
4697 
4698 /*
4699  *	vm_page_unwire:
4700  *
4701  *	Release one wiring of this page, potentially
4702  *	enabling it to be paged again.
4703  *
4704  *	The page's object and the page queues must be locked.
4705  */
4706 void
4707 vm_page_unwire(
4708 	vm_page_t       mem,
4709 	boolean_t       queueit)
4710 {
4711 	vm_object_t     m_object;
4712 
4713 	m_object = VM_PAGE_OBJECT(mem);
4714 
4715 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
4716 
4717 	VM_PAGE_CHECK(mem);
4718 	assert(VM_PAGE_WIRED(mem));
4719 	assert(mem->vmp_wire_count > 0);
4720 	assert(!mem->vmp_gobbled);
4721 	assert(m_object != VM_OBJECT_NULL);
4722 	vm_object_lock_assert_exclusive(m_object);
4723 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4724 	if (--mem->vmp_wire_count == 0) {
4725 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4726 
4727 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4728 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4729 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4730 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4731 			vm_page_wire_count--;
4732 		}
4733 
4734 		assert(m_object->resident_page_count >=
4735 		    m_object->wired_page_count);
4736 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4737 			OSAddAtomic(+1, &vm_page_purgeable_count);
4738 			assert(vm_page_purgeable_wired_count > 0);
4739 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4740 		}
4741 		if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4742 		    m_object->purgable == VM_PURGABLE_EMPTY) &&
4743 		    m_object->vo_owner != TASK_NULL) {
4744 			task_t          owner;
4745 			int             ledger_idx_volatile;
4746 			int             ledger_idx_nonvolatile;
4747 			int             ledger_idx_volatile_compressed;
4748 			int             ledger_idx_nonvolatile_compressed;
4749 			boolean_t       do_footprint;
4750 
4751 			owner = VM_OBJECT_OWNER(m_object);
4752 			vm_object_ledger_tag_ledgers(
4753 				m_object,
4754 				&ledger_idx_volatile,
4755 				&ledger_idx_nonvolatile,
4756 				&ledger_idx_volatile_compressed,
4757 				&ledger_idx_nonvolatile_compressed,
4758 				&do_footprint);
4759 			/* more volatile bytes */
4760 			ledger_credit(owner->ledger,
4761 			    ledger_idx_volatile,
4762 			    PAGE_SIZE);
4763 			/* less not-quite-volatile bytes */
4764 			ledger_debit(owner->ledger,
4765 			    ledger_idx_nonvolatile,
4766 			    PAGE_SIZE);
4767 			if (do_footprint) {
4768 				/* less footprint */
4769 				ledger_debit(owner->ledger,
4770 				    task_ledgers.phys_footprint,
4771 				    PAGE_SIZE);
4772 			}
4773 		}
4774 		assert(!is_kernel_object(m_object));
4775 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4776 
4777 		if (queueit == TRUE) {
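		/*
		 * On the final unwire, optionally give the page back to a
		 * pageable queue: pages of empty purgeable objects go to
		 * the inactive list, everything else is reactivated.
		 */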
4778 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
4779 				vm_page_deactivate(mem);
4780 			} else {
4781 				vm_page_activate(mem);
4782 			}
4783 		}
4784 
4785 		VM_CHECK_MEMORYSTATUS;
4786 	}
4787 	VM_PAGE_CHECK(mem);
4788 }
4789 
4790 /*
4791  *	vm_page_deactivate:
4792  *
4793  *	Returns the given page to the inactive list,
4794  *	indicating that no physical maps have access
4795  *	to this page.  [Used by the physical mapping system.]
4796  *
4797  *	The page queues must be locked.
4798  */
4799 void
4800 vm_page_deactivate(
4801 	vm_page_t       m)
4802 {
4803 	vm_page_deactivate_internal(m, TRUE);
4804 }
4805 
4806 
4807 void
4808 vm_page_deactivate_internal(
4809 	vm_page_t       m,
4810 	boolean_t       clear_hw_reference)
4811 {
4812 	vm_object_t     m_object;
4813 
4814 	m_object = VM_PAGE_OBJECT(m);
4815 
4816 	VM_PAGE_CHECK(m);
4817 	assert(!is_kernel_object(m_object));
4818 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4819 
4820 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
4821 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4822 	/*
4823 	 *	This page is no longer very interesting.  If it was
4824 	 *	interesting (active or inactive/referenced), then we
4825 	 *	clear the reference bit and (re)enter it in the
4826 	 *	inactive queue.  Note wired pages should not have
4827 	 *	their reference bit cleared.
4828 	 */
4829 	assert( !(m->vmp_absent && !m->vmp_unusual));
4830 
4831 	if (m->vmp_gobbled) {           /* can this happen? */
4832 		assert( !VM_PAGE_WIRED(m));
4833 
4834 		if (!m->vmp_private && !m->vmp_fictitious) {
4835 			vm_page_wire_count--;
4836 		}
4837 		vm_page_gobble_count--;
4838 		m->vmp_gobbled = FALSE;
4839 	}
4840 	/*
4841 	 * if this page is currently on the pageout queue, we can't do the
4842 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4843 	 * and we can't remove it manually since we would need the object lock
4844 	 * (which is not required here) to decrement the activity_in_progress
4845 	 * reference which is held on the object while the page is in the pageout queue...
4846 	 * just let the normal laundry processing proceed
4847 	 */
4848 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4849 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4850 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4851 	    VM_PAGE_WIRED(m)) {
4852 		return;
4853 	}
4854 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
4855 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4856 	}
4857 
4858 	m->vmp_reference = FALSE;
4859 	m->vmp_no_cache = FALSE;
4860 
4861 	if (!VM_PAGE_INACTIVE(m)) {
4862 		vm_page_queues_remove(m, FALSE);
4863 
4864 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
4865 		    m->vmp_dirty && m_object->internal &&
4866 		    (m_object->purgable == VM_PURGABLE_DENY ||
4867 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4868 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
4869 			vm_page_check_pageable_safe(m);
4870 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4871 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4872 			vm_page_throttled_count++;
4873 		} else {
4874 			if (m_object->named && m_object->ref_count == 1) {
4875 				vm_page_speculate(m, FALSE);
4876 #if DEVELOPMENT || DEBUG
4877 				vm_page_speculative_recreated++;
4878 #endif
4879 			} else {
4880 				vm_page_enqueue_inactive(m, FALSE);
4881 			}
4882 		}
4883 	}
4884 }
4885 
4886 /*
4887  * vm_page_enqueue_cleaned
4888  *
4889  * Put the page on the cleaned queue, mark it cleaned, etc.
4890  * Being on the cleaned queue (and having m->clean_queue set)
4891  * does ** NOT ** guarantee that the page is clean!
4892  *
4893  * Call with the queues lock held.
4894  */
4895 
4896 void
4897 vm_page_enqueue_cleaned(vm_page_t m)
4898 {
4899 	vm_object_t     m_object;
4900 
4901 	m_object = VM_PAGE_OBJECT(m);
4902 
4903 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4904 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4905 	assert( !(m->vmp_absent && !m->vmp_unusual));
4906 
4907 	if (VM_PAGE_WIRED(m)) {
4908 		return;
4909 	}
4910 
4911 	if (m->vmp_gobbled) {
4912 		if (!m->vmp_private && !m->vmp_fictitious) {
4913 			vm_page_wire_count--;
4914 		}
4915 		vm_page_gobble_count--;
4916 		m->vmp_gobbled = FALSE;
4917 	}
4918 	/*
4919 	 * if this page is currently on the pageout queue, we can't do the
4920 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4921 	 * and we can't remove it manually since we would need the object lock
4922 	 * (which is not required here) to decrement the activity_in_progress
4923 	 * reference which is held on the object while the page is in the pageout queue...
4924 	 * just let the normal laundry processing proceed
4925 	 */
4926 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4927 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
4928 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4929 		return;
4930 	}
4931 	vm_page_queues_remove(m, FALSE);
4932 
4933 	vm_page_check_pageable_safe(m);
4934 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
4935 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
4936 	vm_page_cleaned_count++;
4937 
4938 	vm_page_inactive_count++;
4939 	if (m_object->internal) {
4940 		vm_page_pageable_internal_count++;
4941 	} else {
4942 		vm_page_pageable_external_count++;
4943 	}
4944 	vm_page_add_to_specialq(m, TRUE);
4945 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
4946 }
4947 
4948 /*
4949  *	vm_page_activate:
4950  *
4951  *	Put the specified page on the active list (if appropriate).
4952  *
4953  *	The page queues must be locked.
4954  */
4955 
4956 void
4957 vm_page_activate(
4958 	vm_page_t       m)
4959 {
4960 	vm_object_t     m_object;
4961 
4962 	m_object = VM_PAGE_OBJECT(m);
4963 
4964 	VM_PAGE_CHECK(m);
4965 #ifdef  FIXME_4778297
4966 	assert(!is_kernel_object(m_object));
4967 #endif
4968 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4969 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4970 	assert( !(m->vmp_absent && !m->vmp_unusual));
4971 
4972 	if (m->vmp_gobbled) {
4973 		assert( !VM_PAGE_WIRED(m));
4974 		if (!m->vmp_private && !m->vmp_fictitious) {
4975 			vm_page_wire_count--;
4976 		}
4977 		vm_page_gobble_count--;
4978 		m->vmp_gobbled = FALSE;
4979 	}
4980 	/*
4981 	 * if this page is currently on the pageout queue, we can't do the
4982 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4983 	 * and we can't remove it manually since we would need the object lock
4984 	 * (which is not required here) to decrement the activity_in_progress
4985 	 * reference which is held on the object while the page is in the pageout queue...
4986 	 * just let the normal laundry processing proceed
4987 	 */
4988 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4989 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4990 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4991 		return;
4992 	}
4993 
4994 #if DEBUG
4995 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
4996 		panic("vm_page_activate: already active");
4997 	}
4998 #endif
4999 
5000 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5001 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5002 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5003 	}
5004 
5005 	/*
5006 	 * A freshly activated page should be promoted in the donation queue,
5007 	 * so we remove it here while preserving its hint; it will be
5008 	 * enqueued again in vm_page_enqueue_active.
5009 	 */
5010 	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5011 
5012 	if (!VM_PAGE_WIRED(m)) {
5013 		vm_page_check_pageable_safe(m);
5014 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5015 		    m->vmp_dirty && m_object->internal &&
5016 		    (m_object->purgable == VM_PURGABLE_DENY ||
5017 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5018 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5019 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5020 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5021 			vm_page_throttled_count++;
5022 		} else {
5023 #if CONFIG_SECLUDED_MEMORY
5024 			if (secluded_for_filecache &&
5025 			    vm_page_secluded_target != 0 &&
5026 			    num_tasks_can_use_secluded_mem == 0 &&
5027 			    m_object->eligible_for_secluded &&
5028 			    !m->vmp_realtime) {
5029 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5030 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5031 				vm_page_secluded_count++;
5032 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5033 				vm_page_secluded_count_inuse++;
5034 				assert(!m_object->internal);
5035 //				vm_page_pageable_external_count++;
5036 			} else
5037 #endif /* CONFIG_SECLUDED_MEMORY */
5038 			vm_page_enqueue_active(m, FALSE);
5039 		}
5040 		m->vmp_reference = TRUE;
5041 		m->vmp_no_cache = FALSE;
5042 	}
5043 	VM_PAGE_CHECK(m);
5044 }
5045 
5046 
5047 /*
5048  *      vm_page_speculate:
5049  *
5050  *      Put the specified page on the speculative list (if appropriate).
5051  *
5052  *      The page queues must be locked.
5053  */
5054 void
5055 vm_page_speculate(
5056 	vm_page_t       m,
5057 	boolean_t       new)
5058 {
5059 	struct vm_speculative_age_q     *aq;
5060 	vm_object_t     m_object;
5061 
5062 	m_object = VM_PAGE_OBJECT(m);
5063 
5064 	VM_PAGE_CHECK(m);
5065 	vm_page_check_pageable_safe(m);
5066 
5067 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5068 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5069 	assert( !(m->vmp_absent && !m->vmp_unusual));
5070 	assert(m_object->internal == FALSE);
5071 
5072 	/*
5073 	 * if this page is currently on the pageout queue, we can't do the
5074 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5075 	 * and we can't remove it manually since we would need the object lock
5076 	 * (which is not required here) to decrement the activity_in_progress
5077 	 * reference which is held on the object while the page is in the pageout queue...
5078 	 * just let the normal laundry processing proceed
5079 	 */
5080 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5081 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5082 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5083 		return;
5084 	}
5085 
5086 	vm_page_queues_remove(m, FALSE);
5087 
5088 	if (!VM_PAGE_WIRED(m)) {
5089 		mach_timespec_t         ts;
5090 		clock_sec_t sec;
5091 		clock_nsec_t nsec;
5092 
5093 		clock_get_system_nanotime(&sec, &nsec);
5094 		ts.tv_sec = (unsigned int) sec;
5095 		ts.tv_nsec = nsec;
5096 
5097 		if (vm_page_speculative_count == 0) {
5098 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5099 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5100 
5101 			aq = &vm_page_queue_speculative[speculative_age_index];
5102 
5103 			/*
5104 			 * set the timer to begin a new group
5105 			 */
5106 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5107 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5108 
5109 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5110 		} else {
5111 			aq = &vm_page_queue_speculative[speculative_age_index];
5112 
5113 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5114 				speculative_age_index++;
5115 
5116 				if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5117 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5118 				}
5119 				if (speculative_age_index == speculative_steal_index) {
5120 					speculative_steal_index = speculative_age_index + 1;
5121 
5122 					if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5123 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5124 					}
5125 				}
5126 				aq = &vm_page_queue_speculative[speculative_age_index];
5127 
5128 				if (!vm_page_queue_empty(&aq->age_q)) {
5129 					vm_page_speculate_ageit(aq);
5130 				}
5131 
5132 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5133 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5134 
5135 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5136 			}
5137 		}
5138 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5139 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5140 		vm_page_speculative_count++;
5141 		vm_page_pageable_external_count++;
5142 
5143 		if (new == TRUE) {
5144 			vm_object_lock_assert_exclusive(m_object);
5145 
5146 			m_object->pages_created++;
5147 #if DEVELOPMENT || DEBUG
5148 			vm_page_speculative_created++;
5149 #endif
5150 		}
5151 	}
5152 	VM_PAGE_CHECK(m);
5153 }
5154 
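/*
 * Illustrative sketch, not compiled: how the aging-bucket deadline in
 * vm_page_speculate() above is derived.  The millisecond knob is split into
 * seconds and nanoseconds and added to "now"; the 1000ms below is an example
 * value, not necessarily the real vm_page_speculative_q_age_ms.
 */
#if 0
	mach_timespec_t now, deadline;
	uint32_t        age_ms = 1000;          /* example value only */

	/* "now" would be filled in from clock_get_system_nanotime() */
	deadline.tv_sec  = age_ms / 1000;                               /* 1 sec  */
	deadline.tv_nsec = (age_ms % 1000) * 1000 * NSEC_PER_USEC;      /* 0 nsec */
	ADD_MACH_TIMESPEC(&deadline, &now);
	/* once CMP_MACH_TIMESPEC(&now, &deadline) >= 0, a new bucket is started */
#endif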
5155 
5156 /*
5157  * move pages from the specified aging bin to
5158  * the speculative bin that pageout_scan claims from
5159  *
5160  *      The page queues must be locked.
5161  */
5162 void
5163 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5164 {
5165 	struct vm_speculative_age_q     *sq;
5166 	vm_page_t       t;
5167 
5168 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5169 
5170 	if (vm_page_queue_empty(&sq->age_q)) {
5171 		sq->age_q.next = aq->age_q.next;
5172 		sq->age_q.prev = aq->age_q.prev;
5173 
5174 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5175 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5176 
5177 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5178 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5179 	} else {
5180 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5181 		t->vmp_pageq.next = aq->age_q.next;
5182 
5183 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5184 		t->vmp_pageq.prev = sq->age_q.prev;
5185 
5186 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5187 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5188 
5189 		sq->age_q.prev = aq->age_q.prev;
5190 	}
5191 	vm_page_queue_init(&aq->age_q);
5192 }
5193 
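/*
 * Illustrative sketch, not compiled: with the pointer packing stripped away,
 * the splice in vm_page_speculate_ageit() above is a constant-time list
 * concatenation of the aging bin "aq" onto the tail of the AGED bin "sq".
 * "sq_last", "aq_first" and "aq_last" are hypothetical unpacked stand-ins
 * for the boundary pages of the two circular queues:
 */
#if 0
	sq_last->next   = aq_first;             /* sq's old tail links into aq    */
	aq_first->prev  = sq_last;
	aq_last->next   = &sq->age_q;           /* aq's tail closes the circle    */
	sq->age_q.prev  = aq_last;              /* sq's head records the new tail */
	vm_page_queue_init(&aq->age_q);         /* the aging bin is now empty     */
#endif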
5194 
5195 void
5196 vm_page_lru(
5197 	vm_page_t       m)
5198 {
5199 	VM_PAGE_CHECK(m);
5200 	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5201 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5202 
5203 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5204 
5205 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5206 		/*
5207 		 * we don't need to do all the other work that
5208 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5209 		 * bring along for the ride
5210 		 */
5211 		assert(!m->vmp_laundry);
5212 		assert(!m->vmp_private);
5213 
5214 		m->vmp_no_cache = FALSE;
5215 
5216 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5217 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5218 
5219 		return;
5220 	}
5221 	/*
5222 	 * if this page is currently on the pageout queue, we can't do the
5223 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5224 	 * and we can't remove it manually since we would need the object lock
5225 	 * (which is not required here) to decrement the activity_in_progress
5226 	 * reference which is held on the object while the page is in the pageout queue...
5227 	 * just let the normal laundry processing proceed
5228 	 */
5229 	if (m->vmp_laundry || m->vmp_private ||
5230 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5231 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5232 	    VM_PAGE_WIRED(m)) {
5233 		return;
5234 	}
5235 
5236 	m->vmp_no_cache = FALSE;
5237 
5238 	vm_page_queues_remove(m, FALSE);
5239 
5240 	vm_page_enqueue_inactive(m, FALSE);
5241 }
5242 
5243 
5244 void
5245 vm_page_reactivate_all_throttled(void)
5246 {
5247 	vm_page_t       first_throttled, last_throttled;
5248 	vm_page_t       first_active;
5249 	vm_page_t       m;
5250 	int             extra_active_count;
5251 	int             extra_internal_count, extra_external_count;
5252 	vm_object_t     m_object;
5253 
5254 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5255 		return;
5256 	}
5257 
5258 	extra_active_count = 0;
5259 	extra_internal_count = 0;
5260 	extra_external_count = 0;
5261 	vm_page_lock_queues();
5262 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5263 		/*
5264 		 * Switch "throttled" pages to "active".
5265 		 */
5266 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5267 			VM_PAGE_CHECK(m);
5268 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5269 
5270 			m_object = VM_PAGE_OBJECT(m);
5271 
5272 			extra_active_count++;
5273 			if (m_object->internal) {
5274 				extra_internal_count++;
5275 			} else {
5276 				extra_external_count++;
5277 			}
5278 
5279 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5280 			VM_PAGE_CHECK(m);
5281 			vm_page_add_to_specialq(m, FALSE);
5282 		}
5283 
5284 		/*
5285 		 * Transfer the entire throttled queue to the regular LRU page queues.
5286 		 * We insert it at the head of the active queue, so that these pages
5287 		 * get re-evaluated by the LRU algorithm first, since they've been
5288 		 * completely out of it until now.
5289 		 */
5290 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5291 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5292 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5293 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5294 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5295 		} else {
5296 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5297 		}
5298 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5299 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5300 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5301 
5302 #if DEBUG
5303 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5304 #endif
5305 		vm_page_queue_init(&vm_page_queue_throttled);
5306 		/*
5307 		 * Adjust the global page counts.
5308 		 */
5309 		vm_page_active_count += extra_active_count;
5310 		vm_page_pageable_internal_count += extra_internal_count;
5311 		vm_page_pageable_external_count += extra_external_count;
5312 		vm_page_throttled_count = 0;
5313 	}
5314 	assert(vm_page_throttled_count == 0);
5315 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5316 	vm_page_unlock_queues();
5317 }
5318 
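/*
 * Illustrative sketch, not compiled: the throttled -> active transfer
 * performed above is an O(1) splice at the head of the active queue rather
 * than a per-page vm_page_enqueue_active().  With the queue-entry packing
 * stripped away, only the boundary links change; the interior links of the
 * throttled chain are already consistent:
 */
#if 0
	vm_page_queue_active.next = first_throttled;    /* head leads into the chain     */
	first_throttled->prev     = &vm_page_queue_active;
	last_throttled->next      = first_active;       /* chain rejoins the old content */
	first_active->prev        = last_throttled;     /* or queue head's prev if empty */
#endif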
5319 
5320 /*
5321  * move pages from the indicated local queue to the global active queue
5322  * it's ok to fail if we're below the hard limit and force == FALSE
5323  * the nolocks == TRUE case is to allow this function to be run on
5324  * the hibernate path
5325  */
5326 
5327 void
5328 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5329 {
5330 	struct vpl      *lq;
5331 	vm_page_t       first_local, last_local;
5332 	vm_page_t       first_active;
5333 	vm_page_t       m;
5334 	uint32_t        count = 0;
5335 
5336 	if (vm_page_local_q == NULL) {
5337 		return;
5338 	}
5339 
5340 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5341 
5342 	if (nolocks == FALSE) {
5343 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5344 			if (!vm_page_trylockspin_queues()) {
5345 				return;
5346 			}
5347 		} else {
5348 			vm_page_lockspin_queues();
5349 		}
5350 
5351 		VPL_LOCK(&lq->vpl_lock);
5352 	}
5353 	if (lq->vpl_count) {
5354 		/*
5355 		 * Switch "local" pages to "active".
5356 		 */
5357 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5358 
5359 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5360 			VM_PAGE_CHECK(m);
5361 			vm_page_check_pageable_safe(m);
5362 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5363 			assert(!m->vmp_fictitious);
5364 
5365 			if (m->vmp_local_id != lid) {
5366 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5367 			}
5368 
5369 			m->vmp_local_id = 0;
5370 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5371 			VM_PAGE_CHECK(m);
5372 			vm_page_add_to_specialq(m, FALSE);
5373 			count++;
5374 		}
5375 		if (count != lq->vpl_count) {
5376 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5377 		}
5378 
5379 		/*
5380 		 * Transfer the entire local queue to the regular LRU page queues.
5381 		 */
5382 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5383 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5384 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5385 
5386 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5387 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5388 		} else {
5389 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5390 		}
5391 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5392 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5393 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5394 
5395 		vm_page_queue_init(&lq->vpl_queue);
5396 		/*
5397 		 * Adjust the global page counts.
5398 		 */
5399 		vm_page_active_count += lq->vpl_count;
5400 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5401 		vm_page_pageable_external_count += lq->vpl_external_count;
5402 		lq->vpl_count = 0;
5403 		lq->vpl_internal_count = 0;
5404 		lq->vpl_external_count = 0;
5405 	}
5406 	assert(vm_page_queue_empty(&lq->vpl_queue));
5407 
5408 	if (nolocks == FALSE) {
5409 		VPL_UNLOCK(&lq->vpl_lock);
5410 
5411 		vm_page_balance_inactive(count / 4);
5412 		vm_page_unlock_queues();
5413 	}
5414 }
5415 
5416 /*
5417  *	vm_page_part_zero_fill:
5418  *
5419  *	Zero-fill a part of the page.
5420  */
5421 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5422 void
5423 vm_page_part_zero_fill(
5424 	vm_page_t       m,
5425 	vm_offset_t     m_pa,
5426 	vm_size_t       len)
5427 {
5428 #if 0
5429 	/*
5430 	 * we don't hold the page queue lock
5431 	 * so this check isn't safe to make
5432 	 */
5433 	VM_PAGE_CHECK(m);
5434 #endif
5435 
5436 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5437 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5438 #else
5439 	vm_page_t       tmp;
5440 	while (1) {
5441 		tmp = vm_page_grab();
5442 		if (tmp == VM_PAGE_NULL) {
5443 			vm_page_wait(THREAD_UNINT);
5444 			continue;
5445 		}
5446 		break;
5447 	}
5448 	vm_page_zero_fill(tmp);
5449 	if (m_pa != 0) {
5450 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
5451 	}
5452 	if ((m_pa + len) < PAGE_SIZE) {
5453 		vm_page_part_copy(m, m_pa + len, tmp,
5454 		    m_pa + len, PAGE_SIZE - (m_pa + len));
5455 	}
5456 	vm_page_copy(tmp, m);
5457 	VM_PAGE_FREE(tmp);
5458 #endif
5459 }
5460 
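/*
 * Illustrative usage, not compiled: zero bytes [512, 768) of page "m" while
 * preserving the rest of its contents; the offset and length are arbitrary
 * example values.
 */
#if 0
	vm_page_part_zero_fill(m, 512, 256);
#endif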
5461 /*
5462  *	vm_page_zero_fill:
5463  *
5464  *	Zero-fill the specified page.
5465  */
5466 void
5467 vm_page_zero_fill(
5468 	vm_page_t       m)
5469 {
5470 #if 0
5471 	/*
5472 	 * we don't hold the page queue lock
5473 	 * so this check isn't safe to make
5474 	 */
5475 	VM_PAGE_CHECK(m);
5476 #endif
5477 
5478 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
5479 	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5480 }
5481 
5482 /*
5483  *	vm_page_part_copy:
5484  *
5485  *	copy part of one page to another
5486  */
5487 
5488 void
5489 vm_page_part_copy(
5490 	vm_page_t       src_m,
5491 	vm_offset_t     src_pa,
5492 	vm_page_t       dst_m,
5493 	vm_offset_t     dst_pa,
5494 	vm_size_t       len)
5495 {
5496 #if 0
5497 	/*
5498 	 * we don't hold the page queue lock
5499 	 * so this check isn't safe to make
5500 	 */
5501 	VM_PAGE_CHECK(src_m);
5502 	VM_PAGE_CHECK(dst_m);
5503 #endif
5504 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5505 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5506 }
5507 
5508 /*
5509  *	vm_page_copy:
5510  *
5511  *	Copy one page to another
5512  */
5513 
5514 int vm_page_copy_cs_validations = 0;
5515 int vm_page_copy_cs_tainted = 0;
5516 
5517 void
5518 vm_page_copy(
5519 	vm_page_t       src_m,
5520 	vm_page_t       dest_m)
5521 {
5522 	vm_object_t     src_m_object;
5523 
5524 	src_m_object = VM_PAGE_OBJECT(src_m);
5525 
5526 #if 0
5527 	/*
5528 	 * we don't hold the page queue lock
5529 	 * so this check isn't safe to make
5530 	 */
5531 	VM_PAGE_CHECK(src_m);
5532 	VM_PAGE_CHECK(dest_m);
5533 #endif
5534 	vm_object_lock_assert_held(src_m_object);
5535 
5536 	if (src_m_object != VM_OBJECT_NULL &&
5537 	    src_m_object->code_signed) {
5538 		/*
5539 		 * We're copying a page from a code-signed object.
5540 		 * Whoever ends up mapping the copy page might care about
5541 		 * the original page's integrity, so let's validate the
5542 		 * source page now.
5543 		 */
5544 		vm_page_copy_cs_validations++;
5545 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5546 #if DEVELOPMENT || DEBUG
5547 		DTRACE_VM4(codesigned_copy,
5548 		    vm_object_t, src_m_object,
5549 		    vm_object_offset_t, src_m->vmp_offset,
5550 		    int, src_m->vmp_cs_validated,
5551 		    int, src_m->vmp_cs_tainted);
5552 #endif /* DEVELOPMENT || DEBUG */
5553 	}
5554 
5555 	/*
5556 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
5557 	 * the cs_validated bit.
5558 	 */
5559 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5560 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5561 	if (dest_m->vmp_cs_tainted) {
5562 		vm_page_copy_cs_tainted++;
5563 	}
5564 	dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5565 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5566 }
5567 
5568 #if MACH_ASSERT
5569 static void
5570 _vm_page_print(
5571 	vm_page_t       p)
5572 {
5573 	printf("vm_page %p: \n", p);
5574 	printf("  pageq: next=%p prev=%p\n",
5575 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5576 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5577 	printf("  listq: next=%p prev=%p\n",
5578 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5579 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5580 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5581 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5582 	printf("  wire_count=%u\n", p->vmp_wire_count);
5583 	printf("  q_state=%u\n", p->vmp_q_state);
5584 
5585 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
5586 	    (p->vmp_laundry ? "" : "!"),
5587 	    (p->vmp_reference ? "" : "!"),
5588 	    (p->vmp_gobbled ? "" : "!"),
5589 	    (p->vmp_private ? "" : "!"));
5590 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5591 	    (p->vmp_busy ? "" : "!"),
5592 	    (p->vmp_wanted ? "" : "!"),
5593 	    (p->vmp_tabled ? "" : "!"),
5594 	    (p->vmp_fictitious ? "" : "!"),
5595 	    (p->vmp_pmapped ? "" : "!"),
5596 	    (p->vmp_wpmapped ? "" : "!"));
5597 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5598 	    (p->vmp_free_when_done ? "" : "!"),
5599 	    (p->vmp_absent ? "" : "!"),
5600 	    (VMP_ERROR_GET(p) ? "" : "!"),
5601 	    (p->vmp_dirty ? "" : "!"),
5602 	    (p->vmp_cleaning ? "" : "!"),
5603 	    (p->vmp_precious ? "" : "!"),
5604 	    (p->vmp_clustered ? "" : "!"));
5605 	printf("  %soverwriting, %srestart, %sunusual\n",
5606 	    (p->vmp_overwriting ? "" : "!"),
5607 	    (p->vmp_restart ? "" : "!"),
5608 	    (p->vmp_unusual ? "" : "!"));
5609 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5610 	    p->vmp_cs_validated,
5611 	    p->vmp_cs_tainted,
5612 	    p->vmp_cs_nx,
5613 	    (p->vmp_no_cache ? "" : "!"));
5614 
5615 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5616 }
5617 
5618 /*
5619  *	Check that the list of pages is ordered by
5620  *	ascending physical address and has no holes.
5621  */
5622 static int
5623 vm_page_verify_contiguous(
5624 	vm_page_t       pages,
5625 	unsigned int    npages)
5626 {
5627 	vm_page_t               m;
5628 	unsigned int            page_count;
5629 	vm_offset_t             prev_addr;
5630 
5631 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5632 	page_count = 1;
5633 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5634 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5635 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5636 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5637 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5638 			panic("vm_page_verify_contiguous:  not contiguous!");
5639 		}
5640 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5641 		++page_count;
5642 	}
5643 	if (page_count != npages) {
5644 		printf("pages %p actual count 0x%x but requested 0x%x\n",
5645 		    pages, page_count, npages);
5646 		panic("vm_page_verify_contiguous:  count error");
5647 	}
5648 	return 1;
5649 }
5650 
5651 
5652 /*
5653  *	Check the free lists for proper length etc.
5654  */
5655 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5656 static unsigned int
5657 vm_page_verify_free_list(
5658 	vm_page_queue_head_t    *vm_page_queue,
5659 	unsigned int    color,
5660 	vm_page_t       look_for_page,
5661 	boolean_t       expect_page)
5662 {
5663 	unsigned int    npages;
5664 	vm_page_t       m;
5665 	vm_page_t       prev_m;
5666 	boolean_t       found_page;
5667 
5668 	if (!vm_page_verify_this_free_list_enabled) {
5669 		return 0;
5670 	}
5671 
5672 	found_page = FALSE;
5673 	npages = 0;
5674 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5675 
5676 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5677 		if (m == look_for_page) {
5678 			found_page = TRUE;
5679 		}
5680 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5681 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5682 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5683 		}
5684 		if (!m->vmp_busy) {
5685 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5686 			    color, npages, m);
5687 		}
5688 		if (color != (unsigned int) -1) {
5689 			if (VM_PAGE_GET_COLOR(m) != color) {
5690 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5691 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
5692 			}
5693 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5694 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5695 				    color, npages, m, m->vmp_q_state);
5696 			}
5697 		} else {
5698 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5699 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5700 				    npages, m, m->vmp_q_state);
5701 			}
5702 		}
5703 		++npages;
5704 		prev_m = m;
5705 	}
5706 	if (look_for_page != VM_PAGE_NULL) {
5707 		unsigned int other_color;
5708 
5709 		if (expect_page && !found_page) {
5710 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5711 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5712 			_vm_page_print(look_for_page);
5713 			for (other_color = 0;
5714 			    other_color < vm_colors;
5715 			    other_color++) {
5716 				if (other_color == color) {
5717 					continue;
5718 				}
5719 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5720 				    other_color, look_for_page, FALSE);
5721 			}
5722 			if (color == (unsigned int) -1) {
5723 				vm_page_verify_free_list(&vm_lopage_queue_free,
5724 				    (unsigned int) -1, look_for_page, FALSE);
5725 			}
5726 			panic("vm_page_verify_free_list(color=%u)", color);
5727 		}
5728 		if (!expect_page && found_page) {
5729 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5730 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5731 		}
5732 	}
5733 	return npages;
5734 }
5735 
5736 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5737 static void
5738 vm_page_verify_free_lists(void)
5739 {
5740 	unsigned int    color, npages, nlopages;
5741 	boolean_t       toggle = TRUE;
5742 
5743 	if (!vm_page_verify_all_free_lists_enabled) {
5744 		return;
5745 	}
5746 
5747 	npages = 0;
5748 
5749 	vm_free_page_lock();
5750 
5751 	if (vm_page_verify_this_free_list_enabled == TRUE) {
5752 		/*
5753 		 * This variable has been set globally for extra checking of
5754 		 * each free list Q. Since we didn't set it, we don't own it
5755 		 * and we shouldn't toggle it.
5756 		 */
5757 		toggle = FALSE;
5758 	}
5759 
5760 	if (toggle == TRUE) {
5761 		vm_page_verify_this_free_list_enabled = TRUE;
5762 	}
5763 
5764 	for (color = 0; color < vm_colors; color++) {
5765 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5766 		    color, VM_PAGE_NULL, FALSE);
5767 	}
5768 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5769 	    (unsigned int) -1,
5770 	    VM_PAGE_NULL, FALSE);
5771 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5772 		panic("vm_page_verify_free_lists:  "
5773 		    "npages %u free_count %d nlopages %u lo_free_count %u",
5774 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5775 	}
5776 
5777 	if (toggle == TRUE) {
5778 		vm_page_verify_this_free_list_enabled = FALSE;
5779 	}
5780 
5781 	vm_free_page_unlock();
5782 }
5783 
5784 #endif  /* MACH_ASSERT */
5785 
5786 /*
5787  * wrapper for pmap_enter()
5788  */
5789 kern_return_t
5790 pmap_enter_check(
5791 	pmap_t           pmap,
5792 	vm_map_address_t virtual_address,
5793 	vm_page_t        page,
5794 	vm_prot_t        protection,
5795 	vm_prot_t        fault_type,
5796 	unsigned int     flags,
5797 	boolean_t        wired)
5798 {
5799 	int             options = 0;
5800 	vm_object_t     obj;
5801 
5802 	if (VMP_ERROR_GET(page)) {
5803 		return KERN_MEMORY_FAILURE;
5804 	}
5805 	obj = VM_PAGE_OBJECT(page);
5806 	if (obj->internal) {
5807 		options |= PMAP_OPTIONS_INTERNAL;
5808 	}
5809 	if (page->vmp_reusable || obj->all_reusable) {
5810 		options |= PMAP_OPTIONS_REUSABLE;
5811 	}
5812 	return pmap_enter_options(pmap,
5813 	           virtual_address,
5814 	           VM_PAGE_GET_PHYS_PAGE(page),
5815 	           protection,
5816 	           fault_type,
5817 	           flags,
5818 	           wired,
5819 	           options,
5820 	           NULL,
5821 	           PMAP_MAPPING_TYPE_INFER);
5822 }
5823 
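/*
 * Illustrative usage, not compiled: establish a wired read/write kernel
 * mapping for "page" at "vaddr" (both assumed set up by the caller),
 * mirroring the call made for compressor pages in vm_page_find_contiguous()
 * below.  A page in the error state fails early with KERN_MEMORY_FAILURE.
 */
#if 0
	kern_return_t kr;

	kr = pmap_enter_check(kernel_pmap, vaddr, page,
	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
	assert(kr == KERN_SUCCESS);
#endif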
5824 
5825 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5826 
5827 /*
5828  *	CONTIGUOUS PAGE ALLOCATION
5829  *
5830  *	Find a region large enough to contain at least n pages
5831  *	of contiguous physical memory.
5832  *
5833  *	This is done by traversing the vm_page_t array in a linear fashion
5834  *	we assume that the vm_page_t array has the available physical pages in an
5835  *	ordered, ascending list... this is currently true of all our implementations
5836  *      and must remain so... there can be 'holes' in the array...  we also can
5837  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5838  *      which used to happen via 'vm_page_convert'... that function was no longer
5839  *      being called and was removed...
5840  *
5841  *	The basic flow consists of stabilizing some of the interesting state of
5842  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5843  *	sweep at the beginning of the array looking for pages that meet our criteria
5844  *	for a 'stealable' page... currently we are pretty conservative... if the page
5845  *	meets this criteria and is physically contiguous to the previous page in the 'run'
5846  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
5847  *	and start to develop a new run... if at this point we've already considered
5848  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5849  *	and mutex_pause (which will yield the processor), to keep the latency low with respect
5850  *	to other threads trying to acquire free pages (or move pages from q to q),
5851  *	and then continue from the spot we left off... we only make 1 pass through the
5852  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
5853  *      which steals the pages from the queues they're currently on... pages on the free
5854  *	queue can be stolen directly... pages that are on any of the other queues
5855  *	must be removed from the object they are tabled on... this requires taking the
5856  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5857  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
5858  *	dump the pages we've currently stolen back to the free list, and pick up our
5859  *	scan from the point where we aborted the 'current' run.
5860  *
5861  *
5862  *	Requirements:
5863  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
5864  *
5865  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5866  *
5867  * Algorithm:
5868  */
5869 
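/*
 * Illustrative sketch, not compiled: the core of the single-pass run
 * building described above, with alignment handling, yielding and the
 * free-reserve accounting omitted.  "page_is_stealable" is a hypothetical
 * stand-in for the state checks performed by the real loop below.
 */
#if 0
	for (page_idx = last_idx; npages < contig_pages && page_idx < vm_pages_count; page_idx++) {
		m = &vm_pages[page_idx];

		if (!page_is_stealable(m)) {
			npages = 0;                     /* reset the run          */
		} else if (npages && VM_PAGE_GET_PHYS_PAGE(m) == prevcontaddr + 1) {
			npages++;                       /* extend the current run */
		} else {
			npages = 1;                     /* start a new run here   */
			start_idx = page_idx;
		}
		prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
	}
#endif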
5870 #define MAX_CONSIDERED_BEFORE_YIELD     1000
5871 
5872 
5873 #define RESET_STATE_OF_RUN()    \
5874 	MACRO_BEGIN             \
5875 	prevcontaddr = -2;      \
5876 	start_pnum = -1;        \
5877 	free_considered = 0;    \
5878 	substitute_needed = 0;  \
5879 	npages = 0;             \
5880 	MACRO_END
5881 
5882 /*
5883  * Can we steal in-use (i.e. not free) pages when searching for
5884  * physically-contiguous pages ?
5885  */
5886 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
5887 
5888 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
5889 #if DEBUG
5890 int vm_page_find_contig_debug = 0;
5891 #endif
5892 
5893 static vm_page_t
5894 vm_page_find_contiguous(
5895 	unsigned int    contig_pages,
5896 	ppnum_t         max_pnum,
5897 	ppnum_t         pnum_mask,
5898 	boolean_t       wire,
5899 	int             flags)
5900 {
5901 	vm_page_t       m = NULL;
5902 	ppnum_t         prevcontaddr = 0;
5903 	ppnum_t         start_pnum = 0;
5904 	unsigned int    npages = 0, considered = 0, scanned = 0;
5905 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
5906 	unsigned int    idx_last_contig_page_found = 0;
5907 	int             free_considered = 0, free_available = 0;
5908 	int             substitute_needed = 0;
5909 	int             zone_gc_called = 0;
5910 	boolean_t       wrapped;
5911 	kern_return_t   kr;
5912 #if DEBUG
5913 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
5914 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
5915 #endif
5916 
5917 	int             yielded = 0;
5918 	int             dumped_run = 0;
5919 	int             stolen_pages = 0;
5920 	int             compressed_pages = 0;
5921 
5922 
5923 	if (contig_pages == 0) {
5924 		return VM_PAGE_NULL;
5925 	}
5926 
5927 full_scan_again:
5928 
5929 #if MACH_ASSERT
5930 	vm_page_verify_free_lists();
5931 #endif
5932 #if DEBUG
5933 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
5934 #endif
5935 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5936 
5937 	/*
5938 	 * If there are still delayed pages, try to free up some that match.
5939 	 */
5940 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
5941 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
5942 	}
5943 
5944 	vm_page_lock_queues();
5945 	vm_free_page_lock();
5946 
5947 	RESET_STATE_OF_RUN();
5948 
5949 	scanned = 0;
5950 	considered = 0;
5951 	free_available = vm_page_free_count - vm_page_free_reserved;
5952 
5953 	wrapped = FALSE;
5954 
5955 	if (flags & KMA_LOMEM) {
5956 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
5957 	} else {
5958 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
5959 	}
5960 
5961 	orig_last_idx = idx_last_contig_page_found;
5962 	last_idx = orig_last_idx;
5963 
5964 	for (page_idx = last_idx, start_idx = last_idx;
5965 	    npages < contig_pages && page_idx < vm_pages_count;
5966 	    page_idx++) {
5967 retry:
5968 		if (wrapped &&
5969 		    npages == 0 &&
5970 		    page_idx >= orig_last_idx) {
5971 			/*
5972 			 * We're back where we started and we haven't
5973 			 * found any suitable contiguous range.  Let's
5974 			 * give up.
5975 			 */
5976 			break;
5977 		}
5978 		scanned++;
5979 		m = &vm_pages[page_idx];
5980 
5981 		assert(!m->vmp_fictitious);
5982 		assert(!m->vmp_private);
5983 
5984 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
5985 			/* no more low pages... */
5986 			break;
5987 		}
5988 		if (npages == 0 && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
5989 			/*
5990 			 * not aligned
5991 			 */
5992 			RESET_STATE_OF_RUN();
5993 		} else if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
5994 		    m->vmp_laundry || m->vmp_wanted ||
5995 		    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5996 			/*
5997 			 * page is in a transient state
5998 			 * or a state we don't want to deal
5999 			 * with, so don't consider it which
6000 			 * means starting a new run
6001 			 */
6002 			RESET_STATE_OF_RUN();
6003 		} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
6004 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
6005 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
6006 		    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6007 			/*
6008 			 * page needs to be on one of our queues (other then the pageout or special free queues)
6009 			 * or it needs to belong to the compressor pool (which is now indicated
6010 			 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
6011 			 * from the check for VM_PAGE_NOT_ON_Q)
6012 			 * in order for it to be stable behind the
6013 			 * locks we hold at this point...
6014 			 * if not, don't consider it which
6015 			 * means starting a new run
6016 			 */
6017 			RESET_STATE_OF_RUN();
6018 		} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) {
6019 			/*
6020 			 * pages on the free list are always 'busy'
6021 			 * so we couldn't test for 'busy' in the check
6022 			 * for the transient states... pages that are
6023 			 * 'free' are never 'tabled', so we also couldn't
6024 			 * test for 'tabled'.  So we check here to make
6025 			 * sure that a non-free page is not busy and is
6026 			 * tabled on an object...
6027 			 * if not, don't consider it which
6028 			 * means starting a new run
6029 			 */
6030 			RESET_STATE_OF_RUN();
6031 		} else {
6032 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6033 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6034 					RESET_STATE_OF_RUN();
6035 					goto did_consider;
6036 				} else {
6037 					npages = 1;
6038 					start_idx = page_idx;
6039 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6040 				}
6041 			} else {
6042 				npages++;
6043 			}
6044 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6045 
6046 			VM_PAGE_CHECK(m);
6047 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6048 				free_considered++;
6049 			} else {
6050 				/*
6051 				 * This page is not free.
6052 				 * If we can't steal used pages,
6053 				 * we have to give up this run
6054 				 * and keep looking.
6055 				 * Otherwise, we might need to
6056 				 * move the contents of this page
6057 				 * into a substitute page.
6058 				 */
6059 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6060 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6061 					substitute_needed++;
6062 				}
6063 #else
6064 				RESET_STATE_OF_RUN();
6065 #endif
6066 			}
6067 
6068 			if ((free_considered + substitute_needed) > free_available) {
6069 				/*
6070 				 * if we let this run continue
6071 				 * we will end up dropping the vm_page_free_count
6072 				 * below the reserve limit... we need to abort
6073 				 * this run, but we can at least re-consider this
6074 				 * page... thus the jump back to 'retry'
6075 				 */
6076 				RESET_STATE_OF_RUN();
6077 
6078 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6079 					considered++;
6080 					goto retry;
6081 				}
6082 				/*
6083 				 * free_available == 0
6084 				 * so can't consider any free pages... if
6085 				 * we went to retry in this case, we'd
6086 				 * get stuck looking at the same page
6087 				 * w/o making any forward progress
6088 				 * we also want to take this path if we've already
6089 				 * reached our limit that controls the lock latency
6090 				 */
6091 			}
6092 		}
6093 did_consider:
6094 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6095 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6096 
6097 			vm_free_page_unlock();
6098 			vm_page_unlock_queues();
6099 
6100 			mutex_pause(0);
6101 
6102 			PAGE_REPLACEMENT_ALLOWED(TRUE);
6103 
6104 			vm_page_lock_queues();
6105 			vm_free_page_lock();
6106 
6107 			RESET_STATE_OF_RUN();
6108 			/*
6109 			 * reset our free page limit since we
6110 			 * dropped the lock protecting the vm_page_free_queue
6111 			 */
6112 			free_available = vm_page_free_count - vm_page_free_reserved;
6113 			considered = 0;
6114 
6115 			yielded++;
6116 
6117 			goto retry;
6118 		}
6119 		considered++;
6120 	}
6121 	m = VM_PAGE_NULL;
6122 
6123 	if (npages != contig_pages) {
6124 		if (!wrapped) {
6125 			/*
6126 			 * We didn't find a contiguous range but we didn't
6127 			 * start from the very first page.
6128 			 * Start again from the very first page.
6129 			 */
6130 			RESET_STATE_OF_RUN();
6131 			if (flags & KMA_LOMEM) {
6132 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6133 			} else {
6134 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6135 			}
6136 			last_idx = 0;
6137 			page_idx = last_idx;
6138 			wrapped = TRUE;
6139 			goto retry;
6140 		}
6141 		vm_free_page_unlock();
6142 	} else {
6143 		vm_page_t       m1;
6144 		vm_page_t       m2;
6145 		unsigned int    cur_idx;
6146 		unsigned int    tmp_start_idx;
6147 		vm_object_t     locked_object = VM_OBJECT_NULL;
6148 		boolean_t       abort_run = FALSE;
6149 
6150 		assert(page_idx - start_idx == contig_pages);
6151 
6152 		tmp_start_idx = start_idx;
6153 
6154 		/*
6155 		 * first pass through to pull the free pages
6156 		 * off of the free queue so that in case we
6157 		 * need substitute pages, we won't grab any
6158 		 * of the free pages in the run... we'll clear
6159 		 * the 'free' bit in the 2nd pass, and even in
6160 		 * an abort_run case, we'll collect all of the
6161 		 * free pages in this run and return them to the free list
6162 		 */
6163 		while (start_idx < page_idx) {
6164 			m1 = &vm_pages[start_idx++];
6165 
6166 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6167 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6168 #endif
6169 
6170 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6171 				unsigned int color;
6172 
6173 				color = VM_PAGE_GET_COLOR(m1);
6174 #if MACH_ASSERT
6175 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6176 #endif
6177 				vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6178 
6179 				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6180 #if MACH_ASSERT
6181 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6182 #endif
6183 				/*
6184 				 * Clear the "free" bit so that this page
6185 				 * does not get considered for another
6186 				 * concurrent physically-contiguous allocation.
6187 				 */
6188 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6189 				assert(m1->vmp_busy);
6190 
6191 				vm_page_free_count--;
6192 			}
6193 		}
6194 		if (flags & KMA_LOMEM) {
6195 			vm_page_lomem_find_contiguous_last_idx = page_idx;
6196 		} else {
6197 			vm_page_find_contiguous_last_idx = page_idx;
6198 		}
6199 
6200 		/*
6201 		 * we can drop the free queue lock at this point since
6202 		 * we've pulled any 'free' candidates off of the list
6203 		 * we need it dropped so that we can do a vm_page_grab
6204 		 * when substituting for pmapped/dirty pages
6205 		 */
6206 		vm_free_page_unlock();
6207 
6208 		start_idx = tmp_start_idx;
6209 		cur_idx = page_idx - 1;
6210 
6211 		while (start_idx++ < page_idx) {
6212 			/*
6213 			 * must go through the list from back to front
6214 			 * so that the page list is created in the
6215 			 * correct order - low -> high phys addresses
6216 			 */
6217 			m1 = &vm_pages[cur_idx--];
6218 
6219 			if (m1->vmp_object == 0) {
6220 				/*
6221 				 * page has already been removed from
6222 				 * the free list in the 1st pass
6223 				 */
6224 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6225 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
6226 				assert(m1->vmp_busy);
6227 				assert(!m1->vmp_wanted);
6228 				assert(!m1->vmp_laundry);
6229 			} else {
6230 				vm_object_t object;
6231 				int refmod;
6232 				boolean_t disconnected, reusable;
6233 
6234 				if (abort_run == TRUE) {
6235 					continue;
6236 				}
6237 
6238 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6239 
6240 				object = VM_PAGE_OBJECT(m1);
6241 
6242 				if (object != locked_object) {
6243 					if (locked_object) {
6244 						vm_object_unlock(locked_object);
6245 						locked_object = VM_OBJECT_NULL;
6246 					}
6247 					if (vm_object_lock_try(object)) {
6248 						locked_object = object;
6249 					}
6250 				}
6251 				if (locked_object == VM_OBJECT_NULL ||
6252 				    (VM_PAGE_WIRED(m1) || m1->vmp_gobbled ||
6253 				    m1->vmp_laundry || m1->vmp_wanted ||
6254 				    m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) ||
6255 				    (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6256 					if (locked_object) {
6257 						vm_object_unlock(locked_object);
6258 						locked_object = VM_OBJECT_NULL;
6259 					}
6260 					tmp_start_idx = cur_idx;
6261 					abort_run = TRUE;
6262 					continue;
6263 				}
6264 
6265 				disconnected = FALSE;
6266 				reusable = FALSE;
6267 
6268 				if ((m1->vmp_reusable ||
6269 				    object->all_reusable) &&
6270 				    (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
6271 				    !m1->vmp_dirty &&
6272 				    !m1->vmp_reference) {
6273 					/* reusable page... */
6274 					refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6275 					disconnected = TRUE;
6276 					if (refmod == 0) {
6277 						/*
6278 						 * ... not reused: can steal
6279 						 * without relocating contents.
6280 						 */
6281 						reusable = TRUE;
6282 					}
6283 				}
6284 
6285 				if ((m1->vmp_pmapped &&
6286 				    !reusable) ||
6287 				    m1->vmp_dirty ||
6288 				    m1->vmp_precious) {
6289 					vm_object_offset_t offset;
6290 
6291 					m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6292 
6293 					if (m2 == VM_PAGE_NULL) {
6294 						if (locked_object) {
6295 							vm_object_unlock(locked_object);
6296 							locked_object = VM_OBJECT_NULL;
6297 						}
6298 						tmp_start_idx = cur_idx;
6299 						abort_run = TRUE;
6300 						continue;
6301 					}
6302 					if (!disconnected) {
6303 						if (m1->vmp_pmapped) {
6304 							refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6305 						} else {
6306 							refmod = 0;
6307 						}
6308 					}
6309 
6310 					/* copy the page's contents */
6311 					pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6312 					/* copy the page's state */
6313 					assert(!VM_PAGE_WIRED(m1));
6314 					assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6315 					assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6316 					assert(!m1->vmp_laundry);
6317 					m2->vmp_reference       = m1->vmp_reference;
6318 					assert(!m1->vmp_gobbled);
6319 					assert(!m1->vmp_private);
6320 					m2->vmp_no_cache        = m1->vmp_no_cache;
6321 					m2->vmp_xpmapped        = 0;
6322 					assert(!m1->vmp_busy);
6323 					assert(!m1->vmp_wanted);
6324 					assert(!m1->vmp_fictitious);
6325 					m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6326 					m2->vmp_wpmapped        = m1->vmp_wpmapped;
6327 					assert(!m1->vmp_free_when_done);
6328 					m2->vmp_absent  = m1->vmp_absent;
6329 					m2->vmp_error   = VMP_ERROR_GET(m1);
6330 					m2->vmp_dirty   = m1->vmp_dirty;
6331 					assert(!m1->vmp_cleaning);
6332 					m2->vmp_precious        = m1->vmp_precious;
6333 					m2->vmp_clustered       = m1->vmp_clustered;
6334 					assert(!m1->vmp_overwriting);
6335 					m2->vmp_restart = m1->vmp_restart;
6336 					m2->vmp_unusual = m1->vmp_unusual;
6337 					m2->vmp_cs_validated = m1->vmp_cs_validated;
6338 					m2->vmp_cs_tainted      = m1->vmp_cs_tainted;
6339 					m2->vmp_cs_nx   = m1->vmp_cs_nx;
6340 
6341 					m2->vmp_realtime = m1->vmp_realtime;
6342 					m1->vmp_realtime = false;
6343 
6344 					/*
6345 					 * If m1 had really been reusable,
6346 					 * we would have just stolen it, so
6347 					 * let's not propagate its "reusable"
6348 					 * bit and assert that m2 is not
6349 					 * marked as "reusable".
6350 					 */
6351 					// m2->vmp_reusable	= m1->vmp_reusable;
6352 					assert(!m2->vmp_reusable);
6353 
6354 					// assert(!m1->vmp_lopage);
6355 
6356 					if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6357 						m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6358 						/*
6359 						 * We just grabbed m2 up above and so it isn't
6360 						 * going to be on any special Q as yet and so
6361 						 * we don't need to 'remove' it from the special
6362 						 * queues. Just resetting the state should be enough.
6363 						 */
6364 						m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6365 					}
6366 
6367 					/*
6368 					 * page may need to be flushed if
6369 					 * it is marshalled into a UPL
6370 					 * that is going to be used by a device
6371 					 * that doesn't support coherency
6372 					 */
6373 					m2->vmp_written_by_kernel = TRUE;
6374 
6375 					/*
6376 					 * make sure we clear the ref/mod state
6377 					 * from the pmap layer... else we risk
6378 					 * inheriting state from the last time
6379 					 * this page was used...
6380 					 */
6381 					pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6382 
6383 					if (refmod & VM_MEM_REFERENCED) {
6384 						m2->vmp_reference = TRUE;
6385 					}
6386 					if (refmod & VM_MEM_MODIFIED) {
6387 						SET_PAGE_DIRTY(m2, TRUE);
6388 					}
6389 					offset = m1->vmp_offset;
6390 
6391 					/*
6392 					 * completely cleans up the state
6393 					 * of the page so that it is ready
6394 					 * to be put onto the free list, or
6395 					 * for this purpose it looks like it
6396 					 * just came off of the free list
6397 					 */
6398 					vm_page_free_prepare(m1);
6399 
6400 					/*
6401 					 * now put the substitute page
6402 					 * on the object
6403 					 */
6404 					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
6405 
6406 					if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6407 						m2->vmp_pmapped = TRUE;
6408 						m2->vmp_wpmapped = TRUE;
6409 
6410 						kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6411 						    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6412 
6413 						assert(kr == KERN_SUCCESS);
6414 
6415 						compressed_pages++;
6416 					} else {
6417 						if (m2->vmp_reference) {
6418 							vm_page_activate(m2);
6419 						} else {
6420 							vm_page_deactivate(m2);
6421 						}
6422 					}
6423 					PAGE_WAKEUP_DONE(m2);
6424 				} else {
6425 					assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6426 
6427 					/*
6428 					 * completely cleans up the state
6429 					 * of the page so that it is ready
6430 					 * to be put onto the free list, or
6431 					 * for this purpose it looks like it
6432 					 * just came off of the free list
6433 					 */
6434 					vm_page_free_prepare(m1);
6435 				}
6436 
6437 				stolen_pages++;
6438 			}
6439 			if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6440 				/*
6441 				 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6442 				 * change it for pages marked as used-by-compressor.
6443 				 */
6444 				vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6445 			}
6446 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6447 			m1->vmp_snext = m;
6448 			m = m1;
6449 		}
6450 		if (locked_object) {
6451 			vm_object_unlock(locked_object);
6452 			locked_object = VM_OBJECT_NULL;
6453 		}
6454 
6455 		if (abort_run == TRUE) {
6456 			/*
6457 			 * want the index of the last
6458 			 * page in this run that was
6459 			 * successfully 'stolen', so back
6460 			 * it up 1 for the auto-decrement on use
6461 			 * and 1 more to bump back over this page
6462 			 */
6463 			page_idx = tmp_start_idx + 2;
6464 			if (page_idx >= vm_pages_count) {
6465 				if (wrapped) {
6466 					if (m != VM_PAGE_NULL) {
6467 						vm_page_unlock_queues();
6468 						vm_page_free_list(m, FALSE);
6469 						vm_page_lock_queues();
6470 						m = VM_PAGE_NULL;
6471 					}
6472 					dumped_run++;
6473 					goto done_scanning;
6474 				}
6475 				page_idx = last_idx = 0;
6476 				wrapped = TRUE;
6477 			}
6478 			abort_run = FALSE;
6479 
6480 			/*
6481 			 * We didn't find a contiguous range but we didn't
6482 			 * start from the very first page.
6483 			 * Start again from the very first page.
6484 			 */
6485 			RESET_STATE_OF_RUN();
6486 
6487 			if (flags & KMA_LOMEM) {
6488 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
6489 			} else {
6490 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6491 			}
6492 
6493 			last_idx = page_idx;
6494 
6495 			if (m != VM_PAGE_NULL) {
6496 				vm_page_unlock_queues();
6497 				vm_page_free_list(m, FALSE);
6498 				vm_page_lock_queues();
6499 				m = VM_PAGE_NULL;
6500 			}
6501 			dumped_run++;
6502 
6503 			vm_free_page_lock();
6504 			/*
6505 			 * reset our free page limit since we
6506 			 * dropped the lock protecting the vm_page_free_queue
6507 			 */
6508 			free_available = vm_page_free_count - vm_page_free_reserved;
6509 			goto retry;
6510 		}
6511 
6512 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6513 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6514 			assert(m1->vmp_wire_count == 0);
6515 
6516 			if (wire == TRUE) {
6517 				m1->vmp_wire_count++;
6518 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
6519 			} else {
6520 				m1->vmp_gobbled = TRUE;
6521 			}
6522 		}
6523 		if (wire == FALSE) {
6524 			vm_page_gobble_count += npages;
6525 		}
6526 
6527 		/*
6528 		 * gobbled pages are also counted as wired pages
6529 		 */
6530 		vm_page_wire_count += npages;
6531 
6532 		assert(vm_page_verify_contiguous(m, npages));
6533 	}
6534 done_scanning:
6535 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6536 
6537 	vm_page_unlock_queues();
6538 
6539 #if DEBUG
6540 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6541 
6542 	tv_end_sec -= tv_start_sec;
6543 	if (tv_end_usec < tv_start_usec) {
6544 		tv_end_sec--;
6545 		tv_end_usec += 1000000;
6546 	}
6547 	tv_end_usec -= tv_start_usec;
6548 	if (tv_end_usec >= 1000000) {
6549 		tv_end_sec++;
6550 		tv_end_usec -= 1000000;
6551 	}
6552 	if (vm_page_find_contig_debug) {
6553 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
6554 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6555 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
6556 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6557 	}
6558 
6559 #endif
6560 #if MACH_ASSERT
6561 	vm_page_verify_free_lists();
6562 #endif
6563 	if (m == NULL && zone_gc_called < 2) {
6564 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6565 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6566 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6567 
6568 		if (consider_buffer_cache_collect != NULL) {
6569 			(void)(*consider_buffer_cache_collect)(1);
6570 		}
6571 
6572 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6573 
6574 		zone_gc_called++;
6575 
6576 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6577 		goto full_scan_again;
6578 	}
6579 
6580 	return m;
6581 }
6582 
6583 /*
6584  *	Allocate a list of contiguous, wired pages.
6585  */
6586 kern_return_t
6587 cpm_allocate(
6588 	vm_size_t       size,
6589 	vm_page_t       *list,
6590 	ppnum_t         max_pnum,
6591 	ppnum_t         pnum_mask,
6592 	boolean_t       wire,
6593 	int             flags)
6594 {
6595 	vm_page_t               pages;
6596 	unsigned int            npages;
6597 
6598 	if (size % PAGE_SIZE != 0) {
6599 		return KERN_INVALID_ARGUMENT;
6600 	}
6601 
6602 	npages = (unsigned int) (size / PAGE_SIZE);
6603 	if (npages != size / PAGE_SIZE) {
6604 		/* 32-bit overflow */
6605 		return KERN_INVALID_ARGUMENT;
6606 	}
6607 
6608 	/*
6609 	 *	Obtain a pointer to a subset of the free
6610 	 *	list large enough to satisfy the request;
6611 	 *	the region will be physically contiguous.
6612 	 */
6613 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6614 
6615 	if (pages == VM_PAGE_NULL) {
6616 		return KERN_NO_SPACE;
6617 	}
6618 	/*
6619 	 * determine need for wakeups
6620 	 */
6621 	if (vm_page_free_count < vm_page_free_min) {
6622 		vm_free_page_lock();
6623 		if (vm_pageout_running == FALSE) {
6624 			vm_free_page_unlock();
6625 			thread_wakeup((event_t) &vm_page_free_wanted);
6626 		} else {
6627 			vm_free_page_unlock();
6628 		}
6629 	}
6630 
6631 	VM_CHECK_MEMORYSTATUS;
6632 
6633 	/*
6634 	 *	The CPM pages should now be available and
6635 	 *	ordered by ascending physical address.
6636 	 */
6637 	assert(vm_page_verify_contiguous(pages, npages));
6638 
6639 	if (flags & KMA_ZERO) {
6640 		for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6641 			vm_page_zero_fill(m);
6642 		}
6643 	}
6644 
6645 	*list = pages;
6646 	return KERN_SUCCESS;
6647 }
6648 
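/*
 * Illustrative usage, not compiled: allocate four wired, zero-filled,
 * physically contiguous pages with no placement constraints (max_pnum == 0
 * means no upper bound, pnum_mask == 0 means no alignment requirement).
 */
#if 0
	vm_page_t pages;

	if (cpm_allocate(4 * PAGE_SIZE, &pages, 0, 0, TRUE, KMA_ZERO) == KERN_SUCCESS) {
		/* "pages" is a NEXT_PAGE()-linked list in ascending physical order */
	}
#endif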
6649 
6650 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6651 
6652 /*
6653  * when working on a 'run' of pages, it is necessary to hold
6654  * the vm_page_queue_lock (a hot global lock) for certain operations
6655  * on the page... however, the majority of the work can be done
6656  * while merely holding the object lock... in fact there are certain
6657  * collections of pages that don't require any work brokered by the
6658  * vm_page_queue_lock... to mitigate the time spent behind the global
6659  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6660  * while doing all of the work that doesn't require the vm_page_queue_lock...
6661  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6662  * necessary work for each page... we will grab the busy bit on the page
6663  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6664  * if it can't immediately take the vm_page_queue_lock in order to compete
6665  * for the locks in the same order that vm_pageout_scan takes them.
6666  * the operation names are modeled after the names of the routines that
6667  * need to be called in order to make the changes very obvious in the
6668  * original loop
6669  */
6670 
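/*
 * Illustrative sketch, not compiled: the two-pass batching pattern described
 * above, roughly as callers like vm_pageout use it.  Work that needs only
 * the object lock happens inline; queue manipulations are recorded in a
 * delayed-work entry and flushed a batch at a time:
 */
#if 0
	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work     *dwp = &dw_array[0];
	int                             dw_count = 0;

	for (m = first_page; m != VM_PAGE_NULL; m = m->vmp_snext) {
		/* ... per-page work requiring only the object lock ... */
		dwp->dw_m = m;
		dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
		dwp++;
		if (++dw_count >= vm_max_delayed_work_limit) {
			vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE,
			    &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
	}
#endif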
6671 void
6672 vm_page_do_delayed_work(
6673 	vm_object_t     object,
6674 	vm_tag_t        tag,
6675 	struct vm_page_delayed_work *dwp,
6676 	int             dw_count)
6677 {
6678 	int             j;
6679 	vm_page_t       m;
6680 	vm_page_t       local_free_q = VM_PAGE_NULL;
6681 
6682 	/*
6683 	 * pageout_scan takes the vm_page_lock_queues first
6684 	 * then tries for the object lock... to avoid what
6685 	 * is effectively a lock inversion, we'll go to the
6686 	 * trouble of taking them in that same order... otherwise
6687 	 * if this object contains the majority of the pages resident
6688 	 * in the UBC (or a small set of large objects actively being
6689 	 * worked on contain the majority of the pages), we could
6690 	 * cause the pageout_scan thread to 'starve' in its attempt
6691 	 * to find pages to move to the free queue, since it has to
6692 	 * successfully acquire the object lock of any candidate page
6693 	 * before it can steal/clean it.
6694 	 */
6695 	if (!vm_page_trylockspin_queues()) {
6696 		vm_object_unlock(object);
6697 
6698 		/*
6699 		 * A "turnstile enabled" vm_pageout_scan can be runnable
6700 		 * for a very long time without getting on a core.
6701 		 * If this is a higher-priority thread, it could end up
6702 		 * waiting here for a very long time out of deference to
6703 		 * pageout_scan, which would like this object after VPS
6704 		 * does a mutex_pause(0).
6705 		 * So we cap the number of yields in the vm_object_lock_avoid()
6706 		 * case to a single mutex_pause(0), which gives vm_pageout_scan
6707 		 * 10us to run and grab the object if needed.
6708 		 */
6709 		vm_page_lockspin_queues();
6710 
6711 		for (j = 0;; j++) {
6712 			if ((!vm_object_lock_avoid(object) ||
6713 			    (vps_dynamic_priority_enabled && (j > 0))) &&
6714 			    _vm_object_lock_try(object)) {
6715 				break;
6716 			}
6717 			vm_page_unlock_queues();
6718 			mutex_pause(j);
6719 			vm_page_lockspin_queues();
6720 		}
6721 	}
6722 	for (j = 0; j < dw_count; j++, dwp++) {
6723 		m = dwp->dw_m;
6724 
6725 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6726 			vm_pageout_throttle_up(m);
6727 		}
6728 #if CONFIG_PHANTOM_CACHE
6729 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6730 			vm_phantom_cache_update(m);
6731 		}
6732 #endif
6733 		if (dwp->dw_mask & DW_vm_page_wire) {
6734 			vm_page_wire(m, tag, FALSE);
6735 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
6736 			boolean_t       queueit;
6737 
6738 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6739 
6740 			vm_page_unwire(m, queueit);
6741 		}
6742 		if (dwp->dw_mask & DW_vm_page_free) {
6743 			vm_page_free_prepare_queues(m);
6744 
6745 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6746 			/*
6747 			 * Add this page to our list of reclaimed pages,
6748 			 * to be freed later.
6749 			 */
6750 			m->vmp_snext = local_free_q;
6751 			local_free_q = m;
6752 		} else {
6753 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6754 				vm_page_deactivate_internal(m, FALSE);
6755 			} else if (dwp->dw_mask & DW_vm_page_activate) {
6756 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6757 					vm_page_activate(m);
6758 				}
6759 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
6760 				vm_page_speculate(m, TRUE);
6761 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
6762 				/*
6763 				 * if we didn't hold the object lock and did this,
6764 				 * we might disconnect the page, then someone might
6765 				 * soft fault it back in, then we would put it on the
6766 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
6767 				 * page on that queue, which we don't want
6768 				 */
6769 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6770 
6771 				if ((refmod_state & VM_MEM_REFERENCED)) {
6772 					/*
6773 					 * this page has been touched since it got cleaned; let's activate it
6774 					 * if it hasn't already been
6775 					 */
6776 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6777 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6778 
6779 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6780 						vm_page_activate(m);
6781 					}
6782 				} else {
6783 					m->vmp_reference = FALSE;
6784 					vm_page_enqueue_cleaned(m);
6785 				}
6786 			} else if (dwp->dw_mask & DW_vm_page_lru) {
6787 				vm_page_lru(m);
6788 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6789 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6790 					vm_page_queues_remove(m, TRUE);
6791 				}
6792 			}
6793 			if (dwp->dw_mask & DW_set_reference) {
6794 				m->vmp_reference = TRUE;
6795 			} else if (dwp->dw_mask & DW_clear_reference) {
6796 				m->vmp_reference = FALSE;
6797 			}
6798 
6799 			if (dwp->dw_mask & DW_move_page) {
6800 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6801 					vm_page_queues_remove(m, FALSE);
6802 
6803 					assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
6804 
6805 					vm_page_enqueue_inactive(m, FALSE);
6806 				}
6807 			}
6808 			if (dwp->dw_mask & DW_clear_busy) {
6809 				m->vmp_busy = FALSE;
6810 			}
6811 
6812 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6813 				PAGE_WAKEUP(m);
6814 			}
6815 		}
6816 	}
6817 	vm_page_unlock_queues();
6818 
6819 	if (local_free_q) {
6820 		vm_page_free_list(local_free_q, TRUE);
6821 	}
6822 
6823 	VM_CHECK_MEMORYSTATUS;
6824 }
6825 
6826 __abortlike
6827 static void
6828 __vm_page_alloc_list_failed_panic(
6829 	vm_size_t       page_count,
6830 	kma_flags_t     flags,
6831 	kern_return_t   kr)
6832 {
6833 	panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
6834 	    (size_t)page_count, flags, kr);
6835 }
6836 
6837 kern_return_t
6838 vm_page_alloc_list(
6839 	vm_size_t   page_count,
6840 	kma_flags_t flags,
6841 	vm_page_t  *list)
6842 {
6843 	vm_page_t       page_list = VM_PAGE_NULL;
6844 	vm_page_t       mem;
6845 	kern_return_t   kr = KERN_SUCCESS;
6846 	int             page_grab_count = 0;
6847 #if DEVELOPMENT || DEBUG
6848 	task_t          task;
6849 #endif /* DEVELOPMENT || DEBUG */
6850 
6851 	for (vm_size_t i = 0; i < page_count; i++) {
6852 		for (;;) {
6853 			if (flags & KMA_LOMEM) {
6854 				mem = vm_page_grablo();
6855 			} else {
6856 				mem = vm_page_grab();
6857 			}
6858 
6859 			if (mem != VM_PAGE_NULL) {
6860 				break;
6861 			}
6862 
6863 			if (flags & KMA_NOPAGEWAIT) {
6864 				kr = KERN_RESOURCE_SHORTAGE;
6865 				goto out;
6866 			}
6867 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
6868 				kr = KERN_RESOURCE_SHORTAGE;
6869 				goto out;
6870 			}
6871 
6872 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
6873 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
6874 
6875 			if ((flags & KMA_NOFAIL) == 0) {
6876 				uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
6877 				if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
6878 					kr = KERN_RESOURCE_SHORTAGE;
6879 					goto out;
6880 				}
6881 			}
6882 			VM_PAGE_WAIT();
6883 		}
6884 
6885 		page_grab_count++;
6886 		mem->vmp_snext = page_list;
6887 		page_list = mem;
6888 	}
6889 
6890 	if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
6891 		for (mem = page_list; mem; mem = mem->vmp_snext) {
6892 			vm_page_zero_fill(mem);
6893 		}
6894 	}
6895 
6896 out:
6897 #if DEBUG || DEVELOPMENT
6898 	task = current_task_early();
6899 	if (task != NULL) {
6900 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
6901 	}
6902 #endif
6903 
6904 	if (kr == KERN_SUCCESS) {
6905 		*list = page_list;
6906 	} else if (flags & KMA_NOFAIL) {
6907 		__vm_page_alloc_list_failed_panic(page_count, flags, kr);
6908 	} else {
6909 		vm_page_free_list(page_list, FALSE);
6910 	}
6911 
6912 	return kr;
6913 }
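/*
 * Illustrative sketch (compiled out): allocating and releasing a page list.
 * With KMA_NOPAGEWAIT the call fails with KERN_RESOURCE_SHORTAGE instead of
 * blocking in VM_PAGE_WAIT(); with KMA_NOFAIL it panics rather than fail.
 */
#if 0
static kern_return_t
vm_page_alloc_list_example(void)
{
	vm_page_t       list = VM_PAGE_NULL;
	kern_return_t   kr;

	kr = vm_page_alloc_list(16, KMA_ZERO | KMA_NOPAGEWAIT, &list);
	if (kr != KERN_SUCCESS) {
		return kr;      /* nothing to free; the callee cleaned up */
	}
	/* ... use the 16 zero-filled pages, linked through vmp_snext ... */
	vm_page_free_list(list, FALSE);
	return KERN_SUCCESS;
}
#endif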
6914 
6915 void
6916 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
6917 {
6918 	page->vmp_offset = offset;
6919 }
6920 
6921 vm_page_t
6922 vm_page_get_next(vm_page_t page)
6923 {
6924 	return page->vmp_snext;
6925 }
6926 
6927 vm_object_offset_t
6928 vm_page_get_offset(vm_page_t page)
6929 {
6930 	return page->vmp_offset;
6931 }
6932 
6933 ppnum_t
6934 vm_page_get_phys_page(vm_page_t page)
6935 {
6936 	return VM_PAGE_GET_PHYS_PAGE(page);
6937 }
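/*
 * The accessors above let code outside this file walk a list returned by
 * vm_page_alloc_list() without reaching into struct vm_page, e.g.:
 *
 *	for (vm_page_t p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *		ppnum_t pn = vm_page_get_phys_page(p);
 *		...
 *	}
 */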
6938 
6939 
6940 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6941 
6942 #if HIBERNATION
6943 
6944 static vm_page_t hibernate_gobble_queue;
6945 
6946 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
6947 static int  hibernate_flush_dirty_pages(int);
6948 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
6949 
6950 void hibernate_flush_wait(void);
6951 void hibernate_mark_in_progress(void);
6952 void hibernate_clear_in_progress(void);
6953 
6954 void            hibernate_free_range(int, int);
6955 void            hibernate_hash_insert_page(vm_page_t);
6956 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
6957 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
6958 ppnum_t         hibernate_lookup_paddr(unsigned int);
6959 
6960 struct hibernate_statistics {
6961 	int hibernate_considered;
6962 	int hibernate_reentered_on_q;
6963 	int hibernate_found_dirty;
6964 	int hibernate_skipped_cleaning;
6965 	int hibernate_skipped_transient;
6966 	int hibernate_skipped_precious;
6967 	int hibernate_skipped_external;
6968 	int hibernate_queue_nolock;
6969 	int hibernate_queue_paused;
6970 	int hibernate_throttled;
6971 	int hibernate_throttle_timeout;
6972 	int hibernate_drained;
6973 	int hibernate_drain_timeout;
6974 	int cd_lock_failed;
6975 	int cd_found_precious;
6976 	int cd_found_wired;
6977 	int cd_found_busy;
6978 	int cd_found_unusual;
6979 	int cd_found_cleaning;
6980 	int cd_found_laundry;
6981 	int cd_found_dirty;
6982 	int cd_found_xpmapped;
6983 	int cd_skipped_xpmapped;
6984 	int cd_local_free;
6985 	int cd_total_free;
6986 	int cd_vm_page_wire_count;
6987 	int cd_vm_struct_pages_unneeded;
6988 	int cd_pages;
6989 	int cd_discarded;
6990 	int cd_count_wire;
6991 } hibernate_stats;
6992 
6993 
6994 /*
6995  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
6996  * so that we don't overrun the estimated image size, which would
6997  * result in a hibernation failure.
6998  *
6999  * We use a size value instead of pages because we don't want to take up more space
7000  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7001  * to have that additional space available.
7002  *
7003  * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7004  * xpmapped size.
7005  */
7006 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
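/*
 * For reference: 160MB / 4K pages = 40960 pages (roughly the old 40000-page
 * X86 cap), while 160MB / 16K pages = 10240 pages, so the on-disk budget
 * stays constant across page sizes.
 */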
7007 
7008 
7009 static int
7010 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7011 {
7012 	wait_result_t   wait_result;
7013 
7014 	vm_page_lock_queues();
7015 
7016 	while (!vm_page_queue_empty(&q->pgo_pending)) {
7017 		q->pgo_draining = TRUE;
7018 
7019 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7020 
7021 		vm_page_unlock_queues();
7022 
7023 		wait_result = thread_block(THREAD_CONTINUE_NULL);
7024 
7025 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7026 			hibernate_stats.hibernate_drain_timeout++;
7027 
7028 			if (q == &vm_pageout_queue_external) {
7029 				return 0;
7030 			}
7031 
7032 			return 1;
7033 		}
7034 		vm_page_lock_queues();
7035 
7036 		hibernate_stats.hibernate_drained++;
7037 	}
7038 	vm_page_unlock_queues();
7039 
7040 	return 0;
7041 }
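/*
 * The handshake above: pgo_draining tells the pageout thread a waiter wants
 * the queue emptied; the waiter then blocks on the (&q->pgo_laundry + 1)
 * event with a 5 second timeout (5000 ticks at 1ms scale).  On timeout, the
 * external queue is tolerated (return 0) but a stuck internal queue aborts
 * the drain (return 1).
 */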
7042 
7043 
7044 boolean_t hibernate_skip_external = FALSE;
7045 
7046 static int
7047 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7048 {
7049 	vm_page_t       m;
7050 	vm_object_t     l_object = NULL;
7051 	vm_object_t     m_object = NULL;
7052 	int             refmod_state = 0;
7053 	int             try_failed_count = 0;
7054 	int             retval = 0;
7055 	int             current_run = 0;
7056 	struct  vm_pageout_queue *iq;
7057 	struct  vm_pageout_queue *eq;
7058 	struct  vm_pageout_queue *tq;
7059 
7060 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7061 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7062 
7063 	iq = &vm_pageout_queue_internal;
7064 	eq = &vm_pageout_queue_external;
7065 
7066 	vm_page_lock_queues();
7067 
7068 	while (qcount && !vm_page_queue_empty(q)) {
7069 		if (current_run++ == 1000) {
7070 			if (hibernate_should_abort()) {
7071 				retval = 1;
7072 				break;
7073 			}
7074 			current_run = 0;
7075 		}
7076 
7077 		m = (vm_page_t) vm_page_queue_first(q);
7078 		m_object = VM_PAGE_OBJECT(m);
7079 
7080 		/*
7081 		 * check to see if we currently are working
7082 		 * with the same object... if so, we've
7083 		 * already got the lock
7084 		 */
7085 		if (m_object != l_object) {
7086 			/*
7087 			 * the object associated with candidate page is
7088 			 * different from the one we were just working
7089 			 * with... dump the lock if we still own it
7090 			 */
7091 			if (l_object != NULL) {
7092 				vm_object_unlock(l_object);
7093 				l_object = NULL;
7094 			}
7095 			/*
7096 			 * Try to lock object; since we've already got the
7097 			 * page queues lock, we can only 'try' for this one.
7098 			 * if the 'try' fails, we need to do a mutex_pause
7099 			 * to allow the owner of the object lock a chance to
7100 			 * run...
7101 			 */
7102 			if (!vm_object_lock_try_scan(m_object)) {
7103 				if (try_failed_count > 20) {
7104 					hibernate_stats.hibernate_queue_nolock++;
7105 
7106 					goto reenter_pg_on_q;
7107 				}
7108 
7109 				vm_page_unlock_queues();
7110 				mutex_pause(try_failed_count++);
7111 				vm_page_lock_queues();
7112 
7113 				hibernate_stats.hibernate_queue_paused++;
7114 				continue;
7115 			} else {
7116 				l_object = m_object;
7117 			}
7118 		}
7119 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7120 			/*
7121 			 * page is not to be cleaned
7122 			 * put it back on the head of its queue
7123 			 */
7124 			if (m->vmp_cleaning) {
7125 				hibernate_stats.hibernate_skipped_cleaning++;
7126 			} else {
7127 				hibernate_stats.hibernate_skipped_transient++;
7128 			}
7129 
7130 			goto reenter_pg_on_q;
7131 		}
7132 		if (m_object->vo_copy == VM_OBJECT_NULL) {
7133 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7134 				/*
7135 				 * let the normal hibernate image path
7136 				 * deal with these
7137 				 */
7138 				goto reenter_pg_on_q;
7139 			}
7140 		}
7141 		if (!m->vmp_dirty && m->vmp_pmapped) {
7142 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7143 
7144 			if ((refmod_state & VM_MEM_MODIFIED)) {
7145 				SET_PAGE_DIRTY(m, FALSE);
7146 			}
7147 		} else {
7148 			refmod_state = 0;
7149 		}
7150 
7151 		if (!m->vmp_dirty) {
7152 			/*
7153 			 * page is not to be cleaned
7154 			 * put it back on the head of its queue
7155 			 */
7156 			if (m->vmp_precious) {
7157 				hibernate_stats.hibernate_skipped_precious++;
7158 			}
7159 
7160 			goto reenter_pg_on_q;
7161 		}
7162 
7163 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7164 			hibernate_stats.hibernate_skipped_external++;
7165 
7166 			goto reenter_pg_on_q;
7167 		}
7168 		tq = NULL;
7169 
7170 		if (m_object->internal) {
7171 			if (VM_PAGE_Q_THROTTLED(iq)) {
7172 				tq = iq;
7173 			}
7174 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7175 			tq = eq;
7176 		}
7177 
7178 		if (tq != NULL) {
7179 			wait_result_t   wait_result;
7180 			int             wait_count = 5;
7181 
7182 			if (l_object != NULL) {
7183 				vm_object_unlock(l_object);
7184 				l_object = NULL;
7185 			}
7186 
7187 			while (retval == 0) {
7188 				tq->pgo_throttled = TRUE;
7189 
7190 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7191 
7192 				vm_page_unlock_queues();
7193 
7194 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7195 
7196 				vm_page_lock_queues();
7197 
7198 				if (wait_result != THREAD_TIMED_OUT) {
7199 					break;
7200 				}
7201 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7202 					break;
7203 				}
7204 
7205 				if (hibernate_should_abort()) {
7206 					retval = 1;
7207 				}
7208 
7209 				if (--wait_count == 0) {
7210 					hibernate_stats.hibernate_throttle_timeout++;
7211 
7212 					if (tq == eq) {
7213 						hibernate_skip_external = TRUE;
7214 						break;
7215 					}
7216 					retval = 1;
7217 				}
7218 			}
7219 			if (retval) {
7220 				break;
7221 			}
7222 
7223 			hibernate_stats.hibernate_throttled++;
7224 
7225 			continue;
7226 		}
7227 		/*
7228 		 * we've already factored out pages in the laundry which
7229 		 * means this page can't be on the pageout queue so it's
7230 		 * safe to do the vm_page_queues_remove
7231 		 */
7232 		vm_page_queues_remove(m, TRUE);
7233 
7234 		if (m_object->internal == TRUE) {
7235 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7236 		}
7237 
7238 		vm_pageout_cluster(m);
7239 
7240 		hibernate_stats.hibernate_found_dirty++;
7241 
7242 		goto next_pg;
7243 
7244 reenter_pg_on_q:
7245 		vm_page_queue_remove(q, m, vmp_pageq);
7246 		vm_page_queue_enter(q, m, vmp_pageq);
7247 
7248 		hibernate_stats.hibernate_reentered_on_q++;
7249 next_pg:
7250 		hibernate_stats.hibernate_considered++;
7251 
7252 		qcount--;
7253 		try_failed_count = 0;
7254 	}
7255 	if (l_object != NULL) {
7256 		vm_object_unlock(l_object);
7257 		l_object = NULL;
7258 	}
7259 
7260 	vm_page_unlock_queues();
7261 
7262 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7263 
7264 	return retval;
7265 }
7266 
7267 
7268 static int
7269 hibernate_flush_dirty_pages(int pass)
7270 {
7271 	struct vm_speculative_age_q     *aq;
7272 	uint32_t        i;
7273 
7274 	if (vm_page_local_q) {
7275 		zpercpu_foreach_cpu(lid) {
7276 			vm_page_reactivate_local(lid, TRUE, FALSE);
7277 		}
7278 	}
7279 
7280 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7281 		int             qcount;
7282 		vm_page_t       m;
7283 
7284 		aq = &vm_page_queue_speculative[i];
7285 
7286 		if (vm_page_queue_empty(&aq->age_q)) {
7287 			continue;
7288 		}
7289 		qcount = 0;
7290 
7291 		vm_page_lockspin_queues();
7292 
7293 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7294 			qcount++;
7295 		}
7296 		vm_page_unlock_queues();
7297 
7298 		if (qcount) {
7299 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7300 				return 1;
7301 			}
7302 		}
7303 	}
7304 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7305 		return 1;
7306 	}
7307 	/* XXX FBDP TODO: flush secluded queue */
7308 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7309 		return 1;
7310 	}
7311 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7312 		return 1;
7313 	}
7314 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7315 		return 1;
7316 	}
7317 
7318 	if (pass == 1) {
7319 		vm_compressor_record_warmup_start();
7320 	}
7321 
7322 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7323 		if (pass == 1) {
7324 			vm_compressor_record_warmup_end();
7325 		}
7326 		return 1;
7327 	}
7328 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7329 		if (pass == 1) {
7330 			vm_compressor_record_warmup_end();
7331 		}
7332 		return 1;
7333 	}
7334 	if (pass == 1) {
7335 		vm_compressor_record_warmup_end();
7336 	}
7337 
7338 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7339 		return 1;
7340 	}
7341 
7342 	return 0;
7343 }
7344 
7345 
7346 void
7347 hibernate_reset_stats()
7348 {
7349 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7350 }
7351 
7352 
7353 int
7354 hibernate_flush_memory()
7355 {
7356 	int     retval;
7357 
7358 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7359 
7360 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7361 
7362 	hibernate_cleaning_in_progress = TRUE;
7363 	hibernate_skip_external = FALSE;
7364 
7365 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7366 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7367 
7368 		vm_compressor_flush();
7369 
7370 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7371 
7372 		if (consider_buffer_cache_collect != NULL) {
7373 			unsigned int orig_wire_count;
7374 
7375 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7376 			orig_wire_count = vm_page_wire_count;
7377 
7378 			(void)(*consider_buffer_cache_collect)(1);
7379 			zone_gc(ZONE_GC_DRAIN);
7380 
7381 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7382 
7383 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7384 		}
7385 	}
7386 	hibernate_cleaning_in_progress = FALSE;
7387 
7388 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7389 
7390 	if (retval) {
7391 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7392 	}
7393 
7394 
7395 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7396 	    hibernate_stats.hibernate_considered,
7397 	    hibernate_stats.hibernate_reentered_on_q,
7398 	    hibernate_stats.hibernate_found_dirty);
7399 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7400 	    hibernate_stats.hibernate_skipped_cleaning,
7401 	    hibernate_stats.hibernate_skipped_transient,
7402 	    hibernate_stats.hibernate_skipped_precious,
7403 	    hibernate_stats.hibernate_skipped_external,
7404 	    hibernate_stats.hibernate_queue_nolock);
7405 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7406 	    hibernate_stats.hibernate_queue_paused,
7407 	    hibernate_stats.hibernate_throttled,
7408 	    hibernate_stats.hibernate_throttle_timeout,
7409 	    hibernate_stats.hibernate_drained,
7410 	    hibernate_stats.hibernate_drain_timeout);
7411 
7412 	return retval;
7413 }
7414 
7415 
7416 static void
7417 hibernate_page_list_zero(hibernate_page_list_t *list)
7418 {
7419 	uint32_t             bank;
7420 	hibernate_bitmap_t * bitmap;
7421 
7422 	bitmap = &list->bank_bitmap[0];
7423 	for (bank = 0; bank < list->bank_count; bank++) {
7424 		uint32_t last_bit;
7425 
7426 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7427 		// set out-of-bound bits at end of bitmap.
7428 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7429 		if (last_bit) {
7430 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7431 		}
7432 
7433 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7434 	}
7435 }
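/*
 * Worked example of the out-of-bound fill above: a bank spanning 35 pages
 * uses two 32-bit words; last_bit = (35 & 31) = 3, so the final word becomes
 * (0xFFFFFFFF >> 3) = 0x1FFFFFFF -- the 29 bit positions with no backing
 * page are set (set => not saved) while the 3 valid pages stay zero
 * (zero => save by default).
 */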
7436 
7437 void
7438 hibernate_free_gobble_pages(void)
7439 {
7440 	vm_page_t m, next;
7441 	uint32_t  count = 0;
7442 
7443 	m = (vm_page_t) hibernate_gobble_queue;
7444 	while (m) {
7445 		next = m->vmp_snext;
7446 		vm_page_free(m);
7447 		count++;
7448 		m = next;
7449 	}
7450 	hibernate_gobble_queue = VM_PAGE_NULL;
7451 
7452 	if (count) {
7453 		HIBLOG("Freed %d pages\n", count);
7454 	}
7455 }
7456 
7457 static boolean_t
7458 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7459 {
7460 	vm_object_t object = NULL;
7461 	int                  refmod_state;
7462 	boolean_t            discard = FALSE;
7463 
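	/*
	 * single-pass do/while (FALSE): each 'break' below bails out of the
	 * checks and falls through to the common unlock/return path.
	 */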
7464 	do {
7465 		if (m->vmp_private) {
7466 			panic("hibernate_consider_discard: private");
7467 		}
7468 
7469 		object = VM_PAGE_OBJECT(m);
7470 
7471 		if (!vm_object_lock_try(object)) {
7472 			object = NULL;
7473 			if (!preflight) {
7474 				hibernate_stats.cd_lock_failed++;
7475 			}
7476 			break;
7477 		}
7478 		if (VM_PAGE_WIRED(m)) {
7479 			if (!preflight) {
7480 				hibernate_stats.cd_found_wired++;
7481 			}
7482 			break;
7483 		}
7484 		if (m->vmp_precious) {
7485 			if (!preflight) {
7486 				hibernate_stats.cd_found_precious++;
7487 			}
7488 			break;
7489 		}
7490 		if (m->vmp_busy || !object->alive) {
7491 			/*
7492 			 *	Somebody is playing with this page.
7493 			 */
7494 			if (!preflight) {
7495 				hibernate_stats.cd_found_busy++;
7496 			}
7497 			break;
7498 		}
7499 		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7500 			/*
7501 			 * If it's unusual in any way, ignore it
7502 			 */
7503 			if (!preflight) {
7504 				hibernate_stats.cd_found_unusual++;
7505 			}
7506 			break;
7507 		}
7508 		if (m->vmp_cleaning) {
7509 			if (!preflight) {
7510 				hibernate_stats.cd_found_cleaning++;
7511 			}
7512 			break;
7513 		}
7514 		if (m->vmp_laundry) {
7515 			if (!preflight) {
7516 				hibernate_stats.cd_found_laundry++;
7517 			}
7518 			break;
7519 		}
7520 		if (!m->vmp_dirty) {
7521 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7522 
7523 			if (refmod_state & VM_MEM_REFERENCED) {
7524 				m->vmp_reference = TRUE;
7525 			}
7526 			if (refmod_state & VM_MEM_MODIFIED) {
7527 				SET_PAGE_DIRTY(m, FALSE);
7528 			}
7529 		}
7530 
7531 		/*
7532 		 * If it's clean or purgeable we can discard the page on wakeup.
7533 		 */
7534 		discard = (!m->vmp_dirty)
7535 		    || (VM_PURGABLE_VOLATILE == object->purgable)
7536 		    || (VM_PURGABLE_EMPTY == object->purgable);
7537 
7538 
7539 		if (discard == FALSE) {
7540 			if (!preflight) {
7541 				hibernate_stats.cd_found_dirty++;
7542 			}
7543 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7544 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7545 				if (!preflight) {
7546 					hibernate_stats.cd_found_xpmapped++;
7547 				}
7548 				discard = FALSE;
7549 			} else {
7550 				if (!preflight) {
7551 					hibernate_stats.cd_skipped_xpmapped++;
7552 				}
7553 			}
7554 		}
7555 	} while (FALSE);
7556 
7557 	if (object) {
7558 		vm_object_unlock(object);
7559 	}
7560 
7561 	return discard;
7562 }
7563 
7564 
7565 static void
7566 hibernate_discard_page(vm_page_t m)
7567 {
7568 	vm_object_t m_object;
7569 
7570 	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7571 		/*
7572 		 * If it's unusual in anyway, ignore
7573 		 * If it's unusual in any way, ignore it
7574 		return;
7575 	}
7576 
7577 	m_object = VM_PAGE_OBJECT(m);
7578 
7579 #if MACH_ASSERT || DEBUG
7580 	if (!vm_object_lock_try(m_object)) {
7581 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7582 	}
7583 #else
7584 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
7585 	 *  makes sure these locks are uncontended before sleep */
7586 #endif /* MACH_ASSERT || DEBUG */
7587 
7588 	if (m->vmp_pmapped == TRUE) {
7589 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7590 	}
7591 
7592 	if (m->vmp_laundry) {
7593 		panic("hibernate_discard_page(%p) laundry", m);
7594 	}
7595 	if (m->vmp_private) {
7596 		panic("hibernate_discard_page(%p) private", m);
7597 	}
7598 	if (m->vmp_fictitious) {
7599 		panic("hibernate_discard_page(%p) fictitious", m);
7600 	}
7601 
7602 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7603 		/* object should be on a queue */
7604 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7605 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7606 		assert(old_queue);
7607 		if (m_object->purgeable_when_ripe) {
7608 			vm_purgeable_token_delete_first(old_queue);
7609 		}
7610 		vm_object_lock_assert_exclusive(m_object);
7611 		m_object->purgable = VM_PURGABLE_EMPTY;
7612 
7613 		/*
7614 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
7615 		 * accounted in the "volatile" ledger, so no change here.
7616 		 * We have to update vm_page_purgeable_count, though, since we're
7617 		 * effectively purging this object.
7618 		 */
7619 		unsigned int delta;
7620 		assert(m_object->resident_page_count >= m_object->wired_page_count);
7621 		delta = (m_object->resident_page_count - m_object->wired_page_count);
7622 		assert(vm_page_purgeable_count >= delta);
7623 		assert(delta > 0);
7624 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7625 	}
7626 
7627 	vm_page_free(m);
7628 
7629 #if MACH_ASSERT || DEBUG
7630 	vm_object_unlock(m_object);
7631 #endif  /* MACH_ASSERT || DEBUG */
7632 }
7633 
7634 /*
7635  *  Grab locks for hibernate_page_list_setall()
7636  */
7637 void
7638 hibernate_vm_lock_queues(void)
7639 {
7640 	vm_object_lock(compressor_object);
7641 	vm_page_lock_queues();
7642 	vm_free_page_lock();
7643 	lck_mtx_lock(&vm_purgeable_queue_lock);
7644 
7645 	if (vm_page_local_q) {
7646 		zpercpu_foreach(lq, vm_page_local_q) {
7647 			VPL_LOCK(&lq->vpl_lock);
7648 		}
7649 	}
7650 }
7651 
7652 void
7653 hibernate_vm_unlock_queues(void)
7654 {
7655 	if (vm_page_local_q) {
7656 		zpercpu_foreach(lq, vm_page_local_q) {
7657 			VPL_UNLOCK(&lq->vpl_lock);
7658 		}
7659 	}
7660 	lck_mtx_unlock(&vm_purgeable_queue_lock);
7661 	vm_free_page_unlock();
7662 	vm_page_unlock_queues();
7663 	vm_object_unlock(compressor_object);
7664 }
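/*
 * Note the fixed acquisition order above -- compressor object, page queues,
 * free-page lock, purgeable queue lock, then the per-cpu local queue locks --
 * and that hibernate_vm_unlock_queues() releases them in exactly the reverse
 * order.
 */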
7665 
7666 /*
7667  *  Bits zero in the bitmaps => page needs to be saved. All pages default to being saved;
7668  *  pages known to VM to not need saving are subtracted.
7669  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
7670  */
7671 
7672 void
7673 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7674     hibernate_page_list_t * page_list_wired,
7675     hibernate_page_list_t * page_list_pal,
7676     boolean_t preflight,
7677     boolean_t will_discard,
7678     uint32_t * pagesOut)
7679 {
7680 	uint64_t start, end, nsec;
7681 	vm_page_t m;
7682 	vm_page_t next;
7683 	uint32_t pages = page_list->page_count;
7684 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7685 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7686 	uint32_t count_wire = pages;
7687 	uint32_t count_discard_active    = 0;
7688 	uint32_t count_discard_inactive  = 0;
7689 	uint32_t count_retired = 0;
7690 	uint32_t count_discard_cleaned   = 0;
7691 	uint32_t count_discard_purgeable = 0;
7692 	uint32_t count_discard_speculative = 0;
7693 	uint32_t count_discard_vm_struct_pages = 0;
7694 	uint32_t i;
7695 	uint32_t             bank;
7696 	hibernate_bitmap_t * bitmap;
7697 	hibernate_bitmap_t * bitmap_wired;
7698 	boolean_t                    discard_all;
7699 	boolean_t            discard = FALSE;
7700 
7701 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7702 
7703 	if (preflight) {
7704 		page_list       = NULL;
7705 		page_list_wired = NULL;
7706 		page_list_pal   = NULL;
7707 		discard_all     = FALSE;
7708 	} else {
7709 		discard_all     = will_discard;
7710 	}
7711 
7712 #if MACH_ASSERT || DEBUG
7713 	if (!preflight) {
7714 		assert(hibernate_vm_locks_are_safe());
7715 		vm_page_lock_queues();
7716 		if (vm_page_local_q) {
7717 			zpercpu_foreach(lq, vm_page_local_q) {
7718 				VPL_LOCK(&lq->vpl_lock);
7719 			}
7720 		}
7721 	}
7722 #endif  /* MACH_ASSERT || DEBUG */
7723 
7724 
7725 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7726 
7727 	clock_get_uptime(&start);
7728 
7729 	if (!preflight) {
7730 		hibernate_page_list_zero(page_list);
7731 		hibernate_page_list_zero(page_list_wired);
7732 		hibernate_page_list_zero(page_list_pal);
7733 
7734 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7735 		hibernate_stats.cd_pages = pages;
7736 	}
7737 
7738 	if (vm_page_local_q) {
7739 		zpercpu_foreach_cpu(lid) {
7740 			vm_page_reactivate_local(lid, TRUE, !preflight);
7741 		}
7742 	}
7743 
7744 	if (preflight) {
7745 		vm_object_lock(compressor_object);
7746 		vm_page_lock_queues();
7747 		vm_free_page_lock();
7748 	}
7749 
7750 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7751 
7752 	hibernation_vmqueues_inspection = TRUE;
7753 
7754 	m = (vm_page_t) hibernate_gobble_queue;
7755 	while (m) {
7756 		pages--;
7757 		count_wire--;
7758 		if (!preflight) {
7759 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7760 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7761 		}
7762 		m = m->vmp_snext;
7763 	}
7764 
7765 	if (!preflight) {
7766 		percpu_foreach(free_pages_head, free_pages) {
7767 			for (m = *free_pages_head; m; m = m->vmp_snext) {
7768 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7769 
7770 				pages--;
7771 				count_wire--;
7772 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7773 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7774 
7775 				hibernate_stats.cd_local_free++;
7776 				hibernate_stats.cd_total_free++;
7777 			}
7778 		}
7779 	}
7780 
7781 	for (i = 0; i < vm_colors; i++) {
7782 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7783 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7784 
7785 			pages--;
7786 			count_wire--;
7787 			if (!preflight) {
7788 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7789 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7790 
7791 				hibernate_stats.cd_total_free++;
7792 			}
7793 		}
7794 	}
7795 
7796 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7797 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7798 
7799 		pages--;
7800 		count_wire--;
7801 		if (!preflight) {
7802 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7803 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7804 
7805 			hibernate_stats.cd_total_free++;
7806 		}
7807 	}
7808 
7809 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
7810 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
7811 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
7812 
7813 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7814 		discard = FALSE;
7815 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
7816 		    && hibernate_consider_discard(m, preflight)) {
7817 			if (!preflight) {
7818 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7819 			}
7820 			count_discard_inactive++;
7821 			discard = discard_all;
7822 		} else {
7823 			count_throttled++;
7824 		}
7825 		count_wire--;
7826 		if (!preflight) {
7827 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7828 		}
7829 
7830 		if (discard) {
7831 			hibernate_discard_page(m);
7832 		}
7833 		m = next;
7834 	}
7835 
7836 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
7837 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7838 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7839 
7840 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7841 		discard = FALSE;
7842 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7843 		    hibernate_consider_discard(m, preflight)) {
7844 			if (!preflight) {
7845 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7846 			}
7847 			if (m->vmp_dirty) {
7848 				count_discard_purgeable++;
7849 			} else {
7850 				count_discard_inactive++;
7851 			}
7852 			discard = discard_all;
7853 		} else {
7854 			count_anonymous++;
7855 		}
7856 		count_wire--;
7857 		if (!preflight) {
7858 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7859 		}
7860 		if (discard) {
7861 			hibernate_discard_page(m);
7862 		}
7863 		m = next;
7864 	}
7865 
7866 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7867 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7868 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7869 
7870 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7871 		discard = FALSE;
7872 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7873 		    hibernate_consider_discard(m, preflight)) {
7874 			if (!preflight) {
7875 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7876 			}
7877 			if (m->vmp_dirty) {
7878 				count_discard_purgeable++;
7879 			} else {
7880 				count_discard_cleaned++;
7881 			}
7882 			discard = discard_all;
7883 		} else {
7884 			count_cleaned++;
7885 		}
7886 		count_wire--;
7887 		if (!preflight) {
7888 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7889 		}
7890 		if (discard) {
7891 			hibernate_discard_page(m);
7892 		}
7893 		m = next;
7894 	}
7895 
7896 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7897 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7898 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7899 
7900 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7901 		discard = FALSE;
7902 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
7903 		    hibernate_consider_discard(m, preflight)) {
7904 			if (!preflight) {
7905 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7906 			}
7907 			if (m->vmp_dirty) {
7908 				count_discard_purgeable++;
7909 			} else {
7910 				count_discard_active++;
7911 			}
7912 			discard = discard_all;
7913 		} else {
7914 			count_active++;
7915 		}
7916 		count_wire--;
7917 		if (!preflight) {
7918 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7919 		}
7920 		if (discard) {
7921 			hibernate_discard_page(m);
7922 		}
7923 		m = next;
7924 	}
7925 
7926 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7927 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7928 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7929 
7930 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7931 		discard = FALSE;
7932 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7933 		    hibernate_consider_discard(m, preflight)) {
7934 			if (!preflight) {
7935 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7936 			}
7937 			if (m->vmp_dirty) {
7938 				count_discard_purgeable++;
7939 			} else {
7940 				count_discard_inactive++;
7941 			}
7942 			discard = discard_all;
7943 		} else {
7944 			count_inactive++;
7945 		}
7946 		count_wire--;
7947 		if (!preflight) {
7948 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7949 		}
7950 		if (discard) {
7951 			hibernate_discard_page(m);
7952 		}
7953 		m = next;
7954 	}
7955 	/* XXX FBDP TODO: secluded queue */
7956 
7957 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7958 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7959 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7960 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
7961 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7962 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
7963 
7964 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7965 			discard = FALSE;
7966 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7967 			    hibernate_consider_discard(m, preflight)) {
7968 				if (!preflight) {
7969 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7970 				}
7971 				count_discard_speculative++;
7972 				discard = discard_all;
7973 			} else {
7974 				count_speculative++;
7975 			}
7976 			count_wire--;
7977 			if (!preflight) {
7978 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7979 			}
7980 			if (discard) {
7981 				hibernate_discard_page(m);
7982 			}
7983 			m = next;
7984 		}
7985 	}
7986 
7987 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
7988 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
7989 
7990 		count_compressor++;
7991 		count_wire--;
7992 		if (!preflight) {
7993 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7994 		}
7995 	}
7996 
7997 
7998 	if (preflight == FALSE && discard_all == TRUE) {
7999 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
8000 
8001 		HIBLOG("hibernate_teardown started\n");
8002 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8003 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8004 
8005 		pages -= count_discard_vm_struct_pages;
8006 		count_wire -= count_discard_vm_struct_pages;
8007 
8008 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8009 
8010 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8011 	}
8012 
8013 	if (!preflight) {
8014 		// pull wired from hibernate_bitmap
8015 		bitmap = &page_list->bank_bitmap[0];
8016 		bitmap_wired = &page_list_wired->bank_bitmap[0];
8017 		for (bank = 0; bank < page_list->bank_count; bank++) {
8018 			for (i = 0; i < bitmap->bitmapwords; i++) {
8019 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8020 			}
8021 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8022 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8023 		}
8024 	}
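	/*
	 * Worked example of the merge above (bit zero => save): a page whose
	 * bit is 0 in page_list_wired (save as wired) has that bit set in
	 * ~bitmap_wired, so it gets ORed into page_list and is no longer
	 * saved as pageable.  Every page to be preserved thus lands in
	 * exactly one of the two lists.
	 */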
8025 
8026 	// machine dependent adjustments
8027 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8028 
8029 	if (!preflight) {
8030 		hibernate_stats.cd_count_wire = count_wire;
8031 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8032 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8033 	}
8034 
8035 	clock_get_uptime(&end);
8036 	absolutetime_to_nanoseconds(end - start, &nsec);
8037 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8038 
8039 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8040 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8041 	    discard_all ? "did" : "could",
8042 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8043 
8044 	if (hibernate_stats.cd_skipped_xpmapped) {
8045 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8046 	}
8047 
8048 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8049 
8050 	if (preflight && will_discard) {
8051 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8052 		/*
8053 		 * We try to keep max HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8054 		 * even if these are clean and so we need to size the hibernation image accordingly.
8055 		 *
8056 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8057 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8058 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8059 		 * clean xpmapped pages.
8060 		 *
8061 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8062 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8063 		 */
8064 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
8065 	}
8066 
8067 	hibernation_vmqueues_inspection = FALSE;
8068 
8069 #if MACH_ASSERT || DEBUG
8070 	if (!preflight) {
8071 		if (vm_page_local_q) {
8072 			zpercpu_foreach(lq, vm_page_local_q) {
8073 				VPL_UNLOCK(&lq->vpl_lock);
8074 			}
8075 		}
8076 		vm_page_unlock_queues();
8077 	}
8078 #endif  /* MACH_ASSERT || DEBUG */
8079 
8080 	if (preflight) {
8081 		vm_free_page_unlock();
8082 		vm_page_unlock_queues();
8083 		vm_object_unlock(compressor_object);
8084 	}
8085 
8086 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8087 }
8088 
8089 void
8090 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8091 {
8092 	uint64_t  start, end, nsec;
8093 	vm_page_t m;
8094 	vm_page_t next;
8095 	uint32_t  i;
8096 	uint32_t  count_discard_active    = 0;
8097 	uint32_t  count_discard_inactive  = 0;
8098 	uint32_t  count_discard_purgeable = 0;
8099 	uint32_t  count_discard_cleaned   = 0;
8100 	uint32_t  count_discard_speculative = 0;
8101 
8102 
8103 #if MACH_ASSERT || DEBUG
8104 	vm_page_lock_queues();
8105 	if (vm_page_local_q) {
8106 		zpercpu_foreach(lq, vm_page_local_q) {
8107 			VPL_LOCK(&lq->vpl_lock);
8108 		}
8109 	}
8110 #endif  /* MACH_ASSERT || DEBUG */
8111 
8112 	clock_get_uptime(&start);
8113 
8114 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8115 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8116 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8117 
8118 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8119 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8120 			if (m->vmp_dirty) {
8121 				count_discard_purgeable++;
8122 			} else {
8123 				count_discard_inactive++;
8124 			}
8125 			hibernate_discard_page(m);
8126 		}
8127 		m = next;
8128 	}
8129 
8130 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
8131 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8132 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8133 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8134 
8135 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8136 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8137 				count_discard_speculative++;
8138 				hibernate_discard_page(m);
8139 			}
8140 			m = next;
8141 		}
8142 	}
8143 
8144 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8145 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8146 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8147 
8148 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8149 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8150 			if (m->vmp_dirty) {
8151 				count_discard_purgeable++;
8152 			} else {
8153 				count_discard_inactive++;
8154 			}
8155 			hibernate_discard_page(m);
8156 		}
8157 		m = next;
8158 	}
8159 	/* XXX FBDP TODO: secluded queue */
8160 
8161 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8162 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8163 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8164 
8165 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8166 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8167 			if (m->vmp_dirty) {
8168 				count_discard_purgeable++;
8169 			} else {
8170 				count_discard_active++;
8171 			}
8172 			hibernate_discard_page(m);
8173 		}
8174 		m = next;
8175 	}
8176 
8177 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8178 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8179 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8180 
8181 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8182 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8183 			if (m->vmp_dirty) {
8184 				count_discard_purgeable++;
8185 			} else {
8186 				count_discard_cleaned++;
8187 			}
8188 			hibernate_discard_page(m);
8189 		}
8190 		m = next;
8191 	}
8192 
8193 #if MACH_ASSERT || DEBUG
8194 	if (vm_page_local_q) {
8195 		zpercpu_foreach(lq, vm_page_local_q) {
8196 			VPL_UNLOCK(&lq->vpl_lock);
8197 		}
8198 	}
8199 	vm_page_unlock_queues();
8200 #endif  /* MACH_ASSERT || DEBUG */
8201 
8202 	clock_get_uptime(&end);
8203 	absolutetime_to_nanoseconds(end - start, &nsec);
8204 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8205 	    nsec / 1000000ULL,
8206 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8207 }
8208 
8209 boolean_t       hibernate_paddr_map_inited = FALSE;
8210 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8211 vm_page_t       hibernate_rebuild_hash_list = NULL;
8212 
8213 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8214 unsigned int    hibernate_teardown_found_created_pages = 0;
8215 unsigned int    hibernate_teardown_found_free_pages = 0;
8216 unsigned int    hibernate_teardown_vm_page_free_count;
8217 
8218 
8219 struct ppnum_mapping {
8220 	struct ppnum_mapping    *ppnm_next;
8221 	ppnum_t                 ppnm_base_paddr;
8222 	unsigned int            ppnm_sindx;
8223 	unsigned int            ppnm_eindx;
8224 };
8225 
8226 struct ppnum_mapping    *ppnm_head;
8227 struct ppnum_mapping    *ppnm_last_found = NULL;
8228 
8229 
8230 void
8231 hibernate_create_paddr_map(void)
8232 {
8233 	unsigned int    i;
8234 	ppnum_t         next_ppnum_in_run = 0;
8235 	struct ppnum_mapping *ppnm = NULL;
8236 
8237 	if (hibernate_paddr_map_inited == FALSE) {
8238 		for (i = 0; i < vm_pages_count; i++) {
8239 			if (ppnm) {
8240 				ppnm->ppnm_eindx = i;
8241 			}
8242 
8243 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8244 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
8245 
8246 				ppnm->ppnm_next = ppnm_head;
8247 				ppnm_head = ppnm;
8248 
8249 				ppnm->ppnm_sindx = i;
8250 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8251 			}
8252 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8253 		}
8254 		ppnm->ppnm_eindx = vm_pages_count;
8255 
8256 		hibernate_paddr_map_inited = TRUE;
8257 	}
8258 }
8259 
8260 ppnum_t
8261 hibernate_lookup_paddr(unsigned int indx)
8262 {
8263 	struct ppnum_mapping *ppnm = NULL;
8264 
8265 	ppnm = ppnm_last_found;
8266 
8267 	if (ppnm) {
8268 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8269 			goto done;
8270 		}
8271 	}
8272 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8273 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8274 			ppnm_last_found = ppnm;
8275 			break;
8276 		}
8277 	}
8278 	if (ppnm == NULL) {
8279 		panic("hibernate_lookup_paddr of %d failed", indx);
8280 	}
8281 done:
8282 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8283 }
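/*
 * Worked example: a run recorded as { ppnm_sindx = 100, ppnm_eindx = 200,
 * ppnm_base_paddr = 0x80000 } maps hibernate_lookup_paddr(150) to
 * 0x80000 + (150 - 100) = 0x80032.
 */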
8284 
8285 
8286 uint32_t
8287 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8288 {
8289 	addr64_t        saddr_aligned;
8290 	addr64_t        eaddr_aligned;
8291 	addr64_t        addr;
8292 	ppnum_t         paddr;
8293 	unsigned int    mark_as_unneeded_pages = 0;
8294 
8295 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8296 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
8297 
8298 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8299 		paddr = pmap_find_phys(kernel_pmap, addr);
8300 
8301 		assert(paddr);
8302 
8303 		hibernate_page_bitset(page_list, TRUE, paddr);
8304 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
8305 
8306 		mark_as_unneeded_pages++;
8307 	}
8308 	return mark_as_unneeded_pages;
8309 }
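/*
 * Only pages wholly inside [saddr, eaddr) are marked: saddr rounds up and
 * eaddr rounds down.  E.g. with 4K pages, saddr = 0x1234 / eaddr = 0x5678
 * align to [0x2000, 0x5000), marking the three pages at 0x2000, 0x3000
 * and 0x4000.
 */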
8310 
8311 
8312 void
8313 hibernate_hash_insert_page(vm_page_t mem)
8314 {
8315 	vm_page_bucket_t *bucket;
8316 	int             hash_id;
8317 	vm_object_t     m_object;
8318 
8319 	m_object = VM_PAGE_OBJECT(mem);
8320 
8321 	assert(mem->vmp_hashed);
8322 	assert(m_object);
8323 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
8324 
8325 	/*
8326 	 *	Insert it into the object_object/offset hash table
8327 	 */
8328 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
8329 	bucket = &vm_page_buckets[hash_id];
8330 
8331 	mem->vmp_next_m = bucket->page_list;
8332 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
8333 }
8334 
8335 
8336 void
8337 hibernate_free_range(int sindx, int eindx)
8338 {
8339 	vm_page_t       mem;
8340 	unsigned int    color;
8341 
8342 	while (sindx < eindx) {
8343 		mem = &vm_pages[sindx];
8344 
8345 		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8346 
8347 		mem->vmp_lopage = FALSE;
8348 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8349 
8350 		color = VM_PAGE_GET_COLOR(mem);
8351 #if defined(__x86_64__)
8352 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8353 #else
8354 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8355 #endif
8356 		vm_page_free_count++;
8357 
8358 		sindx++;
8359 	}
8360 }
8361 
8362 void
8363 hibernate_rebuild_vm_structs(void)
8364 {
8365 	int             i, cindx, sindx, eindx;
8366 	vm_page_t       mem, tmem, mem_next;
8367 	AbsoluteTime    startTime, endTime;
8368 	uint64_t        nsec;
8369 
8370 	if (hibernate_rebuild_needed == FALSE) {
8371 		return;
8372 	}
8373 
8374 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8375 	HIBLOG("hibernate_rebuild started\n");
8376 
8377 	clock_get_uptime(&startTime);
8378 
8379 	pal_hib_rebuild_pmap_structs();
8380 
8381 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8382 	eindx = vm_pages_count;
8383 
8384 	/*
8385 	 * Mark all the vm_pages[] that have not been initialized yet as being
8386 	 * transient. This is needed to ensure that the buddy page search is correct.
8387 	 * Without this, random data in these vm_pages[] can trip the buddy search.
8388 	 */
8389 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8390 		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8391 	}
8392 
8393 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8394 		mem = &vm_pages[cindx];
8395 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8396 		/*
8397 		 * hibernate_teardown_vm_structs leaves the location where
8398 		 * this vm_page_t must be located in "vmp_next_m".
8399 		 */
8400 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8401 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8402 
8403 		sindx = (int)(tmem - &vm_pages[0]);
8404 
8405 		if (mem != tmem) {
8406 			/*
8407 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8408 			 * so move it back to its real location
8409 			 */
8410 			*tmem = *mem;
8411 			mem = tmem;
8412 		}
8413 		if (mem->vmp_hashed) {
8414 			hibernate_hash_insert_page(mem);
8415 		}
8416 		/*
8417 		 * the 'hole' between this vm_page_t and the previous
8418 		 * vm_page_t we moved needs to be initialized as
8419 		 * a range of free vm_page_t's
8420 		 */
8421 		hibernate_free_range(sindx + 1, eindx);
8422 
8423 		eindx = sindx;
8424 	}
8425 	if (sindx) {
8426 		hibernate_free_range(0, sindx);
8427 	}
8428 
8429 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8430 
8431 	/*
8432 	 * process the list of vm_page_t's that were entered in the hash,
8433 	 * but were not located in the vm_pages array... these are
8434 	 * vm_page_t's that were created on the fly (i.e. fictitious)
8435 	 */
8436 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8437 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8438 
8439 		mem->vmp_next_m = 0;
8440 		hibernate_hash_insert_page(mem);
8441 	}
8442 	hibernate_rebuild_hash_list = NULL;
8443 
8444 	clock_get_uptime(&endTime);
8445 	SUB_ABSOLUTETIME(&endTime, &startTime);
8446 	absolutetime_to_nanoseconds(endTime, &nsec);
8447 
8448 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8449 
8450 	hibernate_rebuild_needed = FALSE;
8451 
8452 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8453 }
8454 
8455 uint32_t
8456 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8457 {
8458 	unsigned int    i;
8459 	unsigned int    compact_target_indx;
8460 	vm_page_t       mem, mem_next;
8461 	vm_page_bucket_t *bucket;
8462 	unsigned int    mark_as_unneeded_pages = 0;
8463 	unsigned int    unneeded_vm_page_bucket_pages = 0;
8464 	unsigned int    unneeded_vm_pages_pages = 0;
8465 	unsigned int    unneeded_pmap_pages = 0;
8466 	addr64_t        start_of_unneeded = 0;
8467 	addr64_t        end_of_unneeded = 0;
8468 
8469 
8470 	if (hibernate_should_abort()) {
8471 		return 0;
8472 	}
8473 
8474 	hibernate_rebuild_needed = TRUE;
8475 
8476 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8477 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8478 	    vm_page_cleaned_count, compressor_object->resident_page_count);
8479 
8480 	for (i = 0; i < vm_page_bucket_count; i++) {
8481 		bucket = &vm_page_buckets[i];
8482 
8483 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8484 			assert(mem->vmp_hashed);
8485 
8486 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8487 
8488 			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8489 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8490 				hibernate_rebuild_hash_list = mem;
8491 			}
8492 		}
8493 	}
8494 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8495 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8496 
8497 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
8498 
8499 	compact_target_indx = 0;
8500 
8501 	for (i = 0; i < vm_pages_count; i++) {
8502 		mem = &vm_pages[i];
8503 
8504 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8505 			unsigned int color;
8506 
8507 			assert(mem->vmp_busy);
8508 			assert(!mem->vmp_lopage);
8509 
8510 			color = VM_PAGE_GET_COLOR(mem);
8511 
8512 			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8513 
8514 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8515 
8516 			vm_page_free_count--;
8517 
8518 			hibernate_teardown_found_free_pages++;
8519 
8520 			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8521 				compact_target_indx = i;
8522 			}
8523 		} else {
8524 			/*
8525 			 * record this vm_page_t's original location
8526 			 * we need this even if it doesn't get moved
8527 			 * as an indicator to the rebuild function that
8528 			 * we don't have to move it
8529 			 */
8530 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8531 
8532 			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8533 				/*
8534 				 * we've got a hole to fill, so
8535 				 * move this vm_page_t to its new home
8536 				 */
8537 				vm_pages[compact_target_indx] = *mem;
8538 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8539 
8540 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8541 				compact_target_indx++;
8542 			} else {
8543 				hibernate_teardown_last_valid_compact_indx = i;
8544 			}
8545 		}
8546 	}
8547 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8548 	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8549 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
8550 
8551 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8552 
8553 	if (start_of_unneeded) {
8554 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8555 		mark_as_unneeded_pages += unneeded_pmap_pages;
8556 	}
8557 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8558 
8559 	return mark_as_unneeded_pages;
8560 }
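/*
 * Annotation (not part of the original source): a minimal userspace sketch,
 * under stated assumptions, of the compact/rebuild handshake used above.
 * Every live entry records its original slot in a link field (standing in
 * for vmp_next_m) before being compacted toward index 0; the rebuild pass
 * walks the compacted region backwards, copies each entry back to its
 * recorded home, and the vacated slots become the free range again. The
 * names slot_t, teardown_compact and rebuild are hypothetical.
 */
#include <assert.h>
#include <stddef.h>

#define NSLOTS 8

typedef struct {
	int   in_use;
	void *home;     /* stands in for vmp_next_m: the entry's original address */
	int   payload;
} slot_t;

static slot_t slots[NSLOTS];

/* compact: move every live slot toward index 0, remembering where it came from */
static size_t
teardown_compact(void)
{
	size_t target = 0, last_valid = 0;

	for (size_t i = 0; i < NSLOTS; i++) {
		if (!slots[i].in_use) {
			continue;
		}
		slots[i].home = &slots[i];      /* record original location */
		if (target != i) {
			slots[target] = slots[i];   /* fill the hole */
			slots[i].in_use = 0;
		}
		last_valid = target++;
	}
	return last_valid;                      /* assumes at least one live slot */
}

/* rebuild: walk backwards so a home slot is always vacated before it is reused */
static void
rebuild(size_t last_valid)
{
	for (size_t c = last_valid + 1; c-- > 0;) {
		slot_t *orig = (slot_t *)slots[c].home;

		if (orig != &slots[c]) {
			*orig = slots[c];           /* move it back to its real location */
			slots[c].in_use = 0;        /* the compacted copy becomes a hole */
		}
	}
}

int
main(void)
{
	slots[2].in_use = 1; slots[2].payload = 42;
	slots[5].in_use = 1; slots[5].payload = 7;

	size_t last_valid = teardown_compact();
	/* ... the hibernation image would be written here ... */
	rebuild(last_valid);

	assert(slots[2].in_use && slots[2].payload == 42);
	assert(slots[5].in_use && slots[5].payload == 7);
	return 0;
}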
8561 
8562 
8563 #endif /* HIBERNATION */
8564 
8565 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8566 
8567 #include <mach_vm_debug.h>
8568 #if     MACH_VM_DEBUG
8569 
8570 #include <mach_debug/hash_info.h>
8571 #include <vm/vm_debug.h>
8572 
8573 /*
8574  *	Routine:	vm_page_info
8575  *	Purpose:
8576  *		Return information about the global VP table.
8577  *		Fills the buffer with as much information as possible
8578  *		and returns the desired size of the buffer.
8579  *	Conditions:
8580  *		Nothing locked.  The caller should provide
8581  *		possibly-pageable memory.
8582  */
8583 
8584 unsigned int
8585 vm_page_info(
8586 	hash_info_bucket_t *info,
8587 	unsigned int count)
8588 {
8589 	unsigned int i;
8590 	lck_spin_t      *bucket_lock;
8591 
8592 	if (vm_page_bucket_count < count) {
8593 		count = vm_page_bucket_count;
8594 	}
8595 
8596 	for (i = 0; i < count; i++) {
8597 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
8598 		unsigned int bucket_count = 0;
8599 		vm_page_t m;
8600 
8601 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8602 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8603 
8604 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8605 		    m != VM_PAGE_NULL;
8606 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8607 			bucket_count++;
8608 		}
8609 
8610 		lck_spin_unlock(bucket_lock);
8611 
8612 		/* don't touch pageable memory while holding locks */
8613 		info[i].hib_count = bucket_count;
8614 	}
8615 
8616 	return vm_page_bucket_count;
8617 }
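/*
 * Annotation (not original source): a hedged usage sketch of vm_page_info().
 * It follows the common Mach "fill what fits, return the desired count"
 * convention: calling with a zero-sized buffer yields the bucket count,
 * after which a buffer can be sized for a second call. The helper name is
 * hypothetical.
 */
static void
vm_page_info_usage_sketch(void)
{
	unsigned int n = vm_page_info(NULL, 0);     /* learn the desired size */
	hash_info_bucket_t *buf = kalloc_data(n * sizeof(*buf), Z_WAITOK | Z_ZERO);

	if (buf != NULL) {
		(void) vm_page_info(buf, n);            /* fill in per-bucket counts */
		kfree_data(buf, n * sizeof(*buf));
	}
}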
8618 #endif  /* MACH_VM_DEBUG */
8619 
8620 #if VM_PAGE_BUCKETS_CHECK
8621 void
8622 vm_page_buckets_check(void)
8623 {
8624 	unsigned int i;
8625 	vm_page_t p;
8626 	unsigned int p_hash;
8627 	vm_page_bucket_t *bucket;
8628 	lck_spin_t      *bucket_lock;
8629 
8630 	if (!vm_page_buckets_check_ready) {
8631 		return;
8632 	}
8633 
8634 #if HIBERNATION
8635 	if (hibernate_rebuild_needed ||
8636 	    hibernate_rebuild_hash_list) {
8637 		panic("BUCKET_CHECK: hibernation in progress: "
8638 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
8639 		    hibernate_rebuild_needed,
8640 		    hibernate_rebuild_hash_list);
8641 	}
8642 #endif /* HIBERNATION */
8643 
8644 #if VM_PAGE_FAKE_BUCKETS
8645 	char *cp;
8646 	for (cp = (char *) vm_page_fake_buckets_start;
8647 	    cp < (char *) vm_page_fake_buckets_end;
8648 	    cp++) {
8649 		if (*cp != 0x5a) {
8650 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
8651 			    "[0x%llx:0x%llx]\n",
8652 			    cp,
8653 			    (uint64_t) vm_page_fake_buckets_start,
8654 			    (uint64_t) vm_page_fake_buckets_end);
8655 		}
8656 	}
8657 #endif /* VM_PAGE_FAKE_BUCKETS */
8658 
8659 	for (i = 0; i < vm_page_bucket_count; i++) {
8660 		vm_object_t     p_object;
8661 
8662 		bucket = &vm_page_buckets[i];
8663 		if (!bucket->page_list) {
8664 			continue;
8665 		}
8666 
8667 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8668 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8669 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8670 
8671 		while (p != VM_PAGE_NULL) {
8672 			p_object = VM_PAGE_OBJECT(p);
8673 
8674 			if (!p->vmp_hashed) {
8675 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8676 				    "hash %d in bucket %d at %p "
8677 				    "is not hashed\n",
8678 				    p, p_object, p->vmp_offset,
8679 				    p_hash, i, bucket);
8680 			}
8681 			p_hash = vm_page_hash(p_object, p->vmp_offset);
8682 			if (p_hash != i) {
8683 				panic("BUCKET_CHECK: corruption in bucket %d "
8684 				    "at %p: page %p object %p offset 0x%llx "
8685 				    "hash %d\n",
8686 				    i, bucket, p, p_object, p->vmp_offset,
8687 				    p_hash);
8688 			}
8689 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8690 		}
8691 		lck_spin_unlock(bucket_lock);
8692 	}
8693 
8694 //	printf("BUCKET_CHECK: checked buckets\n");
8695 }
8696 #endif /* VM_PAGE_BUCKETS_CHECK */
8697 
8698 /*
8699  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8700  * local queues if they exist... it's the only spot in the system where we add pages
8701  * to those queues...  once on those queues, those pages can only move to one of the
8702  * global page queues or the free queues... they NEVER move from local q to local q.
8703  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8704  * the global vm_page_queue_lock at this point...  we still need to take the local lock
8705  * in case this operation is being run on a different CPU than the local queue's identity,
8706  * but we don't have to worry about the page moving to a global queue or becoming wired
8707  * while we're grabbing the local lock since those operations would require the global
8708  * vm_page_queue_lock to be held, and we already own it.
8709  *
8710  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8711  * 'wired' and local are ALWAYS mutually exclusive conditions.
8712  */
8713 
8714 void
8715 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8716 {
8717 	boolean_t       was_pageable = TRUE;
8718 	vm_object_t     m_object;
8719 
8720 	m_object = VM_PAGE_OBJECT(mem);
8721 
8722 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8723 
8724 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8725 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8726 		if (remove_from_specialq == TRUE) {
8727 			vm_page_remove_from_specialq(mem);
8728 		}
8729 		/*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
8730 		 *       assert(mem->vmp_specialq.next != 0);
8731 		 *       assert(mem->vmp_specialq.prev != 0);
8732 		 *  } else {*/
8733 		if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
8734 			assert(mem->vmp_specialq.next == 0);
8735 			assert(mem->vmp_specialq.prev == 0);
8736 		}
8737 		return;
8738 	}
8739 
8740 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8741 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8742 		assert(mem->vmp_specialq.next == 0 &&
8743 		    mem->vmp_specialq.prev == 0 &&
8744 		    mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8745 		return;
8746 	}
8747 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
8748 		/*
8749 		 * might put these guys on a list for debugging purposes
8750 		 * if we do, we'll need to remove this assert
8751 		 */
8752 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8753 		assert(mem->vmp_specialq.next == 0 &&
8754 		    mem->vmp_specialq.prev == 0);
8755 		/*
8756 		 * Recall that vmp_on_specialq also means a request to put
8757 		 * it on the special Q. So we don't want to reset that bit
8758 		 * just because a wiring request came in. We might want to
8759 		 * put it on the special queue post-unwiring.
8760 		 *
8761 		 * &&
8762 		 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8763 		 */
8764 		return;
8765 	}
8766 
8767 	assert(m_object != compressor_object);
8768 	assert(!is_kernel_object(m_object));
8769 	assert(!mem->vmp_fictitious);
8770 
8771 	switch (mem->vmp_q_state) {
8772 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
8773 	{
8774 		struct vpl      *lq;
8775 
8776 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
8777 		VPL_LOCK(&lq->vpl_lock);
8778 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
8779 		mem->vmp_local_id = 0;
8780 		lq->vpl_count--;
8781 		if (m_object->internal) {
8782 			lq->vpl_internal_count--;
8783 		} else {
8784 			lq->vpl_external_count--;
8785 		}
8786 		VPL_UNLOCK(&lq->vpl_lock);
8787 		was_pageable = FALSE;
8788 		break;
8789 	}
8790 	case VM_PAGE_ON_ACTIVE_Q:
8791 	{
8792 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
8793 		vm_page_active_count--;
8794 		break;
8795 	}
8796 
8797 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
8798 	{
8799 		assert(m_object->internal == TRUE);
8800 
8801 		vm_page_inactive_count--;
8802 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
8803 		vm_page_anonymous_count--;
8804 
8805 		vm_purgeable_q_advance_all();
8806 		vm_page_balance_inactive(3);
8807 		break;
8808 	}
8809 
8810 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
8811 	{
8812 		assert(m_object->internal == FALSE);
8813 
8814 		vm_page_inactive_count--;
8815 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
8816 		vm_purgeable_q_advance_all();
8817 		vm_page_balance_inactive(3);
8818 		break;
8819 	}
8820 
8821 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
8822 	{
8823 		assert(m_object->internal == FALSE);
8824 
8825 		vm_page_inactive_count--;
8826 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
8827 		vm_page_cleaned_count--;
8828 		vm_page_balance_inactive(3);
8829 		break;
8830 	}
8831 
8832 	case VM_PAGE_ON_THROTTLED_Q:
8833 	{
8834 		assert(m_object->internal == TRUE);
8835 
8836 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
8837 		vm_page_throttled_count--;
8838 		was_pageable = FALSE;
8839 		break;
8840 	}
8841 
8842 	case VM_PAGE_ON_SPECULATIVE_Q:
8843 	{
8844 		assert(m_object->internal == FALSE);
8845 
8846 		vm_page_remque(&mem->vmp_pageq);
8847 		vm_page_speculative_count--;
8848 		vm_page_balance_inactive(3);
8849 		break;
8850 	}
8851 
8852 #if CONFIG_SECLUDED_MEMORY
8853 	case VM_PAGE_ON_SECLUDED_Q:
8854 	{
8855 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
8856 		vm_page_secluded_count--;
8857 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
8858 		if (m_object == VM_OBJECT_NULL) {
8859 			vm_page_secluded_count_free--;
8860 			was_pageable = FALSE;
8861 		} else {
8862 			assert(!m_object->internal);
8863 			vm_page_secluded_count_inuse--;
8864 			was_pageable = FALSE;
8865 //			was_pageable = TRUE;
8866 		}
8867 		break;
8868 	}
8869 #endif /* CONFIG_SECLUDED_MEMORY */
8870 
8871 	default:
8872 	{
8873 		/*
8874 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
8875 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
8876 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
8877 		 *              either first remove it (it needs both the page queues lock and the object lock to do
8878 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
8879 		 *
8880 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
8881 		 *	or any of the undefined states
8882 		 */
8883 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
8884 		break;
8885 	}
8886 	}
8887 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8888 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
8889 
8890 	if (remove_from_specialq == TRUE) {
8891 		vm_page_remove_from_specialq(mem);
8892 	}
8893 	if (was_pageable) {
8894 		if (m_object->internal) {
8895 			vm_page_pageable_internal_count--;
8896 		} else {
8897 			vm_page_pageable_external_count--;
8898 		}
8899 	}
8900 }
8901 
8902 void
8903 vm_page_remove_internal(vm_page_t page)
8904 {
8905 	vm_object_t __object = VM_PAGE_OBJECT(page);
8906 	if (page == __object->memq_hint) {
8907 		vm_page_t       __new_hint;
8908 		vm_page_queue_entry_t   __qe;
8909 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
8910 		if (vm_page_queue_end(&__object->memq, __qe)) {
8911 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
8912 			if (vm_page_queue_end(&__object->memq, __qe)) {
8913 				__qe = NULL;
8914 			}
8915 		}
8916 		__new_hint = (vm_page_t)((uintptr_t) __qe);
8917 		__object->memq_hint = __new_hint;
8918 	}
8919 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
8920 #if CONFIG_SECLUDED_MEMORY
8921 	if (__object->eligible_for_secluded) {
8922 		vm_page_secluded.eligible_for_secluded--;
8923 	}
8924 #endif /* CONFIG_SECLUDED_MEMORY */
8925 }
8926 
8927 void
8928 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
8929 {
8930 	vm_object_t     m_object;
8931 
8932 	m_object = VM_PAGE_OBJECT(mem);
8933 
8934 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8935 	assert(!mem->vmp_fictitious);
8936 	assert(!mem->vmp_laundry);
8937 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8938 	vm_page_check_pageable_safe(mem);
8939 
8940 	if (m_object->internal) {
8941 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
8942 
8943 		if (first == TRUE) {
8944 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
8945 		} else {
8946 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
8947 		}
8948 
8949 		vm_page_anonymous_count++;
8950 		vm_page_pageable_internal_count++;
8951 	} else {
8952 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
8953 
8954 		if (first == TRUE) {
8955 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
8956 		} else {
8957 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
8958 		}
8959 
8960 		vm_page_pageable_external_count++;
8961 	}
8962 	vm_page_inactive_count++;
8963 	token_new_pagecount++;
8964 
8965 	vm_page_add_to_specialq(mem, FALSE);
8966 }
8967 
8968 void
8969 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
8970 {
8971 	vm_object_t     m_object;
8972 
8973 	m_object = VM_PAGE_OBJECT(mem);
8974 
8975 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8976 	assert(!mem->vmp_fictitious);
8977 	assert(!mem->vmp_laundry);
8978 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8979 	vm_page_check_pageable_safe(mem);
8980 
8981 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
8982 	if (first == TRUE) {
8983 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
8984 	} else {
8985 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
8986 	}
8987 	vm_page_active_count++;
8988 
8989 	if (m_object->internal) {
8990 		vm_page_pageable_internal_count++;
8991 	} else {
8992 		vm_page_pageable_external_count++;
8993 	}
8994 
8995 	vm_page_add_to_specialq(mem, FALSE);
8996 	vm_page_balance_inactive(3);
8997 }
8998 
8999 /*
9000  * Pages from special kernel objects shouldn't
9001  * be placed on pageable queues.
9002  */
9003 void
9004 vm_page_check_pageable_safe(vm_page_t page)
9005 {
9006 	vm_object_t     page_object;
9007 
9008 	page_object = VM_PAGE_OBJECT(page);
9009 
9010 	if (is_kernel_object(page_object)) {
9011 		panic("vm_page_check_pageable_safe: trying to add page "
9012 		    "from kernel object (%p) to pageable queue", page_object);
9013 	}
9014 
9015 	if (page_object == compressor_object) {
9016 		panic("vm_page_check_pageable_safe: trying to add page "
9017 		    "from compressor object (%p) to pageable queue", compressor_object);
9018 	}
9019 }
9020 
9021 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9022 * wired page diagnose
9023 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9024 
9025 #include <libkern/OSKextLibPrivate.h>
9026 
9027 #define KA_SIZE(namelen, subtotalscount)        \
9028 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9029 
9030 #define KA_NAME(alloc)  \
9031 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9032 
9033 #define KA_NAME_LEN(alloc)      \
9034     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
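/*
 * Annotation (not original source): KA_SIZE/KA_NAME describe one variable-
 * length allocation laid out as
 *
 *     [struct vm_allocation_site][subtotals[0..n-1]][name bytes + NUL]
 *
 * For example, a site with 2 subtotals and the 5-character name "stack"
 * occupies
 *
 *     KA_SIZE(5, 2) == sizeof(struct vm_allocation_site)
 *                      + 5 + 1
 *                      + 2 * sizeof(struct vm_allocation_total)
 *
 * and KA_NAME() points at the first byte past the subtotals array, which is
 * where the NUL-terminated name is stored.
 */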
9035 
9036 vm_tag_t
9037 vm_tag_bt(void)
9038 {
9039 	uintptr_t* frameptr;
9040 	uintptr_t* frameptr_next;
9041 	uintptr_t retaddr;
9042 	uintptr_t kstackb, kstackt;
9043 	const vm_allocation_site_t * site;
9044 	thread_t cthread;
9045 	kern_allocation_name_t name;
9046 
9047 	cthread = current_thread();
9048 	if (__improbable(cthread == NULL)) {
9049 		return VM_KERN_MEMORY_OSFMK;
9050 	}
9051 
9052 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9053 		if (!name->tag) {
9054 			vm_tag_alloc(name);
9055 		}
9056 		return name->tag;
9057 	}
9058 
9059 	kstackb = cthread->kernel_stack;
9060 	kstackt = kstackb + kernel_stack_size;
9061 
9062 	/* Load stack frame pointer (EBP on x86) into frameptr */
9063 	frameptr = __builtin_frame_address(0);
9064 	site = NULL;
9065 	while (frameptr != NULL) {
9066 		/* Verify thread stack bounds */
9067 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9068 			break;
9069 		}
9070 
9071 		/* Next frame pointer is pointed to by the previous one */
9072 		frameptr_next = (uintptr_t*) *frameptr;
9073 #if defined(HAS_APPLE_PAC)
9074 		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
9075 #endif
9076 
9077 		/* Pull return address from one spot above the frame pointer */
9078 		retaddr = *(frameptr + 1);
9079 
9080 #if defined(HAS_APPLE_PAC)
9081 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9082 #endif
9083 
9084 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9085 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9086 			site = OSKextGetAllocationSiteForCaller(retaddr);
9087 			break;
9088 		}
9089 		frameptr = frameptr_next;
9090 	}
9091 
9092 	return site ? site->tag : VM_KERN_MEMORY_NONE;
9093 }
9094 
9095 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9096 
9097 void
9098 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9099 {
9100 	vm_tag_t tag;
9101 	uint64_t avail;
9102 	uint32_t idx;
9103 	vm_allocation_site_t * prev;
9104 
9105 	if (site->tag) {
9106 		return;
9107 	}
9108 
9109 	idx = 0;
9110 	while (TRUE) {
9111 		avail = free_tag_bits[idx];
9112 		if (avail) {
9113 			tag = (vm_tag_t)__builtin_clzll(avail);
9114 			avail &= ~(1ULL << (63 - tag));
9115 			free_tag_bits[idx] = avail;
9116 			tag += (idx << 6);
9117 			break;
9118 		}
9119 		idx++;
9120 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
9121 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9122 				prev = vm_allocation_sites[idx];
9123 				if (!prev) {
9124 					continue;
9125 				}
9126 				if (!KA_NAME_LEN(prev)) {
9127 					continue;
9128 				}
9129 				if (!prev->tag) {
9130 					continue;
9131 				}
9132 				if (prev->total) {
9133 					continue;
9134 				}
9135 				if (1 != prev->refcount) {
9136 					continue;
9137 				}
9138 
9139 				assert(idx == prev->tag);
9140 				tag = (vm_tag_t)idx;
9141 				prev->tag = VM_KERN_MEMORY_NONE;
9142 				*releasesiteP = prev;
9143 				break;
9144 			}
9145 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9146 				tag = VM_KERN_MEMORY_ANY;
9147 			}
9148 			break;
9149 		}
9150 	}
9151 	site->tag = tag;
9152 
9153 	OSAddAtomic16(1, &site->refcount);
9154 
9155 	if (VM_KERN_MEMORY_ANY != tag) {
9156 		vm_allocation_sites[tag] = site;
9157 	}
9158 
9159 	if (tag > vm_allocation_tag_highest) {
9160 		vm_allocation_tag_highest = tag;
9161 	}
9162 }
9163 
9164 static void
9165 vm_tag_free_locked(vm_tag_t tag)
9166 {
9167 	uint64_t avail;
9168 	uint32_t idx;
9169 	uint64_t bit;
9170 
9171 	if (VM_KERN_MEMORY_ANY == tag) {
9172 		return;
9173 	}
9174 
9175 	idx = (tag >> 6);
9176 	avail = free_tag_bits[idx];
9177 	tag &= 63;
9178 	bit = (1ULL << (63 - tag));
9179 	assert(!(avail & bit));
9180 	free_tag_bits[idx] = (avail | bit);
9181 }
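/*
 * Annotation (not original source): a standalone sketch of the bitmap math
 * used by vm_tag_alloc_locked()/vm_tag_free_locked(). Within each 64-bit
 * word, tag k occupies bit (63 - k), so the lowest free tag is found with
 * __builtin_clzll() and freed by setting (1ULL << (63 - (tag & 63))). The
 * demo_* names are hypothetical; a real caller must also handle word
 * exhaustion, since __builtin_clzll(0) is undefined.
 */
#include <assert.h>
#include <stdint.h>

static uint64_t demo_bits = ~0ULL;              /* all 64 tags of one word free */

static unsigned
demo_tag_alloc(void)
{
	assert(demo_bits != 0);                     /* not exhausted */
	unsigned tag = (unsigned)__builtin_clzll(demo_bits);    /* lowest free tag */
	demo_bits &= ~(1ULL << (63 - tag));         /* mark it allocated */
	return tag;
}

static void
demo_tag_free(unsigned tag)
{
	uint64_t bit = 1ULL << (63 - (tag & 63));
	assert(!(demo_bits & bit));                 /* must currently be allocated */
	demo_bits |= bit;
}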
9182 
9183 static void
9184 vm_tag_init(void)
9185 {
9186 	vm_tag_t tag;
9187 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9188 		vm_tag_free_locked(tag);
9189 	}
9190 
9191 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9192 		vm_tag_free_locked(tag);
9193 	}
9194 }
9195 
9196 vm_tag_t
9197 vm_tag_alloc(vm_allocation_site_t * site)
9198 {
9199 	vm_allocation_site_t * releasesite;
9200 
9201 	if (!site->tag) {
9202 		releasesite = NULL;
9203 		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9204 		vm_tag_alloc_locked(site, &releasesite);
9205 		lck_ticket_unlock(&vm_allocation_sites_lock);
9206 		if (releasesite) {
9207 			kern_allocation_name_release(releasesite);
9208 		}
9209 	}
9210 
9211 	return site->tag;
9212 }
9213 
9214 #if VM_BTLOG_TAGS
9215 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
9216 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
9217 #define VM_TAG_BTLOG_SIZE (16u << 10)
9218 
9219 btlog_t vmtaglog_btlog;
9220 vm_tag_t vmtaglog_tag;
9221 
9222 static void
9223 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
9224 {
9225 	if (is_kernel_object(object)) {
9226 		/* kernel object backtraces are tracked in vm entries */
9227 		return;
9228 	}
9229 	if (delta > 0) {
9230 		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
9231 		btlog_record(vmtaglog_btlog, object, 0, ref);
9232 	} else if (object->wired_page_count == 0) {
9233 		btlog_erase(vmtaglog_btlog, object);
9234 	}
9235 }
9236 
9237 #ifndef ARRAY_SIZE
9238 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
9239 #endif /* ARRAY_SIZE */
9240 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
9241 const char *vm_kern_memory_strs[] = {
9242 	VM_KERN_MEMORY_ELEM(OSFMK),
9243 	VM_KERN_MEMORY_ELEM(BSD),
9244 	VM_KERN_MEMORY_ELEM(IOKIT),
9245 	VM_KERN_MEMORY_ELEM(LIBKERN),
9246 	VM_KERN_MEMORY_ELEM(OSKEXT),
9247 	VM_KERN_MEMORY_ELEM(KEXT),
9248 	VM_KERN_MEMORY_ELEM(IPC),
9249 	VM_KERN_MEMORY_ELEM(STACK),
9250 	VM_KERN_MEMORY_ELEM(CPU),
9251 	VM_KERN_MEMORY_ELEM(PMAP),
9252 	VM_KERN_MEMORY_ELEM(PTE),
9253 	VM_KERN_MEMORY_ELEM(ZONE),
9254 	VM_KERN_MEMORY_ELEM(KALLOC),
9255 	VM_KERN_MEMORY_ELEM(COMPRESSOR),
9256 	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
9257 	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
9258 	VM_KERN_MEMORY_ELEM(WAITQ),
9259 	VM_KERN_MEMORY_ELEM(DIAG),
9260 	VM_KERN_MEMORY_ELEM(LOG),
9261 	VM_KERN_MEMORY_ELEM(FILE),
9262 	VM_KERN_MEMORY_ELEM(MBUF),
9263 	VM_KERN_MEMORY_ELEM(UBC),
9264 	VM_KERN_MEMORY_ELEM(SECURITY),
9265 	VM_KERN_MEMORY_ELEM(MLOCK),
9266 	VM_KERN_MEMORY_ELEM(REASON),
9267 	VM_KERN_MEMORY_ELEM(SKYWALK),
9268 	VM_KERN_MEMORY_ELEM(LTABLE),
9269 	VM_KERN_MEMORY_ELEM(HV),
9270 	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
9271 	VM_KERN_MEMORY_ELEM(RETIRED),
9272 	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
9273 	VM_KERN_MEMORY_ELEM(TRIAGE),
9274 	VM_KERN_MEMORY_ELEM(RECOUNT),
9275 };
9276 
9277 static vm_tag_t
9278 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
9279 {
9280 	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
9281 		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
9282 			return i;
9283 		}
9284 	}
9285 
9286 	printf("Unable to find vm tag %s for btlog\n", tagstr);
9287 	return VM_KERN_MEMORY_NONE;
9288 }
9289 
9290 __startup_func
9291 static void
9292 vm_btlog_init(void)
9293 {
9294 	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
9295 
9296 	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
9297 		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
9298 	}
9299 }
9300 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
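/*
 * Usage note (an assumption based on the TUNABLE_STR declaration above):
 * booting with the "vmtaglog=<TAG>" boot-arg, e.g. vmtaglog=IOKIT, selects
 * the tag whose wired allocations are recorded into the btlog created here.
 */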
9301 #endif /* VM_BTLOG_TAGS */
9302 
9303 void
9304 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
9305 {
9306 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9307 
9308 	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
9309 }
9310 
9311 uint64_t
9312 vm_tag_get_size(vm_tag_t tag)
9313 {
9314 	vm_allocation_site_t *allocation;
9315 
9316 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9317 
9318 	allocation = vm_allocation_sites[tag];
9319 	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
9320 }
9321 
9322 void
9323 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
9324 {
9325 	uint64_t value;
9326 
9327 	value = os_atomic_add(&allocation->total, delta, relaxed);
9328 	if (delta < 0) {
9329 		assertf(value + (uint64_t)-delta > value,
9330 		    "tag %d, site %p", allocation->tag, allocation);
9331 	}
9332 
9333 #if DEBUG || DEVELOPMENT
9334 	if (value > allocation->peak) {
9335 		os_atomic_max(&allocation->peak, value, relaxed);
9336 	}
9337 #endif /* DEBUG || DEVELOPMENT */
9338 
9339 	if (value == (uint64_t)delta && !allocation->tag) {
9340 		vm_tag_alloc(allocation);
9341 	}
9342 
9343 #if VM_BTLOG_TAGS
9344 	if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
9345 		vm_tag_log(object, delta, __builtin_frame_address(0));
9346 	}
9347 #endif /* VM_BTLOG_TAGS */
9348 }
9349 
9350 #if VM_TAG_SIZECLASSES
9351 
9352 void
9353 vm_allocation_zones_init(void)
9354 {
9355 	vm_offset_t   addr;
9356 	vm_size_t     size;
9357 
9358 	const vm_tag_t early_tags[] = {
9359 		VM_KERN_MEMORY_DIAG,
9360 		VM_KERN_MEMORY_KALLOC,
9361 		VM_KERN_MEMORY_KALLOC_DATA,
9362 		VM_KERN_MEMORY_KALLOC_TYPE,
9363 		VM_KERN_MEMORY_LIBKERN,
9364 		VM_KERN_MEMORY_OSFMK,
9365 		VM_KERN_MEMORY_RECOUNT,
9366 	};
9367 
9368 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9369 	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9370 
9371 	kmem_alloc(kernel_map, &addr, round_page(size),
9372 	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
9373 	    VM_KERN_MEMORY_DIAG);
9374 
9375 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9376 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9377 
9378 	// prepopulate early tag ranges so allocations
9379 	// in vm_tag_update_zone_size() and early boot won't recurse
9380 	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
9381 		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
9382 		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9383 	}
9384 }
9385 
9386 __attribute__((noinline))
9387 static vm_tag_t
9388 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9389 {
9390 	vm_allocation_zone_total_t *stats;
9391 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9392 
9393 	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
9394 	stats = kalloc_data(size, flags);
9395 	if (!stats) {
9396 		return VM_KERN_MEMORY_NONE;
9397 	}
9398 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9399 		kfree_data(stats, size);
9400 	}
9401 	return tag;
9402 }
9403 
9404 vm_tag_t
9405 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags)
9406 {
9407 	assert(VM_KERN_MEMORY_NONE != tag);
9408 	assert(tag < VM_MAX_TAG_VALUE);
9409 
9410 	if (zidx >= VM_TAG_SIZECLASSES) {
9411 		return VM_KERN_MEMORY_NONE;
9412 	}
9413 
9414 	if (__probable(vm_allocation_zone_totals[tag])) {
9415 		return tag;
9416 	}
9417 	return vm_tag_zone_stats_alloc(tag, zflags);
9418 }
9419 
9420 void
9421 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9422 {
9423 	vm_allocation_zone_total_t *stats;
9424 	vm_size_t value;
9425 
9426 	assert(VM_KERN_MEMORY_NONE != tag);
9427 	assert(tag < VM_MAX_TAG_VALUE);
9428 
9429 	if (zidx >= VM_TAG_SIZECLASSES) {
9430 		return;
9431 	}
9432 
9433 	stats = vm_allocation_zone_totals[tag];
9434 	assert(stats);
9435 	stats += zidx;
9436 
9437 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9438 	if (delta < 0) {
9439 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9440 		return;
9441 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9442 		os_atomic_max(&stats->vazt_peak, value, relaxed);
9443 	}
9444 }
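/*
 * Annotation (not original source): a portable C11 sketch of the
 * "relaxed add + monotonic peak" pattern used above; the kernel itself
 * uses the os_atomic_* wrappers. The demo names are hypothetical.
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t demo_total, demo_peak;

static void
track(int64_t delta)
{
	/* fetch_add returns the old value; add delta again for the new total */
	uint64_t v = atomic_fetch_add_explicit(&demo_total, (uint64_t)delta,
	    memory_order_relaxed) + (uint64_t)delta;
	uint64_t p = atomic_load_explicit(&demo_peak, memory_order_relaxed);

	/* raise the peak only while it is still below the new total */
	while (p < v && !atomic_compare_exchange_weak_explicit(&demo_peak, &p, v,
	    memory_order_relaxed, memory_order_relaxed)) {
		;
	}
}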
9445 
9446 #endif /* VM_TAG_SIZECLASSES */
9447 
9448 void
9449 kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
9450 {
9451 	kern_allocation_name_t other;
9452 	struct vm_allocation_total * total;
9453 	uint32_t subidx;
9454 
9455 	subidx = 0;
9456 	assert(VM_KERN_MEMORY_NONE != subtag);
9457 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9458 	for (; subidx < allocation->subtotalscount; subidx++) {
9459 		if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
9460 			allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
9461 			break;
9462 		}
9463 		if (subtag == allocation->subtotals[subidx].tag) {
9464 			break;
9465 		}
9466 	}
9467 	lck_ticket_unlock(&vm_allocation_sites_lock);
9468 	assert(subidx < allocation->subtotalscount);
9469 	if (subidx >= allocation->subtotalscount) {
9470 		return;
9471 	}
9472 
9473 	total = &allocation->subtotals[subidx];
9474 	other = vm_allocation_sites[subtag];
9475 	assert(other);
9476 
9477 	if (delta < 0) {
9478 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9479 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9480 	}
9481 	OSAddAtomic64(delta, &other->mapped);
9482 	OSAddAtomic64(delta, &total->total);
9483 }
9484 
9485 const char *
9486 kern_allocation_get_name(kern_allocation_name_t allocation)
9487 {
9488 	return KA_NAME(allocation);
9489 }
9490 
9491 kern_allocation_name_t
9492 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9493 {
9494 	kern_allocation_name_t allocation;
9495 	uint16_t namelen;
9496 
9497 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9498 
9499 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9500 	allocation->refcount       = 1;
9501 	allocation->subtotalscount = subtotalscount;
9502 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9503 	strlcpy(KA_NAME(allocation), name, namelen + 1);
9504 
9505 	vm_tag_alloc(allocation);
9506 	return allocation;
9507 }
9508 
9509 void
9510 kern_allocation_name_release(kern_allocation_name_t allocation)
9511 {
9512 	assert(allocation->refcount > 0);
9513 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9514 		kfree_data(allocation,
9515 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9516 	}
9517 }
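/*
 * Annotation (not original source): a hedged lifecycle sketch tying the
 * interfaces above together. A subsystem creates a named accounting handle
 * once, charges and credits bytes against it, and drops its reference when
 * done. The "mysubsystem" name and 1 MiB figure are illustrative only.
 */
static void
named_allocation_lifecycle_sketch(void)
{
	kern_allocation_name_t name;

	name = kern_allocation_name_allocate("mysubsystem", 0);
	(void) kern_allocation_name_get_vm_tag(name);             /* assigns a tag */

	kern_allocation_update_size(name, 1024 * 1024, NULL);     /* charge 1 MiB */
	kern_allocation_update_size(name, -(1024 * 1024), NULL);  /* credit it back */

	kern_allocation_name_release(name);
}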
9518 
9519 vm_tag_t
9520 kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
9521 {
9522 	return vm_tag_alloc(allocation);
9523 }
9524 
9525 #if !VM_TAG_ACTIVE_UPDATE
9526 static void
9527 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9528 {
9529 	if (!object->wired_page_count) {
9530 		return;
9531 	}
9532 	if (!is_kernel_object(object)) {
9533 		assert(object->wire_tag < num_info);
9534 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9535 	}
9536 }
9537 
9538 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9539     unsigned int num_info, vm_object_t object);
9540 
9541 static void
9542 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9543     vm_page_iterate_proc proc, purgeable_q_t queue,
9544     int group)
9545 {
9546 	vm_object_t object;
9547 
9548 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
9549 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
9550 	    object = (vm_object_t) queue_next(&object->objq)) {
9551 		proc(info, num_info, object);
9552 	}
9553 }
9554 
9555 static void
9556 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9557     vm_page_iterate_proc proc)
9558 {
9559 	vm_object_t     object;
9560 
9561 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9562 	queue_iterate(&vm_objects_wired,
9563 	    object,
9564 	    vm_object_t,
9565 	    wired_objq)
9566 	{
9567 		proc(info, num_info, object);
9568 	}
9569 	lck_spin_unlock(&vm_objects_wired_lock);
9570 }
9571 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9572 
9573 static uint64_t
9574 process_account(mach_memory_info_t * info, unsigned int num_info,
9575     uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
9576 {
9577 	size_t                 namelen;
9578 	unsigned int           idx, count, nextinfo;
9579 	vm_allocation_site_t * site;
9580 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9581 
9582 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9583 		site = vm_allocation_sites[idx];
9584 		if (!site) {
9585 			continue;
9586 		}
9587 		info[idx].mapped = site->mapped;
9588 		info[idx].tag    = site->tag;
9589 		if (!iterated) {
9590 			info[idx].size = site->total;
9591 #if DEBUG || DEVELOPMENT
9592 			info[idx].peak = site->peak;
9593 #endif /* DEBUG || DEVELOPMENT */
9594 		} else {
9595 			if (!site->subtotalscount && (site->total != info[idx].size)) {
9596 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9597 				info[idx].size = site->total;
9598 			}
9599 		}
9600 		info[idx].flags |= VM_KERN_SITE_WIRED;
9601 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9602 			info[idx].site   = idx;
9603 			info[idx].flags |= VM_KERN_SITE_TAG;
9604 			if (VM_KERN_MEMORY_ZONE == idx) {
9605 				info[idx].flags |= VM_KERN_SITE_HIDE;
9606 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
9607 				info[idx].collectable_bytes = zones_collectable_bytes;
9608 			}
9609 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9610 			info[idx].site   = 0;
9611 			info[idx].flags |= VM_KERN_SITE_NAMED;
9612 			if (namelen > sizeof(info[idx].name)) {
9613 				namelen = sizeof(info[idx].name);
9614 			}
9615 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9616 		} else if (VM_TAG_KMOD & site->flags) {
9617 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
9618 			info[idx].flags |= VM_KERN_SITE_KMOD;
9619 		} else {
9620 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
9621 			info[idx].flags |= VM_KERN_SITE_KERNEL;
9622 		}
9623 	}
9624 
9625 	nextinfo = (vm_allocation_tag_highest + 1);
9626 	count    = nextinfo;
9627 	if (count >= num_info) {
9628 		count = num_info;
9629 	}
9630 
9631 	for (idx = 0; idx < count; idx++) {
9632 		site = vm_allocation_sites[idx];
9633 		if (!site) {
9634 			continue;
9635 		}
9636 #if VM_TAG_SIZECLASSES
9637 		vm_allocation_zone_total_t * zone;
9638 		unsigned int                 zidx;
9639 
9640 		if (!redact_info
9641 		    && vm_allocation_zone_totals
9642 		    && (zone = vm_allocation_zone_totals[idx])
9643 		    && (nextinfo < num_info)) {
9644 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9645 				if (!zone[zidx].vazt_peak) {
9646 					continue;
9647 				}
9648 				info[nextinfo]        = info[idx];
9649 				info[nextinfo].zone   = zone_index_from_tag_index(zidx);
9650 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
9651 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
9652 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
9653 				info[nextinfo].size   = zone[zidx].vazt_total;
9654 				info[nextinfo].peak   = zone[zidx].vazt_peak;
9655 				info[nextinfo].mapped = 0;
9656 				nextinfo++;
9657 			}
9658 		}
9659 #endif /* VM_TAG_SIZECLASSES */
9660 		if (site->subtotalscount) {
9661 			uint64_t mapped, mapcost, take;
9662 			uint32_t sub;
9663 			vm_tag_t alloctag;
9664 
9665 			info[idx].size = site->total;
9666 			mapped = info[idx].size;
9667 			info[idx].mapped = mapped;
9668 			mapcost = 0;
9669 			for (sub = 0; sub < site->subtotalscount; sub++) {
9670 				alloctag = site->subtotals[sub].tag;
9671 				assert(alloctag < num_info);
9672 				if (info[alloctag].name[0]) {
9673 					continue;
9674 				}
9675 				take = site->subtotals[sub].total;
9676 				if (take > info[alloctag].size) {
9677 					take = info[alloctag].size;
9678 				}
9679 				if (take > mapped) {
9680 					take = mapped;
9681 				}
9682 				info[alloctag].mapped  -= take;
9683 				info[alloctag].size    -= take;
9684 				mapped                 -= take;
9685 				mapcost                += take;
9686 			}
9687 			info[idx].size = mapcost;
9688 		}
9689 	}
9690 	lck_ticket_unlock(&vm_allocation_sites_lock);
9691 
9692 	return 0;
9693 }
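/*
 * Worked example (annotation, not original source): suppose a named site S
 * reports total == 10 pages and carries one subtotal of 6 pages against tag
 * T, and T itself currently reports size == 8. The loop above computes
 * take = min(6, 8, 10) = 6, so T drops to size 2, mapped falls to 4, and
 * mapcost rises to 6; S ends up reporting info[S].size = mapcost = 6 while
 * keeping mapped = 10. The subtotal is thus re-attributed from the
 * allocating tag to the named site without double counting.
 */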
9694 
9695 uint32_t
9696 vm_page_diagnose_estimate(void)
9697 {
9698 	vm_allocation_site_t * site;
9699 	uint32_t               count = zone_view_count;
9700 	uint32_t               idx;
9701 
9702 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9703 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9704 		site = vm_allocation_sites[idx];
9705 		if (!site) {
9706 			continue;
9707 		}
9708 		count++;
9709 #if VM_TAG_SIZECLASSES
9710 		if (vm_allocation_zone_totals) {
9711 			vm_allocation_zone_total_t * zone;
9712 			zone = vm_allocation_zone_totals[idx];
9713 			if (!zone) {
9714 				continue;
9715 			}
9716 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9717 				count += (zone[zidx].vazt_peak != 0);
9718 			}
9719 		}
9720 #endif
9721 	}
9722 	lck_ticket_unlock(&vm_allocation_sites_lock);
9723 
9724 	/* some slop for new tags created */
9725 	count += 8;
9726 	count += VM_KERN_COUNTER_COUNT;
9727 
9728 	return count;
9729 }
9730 
9731 static void
9732 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9733     bool percpu)
9734 {
9735 	zpercpu_foreach(zs, zstats) {
9736 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9737 	}
9738 	if (percpu) {
9739 		info->size *= zpercpu_count();
9740 	}
9741 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9742 }
9743 
9744 static void
9745 vm_page_add_info(
9746 	mach_memory_info_t     *info,
9747 	zone_stats_t            stats,
9748 	bool                    per_cpu,
9749 	const char             *parent_heap_name,
9750 	const char             *parent_zone_name,
9751 	const char             *view_name)
9752 {
9753 	vm_page_diagnose_zone_stats(info, stats, per_cpu);
9754 	snprintf(info->name, sizeof(info->name),
9755 	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
9756 }
9757 
9758 static void
9759 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
9760 {
9761 	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
9762 	    z->z_name, "raw");
9763 }
9764 
9765 static void
9766 vm_page_add_view(
9767 	mach_memory_info_t     *info,
9768 	zone_stats_t            stats,
9769 	const char             *parent_heap_name,
9770 	const char             *parent_zone_name,
9771 	const char             *view_name)
9772 {
9773 	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
9774 	    view_name);
9775 }
9776 
9777 static uint32_t
9778 vm_page_diagnose_heap_views(
9779 	mach_memory_info_t     *info,
9780 	kalloc_heap_t           kh,
9781 	const char             *parent_heap_name,
9782 	const char             *parent_zone_name)
9783 {
9784 	uint32_t i = 0;
9785 
9786 	while (kh) {
9787 		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
9788 		    parent_zone_name, kh->kh_name);
9789 		kh = kh->kh_views;
9790 		i++;
9791 	}
9792 	return i;
9793 }
9794 
9795 static uint32_t
9796 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
9797 {
9798 	uint32_t i = 0;
9799 
9800 	for (; i < KHEAP_NUM_ZONES; i++) {
9801 		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
9802 	}
9803 
9804 	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
9805 	    NULL);
9806 	return i;
9807 }
9808 
9809 static int
9810 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
9811 {
9812 	uint32_t idx = 0;
9813 	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
9814 	    "", "raw");
9815 	idx++;
9816 
9817 	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
9818 		struct kheap_info heap = kalloc_type_heap_array[i];
9819 		char heap_num_tmp[MAX_ZONE_NAME] = "";
9820 		const char *heap_num;
9821 
9822 		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
9823 		heap_num = &heap_num_tmp[0];
9824 
9825 		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
9826 		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
9827 			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
9828 				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
9829 				    heap_num, ktv->kt_name);
9830 				idx++;
9831 			}
9832 		}
9833 
9834 		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
9835 		    KHEAP_KT_VAR->kh_name, heap_num);
9836 	}
9837 
9838 	return idx;
9839 }
9840 
9841 kern_return_t
9842 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
9843 {
9844 	uint64_t                 wired_size;
9845 	uint64_t                 wired_managed_size;
9846 	uint64_t                 wired_reserved_size;
9847 	boolean_t                iterate;
9848 	mach_memory_info_t     * counts;
9849 	uint32_t                 i;
9850 
9851 	bzero(info, num_info * sizeof(mach_memory_info_t));
9852 
9853 	if (!vm_page_wire_count_initial) {
9854 		return KERN_ABORTED;
9855 	}
9856 
9857 #if !XNU_TARGET_OS_OSX
9858 	wired_size          = ptoa_64(vm_page_wire_count);
9859 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
9860 #else /* !XNU_TARGET_OS_OSX */
9861 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
9862 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
9863 #endif /* !XNU_TARGET_OS_OSX */
9864 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
9865 
9866 	wired_size += booter_size;
9867 
9868 	assert(num_info >= VM_KERN_COUNTER_COUNT);
9869 	num_info -= VM_KERN_COUNTER_COUNT;
9870 	counts = &info[num_info];
9871 
9872 #define SET_COUNT(xcount, xsize, xflags)                        \
9873     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
9874     counts[xcount].site  = (xcount);                            \
9875     counts[xcount].size  = (xsize);                                 \
9876     counts[xcount].mapped  = (xsize);                           \
9877     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
9878 
9879 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
9880 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
9881 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
9882 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
9883 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
9884 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
9885 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
9886 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
9887 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
9888 
9889 #define SET_MAP(xcount, xsize, xfree, xlargest) \
9890     counts[xcount].site    = (xcount);                  \
9891     counts[xcount].size    = (xsize);                   \
9892     counts[xcount].mapped  = (xsize);                   \
9893     counts[xcount].free    = (xfree);                   \
9894     counts[xcount].largest = (xlargest);                \
9895     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
9896 
9897 	vm_map_size_t map_size, map_free, map_largest;
9898 
9899 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
9900 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
9901 
9902 	zone_map_sizes(&map_size, &map_free, &map_largest);
9903 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
9904 
9905 	assert(num_info >= zone_view_count);
9906 	num_info -= zone_view_count;
9907 	counts = &info[num_info];
9908 	i = 0;
9909 
9910 	if (!redact_info) {
9911 		if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
9912 			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
9913 		}
9914 		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
9915 			i += vm_page_diagnose_kt_heaps(counts + i);
9916 		}
9917 		assert(i <= zone_view_count);
9918 
9919 		zone_index_foreach(zidx) {
9920 			zone_t z = &zone_array[zidx];
9921 			zone_security_flags_t zsflags = zone_security_array[zidx];
9922 			zone_view_t zv = z->z_views;
9923 
9924 			if (zv == NULL) {
9925 				continue;
9926 			}
9927 
9928 			zone_stats_t zv_stats_head = z->z_stats;
9929 			bool has_raw_view = false;
9930 
9931 			for (; zv; zv = zv->zv_next) {
9932 				/*
9933 				 * kalloc_types that allocate from the same zone are linked
9934 				 * as views. Only print the ones that have their own stats.
9935 				 */
9936 				if (zv->zv_stats == zv_stats_head) {
9937 					continue;
9938 				}
9939 				has_raw_view = true;
9940 				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
9941 				    z->z_percpu);
9942 				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
9943 				    zone_heap_name(z), z->z_name, zv->zv_name);
9944 				i++;
9945 				assert(i <= zone_view_count);
9946 			}
9947 
9948 			/*
9949 			 * Print raw views for non-kalloc or kalloc_type zones
9950 			 */
9951 			bool kalloc_type = zsflags.z_kalloc_type;
9952 			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
9953 			    (kalloc_type && has_raw_view)) {
9954 				vm_page_diagnose_zone(counts + i, z);
9955 				i++;
9956 				assert(i <= zone_view_count);
9957 			}
9958 		}
9959 	}
9960 
9961 	iterate = !VM_TAG_ACTIVE_UPDATE;
9962 	if (iterate) {
9963 		enum                       { kMaxKernelDepth = 1 };
9964 		vm_map_t                     maps[kMaxKernelDepth];
9965 		vm_map_entry_t               entries[kMaxKernelDepth];
9966 		vm_map_t                     map;
9967 		vm_map_entry_t               entry;
9968 		vm_object_offset_t           offset;
9969 		vm_page_t                    page;
9970 		int                          stackIdx, count;
9971 
9972 #if !VM_TAG_ACTIVE_UPDATE
9973 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
9974 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9975 
9976 		map = kernel_map;
9977 		stackIdx = 0;
9978 		while (map) {
9979 			vm_map_lock(map);
9980 			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
9981 				if (entry->is_sub_map) {
9982 					assert(stackIdx < kMaxKernelDepth);
9983 					maps[stackIdx] = map;
9984 					entries[stackIdx] = entry;
9985 					stackIdx++;
9986 					map = VME_SUBMAP(entry);
9987 					entry = NULL;
9988 					break;
9989 				}
9990 				if (is_kernel_object(VME_OBJECT(entry))) {
9991 					count = 0;
9992 					vm_object_lock(VME_OBJECT(entry));
9993 					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
9994 						page = vm_page_lookup(VME_OBJECT(entry), offset);
9995 						if (page && VM_PAGE_WIRED(page)) {
9996 							count++;
9997 						}
9998 					}
9999 					vm_object_unlock(VME_OBJECT(entry));
10000 
10001 					if (count) {
10002 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
10003 						assert(VME_ALIAS(entry) < num_info);
10004 						info[VME_ALIAS(entry)].size += ptoa_64(count);
10005 					}
10006 				}
10007 				while (map && (entry == vm_map_last_entry(map))) {
10008 					vm_map_unlock(map);
10009 					if (!stackIdx) {
10010 						map = NULL;
10011 					} else {
10012 						--stackIdx;
10013 						map = maps[stackIdx];
10014 						entry = entries[stackIdx];
10015 					}
10016 				}
10017 			}
10018 		}
10019 	}
10020 
10021 	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);
10022 
10023 	return KERN_SUCCESS;
10024 }
10025 
10026 #if DEBUG || DEVELOPMENT
10027 
10028 kern_return_t
10029 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
10030 {
10031 	kern_return_t  ret;
10032 	vm_size_t      zsize;
10033 	vm_map_t       map;
10034 	vm_map_entry_t entry;
10035 
10036 	zsize = zone_element_info((void *) addr, tag);
10037 	if (zsize) {
10038 		*zone_size = *size = zsize;
10039 		return KERN_SUCCESS;
10040 	}
10041 
10042 	*zone_size = 0;
10043 	ret = KERN_INVALID_ADDRESS;
10044 	for (map = kernel_map; map;) {
10045 		vm_map_lock(map);
10046 		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
10047 			break;
10048 		}
10049 		if (entry->is_sub_map) {
10050 			if (map != kernel_map) {
10051 				break;
10052 			}
10053 			map = VME_SUBMAP(entry);
10054 			continue;
10055 		}
10056 		if (entry->vme_start != addr) {
10057 			break;
10058 		}
10059 		*tag = (vm_tag_t)VME_ALIAS(entry);
10060 		*size = (entry->vme_end - addr);
10061 		ret = KERN_SUCCESS;
10062 		break;
10063 	}
10064 	if (map != kernel_map) {
10065 		vm_map_unlock(map);
10066 	}
10067 	vm_map_unlock(kernel_map);
10068 
10069 	return ret;
10070 }
10071 
10072 #endif /* DEBUG || DEVELOPMENT */
10073 
10074 uint32_t
10075 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
10076 {
10077 	vm_allocation_site_t * site;
10078 	uint32_t               kmodId;
10079 
10080 	kmodId = 0;
10081 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10082 	if ((site = vm_allocation_sites[tag])) {
10083 		if (VM_TAG_KMOD & site->flags) {
10084 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
10085 		}
10086 	}
10087 	lck_ticket_unlock(&vm_allocation_sites_lock);
10088 
10089 	return kmodId;
10090 }
10091 
10092 
10093 #if CONFIG_SECLUDED_MEMORY
10094 /*
10095  * Note that there's no locking around other accesses to vm_page_secluded_target.
10096  * That should be OK, since these are the only places where it can be changed after
10097  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
10098  * but will eventually get the correct value. This brief mismatch is OK as pageout
10099  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
10100  * over time.
10101  */
10102 unsigned int vm_page_secluded_suppress_cnt = 0;
10103 unsigned int vm_page_secluded_save_target;
10104 
10105 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
10106 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
10107 
10108 void
10109 start_secluded_suppression(task_t task)
10110 {
10111 	if (task->task_suppressed_secluded) {
10112 		return;
10113 	}
10114 	lck_spin_lock(&secluded_suppress_slock);
10115 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
10116 		task->task_suppressed_secluded = TRUE;
10117 		vm_page_secluded_save_target = vm_page_secluded_target;
10118 		vm_page_secluded_target = 0;
10119 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10120 	}
10121 	lck_spin_unlock(&secluded_suppress_slock);
10122 }
10123 
10124 void
10125 stop_secluded_suppression(task_t task)
10126 {
10127 	lck_spin_lock(&secluded_suppress_slock);
10128 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
10129 		task->task_suppressed_secluded = FALSE;
10130 		vm_page_secluded_target = vm_page_secluded_save_target;
10131 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10132 	}
10133 	lck_spin_unlock(&secluded_suppress_slock);
10134 }
10135 
10136 #endif /* CONFIG_SECLUDED_MEMORY */
10137 
10138 /*
10139  * Move the list of retired pages from the vm_page_queue_retired to
10140  * their final resting place on retired_pages_object.
10141  */
10142 void
10143 vm_retire_boot_pages(void)
10144 {
10145 }
10146 
10147 /*
10148  * This holds the reported physical address if an ECC error leads to a panic.
10149  * SMC will store it in PMU SRAM under the 'sECC' key.
10150  */
10151 uint64_t ecc_panic_physical_address = 0;
10152 
10153