xref: /xnu-8019.80.24/osfmk/vm/vm_resident.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68 
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_init.h>
84 #include <vm/vm_map.h>
85 #include <vm/vm_page.h>
86 #include <vm/vm_pageout.h>
87 #include <vm/vm_kern.h>                 /* kernel_memory_allocate() */
88 #include <kern/misc_protos.h>
89 #include <mach_debug/zone_info.h>
90 #include <vm/cpm.h>
91 #include <pexpert/pexpert.h>
92 #include <san/kasan.h>
93 
94 #include <vm/vm_protos.h>
95 #include <vm/memory_object.h>
96 #include <vm/vm_purgeable_internal.h>
97 #include <vm/vm_compressor.h>
98 #if defined (__x86_64__)
99 #include <i386/misc_protos.h>
100 #endif
101 
102 #if CONFIG_PHANTOM_CACHE
103 #include <vm/vm_phantom_cache.h>
104 #endif
105 
106 #if HIBERNATION
107 #include <IOKit/IOHibernatePrivate.h>
108 #include <machine/pal_hibernate.h>
109 #endif /* HIBERNATION */
110 
111 #include <sys/kdebug.h>
112 
113 #if defined(HAS_APPLE_PAC)
114 #include <ptrauth.h>
115 #endif
116 #if defined(__arm64__)
117 #include <arm/cpu_internal.h>
118 #endif /* defined(__arm64__) */
119 
120 #if MACH_ASSERT
121 
122 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
123 
124 #else /* MACH_ASSERT */
125 
126 #define ASSERT_PMAP_FREE(mem) /* nothing */
127 
128 #endif /* MACH_ASSERT */
129 
130 extern boolean_t vm_pageout_running;
131 extern thread_t  vm_pageout_scan_thread;
132 extern boolean_t vps_dynamic_priority_enabled;
133 
134 char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
135 char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
136 char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
137 char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
138 
139 #if CONFIG_SECLUDED_MEMORY
140 struct vm_page_secluded_data vm_page_secluded;
141 #endif /* CONFIG_SECLUDED_MEMORY */
142 
143 #if DEVELOPMENT || DEBUG
144 extern struct memory_object_pager_ops shared_region_pager_ops;
145 unsigned int shared_region_pagers_resident_count = 0;
146 unsigned int shared_region_pagers_resident_peak = 0;
147 #endif /* DEVELOPMENT || DEBUG */
148 
149 int             PERCPU_DATA(start_color);
150 vm_page_t       PERCPU_DATA(free_pages);
151 boolean_t       hibernate_cleaning_in_progress = FALSE;
152 boolean_t       vm_page_free_verify = TRUE;
153 
154 uint32_t        vm_lopage_free_count = 0;
155 uint32_t        vm_lopage_free_limit = 0;
156 uint32_t        vm_lopage_lowater    = 0;
157 boolean_t       vm_lopage_refill = FALSE;
158 boolean_t       vm_lopage_needed = FALSE;
159 
160 lck_mtx_ext_t   vm_page_queue_lock_ext;
161 lck_mtx_ext_t   vm_page_queue_free_lock_ext;
162 lck_mtx_ext_t   vm_purgeable_queue_lock_ext;
163 
164 int             speculative_age_index = 0;
165 int             speculative_steal_index = 0;
166 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
167 
168 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
169                                                           * Updated and checked behind the vm_page_queues_lock. */
170 
171 static void             vm_page_free_prepare(vm_page_t  page);
172 static vm_page_t        vm_page_grab_fictitious_common(ppnum_t, boolean_t);
173 
174 static void vm_tag_init(void);
175 
176 /* for debugging purposes */
177 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
178     VM_PAGE_PACKED_FROM_ARRAY;
179 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
180     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
181 
182 /*
183  *	Associated with each page of user-allocatable memory is a
184  *	page structure.
185  */
186 
187 /*
188  *	These variables record the values returned by vm_page_bootstrap,
189  *	for debugging purposes.  The implementation of pmap_steal_memory
190  *	and pmap_startup here also uses them internally.
191  */
192 
193 vm_offset_t virtual_space_start;
194 vm_offset_t virtual_space_end;
195 uint32_t        vm_page_pages;
196 
197 /*
198  *	The vm_page_lookup() routine, which provides for fast
199  *	(virtual memory object, offset) to page lookup, employs
200  *	the following hash table.  The vm_page_{insert,remove}
201  *	routines install and remove associations in the table.
202  *	[This table is often called the virtual-to-physical,
203  *	or VP, table.]
204  */
205 typedef struct {
206 	vm_page_packed_t page_list;
207 #if     MACH_PAGE_HASH_STATS
208 	int             cur_count;              /* current count */
209 	int             hi_count;               /* high water mark */
210 #endif /* MACH_PAGE_HASH_STATS */
211 } vm_page_bucket_t;
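
/*
 * Illustrative sketch (not from the original source): a lookup hashes
 * (object, offset) to a bucket and walks the packed chain, roughly:
 *
 *	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 *	for (mem = (vm_page_t)VM_PAGE_UNPACK_PTR(bucket->page_list);
 *	    mem != VM_PAGE_NULL;
 *	    mem = (vm_page_t)VM_PAGE_UNPACK_PTR(mem->vmp_next_m)) {
 *		if (VM_PAGE_OBJECT(mem) == object &&
 *		    mem->vmp_offset == offset) {
 *			break;	/* found the resident page */
 *		}
 *	}
 *
 * The real vm_page_lookup() also takes the per-bucket spin lock from
 * vm_page_bucket_locks (one lock covers BUCKETS_PER_LOCK buckets).
 */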
212 
213 
214 #define BUCKETS_PER_LOCK        16
215 
216 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
217 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
218 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
219 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
220 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
221 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
222 
223 #ifndef VM_TAG_ACTIVE_UPDATE
224 #error VM_TAG_ACTIVE_UPDATE
225 #endif
226 #ifndef VM_TAG_SIZECLASSES
227 #error VM_TAG_SIZECLASSES
228 #endif
229 
230 /* for debugging */
231 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
232 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
233 
234 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
235 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
236 #if VM_TAG_SIZECLASSES
237 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
238 #endif /* VM_TAG_SIZECLASSES */
239 
240 vm_tag_t vm_allocation_tag_highest;
241 
242 #if VM_PAGE_BUCKETS_CHECK
243 boolean_t vm_page_buckets_check_ready = FALSE;
244 #if VM_PAGE_FAKE_BUCKETS
245 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
246 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
247 #endif /* VM_PAGE_FAKE_BUCKETS */
248 #endif /* VM_PAGE_BUCKETS_CHECK */
249 
250 #if     MACH_PAGE_HASH_STATS
251 /* This routine is only for debug.  It is intended to be called by
252  * hand by a developer using a kernel debugger.  This routine prints
253  * out vm_page_hash table statistics to the kernel debug console.
254  */
255 void
256 hash_debug(void)
257 {
258 	int     i;
259 	int     numbuckets = 0;
260 	int     highsum = 0;
261 	int     maxdepth = 0;
262 
263 	for (i = 0; i < vm_page_bucket_count; i++) {
264 		if (vm_page_buckets[i].hi_count) {
265 			numbuckets++;
266 			highsum += vm_page_buckets[i].hi_count;
267 			if (vm_page_buckets[i].hi_count > maxdepth) {
268 				maxdepth = vm_page_buckets[i].hi_count;
269 			}
270 		}
271 	}
272 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
273 	printf("Number used buckets:     %d = %d%%\n",
274 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
275 	printf("Number unused buckets:   %d = %d%%\n",
276 	    vm_page_bucket_count - numbuckets,
277 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
278 	printf("Sum of bucket max depth: %d\n", highsum);
279 	printf("Average bucket depth:    %d.%2d\n",
280 	    highsum / vm_page_bucket_count,
281 	    highsum % vm_page_bucket_count);
282 	printf("Maximum bucket depth:    %d\n", maxdepth);
283 }
284 #endif /* MACH_PAGE_HASH_STATS */
285 
286 /*
287  *	The virtual page size is currently implemented as a runtime
288  *	variable, but is constant once initialized using vm_set_page_size.
289  *	This initialization must be done in the machine-dependent
290  *	bootstrap sequence, before calling other machine-independent
291  *	initializations.
292  *
293  *	All references to the virtual page size outside this
294  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
295  *	constants.
296  */
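
/*
 * For example (illustrative, assuming a 4K-page configuration):
 *	PAGE_SIZE  == 4096, PAGE_MASK == 0xfff, PAGE_SHIFT == 12
 * so trunc_page(addr) == (addr & ~PAGE_MASK) and
 * round_page(addr) == ((addr + PAGE_MASK) & ~PAGE_MASK).
 */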
297 #if defined(__arm__) || defined(__arm64__)
298 vm_size_t       page_size;
299 vm_size_t       page_mask;
300 int             page_shift;
301 #else
302 vm_size_t       page_size  = PAGE_SIZE;
303 vm_size_t       page_mask  = PAGE_MASK;
304 int             page_shift = PAGE_SHIFT;
305 #endif
306 
307 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
308 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
309 vm_page_t                          vm_page_array_ending_addr;
310 
311 unsigned int    vm_pages_count = 0;
312 
313 /*
314  *	Resident pages that represent real memory
315  *	are allocated from a set of free lists,
316  *	one per color.
317  */
318 unsigned int    vm_colors;
319 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
320 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
321 unsigned int    vm_free_magazine_refill_limit = 0;
322 
323 
324 struct vm_page_queue_free_head {
325 	vm_page_queue_head_t    qhead;
326 } VM_PAGE_PACKED_ALIGNED;
327 
328 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
329 
330 
331 unsigned int    vm_page_free_wanted;
332 unsigned int    vm_page_free_wanted_privileged;
333 #if CONFIG_SECLUDED_MEMORY
334 unsigned int    vm_page_free_wanted_secluded;
335 #endif /* CONFIG_SECLUDED_MEMORY */
336 unsigned int    vm_page_free_count;
337 
338 /*
339  *	Occasionally, the virtual memory system uses
340  *	resident page structures that do not refer to
341  *	real pages, for example to leave a page with
342  *	important state information in the VP table.
343  *
344  *	These page structures are allocated the way
345  *	most other kernel structures are.
346  */
347 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
348 vm_locks_array_t vm_page_locks;
349 
350 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
351 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
352 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
353 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
354 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
355 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
356 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
357 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
358 LCK_SPIN_DECLARE_ATTR(vm_allocation_sites_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
359 
360 unsigned int    vm_page_local_q_soft_limit = 250;
361 unsigned int    vm_page_local_q_hard_limit = 500;
362 struct vpl     *__zpercpu vm_page_local_q;
363 
364 /* N.B. Guard and fictitious pages must not
365  * be assigned a zero phys_page value.
366  */
367 /*
368  *	Fictitious pages don't have a physical address,
369  *	but we must initialize phys_page to something.
370  *	For debugging, this should be a strange value
371  *	that the pmap module can recognize in assertions.
372  */
373 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
374 
375 /*
376  *	Guard pages are not accessible so they don't
377  *      need a physical address, but we need to enter
378  *	one in the pmap.
379  *	Let's make it recognizable and make sure that
380  *	we don't use a real physical page with that
381  *	physical address.
382  */
383 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
384 
385 /*
386  *	Resident page structures are also chained on
387  *	queues that are used by the page replacement
388  *	system (pageout daemon).  These queues are
389  *	defined here, but are shared by the pageout
390  *	module.  The inactive queue is broken into
391  *	file-backed and anonymous queues for convenience, as the
392  *	pageout daemon often assigns a higher
393  *	importance to anonymous pages (it is less likely to pick them).
394  */
395 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
396 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
397 #if CONFIG_SECLUDED_MEMORY
398 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
399 #endif /* CONFIG_SECLUDED_MEMORY */
400 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
401 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
402 
403 queue_head_t    vm_objects_wired;
404 
405 void vm_update_darkwake_mode(boolean_t);
406 
407 #if CONFIG_BACKGROUND_QUEUE
408 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
409 uint32_t        vm_page_background_target;
410 uint32_t        vm_page_background_target_snapshot;
411 uint32_t        vm_page_background_count;
412 uint64_t        vm_page_background_promoted_count;
413 
414 uint32_t        vm_page_background_internal_count;
415 uint32_t        vm_page_background_external_count;
416 
417 uint32_t        vm_page_background_mode;
418 uint32_t        vm_page_background_exclude_external;
419 #endif
420 
421 unsigned int    vm_page_active_count;
422 unsigned int    vm_page_inactive_count;
423 unsigned int    vm_page_kernelcache_count;
424 #if CONFIG_SECLUDED_MEMORY
425 unsigned int    vm_page_secluded_count;
426 unsigned int    vm_page_secluded_count_free;
427 unsigned int    vm_page_secluded_count_inuse;
428 unsigned int    vm_page_secluded_count_over_target;
429 #endif /* CONFIG_SECLUDED_MEMORY */
430 unsigned int    vm_page_anonymous_count;
431 unsigned int    vm_page_throttled_count;
432 unsigned int    vm_page_speculative_count;
433 
434 unsigned int    vm_page_wire_count;
435 unsigned int    vm_page_wire_count_on_boot = 0;
436 unsigned int    vm_page_stolen_count = 0;
437 unsigned int    vm_page_wire_count_initial;
438 unsigned int    vm_page_gobble_count = 0;
439 unsigned int    vm_page_kern_lpage_count = 0;
440 
441 uint64_t        booter_size;  /* external so it can be found in core dumps */
442 
443 #define VM_PAGE_WIRE_COUNT_WARNING      0
444 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
445 
446 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
447 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
448 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
449 
450 unsigned int    vm_page_xpmapped_external_count = 0;
451 unsigned int    vm_page_external_count = 0;
452 unsigned int    vm_page_internal_count = 0;
453 unsigned int    vm_page_pageable_external_count = 0;
454 unsigned int    vm_page_pageable_internal_count = 0;
455 
456 #if DEVELOPMENT || DEBUG
457 unsigned int    vm_page_speculative_recreated = 0;
458 unsigned int    vm_page_speculative_created = 0;
459 unsigned int    vm_page_speculative_used = 0;
460 #endif
461 
462 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
463 
464 unsigned int    vm_page_cleaned_count = 0;
465 
466 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
467 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
468 
469 
470 /*
471  *	Several page replacement parameters are also
472  *	shared with this module, so that page allocation
473  *	(done here in vm_page_alloc) can trigger the
474  *	pageout daemon.
475  */
476 unsigned int    vm_page_free_target = 0;
477 unsigned int    vm_page_free_min = 0;
478 unsigned int    vm_page_throttle_limit = 0;
479 unsigned int    vm_page_inactive_target = 0;
480 #if CONFIG_SECLUDED_MEMORY
481 unsigned int    vm_page_secluded_target = 0;
482 #endif /* CONFIG_SECLUDED_MEMORY */
483 unsigned int    vm_page_anonymous_min = 0;
484 unsigned int    vm_page_free_reserved = 0;
485 
486 
487 /*
488  *	The VM system has a couple of heuristics for deciding
489  *	that pages are "uninteresting" and should be placed
490  *	on the inactive queue as likely candidates for replacement.
491  *	These variables let the heuristics be controlled at run-time
492  *	to make experimentation easier.
493  */
494 
495 boolean_t vm_page_deactivate_hint = TRUE;
496 
497 struct vm_page_stats_reusable vm_page_stats_reusable;
498 
499 /*
500  *	vm_set_page_size:
501  *
502  *	Sets the page size, perhaps based upon the memory
503  *	size.  Must be called before any use of page-size
504  *	dependent functions.
505  *
506  *	Sets page_shift and page_mask from page_size.
507  */
508 void
509 vm_set_page_size(void)
510 {
511 	page_size  = PAGE_SIZE;
512 	page_mask  = PAGE_MASK;
513 	page_shift = PAGE_SHIFT;
514 
515 	if ((page_mask & page_size) != 0) {
516 		panic("vm_set_page_size: page size not a power of two");
517 	}
518 
519 	for (page_shift = 0;; page_shift++) {
520 		if ((1U << page_shift) == page_size) {
521 			break;
522 		}
523 	}
524 }
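
/*
 * Worked example (illustrative): with PAGE_SIZE == 4096, the power-of-two
 * check above passes (4096 & 4095 == 0) and the loop terminates with
 * page_shift == 12, since (1U << 12) == 4096.
 */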
525 
526 #if defined (__x86_64__)
527 
528 #define MAX_CLUMP_SIZE      16
529 #define DEFAULT_CLUMP_SIZE  4
530 
531 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
532 
533 #if DEVELOPMENT || DEBUG
534 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
535 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
536 
537 static inline void
538 vm_clump_update_stats(unsigned int c)
539 {
540 	assert(c <= vm_clump_size);
541 	if (c > 0 && c <= vm_clump_size) {
542 		vm_clump_stats[c] += c;
543 	}
544 	vm_clump_allocs += c;
545 }
546 #endif  /*  if DEVELOPMENT || DEBUG */
547 
548 /* Called once to setup the VM clump knobs */
549 static void
550 vm_page_setup_clump(void)
551 {
552 	unsigned int override, n;
553 
554 	vm_clump_size = DEFAULT_CLUMP_SIZE;
555 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
556 		vm_clump_size = override;
557 	}
558 
559 	if (vm_clump_size > MAX_CLUMP_SIZE) {
560 		panic("vm_page_setup_clump:: clump_size is too large!");
561 	}
562 	if (vm_clump_size < 1) {
563 		panic("vm_page_setup_clump:: clump_size must be >= 1");
564 	}
565 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
566 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
567 	}
568 
569 	vm_clump_promote_threshold = vm_clump_size;
570 	vm_clump_mask = vm_clump_size - 1;
571 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
572 		;
573 	}
574 
575 #if DEVELOPMENT || DEBUG
576 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
577 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
578 #endif  /*  if DEVELOPMENT || DEBUG */
579 }
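
/*
 * Worked example (illustrative): with the default clump_size of 4, the
 * setup above yields vm_clump_mask == 0x3 and vm_clump_shift == 2, so
 * physical page numbers that differ only in their low 2 bits (e.g.
 * ppns 8..11) belong to the same clump.
 */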
580 
581 #endif  /* #if defined (__x86_64__) */
582 
583 #define COLOR_GROUPS_TO_STEAL   4
584 
585 /* Called once during startup, once the cache geometry is known.
586  */
587 static void
588 vm_page_set_colors(void)
589 {
590 	unsigned int    n, override;
591 
592 #if defined (__x86_64__)
593 	/* adjust #colors because we need to color outside the clump boundary */
594 	vm_cache_geometry_colors >>= vm_clump_shift;
595 #endif
596 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
597 		n = override;
598 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
599 		n = vm_cache_geometry_colors;
600 	} else {
601 		n = DEFAULT_COLORS;                             /* use default if all else fails */
602 	}
603 	if (n == 0) {
604 		n = 1;
605 	}
606 	if (n > MAX_COLORS) {
607 		n = MAX_COLORS;
608 	}
609 
610 	/* the count must be a power of 2  */
611 	if ((n & (n - 1)) != 0) {
612 		n = DEFAULT_COLORS;                             /* use default if all else fails */
613 	}
614 	vm_colors = n;
615 	vm_color_mask = n - 1;
616 
617 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
618 
619 #if defined (__x86_64__)
620 	/* adjust for reduction in colors due to clumping and multiple cores */
621 	if (real_ncpus) {
622 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
623 	}
624 #endif
625 }
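
/*
 * Illustrative sketch (not from the original source): free pages are
 * binned by color, and a page's color is derived from its physical
 * page number, roughly:
 *
 *	color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
 *	queue = &vm_page_queue_free[color].qhead;
 *
 * (On x86 the physical page number is first shifted right by
 * vm_clump_shift, so whole clumps share a color.)
 */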
626 
627 /*
628  * During single-threaded early boot we don't initialize all pages.
629  * This avoids some delay during boot. The remaining pages are initialized
630  * and added to the free list as needed, or, once we are multithreaded,
631  * by what becomes the pageout thread.
632  */
633 static boolean_t fill = FALSE;
634 static unsigned int fillval;
635 uint_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
636 ppnum_t delay_above_pnum = PPNUM_MAX;
637 
638 /*
639  * On x86, the first 8 GB initializes quickly and gives us lots of lowmem plus memory above it to start off with.
640  * If ARM ever uses delayed page initialization, this value may need to be quite different.
641  */
642 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
643 
644 /*
645  * When we have to dip into more delayed pages due to low memory, free up
646  * a large chunk to get things back to normal. This avoids contention in the
647  * delayed code, which otherwise allocates page by page.
648  */
649 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
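
/*
 * For scale (illustrative): with 4K pages, VM_DELAY_PAGE_CHUNK is
 * 1 GB / 4096 == 262144 pages, and DEFAULT_DELAY_ABOVE_PHYS_GB == 8
 * corresponds to delay_above_pnum == 8 * (1 GB / 4096) == 0x200000.
 */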
650 
651 /*
652  * Get and initialize the next delayed page.
653  */
654 static vm_page_t
655 vm_get_delayed_page(int grab_options)
656 {
657 	vm_page_t p;
658 	ppnum_t   pnum;
659 
660 	/*
661 	 * Get a new page if we have one.
662 	 */
663 	lck_mtx_lock(&vm_page_queue_free_lock);
664 	if (vm_delayed_count == 0) {
665 		lck_mtx_unlock(&vm_page_queue_free_lock);
666 		return NULL;
667 	}
668 	if (!pmap_next_page(&pnum)) {
669 		vm_delayed_count = 0;
670 		lck_mtx_unlock(&vm_page_queue_free_lock);
671 		return NULL;
672 	}
673 
674 	assert(vm_delayed_count > 0);
675 	--vm_delayed_count;
676 
677 #if defined(__x86_64__)
678 	/* x86 clump code requires increasing phys_page values in vm_pages[] */
679 	if (vm_pages_count > 0) {
680 		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
681 	}
682 #endif
683 	p = &vm_pages[vm_pages_count];
684 	assert(p < vm_page_array_ending_addr);
685 	vm_page_init(p, pnum, FALSE);
686 	++vm_pages_count;
687 	++vm_page_pages;
688 	lck_mtx_unlock(&vm_page_queue_free_lock);
689 
690 	/*
691 	 * These pages were initially counted as wired, undo that now.
692 	 */
693 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
694 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
695 	} else {
696 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
697 		vm_page_lockspin_queues();
698 	}
699 	--vm_page_wire_count;
700 	--vm_page_wire_count_initial;
701 	if (vm_page_wire_count_on_boot != 0) {
702 		--vm_page_wire_count_on_boot;
703 	}
704 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
705 		vm_page_unlock_queues();
706 	}
707 
708 
709 	if (fill) {
710 		fillPage(pnum, fillval);
711 	}
712 	return p;
713 }
714 
715 static void vm_page_module_init_delayed(void);
716 
717 /*
718  * Free all remaining delayed pages to the free lists.
719  */
720 void
721 vm_free_delayed_pages(void)
722 {
723 	vm_page_t   p;
724 	vm_page_t   list = NULL;
725 	uint_t      cnt = 0;
726 	vm_offset_t start_free_va;
727 	int64_t     free_size;
728 
729 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
730 		if (vm_himemory_mode) {
731 			vm_page_release(p, FALSE);
732 		} else {
733 			p->vmp_snext = list;
734 			list = p;
735 		}
736 		++cnt;
737 	}
738 
739 	/*
740 	 * Free the pages in reverse order if not in himemory mode,
741 	 * so the low memory pages end up first on the free lists (LIFO).
742 	 */
743 	while (list != NULL) {
744 		p = list;
745 		list = p->vmp_snext;
746 		p->vmp_snext = NULL;
747 		vm_page_release(p, FALSE);
748 	}
749 #if DEVELOPMENT || DEBUG
750 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
751 #endif
752 
753 	/*
754 	 * Free up any unused full pages at the end of the vm_pages[] array
755 	 */
756 	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
757 
758 #if defined(__x86_64__)
759 	/*
760 	 * Since x86 might have used large pages for vm_pages[], we can't
761 	 * free starting in the middle of a partially used large page.
762 	 */
763 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
764 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
765 	}
766 #endif
767 	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
768 		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
769 		if (free_size > 0) {
770 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
771 			vm_page_array_ending_addr = (void *)start_free_va;
772 
773 			/*
774 			 * Note there's no locking here, as only this thread will ever change this value.
775 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
776 			 */
777 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
778 
779 #if DEVELOPMENT || DEBUG
780 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
781 			    (long)free_size, (long)start_free_va);
782 #endif
783 		}
784 	}
785 
786 
787 	/*
788 	 * now we can create the VM page array zone
789 	 */
790 	vm_page_module_init_delayed();
791 }
792 
793 /*
794  * Try to free up enough delayed pages to satisfy a contiguous memory allocation.
795  */
796 static void
797 vm_free_delayed_pages_contig(
798 	uint_t    npages,
799 	ppnum_t   max_pnum,
800 	ppnum_t   pnum_mask)
801 {
802 	vm_page_t p;
803 	ppnum_t   pnum;
804 	uint_t    cnt = 0;
805 
806 	/*
807 	 * Treat 0 as the absolute max page number.
808 	 */
809 	if (max_pnum == 0) {
810 		max_pnum = PPNUM_MAX;
811 	}
812 
813 	/*
814 	 * Free until we get a properly aligned start page.
815 	 */
816 	for (;;) {
817 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
818 		if (p == NULL) {
819 			return;
820 		}
821 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
822 		vm_page_release(p, FALSE);
823 		if (pnum >= max_pnum) {
824 			return;
825 		}
826 		if ((pnum & pnum_mask) == 0) {
827 			break;
828 		}
829 	}
830 
831 	/*
832 	 * Having a healthy pool of free pages will help performance. We don't
833 	 * want to fall back to the delayed code for every page allocation.
834 	 */
835 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
836 		npages += VM_DELAY_PAGE_CHUNK;
837 	}
838 
839 	/*
840 	 * Now free up the pages
841 	 */
842 	for (cnt = 1; cnt < npages; ++cnt) {
843 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
844 		if (p == NULL) {
845 			return;
846 		}
847 		vm_page_release(p, FALSE);
848 	}
849 }
850 
851 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
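
/*
 * Example values (illustrative): ROUNDUP_NEXTP2(5) == 8 and
 * ROUNDUP_NEXTP2(8) == 8. Note that the expansion uses
 * __builtin_clz(X - 1), which is undefined for X == 1, so the
 * macro is only meaningful for X >= 2.
 */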
852 
853 void
854 vm_page_init_local_q(unsigned int num_cpus)
855 {
856 	struct vpl *t_local_q;
857 
858 	/*
859 	 * no point in this for a uni-processor system
860 	 */
861 	if (num_cpus >= 2) {
862 		ml_cpu_info_t cpu_info;
863 
864 		/*
865 		 * Force the allocation alignment to a cacheline,
866 		 * because the `vpl` struct has a lock and will be taken
867 		 * cross CPU so we want to isolate the rest of the per-CPU
868 		 * data to avoid false sharing due to this lock being taken.
869 		 */
870 
871 		ml_cpu_get_info(&cpu_info);
872 
873 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
874 		    cpu_info.cache_line_size - 1);
875 
876 		zpercpu_foreach(lq, t_local_q) {
877 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
878 			vm_page_queue_init(&lq->vpl_queue);
879 		}
880 
881 		/* make the initialization visible to all cores */
882 		os_atomic_store(&vm_page_local_q, t_local_q, release);
883 	}
884 }
885 
886 /*
887  * vm_init_before_launchd
888  *
889  * This should be called right before launchd is loaded.
890  */
891 void
892 vm_init_before_launchd()
893 {
894 	vm_page_lockspin_queues();
895 	vm_page_wire_count_on_boot = vm_page_wire_count;
896 	vm_page_unlock_queues();
897 }
898 
899 
900 /*
901  *	vm_page_bootstrap:
902  *
903  *	Initializes the resident memory module.
904  *
905  *	Allocates memory for the page cells, and
906  *	for the object/offset-to-page hash table headers.
907  *	Each page cell is initialized and placed on the free list.
908  *	Returns the range of available kernel virtual memory.
909  */
910 __startup_func
911 void
912 vm_page_bootstrap(
913 	vm_offset_t             *startp,
914 	vm_offset_t             *endp)
915 {
916 	unsigned int            i;
917 	unsigned int            log1;
918 	unsigned int            log2;
919 	unsigned int            size;
920 
921 	/*
922 	 *	Initialize the page queues.
923 	 */
924 
925 	lck_mtx_init_ext(&vm_page_queue_free_lock, &vm_page_queue_free_lock_ext, &vm_page_lck_grp_free, &vm_page_lck_attr);
926 	lck_mtx_init_ext(&vm_page_queue_lock, &vm_page_queue_lock_ext, &vm_page_lck_grp_queue, &vm_page_lck_attr);
927 	lck_mtx_init_ext(&vm_purgeable_queue_lock, &vm_purgeable_queue_lock_ext, &vm_page_lck_grp_purge, &vm_page_lck_attr);
928 
929 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
930 		int group;
931 
932 		purgeable_queues[i].token_q_head = 0;
933 		purgeable_queues[i].token_q_tail = 0;
934 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
935 			queue_init(&purgeable_queues[i].objq[group]);
936 		}
937 
938 		purgeable_queues[i].type = i;
939 		purgeable_queues[i].new_pages = 0;
940 #if MACH_ASSERT
941 		purgeable_queues[i].debug_count_tokens = 0;
942 		purgeable_queues[i].debug_count_objects = 0;
943 #endif
944 	}
945
946 	purgeable_nonvolatile_count = 0;
947 	queue_init(&purgeable_nonvolatile_queue);
948 
949 	for (i = 0; i < MAX_COLORS; i++) {
950 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
951 	}
952 
953 	vm_page_queue_init(&vm_lopage_queue_free);
954 	vm_page_queue_init(&vm_page_queue_active);
955 	vm_page_queue_init(&vm_page_queue_inactive);
956 #if CONFIG_SECLUDED_MEMORY
957 	vm_page_queue_init(&vm_page_queue_secluded);
958 #endif /* CONFIG_SECLUDED_MEMORY */
959 	vm_page_queue_init(&vm_page_queue_cleaned);
960 	vm_page_queue_init(&vm_page_queue_throttled);
961 	vm_page_queue_init(&vm_page_queue_anonymous);
962 	queue_init(&vm_objects_wired);
963 
964 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
965 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
966 
967 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
968 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
969 	}
970 #if CONFIG_BACKGROUND_QUEUE
971 	vm_page_queue_init(&vm_page_queue_background);
972 
973 	vm_page_background_count = 0;
974 	vm_page_background_internal_count = 0;
975 	vm_page_background_external_count = 0;
976 	vm_page_background_promoted_count = 0;
977 
978 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
979 
980 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
981 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
982 	}
983 
984 	vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
985 	vm_page_background_exclude_external = 0;
986 
987 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
988 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
989 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
990 
991 	if (vm_page_background_mode > VM_PAGE_BG_LEVEL_1) {
992 		vm_page_background_mode = VM_PAGE_BG_LEVEL_1;
993 	}
994 #endif
995 	vm_page_free_wanted = 0;
996 	vm_page_free_wanted_privileged = 0;
997 #if CONFIG_SECLUDED_MEMORY
998 	vm_page_free_wanted_secluded = 0;
999 #endif /* CONFIG_SECLUDED_MEMORY */
1000 
1001 #if defined (__x86_64__)
1002 	/* this must be called before vm_page_set_colors() */
1003 	vm_page_setup_clump();
1004 #endif
1005 
1006 	vm_page_set_colors();
1007 
1008 	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1009 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1010 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1011 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1012 
1013 	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1014 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1015 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1016 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1017 	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1018 	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1019 	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1020 #if CONFIG_SECLUDED_MEMORY
1021 	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1022 #endif /* CONFIG_SECLUDED_MEMORY */
1023 
1024 	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1025 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1026 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1027 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1028 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1029 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1030 #if CONFIG_SECLUDED_MEMORY
1031 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1032 #endif /* CONFIG_SECLUDED_MEMORY */
1033 
1034 	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1035 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1036 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1037 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1038 	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1039 #if CONFIG_SECLUDED_MEMORY
1040 	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1041 #endif /* CONFIG_SECLUDED_MEMORY */
1042 
1043 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1044 		vm_allocation_sites_static[t].refcount = 2;
1045 		vm_allocation_sites_static[t].tag = t;
1046 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1047 	}
1048 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1049 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1050 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1051 
1052 	/*
1053 	 *	Steal memory for the map and zone subsystems.
1054 	 */
1055 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1056 
1057 	/*
1058 	 *	Allocate (and initialize) the virtual-to-physical
1059 	 *	table hash buckets.
1060 	 *
1061 	 *	The number of buckets should be a power of two to
1062 	 *	get a good hash function.  The following computation
1063 	 *	chooses the first power of two that is greater
1064 	 *	than the number of physical pages in the system.
1065 	 */
1066 
1067 	if (vm_page_bucket_count == 0) {
1068 		unsigned int npages = pmap_free_pages();
1069 
1070 		vm_page_bucket_count = 1;
1071 		while (vm_page_bucket_count < npages) {
1072 			vm_page_bucket_count <<= 1;
1073 		}
1074 	}
1075 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1076 
1077 	vm_page_hash_mask = vm_page_bucket_count - 1;
1078 
1079 	/*
1080 	 *	Calculate object shift value for hashing algorithm:
1081 	 *		O = log2(sizeof(struct vm_object))
1082 	 *		B = log2(vm_page_bucket_count)
1083 	 *	        hash shifts the object left by
1084 	 *		B/2 - O
1085 	 */
1086 	size = vm_page_bucket_count;
1087 	for (log1 = 0; size > 1; log1++) {
1088 		size /= 2;
1089 	}
1090 	size = sizeof(struct vm_object);
1091 	for (log2 = 0; size > 1; log2++) {
1092 		size /= 2;
1093 	}
1094 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1095 
1096 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1097 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1098 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure a unique series */
1099 
1100 	if (vm_page_hash_mask & vm_page_bucket_count) {
1101 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1102 	}
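
	/*
	 * Worked example (illustrative): with vm_page_bucket_count == 2^20
	 * and sizeof(struct vm_object) == 256, log1 == 20 and log2 == 8,
	 * giving vm_page_hash_shift == 20/2 - 8 + 1 == 3 and
	 * vm_page_bucket_hash == (1 << 10) | (1 << 5) | 1 == 0x421.
	 */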
1103 
1104 #if VM_PAGE_BUCKETS_CHECK
1105 #if VM_PAGE_FAKE_BUCKETS
1106 	/*
1107 	 * Allocate a decoy set of page buckets, to detect
1108 	 * any stomping there.
1109 	 */
1110 	vm_page_fake_buckets = (vm_page_bucket_t *)
1111 	    pmap_steal_memory(vm_page_bucket_count *
1112 	    sizeof(vm_page_bucket_t));
1113 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1114 	vm_page_fake_buckets_end =
1115 	    vm_map_round_page((vm_page_fake_buckets_start +
1116 	    (vm_page_bucket_count *
1117 	    sizeof(vm_page_bucket_t))),
1118 	    PAGE_MASK);
1119 	char *cp;
1120 	for (cp = (char *)vm_page_fake_buckets_start;
1121 	    cp < (char *)vm_page_fake_buckets_end;
1122 	    cp++) {
1123 		*cp = 0x5a;
1124 	}
1125 #endif /* VM_PAGE_FAKE_BUCKETS */
1126 #endif /* VM_PAGE_BUCKETS_CHECK */
1127 
1128 	kernel_debug_string_early("vm_page_buckets");
1129 	vm_page_buckets = (vm_page_bucket_t *)
1130 	    pmap_steal_memory(vm_page_bucket_count *
1131 	    sizeof(vm_page_bucket_t));
1132 
1133 	kernel_debug_string_early("vm_page_bucket_locks");
1134 	vm_page_bucket_locks = (lck_spin_t *)
1135 	    pmap_steal_memory(vm_page_bucket_lock_count *
1136 	    sizeof(lck_spin_t));
1137 
1138 	for (i = 0; i < vm_page_bucket_count; i++) {
1139 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1140 
1141 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1142 #if     MACH_PAGE_HASH_STATS
1143 		bucket->cur_count = 0;
1144 		bucket->hi_count = 0;
1145 #endif /* MACH_PAGE_HASH_STATS */
1146 	}
1147 
1148 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1149 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1150 	}
1151 
1152 	vm_tag_init();
1153 
1154 #if VM_PAGE_BUCKETS_CHECK
1155 	vm_page_buckets_check_ready = TRUE;
1156 #endif /* VM_PAGE_BUCKETS_CHECK */
1157 
1158 	/*
1159 	 *	Machine-dependent code allocates the resident page table.
1160 	 *	It uses vm_page_init to initialize the page frames.
1161 	 *	The code also returns to us the virtual space available
1162 	 *	to the kernel.  We don't trust the pmap module
1163 	 *	to get the alignment right.
1164 	 */
1165 
1166 	kernel_debug_string_early("pmap_startup");
1167 	pmap_startup(&virtual_space_start, &virtual_space_end);
1168 	virtual_space_start = round_page(virtual_space_start);
1169 	virtual_space_end = trunc_page(virtual_space_end);
1170 
1171 	*startp = virtual_space_start;
1172 	*endp = virtual_space_end;
1173 
1174 	/*
1175 	 *	Compute the initial "wire" count.
1176 	 *	Up until now, the pages which have been set aside are not under
1177 	 *	the VM system's control, so although they aren't explicitly
1178 	 *	wired, they nonetheless can't be moved. At this moment,
1179 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1180 	 */
1181 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1182 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1183 	    vm_page_free_count - vm_lopage_free_count;
1184 #if CONFIG_SECLUDED_MEMORY
1185 	vm_page_wire_count -= vm_page_secluded_count;
1186 #endif
1187 	vm_page_wire_count_initial = vm_page_wire_count;
1188 
1189 	/* capture this for later use */
1190 	booter_size = ml_get_booter_memory_size();
1191 
1192 	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1193 	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1194 
1195 	kernel_debug_string_early("vm_page_bootstrap complete");
1196 }
1197 
1198 #ifndef MACHINE_PAGES
1199 /*
1200  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1201  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1202  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1203  */
1204 static void *
1205 pmap_steal_memory_internal(
1206 	vm_size_t size,
1207 	boolean_t might_free)
1208 {
1209 	kern_return_t kr;
1210 	vm_offset_t addr;
1211 	vm_offset_t map_addr;
1212 	ppnum_t phys_page;
1213 
1214 	/*
1215 	 * Size needs to be aligned to word size.
1216 	 */
1217 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1218 
1219 	/*
1220 	 * On the first call, get the initial values for virtual address space
1221 	 * and page align them.
1222 	 */
1223 	if (virtual_space_start == virtual_space_end) {
1224 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1225 		virtual_space_start = round_page(virtual_space_start);
1226 		virtual_space_end = trunc_page(virtual_space_end);
1227 
1228 #if defined(__x86_64__)
1229 		/*
1230 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1231 		 * that map it. This makes the VA available for large page mappings.
1232 		 */
1233 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1234 #endif
1235 	}
1236 
1237 	/*
1238 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1239 	 * address if the size is big enough to back with at least 1 large page.
1240 	 */
1241 #if defined(__x86_64__)
1242 	if (size >= I386_LPGBYTES) {
1243 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1244 	}
1245 #endif
1246 	addr = virtual_space_start;
1247 	virtual_space_start += size;
1248 
1249 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1250 
1251 	/*
1252 	 * Allocate and map physical pages to back the new virtual space.
1253 	 */
1254 	map_addr = round_page(addr);
1255 	while (map_addr < addr + size) {
1256 #if defined(__x86_64__)
1257 		/*
1258 		 * Back with a large page if properly aligned on x86
1259 		 */
1260 		if ((map_addr & I386_LPGMASK) == 0 &&
1261 		    map_addr + I386_LPGBYTES <= addr + size &&
1262 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1263 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1264 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1265 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1266 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE);
1267 
1268 			if (kr != KERN_SUCCESS) {
1269 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1270 				    (unsigned long)map_addr, phys_page);
1271 			}
1272 			map_addr += I386_LPGBYTES;
1273 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1274 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1275 			vm_page_kern_lpage_count++;
1276 			continue;
1277 		}
1278 #endif
1279 
1280 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1281 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1282 		}
1283 
1284 #if defined(__x86_64__)
1285 		pmap_pre_expand(kernel_pmap, map_addr);
1286 #endif
1287 
1288 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1289 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1290 		    VM_WIMG_USE_DEFAULT, FALSE);
1291 
1292 		if (kr != KERN_SUCCESS) {
1293 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1294 			    (unsigned long)map_addr, phys_page);
1295 		}
1296 		map_addr += PAGE_SIZE;
1297 
1298 		/*
1299 		 * Account for newly stolen memory
1300 		 */
1301 		vm_page_wire_count++;
1302 		vm_page_stolen_count++;
1303 	}
1304 
1305 #if defined(__x86_64__)
1306 	/*
1307 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1308 	 * Notify the pmap layer to record which high pages were allocated so far.
1309 	 */
1310 	if (might_free) {
1311 		pmap_hi_pages_done();
1312 	}
1313 #endif
1314 #if KASAN
1315 	kasan_notify_address(round_page(addr), size);
1316 #endif
1317 	return (void *) addr;
1318 }
1319 
1320 void *
1321 pmap_steal_memory(
1322 	vm_size_t size)
1323 {
1324 	return pmap_steal_memory_internal(size, FALSE);
1325 }
1326 
1327 void *
1328 pmap_steal_freeable_memory(
1329 	vm_size_t size)
1330 {
1331 	return pmap_steal_memory_internal(size, TRUE);
1332 }
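
/*
 * Usage sketch (illustrative): early-boot callers grab permanent, wired
 * storage that is never returned, e.g. the hash buckets allocated in
 * vm_page_bootstrap():
 *
 *	vm_page_buckets = (vm_page_bucket_t *)
 *	    pmap_steal_memory(vm_page_bucket_count * sizeof(vm_page_bucket_t));
 *
 * pmap_steal_freeable_memory() is used for vm_pages[] itself, whose unused
 * tail may later be handed back via ml_static_mfree().
 */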
1333 
1334 #if defined(__arm64__)
1335 /*
1336  * Retire a page at startup.
1337  * These pages will eventually wind up on the retired_pages_object
1338  * in vm_retire_boot_pages().
1339  */
1340 static vm_page_queue_head_t vm_page_queue_retired VM_PAGE_PACKED_ALIGNED;
1341 static void
1342 vm_page_retire_startup(vm_page_t p)
1343 {
1344 	p->vmp_q_state = VM_PAGE_NOT_ON_Q;
1345 	p->vmp_error = true;
1346 	p->vmp_unusual = true;
1347 	vm_page_queue_enter(&vm_page_queue_retired, p, vmp_pageq);
1348 	printf("To be retired at boot: page at 0x%llx\n", (long long)ptoa(VM_PAGE_GET_PHYS_PAGE(p)));
1349 }
1350 #endif /* defined(__arm64__) */
1351 
1352 #if CONFIG_SECLUDED_MEMORY
1353 /* boot-args to control secluded memory */
1354 unsigned int secluded_mem_mb = 0;       /* # of MBs of RAM to seclude */
1355 int secluded_for_iokit = 1;             /* IOKit can use secluded memory */
1356 int secluded_for_apps = 1;              /* apps can use secluded memory */
1357 int secluded_for_filecache = 2;         /* filecache can use secluded memory */
1358 #if 11
1359 int secluded_for_fbdp = 0;
1360 #endif
1361 uint64_t secluded_shutoff_trigger = 0;
1362 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1363 #endif /* CONFIG_SECLUDED_MEMORY */
1364 
1365 
1366 #if defined(__arm__) || defined(__arm64__)
1367 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1368 unsigned int vm_first_phys_ppnum = 0;
1369 #endif
1370 
1371 void vm_page_release_startup(vm_page_t mem);
1372 void
1373 pmap_startup(
1374 	vm_offset_t     *startp,
1375 	vm_offset_t     *endp)
1376 {
1377 	unsigned int    i, npages;
1378 	ppnum_t         phys_page;
1379 	uint64_t        mem_sz;
1380 	uint64_t        start_ns;
1381 	uint64_t        now_ns;
1382 	uint_t          low_page_count = 0;
1383 
1384 #if    defined(__LP64__)
1385 	/*
1386 	 * make sure we are aligned on a 64 byte boundary
1387 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1388 	 * 6 bits of the pointer)
1389 	 */
1390 	if (virtual_space_start != virtual_space_end) {
1391 		virtual_space_start = round_page(virtual_space_start);
1392 	}
1393 #endif
1394 
1395 	/*
1396 	 * We calculate how many page frames we will have
1397 	 * and then allocate the page structures in one chunk.
1398 	 *
1399 	 * Note that the calculation here doesn't take into account
1400 	 * the memory needed to map what's being allocated, i.e. the page
1401 	 * table entries. So the actual number of pages we get will be
1402 	 * less than this. To do someday: include that in the computation.
1403 	 *
1404 	 * Also for ARM, we don't use the count of free_pages, but rather the
1405 	 * range from the last page to the first page (ignoring holes due to retired pages).
1406 	 */
1407 #if defined(__arm__) || defined(__arm64__)
1408 	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1409 #else /* defined(__arm__) || defined(__arm64__) */
1410 	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1411 #endif /* defined(__arm__) || defined(__arm64__) */
1412 	mem_sz += round_page(virtual_space_start) - virtual_space_start;        /* Account for any slop */
1413 	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));    /* scaled to include the vm_page_ts */
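
	/*
	 * Worked example (illustrative): the divisor reflects that each page
	 * costs PAGE_SIZE of memory plus one vm_page_t. Assuming 8 GB free,
	 * 4K pages, and sizeof(*vm_pages) == 80, npages comes to roughly
	 * 8 GB / 4176 ~= 2.06M instead of the naive 8 GB / 4096 == 2.10M.
	 */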
1414 
1415 	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1416 
1417 	/*
1418 	 * Check if we want to initialize pages to a known value
1419 	 */
1420 	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1421 		fill = TRUE;
1422 	}
1423 #if     DEBUG
1424 	/* This slows down booting the DEBUG kernel, particularly on
1425 	 * large memory systems, but is worthwhile in deterministically
1426 	 * trapping uninitialized memory usage.
1427 	 */
1428 	if (!fill) {
1429 		fill = TRUE;
1430 		fillval = 0xDEB8F177;
1431 	}
1432 #endif
1433 	if (fill) {
1434 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1435 	}
1436 
1437 #if CONFIG_SECLUDED_MEMORY
1438 	/*
1439 	 * Figure out how much secluded memory to have before we start
1440 	 * releasing pages to the free lists.
1441 	 * The default, if specified nowhere else, is no secluded mem.
1442 	 */
1443 	secluded_mem_mb = 0;
1444 	if (max_mem > 1 * 1024 * 1024 * 1024) {
1445 		/* default to 90MB for devices with > 1GB of RAM */
1446 		secluded_mem_mb = 90;
1447 	}
1448 	/* override with value from device tree, if provided */
1449 	PE_get_default("kern.secluded_mem_mb",
1450 	    &secluded_mem_mb, sizeof(secluded_mem_mb));
1451 	/* override with value from boot-args, if provided */
1452 	PE_parse_boot_argn("secluded_mem_mb",
1453 	    &secluded_mem_mb,
1454 	    sizeof(secluded_mem_mb));
1455 
1456 	vm_page_secluded_target = (unsigned int)
1457 	    ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
1458 	PE_parse_boot_argn("secluded_for_iokit",
1459 	    &secluded_for_iokit,
1460 	    sizeof(secluded_for_iokit));
1461 	PE_parse_boot_argn("secluded_for_apps",
1462 	    &secluded_for_apps,
1463 	    sizeof(secluded_for_apps));
1464 	PE_parse_boot_argn("secluded_for_filecache",
1465 	    &secluded_for_filecache,
1466 	    sizeof(secluded_for_filecache));
1467 #if 11
1468 	PE_parse_boot_argn("secluded_for_fbdp",
1469 	    &secluded_for_fbdp,
1470 	    sizeof(secluded_for_fbdp));
1471 #endif
1472 
1473 	/*
1474 	 * Allow a really large app to effectively use secluded memory until it exits.
1475 	 */
1476 	if (vm_page_secluded_target != 0) {
1477 		/*
1478 		 * Get an amount from boot-args, else use 1/2 of max_mem.
1479 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1480 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
1481 		 */
1482 		int secluded_shutoff_mb;
1483 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1484 		    sizeof(secluded_shutoff_mb))) {
1485 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1486 		} else {
1487 			secluded_shutoff_trigger = max_mem / 2;
1488 		}
1489 
1490 		/* ensure the headroom value is sensible and avoid underflows */
1491 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1492 	}
1493 
1494 #endif /* CONFIG_SECLUDED_MEMORY */
1495 
1496 #if defined(__x86_64__)
1497 
1498 	/*
1499 	 * Decide how much memory we delay freeing at boot time.
1500 	 */
1501 	uint32_t delay_above_gb;
1502 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1503 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1504 	}
1505 
1506 	if (delay_above_gb == 0) {
1507 		delay_above_pnum = PPNUM_MAX;
1508 	} else {
1509 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1510 	}
1511 
1512 	/* make sure we have sane breathing room: 1G above low memory */
1513 	if (delay_above_pnum <= max_valid_low_ppnum) {
1514 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1515 	}
1516 
1517 	if (delay_above_pnum < PPNUM_MAX) {
1518 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1519 	}
1520 
1521 #endif /* defined(__x86_64__) */
1522 
1523 	/*
1524 	 * Initialize and release the page frames.
1525 	 */
1526 	kernel_debug_string_early("page_frame_init");
1527 
1528 	vm_page_array_beginning_addr = &vm_pages[0];
1529 	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
1530 #if VM_PAGE_PACKED_FROM_ARRAY
1531 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1532 		panic("pmap_startup(): too many pages to support vm_page packing");
1533 	}
1534 #endif
1535 
1536 	vm_delayed_count = 0;
1537 #if defined(__arm64__)
1538 	vm_page_queue_init(&vm_page_queue_retired);
1539 #endif /* defined(__arm64__) */
1540 
1541 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1542 	vm_pages_count = 0;
1543 	for (i = 0; i < npages; i++) {
1544 		/* Did we run out of pages? */
1545 		if (!pmap_next_page(&phys_page)) {
1546 			break;
1547 		}
1548 
1549 		if (phys_page < max_valid_low_ppnum) {
1550 			++low_page_count;
1551 		}
1552 
1553 		/* Are we at a high enough page number to delay the rest? */
1554 		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1555 			vm_delayed_count = pmap_free_pages();
1556 			break;
1557 		}
1558 
1559 #if defined(__arm__) || defined(__arm64__)
1560 		if (i == 0) {
1561 			vm_first_phys_ppnum = phys_page;
1562 			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1563 			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1564 #if defined(__arm64__)
1565 		} else {
1566 			/*
1567 			 * pmap_next_page() may skip over pages reported bad by iboot.
1568 			 */
1569 			while (i < phys_page - vm_first_phys_ppnum && i < npages) {
1570 				++vm_pages_count;
1571 				vm_page_init(&vm_pages[i], i + vm_first_phys_ppnum, FALSE);
1572 				vm_page_retire_startup(&vm_pages[i]);
1573 				++i;
1574 			}
1575 			if (i >= npages) {
1576 				break;
1577 			}
1578 			assert(i == phys_page - vm_first_phys_ppnum);
1579 #endif /* defined(__arm64__) */
1580 		}
1581 #endif /* defined(__arm__) || defined(__arm64__) */
1582 
1583 #if defined(__x86_64__)
1584 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
1585 		if (i > 0) {
1586 			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1587 		}
1588 #endif
1589 		++vm_pages_count;
1590 		vm_page_init(&vm_pages[i], phys_page, FALSE);
1591 		if (fill) {
1592 			fillPage(phys_page, fillval);
1593 		}
1594 		if (vm_himemory_mode) {
1595 			vm_page_release_startup(&vm_pages[i]);
1596 		}
1597 	}
1598 	vm_page_pages = vm_pages_count; /* used to report to user space */
1599 
1600 	if (!vm_himemory_mode) {
1601 		do {
1602 			if (!vm_pages[--i].vmp_error) {               /* skip retired pages */
1603 				vm_page_release_startup(&vm_pages[i]);
1604 			}
1605 		} while (i != 0);
1606 	}
1607 
1608 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1609 	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1610 	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1611 
1612 #if defined(__LP64__)
1613 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1614 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1615 	}
1616 
1617 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1618 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1619 	}
1620 #endif
1621 
1622 	VM_CHECK_MEMORYSTATUS;
1623 
1624 	/*
1625 	 * We have to re-align virtual_space_start,
1626 	 * because pmap_steal_memory has been using it.
1627 	 */
1628 	virtual_space_start = round_page(virtual_space_start);
1629 	*startp = virtual_space_start;
1630 	*endp = virtual_space_end;
1631 }
1632 #endif  /* MACHINE_PAGES */
1633 
1634 /*
1635  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1636  * from or frees to this zone. It's just here for reporting purposes via the zprint command.
1637  * This needs to be done after all initially delayed pages are put on the free lists.
1638  */
1639 static void
1640 vm_page_module_init_delayed(void)
1641 {
1642 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1643 	    ZC_NOGZALLOC, ZONE_ID_VM_PAGES, ^(zone_t z) {
1644 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1645 
1646 		zone_set_exhaustible(z, 0);
1647 		/*
1648 		 * Reflect size and usage information for vm_pages[].
1649 		 */
1650 
1651 		z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1652 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
1653 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1654 		vm_pages_count * sizeof(struct vm_page);
1655 		vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1656 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
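		/*
		 * Illustrative arithmetic (assumed sizes, not measured): 4M
		 * vm_page entries at 80 bytes each span ~320 MB of kernel
		 * data, i.e. atop(320 MB) wired pages now accounted to this
		 * zone instead of the "stolen" total.
		 */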
1657 		z->z_wired_cur += vm_page_zone_pages;
1658 		z->z_wired_hwm = z->z_wired_cur;
1659 		z->z_va_cur = z->z_wired_cur;
1660 		/* since zone accounts for these, take them out of stolen */
1661 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1662 	});
1663 }
1664 
1665 /*
1666  * Create the vm_pages zone. This is used for the vm_page structures for the pages
1667  * that are scavenged from other boot-time usages by ml_static_mfree(). As such,
1668  * this needs to happen in early VM bootstrap.
1669  */
1670 
1671 __startup_func
1672 static void
1673 vm_page_module_init(void)
1674 {
1675 	vm_size_t vm_page_with_ppnum_size;
1676 
1677 	/*
1678 	 * Since the pointers to elements in this zone will be packed, they
1679 	 * must have an appropriate size, which is not necessarily what sizeof() reports.
1680 	 */
1681 	vm_page_with_ppnum_size =
1682 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1683 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
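
	/*
	 * This is the usual round-up-to-alignment idiom. Worked example
	 * (illustrative values): a 76-byte struct with a 64-byte packing
	 * alignment rounds to (76 + 63) & ~63 == 128.
	 */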
1684 
1685 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1686 	    ZC_NOGZALLOC | ZC_ALIGNMENT_REQUIRED | ZC_VM_LP64 | ZC_NOTBITAG,
1687 	    ZONE_ID_ANY, ^(zone_t z) {
1688 		/*
1689 		 * The number "10" is a small number that is larger than the number
1690 		 * of fictitious pages that any single caller will attempt to allocate
1691 		 * without blocking.
1692 		 *
1693 		 * The largest such number at the moment is kernel_memory_allocate()
1694 		 * when 2 guard pages are requested. 10 is simply a somewhat larger number,
1695 		 * taking into account the 50% hysteresis the zone allocator uses.
1696 		 *
1697 		 * Note: this works at all because the zone allocator
1698 		 *       doesn't ever allocate fictitious pages.
1699 		 */
1700 		z->z_elems_rsv = 10;
1701 	});
1702 }
1703 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1704 
1705 /*
1706  *	Routine:	vm_page_create
1707  *	Purpose:
1708  *		After the VM system is up, machine-dependent code
1709  *		may stumble across more physical memory.  For example,
1710  *		memory that it was reserving for a frame buffer.
1711  *		vm_page_create turns this memory into available pages.
1712  */
1713 
1714 void
1715 vm_page_create(
1716 	ppnum_t start,
1717 	ppnum_t end)
1718 {
1719 	ppnum_t         phys_page;
1720 	vm_page_t       m;
1721 
1722 	for (phys_page = start;
1723 	    phys_page < end;
1724 	    phys_page++) {
1725 		m = vm_page_grab_fictitious_common(phys_page, TRUE);
1726 		m->vmp_fictitious = FALSE;
1727 		pmap_clear_noencrypt(phys_page);
1728 
1729 		lck_mtx_lock(&vm_page_queue_free_lock);
1730 		vm_page_pages++;
1731 		lck_mtx_unlock(&vm_page_queue_free_lock);
1732 		vm_page_release(m, FALSE);
1733 	}
1734 }
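
/*
 * Illustrative use (hypothetical caller): machine-dependent code that stops
 * reserving a physical range, e.g. a frame buffer at [base, base + size),
 * could hand it over to the VM with:
 *
 *	vm_page_create(atop(base), atop(base + size));
 */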
1735 
1736 #if defined(__arm64__)
1737 /*
1738  * Like vm_page_create(), except we want to immediately retire the page,
1739  * not put it on the free list.
1740  */
1741 void
1742 vm_page_create_retired(
1743 	ppnum_t   phys_page)
1744 {
1745 	vm_page_t m;
1746 
1747 	m = vm_page_grab_fictitious_common(phys_page, TRUE);
1748 	m->vmp_fictitious = FALSE;
1749 	pmap_clear_noencrypt(phys_page);
1750 	m->vmp_error = true;
1751 	m->vmp_unusual = true;
1752 	vm_page_lock_queues();
1753 	m->vmp_q_state = VM_PAGE_IS_WIRED;
1754 	m->vmp_wire_count++;
1755 	vm_page_unlock_queues();
1756 
1757 	lck_mtx_lock(&vm_page_queue_free_lock);
1758 	vm_page_pages++;
1759 	lck_mtx_unlock(&vm_page_queue_free_lock);
1760 
1761 	vm_object_lock(retired_pages_object);
1762 	vm_page_insert_wired(m, retired_pages_object, ptoa(VM_PAGE_GET_PHYS_PAGE(m)), VM_KERN_MEMORY_RETIRED);
1763 	vm_object_unlock(retired_pages_object);
1764 	pmap_retire_page(VM_PAGE_GET_PHYS_PAGE(m));
1765 }
1766 #endif /* defined(__arm64__) */
1767 
1768 /*
1769  *	vm_page_hash:
1770  *
1771  *	Distributes the object/offset key pair among hash buckets.
1772  *
1773  *	NOTE:	The bucket count must be a power of 2
1774  */
1775 #define vm_page_hash(object, offset) (\
1776 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1777 	 & vm_page_hash_mask)
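
/*
 * Sketch of why the power-of-2 requirement matters (illustrative values):
 * with 1024 buckets, vm_page_hash_mask == 0x3ff and the final "& mask" is
 * a cheap substitute for "% 1024"; it only selects every bucket uniformly
 * because the mask covers a complete range of low bits.
 */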
1778 
1779 
1780 /*
1781  *	vm_page_insert:		[ internal use only ]
1782  *
1783  *	Inserts the given mem entry into the object/object-page
1784  *	table and object list.
1785  *
1786  *	The object must be locked.
1787  */
1788 void
1789 vm_page_insert(
1790 	vm_page_t               mem,
1791 	vm_object_t             object,
1792 	vm_object_offset_t      offset)
1793 {
1794 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1795 }
1796 
1797 void
1798 vm_page_insert_wired(
1799 	vm_page_t               mem,
1800 	vm_object_t             object,
1801 	vm_object_offset_t      offset,
1802 	vm_tag_t                tag)
1803 {
1804 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1805 }
1806 
1807 void
1808 vm_page_insert_internal(
1809 	vm_page_t               mem,
1810 	vm_object_t             object,
1811 	vm_object_offset_t      offset,
1812 	vm_tag_t                tag,
1813 	boolean_t               queues_lock_held,
1814 	boolean_t               insert_in_hash,
1815 	boolean_t               batch_pmap_op,
1816 	boolean_t               batch_accounting,
1817 	uint64_t                *delayed_ledger_update)
1818 {
1819 	vm_page_bucket_t        *bucket;
1820 	lck_spin_t              *bucket_lock;
1821 	int                     hash_id;
1822 	task_t                  owner;
1823 	int                     ledger_idx_volatile;
1824 	int                     ledger_idx_nonvolatile;
1825 	int                     ledger_idx_volatile_compressed;
1826 	int                     ledger_idx_nonvolatile_compressed;
1827 	boolean_t               do_footprint;
1828 
1829 #if 0
1830 	/*
1831 	 * we may not hold the page queue lock
1832 	 * so this check isn't safe to make
1833 	 */
1834 	VM_PAGE_CHECK(mem);
1835 #endif
1836 
1837 	assertf(page_aligned(offset), "0x%llx\n", offset);
1838 
1839 	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1840 
1841 	/* the vm_submap_object is only a placeholder for submaps */
1842 	assert(object != vm_submap_object);
1843 
1844 	vm_object_lock_assert_exclusive(object);
1845 	LCK_MTX_ASSERT(&vm_page_queue_lock,
1846 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
1847 	    : LCK_MTX_ASSERT_NOTOWNED);
1848 
1849 	if (queues_lock_held == FALSE) {
1850 		assert(!VM_PAGE_PAGEABLE(mem));
1851 	}
1852 
1853 	if (insert_in_hash == TRUE) {
1854 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1855 		if (mem->vmp_tabled || mem->vmp_object) {
1856 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1857 			    "already in (obj=%p,off=0x%llx)",
1858 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1859 		}
1860 #endif
1861 		if (object->internal && (offset >= object->vo_size)) {
1862 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1863 			    mem, object, offset, object->vo_size);
1864 		}
1865 
1866 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1867 
1868 		/*
1869 		 *	Record the object/offset pair in this page
1870 		 */
1871 
1872 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1873 		mem->vmp_offset = offset;
1874 
1875 #if CONFIG_SECLUDED_MEMORY
1876 		if (object->eligible_for_secluded) {
1877 			vm_page_secluded.eligible_for_secluded++;
1878 		}
1879 #endif /* CONFIG_SECLUDED_MEMORY */
1880 
1881 		/*
1882 		 *	Insert it into the object/offset hash table
1883 		 */
1884 		hash_id = vm_page_hash(object, offset);
1885 		bucket = &vm_page_buckets[hash_id];
1886 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1887 
1888 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1889 
1890 		mem->vmp_next_m = bucket->page_list;
1891 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
1892 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1893 
1894 #if     MACH_PAGE_HASH_STATS
1895 		if (++bucket->cur_count > bucket->hi_count) {
1896 			bucket->hi_count = bucket->cur_count;
1897 		}
1898 #endif /* MACH_PAGE_HASH_STATS */
1899 		mem->vmp_hashed = TRUE;
1900 		lck_spin_unlock(bucket_lock);
1901 	}
1902 
1903 	{
1904 		unsigned int    cache_attr;
1905 
1906 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
1907 
1908 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
1909 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1910 		}
1911 	}
1912 	/*
1913 	 *	Now link into the object's list of backed pages.
1914 	 */
1915 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
1916 	object->memq_hint = mem;
1917 	mem->vmp_tabled = TRUE;
1918 
1919 	/*
1920 	 *	Show that the object has one more resident page.
1921 	 */
1922 
1923 	object->resident_page_count++;
1924 	if (VM_PAGE_WIRED(mem)) {
1925 		assert(mem->vmp_wire_count > 0);
1926 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1927 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1928 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1929 	}
1930 	assert(object->resident_page_count >= object->wired_page_count);
1931 
1932 #if DEVELOPMENT || DEBUG
1933 	if (object->object_is_shared_cache &&
1934 	    object->pager != NULL &&
1935 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1936 		int new, old;
1937 		assert(!object->internal);
1938 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1939 		do {
1940 			old = shared_region_pagers_resident_peak;
1941 		} while (old < new &&
1942 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1943 	}
1944 #endif /* DEVELOPMENT || DEBUG */
1945 
1946 	if (batch_accounting == FALSE) {
1947 		if (object->internal) {
1948 			OSAddAtomic(1, &vm_page_internal_count);
1949 		} else {
1950 			OSAddAtomic(1, &vm_page_external_count);
1951 		}
1952 	}
1953 
1954 	/*
1955 	 * It wouldn't make sense to insert a "reusable" page in
1956 	 * an object (the page would have been marked "reusable" only
1957 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1958 	 * in the object at that time).
1959 	 * But a page could be inserted in a "all_reusable" object, if
1960 	 * something faults it in (a vm_read() from another task or a
1961 	 * "use-after-free" issue in user space, for example).  It can
1962 	 * also happen if we're relocating a page from that object to
1963 	 * a different physical page during a physically-contiguous
1964 	 * allocation.
1965 	 */
1966 	assert(!mem->vmp_reusable);
1967 	if (object->all_reusable) {
1968 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1969 	}
1970 
1971 	if (object->purgable == VM_PURGABLE_DENY &&
1972 	    !object->vo_ledger_tag) {
1973 		owner = TASK_NULL;
1974 	} else {
1975 		owner = VM_OBJECT_OWNER(object);
1976 		vm_object_ledger_tag_ledgers(object,
1977 		    &ledger_idx_volatile,
1978 		    &ledger_idx_nonvolatile,
1979 		    &ledger_idx_volatile_compressed,
1980 		    &ledger_idx_nonvolatile_compressed,
1981 		    &do_footprint);
1982 	}
1983 	if (owner &&
1984 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
1985 	    object->purgable == VM_PURGABLE_DENY ||
1986 	    VM_PAGE_WIRED(mem))) {
1987 		if (delayed_ledger_update) {
1988 			*delayed_ledger_update += PAGE_SIZE;
1989 		} else {
1990 			/* more non-volatile bytes */
1991 			ledger_credit(owner->ledger,
1992 			    ledger_idx_nonvolatile,
1993 			    PAGE_SIZE);
1994 			if (do_footprint) {
1995 				/* more footprint */
1996 				ledger_credit(owner->ledger,
1997 				    task_ledgers.phys_footprint,
1998 				    PAGE_SIZE);
1999 			}
2000 		}
2001 	} else if (owner &&
2002 	    (object->purgable == VM_PURGABLE_VOLATILE ||
2003 	    object->purgable == VM_PURGABLE_EMPTY)) {
2004 		assert(!VM_PAGE_WIRED(mem));
2005 		/* more volatile bytes */
2006 		ledger_credit(owner->ledger,
2007 		    ledger_idx_volatile,
2008 		    PAGE_SIZE);
2009 	}
2010 
2011 	if (object->purgable == VM_PURGABLE_VOLATILE) {
2012 		if (VM_PAGE_WIRED(mem)) {
2013 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
2014 		} else {
2015 			OSAddAtomic(+1, &vm_page_purgeable_count);
2016 		}
2017 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
2018 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2019 		/*
2020 		 * This page belongs to a purged VM object but hasn't
2021 		 * been purged (because it was "busy").
2022 		 * It's in the "throttled" queue and hence not
2023 		 * visible to vm_pageout_scan().  Move it to a pageable
2024 		 * queue, so that it can eventually be reclaimed, instead
2025 		 * of lingering in the "empty" object.
2026 		 */
2027 		if (queues_lock_held == FALSE) {
2028 			vm_page_lockspin_queues();
2029 		}
2030 		vm_page_deactivate(mem);
2031 		if (queues_lock_held == FALSE) {
2032 			vm_page_unlock_queues();
2033 		}
2034 	}
2035 
2036 #if VM_OBJECT_TRACKING_OP_MODIFIED
2037 	if (vm_object_tracking_inited &&
2038 	    object->internal &&
2039 	    object->resident_page_count == 0 &&
2040 	    object->pager == NULL &&
2041 	    object->shadow != NULL &&
2042 	    object->shadow->copy == object) {
2043 		void *bt[VM_OBJECT_TRACKING_BTDEPTH];
2044 		int numsaved = 0;
2045 
2046 		numsaved = OSBacktrace(bt, VM_OBJECT_TRACKING_BTDEPTH);
2047 		btlog_add_entry(vm_object_tracking_btlog,
2048 		    object,
2049 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2050 		    bt,
2051 		    numsaved);
2052 	}
2053 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2054 }
2055 
2056 /*
2057  *	vm_page_replace:
2058  *
2059  *	Exactly like vm_page_insert, except that we first
2060  *	remove any existing page at the given offset in object.
2061  *
2062  *	The object must be locked.
2063  */
2064 void
2065 vm_page_replace(
2066 	vm_page_t               mem,
2067 	vm_object_t             object,
2068 	vm_object_offset_t      offset)
2069 {
2070 	vm_page_bucket_t *bucket;
2071 	vm_page_t        found_m = VM_PAGE_NULL;
2072 	lck_spin_t      *bucket_lock;
2073 	int             hash_id;
2074 
2075 #if 0
2076 	/*
2077 	 * we don't hold the page queue lock
2078 	 * so this check isn't safe to make
2079 	 */
2080 	VM_PAGE_CHECK(mem);
2081 #endif
2082 	vm_object_lock_assert_exclusive(object);
2083 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2084 	if (mem->vmp_tabled || mem->vmp_object) {
2085 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2086 		    "already in (obj=%p,off=0x%llx)",
2087 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2088 	}
2089 #endif
2090 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2091 
2092 	assert(!VM_PAGE_PAGEABLE(mem));
2093 
2094 	/*
2095 	 *	Record the object/offset pair in this page
2096 	 */
2097 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2098 	mem->vmp_offset = offset;
2099 
2100 	/*
2101 	 *	Insert it into the object/offset hash table,
2102 	 *	replacing any page that might have been there.
2103 	 */
2104 
2105 	hash_id = vm_page_hash(object, offset);
2106 	bucket = &vm_page_buckets[hash_id];
2107 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2108 
2109 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2110 
2111 	if (bucket->page_list) {
2112 		vm_page_packed_t *mp = &bucket->page_list;
2113 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2114 
2115 		do {
2116 			/*
2117 			 * compare packed object pointers
2118 			 */
2119 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2120 				/*
2121 				 * Remove old page from hash list
2122 				 */
2123 				*mp = m->vmp_next_m;
2124 				m->vmp_hashed = FALSE;
2125 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2126 
2127 				found_m = m;
2128 				break;
2129 			}
2130 			mp = &m->vmp_next_m;
2131 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2132 
2133 		mem->vmp_next_m = bucket->page_list;
2134 	} else {
2135 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2136 	}
2137 	/*
2138 	 * insert new page at head of hash list
2139 	 */
2140 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2141 	mem->vmp_hashed = TRUE;
2142 
2143 	lck_spin_unlock(bucket_lock);
2144 
2145 	if (found_m) {
2146 		/*
2147 		 * there was already a page at the specified
2148 		 * offset for this object... remove it from
2149 		 * the object and free it back to the free list
2150 		 */
2151 		vm_page_free_unlocked(found_m, FALSE);
2152 	}
2153 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2154 }
2155 
2156 /*
2157  *	vm_page_remove:		[ internal use only ]
2158  *
2159  *	Removes the given mem entry from the object/offset-page
2160  *	table and the object page list.
2161  *
2162  *	The object must be locked.
2163  */
2164 
2165 void
2166 vm_page_remove(
2167 	vm_page_t       mem,
2168 	boolean_t       remove_from_hash)
2169 {
2170 	vm_page_bucket_t *bucket;
2171 	vm_page_t       this;
2172 	lck_spin_t      *bucket_lock;
2173 	int             hash_id;
2174 	task_t          owner;
2175 	vm_object_t     m_object;
2176 	int             ledger_idx_volatile;
2177 	int             ledger_idx_nonvolatile;
2178 	int             ledger_idx_volatile_compressed;
2179 	int             ledger_idx_nonvolatile_compressed;
2180 	int             do_footprint;
2181 
2182 	m_object = VM_PAGE_OBJECT(mem);
2183 
2184 	vm_object_lock_assert_exclusive(m_object);
2185 	assert(mem->vmp_tabled);
2186 	assert(!mem->vmp_cleaning);
2187 	assert(!mem->vmp_laundry);
2188 
2189 	if (VM_PAGE_PAGEABLE(mem)) {
2190 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2191 	}
2192 #if 0
2193 	/*
2194 	 * we don't hold the page queue lock
2195 	 * so this check isn't safe to make
2196 	 */
2197 	VM_PAGE_CHECK(mem);
2198 #endif
2199 	if (remove_from_hash == TRUE) {
2200 		/*
2201 		 *	Remove from the object/offset hash table
2202 		 */
2203 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2204 		bucket = &vm_page_buckets[hash_id];
2205 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2206 
2207 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2208 
2209 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2210 			/* optimize for common case */
2211 
2212 			bucket->page_list = mem->vmp_next_m;
2213 		} else {
2214 			vm_page_packed_t        *prev;
2215 
2216 			for (prev = &this->vmp_next_m;
2217 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2218 			    prev = &this->vmp_next_m) {
2219 				continue;
2220 			}
2221 			*prev = this->vmp_next_m;
2222 		}
2223 #if     MACH_PAGE_HASH_STATS
2224 		bucket->cur_count--;
2225 #endif /* MACH_PAGE_HASH_STATS */
2226 		mem->vmp_hashed = FALSE;
2227 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2228 		lck_spin_unlock(bucket_lock);
2229 	}
2230 	/*
2231 	 *	Now remove from the object's list of backed pages.
2232 	 */
2233 
2234 	vm_page_remove_internal(mem);
2235 
2236 	/*
2237 	 *	And show that the object has one fewer resident
2238 	 *	page.
2239 	 */
2240 
2241 	assert(m_object->resident_page_count > 0);
2242 	m_object->resident_page_count--;
2243 
2244 #if DEVELOPMENT || DEBUG
2245 	if (m_object->object_is_shared_cache &&
2246 	    m_object->pager != NULL &&
2247 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2248 		assert(!m_object->internal);
2249 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
2250 	}
2251 #endif /* DEVELOPMENT || DEBUG */
2252 
2253 	if (m_object->internal) {
2254 #if DEBUG
2255 		assert(vm_page_internal_count);
2256 #endif /* DEBUG */
2257 
2258 		OSAddAtomic(-1, &vm_page_internal_count);
2259 	} else {
2260 		assert(vm_page_external_count);
2261 		OSAddAtomic(-1, &vm_page_external_count);
2262 
2263 		if (mem->vmp_xpmapped) {
2264 			assert(vm_page_xpmapped_external_count);
2265 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2266 		}
2267 	}
2268 	if (!m_object->internal &&
2269 	    m_object->cached_list.next &&
2270 	    m_object->cached_list.prev) {
2271 		if (m_object->resident_page_count == 0) {
2272 			vm_object_cache_remove(m_object);
2273 		}
2274 	}
2275 
2276 	if (VM_PAGE_WIRED(mem)) {
2277 		assert(mem->vmp_wire_count > 0);
2278 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2279 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2280 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2281 	}
2282 	assert(m_object->resident_page_count >=
2283 	    m_object->wired_page_count);
2284 	if (mem->vmp_reusable) {
2285 		assert(m_object->reusable_page_count > 0);
2286 		m_object->reusable_page_count--;
2287 		assert(m_object->reusable_page_count <=
2288 		    m_object->resident_page_count);
2289 		mem->vmp_reusable = FALSE;
2290 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2291 		vm_page_stats_reusable.reused_remove++;
2292 	} else if (m_object->all_reusable) {
2293 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2294 		vm_page_stats_reusable.reused_remove++;
2295 	}
2296 
2297 	if (m_object->purgable == VM_PURGABLE_DENY &&
2298 	    !m_object->vo_ledger_tag) {
2299 		owner = TASK_NULL;
2300 	} else {
2301 		owner = VM_OBJECT_OWNER(m_object);
2302 		vm_object_ledger_tag_ledgers(m_object,
2303 		    &ledger_idx_volatile,
2304 		    &ledger_idx_nonvolatile,
2305 		    &ledger_idx_volatile_compressed,
2306 		    &ledger_idx_nonvolatile_compressed,
2307 		    &do_footprint);
2308 	}
2309 	if (owner &&
2310 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2311 	    m_object->purgable == VM_PURGABLE_DENY ||
2312 	    VM_PAGE_WIRED(mem))) {
2313 		/* less non-volatile bytes */
2314 		ledger_debit(owner->ledger,
2315 		    ledger_idx_nonvolatile,
2316 		    PAGE_SIZE);
2317 		if (do_footprint) {
2318 			/* less footprint */
2319 			ledger_debit(owner->ledger,
2320 			    task_ledgers.phys_footprint,
2321 			    PAGE_SIZE);
2322 		}
2323 	} else if (owner &&
2324 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
2325 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
2326 		assert(!VM_PAGE_WIRED(mem));
2327 		/* less volatile bytes */
2328 		ledger_debit(owner->ledger,
2329 		    ledger_idx_volatile,
2330 		    PAGE_SIZE);
2331 	}
2332 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2333 		if (VM_PAGE_WIRED(mem)) {
2334 			assert(vm_page_purgeable_wired_count > 0);
2335 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2336 		} else {
2337 			assert(vm_page_purgeable_count > 0);
2338 			OSAddAtomic(-1, &vm_page_purgeable_count);
2339 		}
2340 	}
2341 
2342 	if (m_object->set_cache_attr == TRUE) {
2343 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2344 	}
2345 
2346 	mem->vmp_tabled = FALSE;
2347 	mem->vmp_object = 0;
2348 	mem->vmp_offset = (vm_object_offset_t) -1;
2349 }
2350 
2351 
2352 /*
2353  *	vm_page_lookup:
2354  *
2355  *	Returns the page associated with the object/offset
2356  *	pair specified; if none is found, VM_PAGE_NULL is returned.
2357  *
2358  *	The object must be locked.  No side effects.
2359  */
2360 
2361 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
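
/*
 * For objects at or below this resident-page count, vm_page_lookup() below
 * walks the object's short memq list directly instead of taking a bucket
 * spin lock and probing the hash chain.
 */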
2362 
2363 #if DEBUG_VM_PAGE_LOOKUP
2364 
2365 struct {
2366 	uint64_t        vpl_total;
2367 	uint64_t        vpl_empty_obj;
2368 	uint64_t        vpl_bucket_NULL;
2369 	uint64_t        vpl_hit_hint;
2370 	uint64_t        vpl_hit_hint_next;
2371 	uint64_t        vpl_hit_hint_prev;
2372 	uint64_t        vpl_fast;
2373 	uint64_t        vpl_slow;
2374 	uint64_t        vpl_hit;
2375 	uint64_t        vpl_miss;
2376 
2377 	uint64_t        vpl_fast_elapsed;
2378 	uint64_t        vpl_slow_elapsed;
2379 } vm_page_lookup_stats __attribute__((aligned(8)));
2380 
2381 #endif
2382 
2383 #define KDP_VM_PAGE_WALK_MAX    1000
2384 
2385 vm_page_t
2386 kdp_vm_page_lookup(
2387 	vm_object_t             object,
2388 	vm_object_offset_t      offset)
2389 {
2390 	vm_page_t cur_page;
2391 	int num_traversed = 0;
2392 
2393 	if (not_in_kdp) {
2394 		panic("kdp_vm_page_lookup done outside of kernel debugger");
2395 	}
2396 
2397 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2398 		if (cur_page->vmp_offset == offset) {
2399 			return cur_page;
2400 		}
2401 		num_traversed++;
2402 
2403 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2404 			return VM_PAGE_NULL;
2405 		}
2406 	}
2407 
2408 	return VM_PAGE_NULL;
2409 }
2410 
2411 vm_page_t
2412 vm_page_lookup(
2413 	vm_object_t             object,
2414 	vm_object_offset_t      offset)
2415 {
2416 	vm_page_t       mem;
2417 	vm_page_bucket_t *bucket;
2418 	vm_page_queue_entry_t   qe;
2419 	lck_spin_t      *bucket_lock = NULL;
2420 	int             hash_id;
2421 #if DEBUG_VM_PAGE_LOOKUP
2422 	uint64_t        start, elapsed;
2423 
2424 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2425 #endif
2426 
2427 #if CONFIG_KERNEL_TBI
2428 	if (VM_KERNEL_ADDRESS(offset)) {
2429 		offset = VM_KERNEL_STRIP_UPTR(offset);
2430 	}
2431 #endif /* CONFIG_KERNEL_TBI */
2432 
2433 	vm_object_lock_assert_held(object);
2434 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2435 
2436 	if (object->resident_page_count == 0) {
2437 #if DEBUG_VM_PAGE_LOOKUP
2438 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2439 #endif
2440 		return VM_PAGE_NULL;
2441 	}
2442 
2443 	mem = object->memq_hint;
2444 
2445 	if (mem != VM_PAGE_NULL) {
2446 		assert(VM_PAGE_OBJECT(mem) == object);
2447 
2448 		if (mem->vmp_offset == offset) {
2449 #if DEBUG_VM_PAGE_LOOKUP
2450 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2451 #endif
2452 			return mem;
2453 		}
2454 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2455 
2456 		if (!vm_page_queue_end(&object->memq, qe)) {
2457 			vm_page_t       next_page;
2458 
2459 			next_page = (vm_page_t)((uintptr_t)qe);
2460 			assert(VM_PAGE_OBJECT(next_page) == object);
2461 
2462 			if (next_page->vmp_offset == offset) {
2463 				object->memq_hint = next_page; /* new hint */
2464 #if DEBUG_VM_PAGE_LOOKUP
2465 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2466 #endif
2467 				return next_page;
2468 			}
2469 		}
2470 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2471 
2472 		if (!vm_page_queue_end(&object->memq, qe)) {
2473 			vm_page_t prev_page;
2474 
2475 			prev_page = (vm_page_t)((uintptr_t)qe);
2476 			assert(VM_PAGE_OBJECT(prev_page) == object);
2477 
2478 			if (prev_page->vmp_offset == offset) {
2479 				object->memq_hint = prev_page; /* new hint */
2480 #if DEBUG_VM_PAGE_LOOKUP
2481 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2482 #endif
2483 				return prev_page;
2484 			}
2485 		}
2486 	}
2487 	/*
2488 	 * Search the hash table for this object/offset pair
2489 	 */
2490 	hash_id = vm_page_hash(object, offset);
2491 	bucket = &vm_page_buckets[hash_id];
2492 
2493 	/*
2494 	 * since we hold the object lock, we are guaranteed that no
2495 	 * new pages can be inserted into this object... this in turn
2496 	 * guarantees that the page we're looking for can't exist
2497 	 * if the bucket it hashes to is currently NULL even when looked
2498 	 * at outside the scope of the hash bucket lock... this is a
2499 	 * really cheap optimization to avoid taking the lock
2500 	 */
2501 	if (!bucket->page_list) {
2502 #if DEBUG_VM_PAGE_LOOKUP
2503 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2504 #endif
2505 		return VM_PAGE_NULL;
2506 	}
2507 
2508 #if DEBUG_VM_PAGE_LOOKUP
2509 	start = mach_absolute_time();
2510 #endif
2511 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2512 		/*
2513 		 * on average, it's roughly 3 times faster to walk a short memq list
2514 		 * than to take the spin lock and go through the hash list
2515 		 */
2516 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
2517 
2518 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2519 			if (mem->vmp_offset == offset) {
2520 				break;
2521 			}
2522 
2523 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2524 		}
2525 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2526 			mem = NULL;
2527 		}
2528 	} else {
2529 		vm_page_object_t        packed_object;
2530 
2531 		packed_object = VM_PAGE_PACK_OBJECT(object);
2532 
2533 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2534 
2535 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2536 
2537 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2538 		    mem != VM_PAGE_NULL;
2539 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2540 #if 0
2541 			/*
2542 			 * we don't hold the page queue lock
2543 			 * so this check isn't safe to make
2544 			 */
2545 			VM_PAGE_CHECK(mem);
2546 #endif
2547 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2548 				break;
2549 			}
2550 		}
2551 		lck_spin_unlock(bucket_lock);
2552 	}
2553 
2554 #if DEBUG_VM_PAGE_LOOKUP
2555 	elapsed = mach_absolute_time() - start;
2556 
2557 	if (bucket_lock) {
2558 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2559 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2560 	} else {
2561 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2562 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2563 	}
2564 	if (mem != VM_PAGE_NULL) {
2565 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2566 	} else {
2567 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2568 	}
2569 #endif
2570 	if (mem != VM_PAGE_NULL) {
2571 		assert(VM_PAGE_OBJECT(mem) == object);
2572 
2573 		object->memq_hint = mem;
2574 	}
2575 	return mem;
2576 }
2577 
2578 
2579 /*
2580  *	vm_page_rename:
2581  *
2582  *	Move the given memory entry from its
2583  *	current object to the specified target object/offset.
2584  *
2585  *	The object must be locked.
2586  */
2587 void
2588 vm_page_rename(
2589 	vm_page_t               mem,
2590 	vm_object_t             new_object,
2591 	vm_object_offset_t      new_offset)
2592 {
2593 	boolean_t       internal_to_external, external_to_internal;
2594 	vm_tag_t        tag;
2595 	vm_object_t     m_object;
2596 
2597 	m_object = VM_PAGE_OBJECT(mem);
2598 
2599 	assert(m_object != new_object);
2600 	assert(m_object);
2601 
2602 	/*
2603 	 *	Changes to mem->vmp_object require the page lock because
2604 	 *	the pageout daemon uses that lock to get the object.
2605 	 */
2606 	vm_page_lockspin_queues();
2607 
2608 	internal_to_external = FALSE;
2609 	external_to_internal = FALSE;
2610 
2611 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2612 		/*
2613 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
2614 		 * if we first move the page to the active queue... it's going to end
2615 		 * up there anyway, and we don't call vm_page_rename() frequently enough
2616 		 * for this to matter.
2617 		 */
2618 		vm_page_queues_remove(mem, FALSE);
2619 		vm_page_activate(mem);
2620 	}
2621 	if (VM_PAGE_PAGEABLE(mem)) {
2622 		if (m_object->internal && !new_object->internal) {
2623 			internal_to_external = TRUE;
2624 		}
2625 		if (!m_object->internal && new_object->internal) {
2626 			external_to_internal = TRUE;
2627 		}
2628 	}
2629 
2630 	tag = m_object->wire_tag;
2631 	vm_page_remove(mem, TRUE);
2632 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2633 
2634 	if (internal_to_external) {
2635 		vm_page_pageable_internal_count--;
2636 		vm_page_pageable_external_count++;
2637 	} else if (external_to_internal) {
2638 		vm_page_pageable_external_count--;
2639 		vm_page_pageable_internal_count++;
2640 	}
2641 
2642 	vm_page_unlock_queues();
2643 }
2644 
2645 /*
2646  *	vm_page_init:
2647  *
2648  *	Initialize the fields in a new page.
2649  *	This takes a structure with random values and initializes it
2650  *	so that it can be given to vm_page_release or vm_page_insert.
2651  */
2652 void
2653 vm_page_init(
2654 	vm_page_t mem,
2655 	ppnum_t   phys_page,
2656 	boolean_t lopage)
2657 {
2658 	uint_t    i;
2659 	uintptr_t *p;
2660 
2661 	assert(phys_page);
2662 
2663 #if DEBUG
2664 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2665 		if (!(pmap_valid_page(phys_page))) {
2666 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2667 		}
2668 	}
2669 #endif /* DEBUG */
2670 
2671 	/*
2672 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2673 	 * try to use initial values which match 0. This minimizes the number of writes
2674 	 * needed for boot-time initialization.
2675 	 *
2676 	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2677 	 */
2678 	assert(VM_PAGE_NOT_ON_Q == 0);
2679 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2680 	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2681 		*p++ = 0;
2682 	}
2683 	mem->vmp_offset = (vm_object_offset_t)-1;
2684 	mem->vmp_busy = TRUE;
2685 	mem->vmp_lopage = lopage;
2686 
2687 	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2688 #if 0
2689 	/*
2690 	 * we're leaving this turned off for now... currently pages
2691 	 * come off the free list and are either immediately dirtied/referenced
2692 	 * due to zero-fill or COW faults, or are used to read or write files...
2693 	 * in the file I/O case, the UPL mechanism takes care of clearing
2694 	 * the state of the HW ref/mod bits in a somewhat fragile way.
2695 	 * Since we may change the way this works in the future (to toughen it up),
2696 	 * I'm leaving this as a reminder of where these bits could get cleared
2697 	 */
2698 
2699 	/*
2700 	 * make sure both the h/w referenced and modified bits are
2701 	 * clear at this point... we are especially dependent on
2702 	 * not finding a 'stale' h/w modified in a number of spots
2703 	 * once this page goes back into use
2704 	 */
2705 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2706 #endif
2707 }
2708 
2709 /*
2710  *	vm_page_grab_fictitious:
2711  *
2712  *	Remove a fictitious page from the free list.
2713  *	Grab a fictitious page from the zone pool.
2714  *	Returns VM_PAGE_NULL if the allocation fails.
2715 
2716 static vm_page_t
2717 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2718 {
2719 	vm_page_t m;
2720 
2721 	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2722 	if (m) {
2723 		vm_page_init(m, phys_addr, FALSE);
2724 		m->vmp_fictitious = TRUE;
2725 	}
2726 	return m;
2727 }
2728 
2729 vm_page_t
2730 vm_page_grab_fictitious(boolean_t canwait)
2731 {
2732 	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2733 }
2734 
2735 int vm_guard_count;
2736 
2737 
2738 vm_page_t
2739 vm_page_grab_guard(boolean_t canwait)
2740 {
2741 	vm_page_t page;
2742 	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2743 	if (page) {
2744 		OSAddAtomic(1, &vm_guard_count);
2745 	}
2746 	return page;
2747 }
2748 
2749 
2750 /*
2751  *	vm_page_release_fictitious:
2752  *
2753  *	Release a fictitious page to the zone pool
2754  */
2755 void
2756 vm_page_release_fictitious(
2757 	vm_page_t m)
2758 {
2759 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2760 	assert(m->vmp_fictitious);
2761 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2762 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2763 
2764 
2765 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2766 		OSAddAtomic(-1, &vm_guard_count);
2767 	}
2768 
2769 	zfree(vm_page_zone, m);
2770 }
2771 
2772 /*
2773  *	vm_pool_low():
2774  *
2775  *	Return true if it is not likely that a non-vm_privileged thread
2776  *	can get memory without blocking.  Advisory only, since the
2777  *	situation may change under us.
2778  */
2779 bool
2780 vm_pool_low(void)
2781 {
2782 	/* No locking, at worst we will fib. */
2783 	return vm_page_free_count <= vm_page_free_reserved;
2784 }
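
/*
 * Illustrative use (hypothetical caller): an allocator that must not block
 * might probe the pool before committing to an allocation:
 *
 *	if (vm_pool_low()) {
 *		return KERN_RESOURCE_SHORTAGE;	\* hypothetical fallback *\
 *	}
 */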
2785 
2786 boolean_t vm_darkwake_mode = FALSE;
2787 
2788 /*
2789  * vm_update_darkwake_mode():
2790  *
2791  * Tells the VM that the system is in / out of darkwake.
2792  *
2793  * Today, the VM only lowers/raises the background queue target
2794  * so as to favor consuming more/less background pages when
2795  * darkwake is ON/OFF.
2796  *
2797  * We might need to do more things in the future.
2798  */
2799 
2800 void
2801 vm_update_darkwake_mode(boolean_t darkwake_mode)
2802 {
2803 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2804 
2805 	vm_page_lockspin_queues();
2806 
2807 	if (vm_darkwake_mode == darkwake_mode) {
2808 		/*
2809 		 * No change.
2810 		 */
2811 		vm_page_unlock_queues();
2812 		return;
2813 	}
2814 
2815 	vm_darkwake_mode = darkwake_mode;
2816 
2817 	if (vm_darkwake_mode == TRUE) {
2818 #if CONFIG_BACKGROUND_QUEUE
2819 
2820 		/* save background target to restore later */
2821 		vm_page_background_target_snapshot = vm_page_background_target;
2822 
2823 		/* target is set to 0...no protection for background pages */
2824 		vm_page_background_target = 0;
2825 
2826 #endif /* CONFIG_BACKGROUND_QUEUE */
2827 	} else if (vm_darkwake_mode == FALSE) {
2828 #if CONFIG_BACKGROUND_QUEUE
2829 
2830 		if (vm_page_background_target_snapshot) {
2831 			vm_page_background_target = vm_page_background_target_snapshot;
2832 		}
2833 #endif /* CONFIG_BACKGROUND_QUEUE */
2834 	}
2835 	vm_page_unlock_queues();
2836 }
2837 
2838 #if CONFIG_BACKGROUND_QUEUE
2839 
2840 void
2841 vm_page_update_background_state(vm_page_t mem)
2842 {
2843 	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2844 		return;
2845 	}
2846 
2847 	if (mem->vmp_in_background == FALSE) {
2848 		return;
2849 	}
2850 
2851 	task_t  my_task = current_task_early();
2852 
2853 	if (my_task) {
2854 		if (task_get_darkwake_mode(my_task)) {
2855 			return;
2856 		}
2857 	}
2858 
2859 #if BACKGROUNDQ_BASED_ON_QOS
2860 	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
2861 		return;
2862 	}
2863 #else
2864 	if (my_task) {
2865 		if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2866 			return;
2867 		}
2868 	}
2869 #endif
2870 	vm_page_lockspin_queues();
2871 
2872 	mem->vmp_in_background = FALSE;
2873 	vm_page_background_promoted_count++;
2874 
2875 	vm_page_remove_from_backgroundq(mem);
2876 
2877 	vm_page_unlock_queues();
2878 }
2879 
2880 
2881 void
2882 vm_page_assign_background_state(vm_page_t mem)
2883 {
2884 	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2885 		return;
2886 	}
2887 
2888 	task_t  my_task = current_task_early();
2889 
2890 	if (my_task) {
2891 		if (task_get_darkwake_mode(my_task)) {
2892 			mem->vmp_in_background = TRUE;
2893 			return;
2894 		}
2895 	}
2896 
2897 #if BACKGROUNDQ_BASED_ON_QOS
2898 	if (proc_get_effective_thread_policy(current_thread(), TASK_POLICY_QOS) <= THREAD_QOS_LEGACY) {
2899 		mem->vmp_in_background = TRUE;
2900 	} else {
2901 		mem->vmp_in_background = FALSE;
2902 	}
2903 #else
2904 	if (my_task) {
2905 		mem->vmp_in_background = proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG);
2906 	}
2907 #endif
2908 }
2909 
2910 
2911 void
2912 vm_page_remove_from_backgroundq(
2913 	vm_page_t       mem)
2914 {
2915 	vm_object_t     m_object;
2916 
2917 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2918 
2919 	if (mem->vmp_on_backgroundq) {
2920 		vm_page_queue_remove(&vm_page_queue_background, mem, vmp_backgroundq);
2921 
2922 		mem->vmp_backgroundq.next = 0;
2923 		mem->vmp_backgroundq.prev = 0;
2924 		mem->vmp_on_backgroundq = FALSE;
2925 
2926 		vm_page_background_count--;
2927 
2928 		m_object = VM_PAGE_OBJECT(mem);
2929 
2930 		if (m_object->internal) {
2931 			vm_page_background_internal_count--;
2932 		} else {
2933 			vm_page_background_external_count--;
2934 		}
2935 	} else {
2936 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.next) == (uintptr_t)NULL &&
2937 		    VM_PAGE_UNPACK_PTR(mem->vmp_backgroundq.prev) == (uintptr_t)NULL);
2938 	}
2939 }
2940 
2941 
2942 void
2943 vm_page_add_to_backgroundq(
2944 	vm_page_t       mem,
2945 	boolean_t       first)
2946 {
2947 	vm_object_t     m_object;
2948 
2949 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2950 
2951 	if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2952 		return;
2953 	}
2954 
2955 	if (mem->vmp_on_backgroundq == FALSE) {
2956 		m_object = VM_PAGE_OBJECT(mem);
2957 
2958 		if (vm_page_background_exclude_external && !m_object->internal) {
2959 			return;
2960 		}
2961 
2962 		if (first == TRUE) {
2963 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_backgroundq);
2964 		} else {
2965 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_backgroundq);
2966 		}
2967 		mem->vmp_on_backgroundq = TRUE;
2968 
2969 		vm_page_background_count++;
2970 
2971 		if (m_object->internal) {
2972 			vm_page_background_internal_count++;
2973 		} else {
2974 			vm_page_background_external_count++;
2975 		}
2976 	}
2977 }
2978 
2979 #endif /* CONFIG_BACKGROUND_QUEUE */
2980 
2981 /*
2982  * This can be switched to FALSE to help debug drivers
2983  * that are having problems with memory > 4G.
2984  */
2985 boolean_t       vm_himemory_mode = TRUE;
2986 
2987 /*
2988  * this interface exists to support hardware controllers
2989  * incapable of generating DMAs with more than 32 bits
2990  * of address on platforms with physical memory > 4G...
2991  */
2992 unsigned int    vm_lopages_allocated_q = 0;
2993 unsigned int    vm_lopages_allocated_cpm_success = 0;
2994 unsigned int    vm_lopages_allocated_cpm_failed = 0;
2995 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
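
/*
 * A driver whose DMA engine is limited to 32-bit addresses would obtain
 * pages via vm_page_grablo() below; when the dedicated lopage queue is
 * empty, the grab falls back to cpm_allocate(..., KMA_LOMEM).
 */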
2996 
2997 vm_page_t
2998 vm_page_grablo(void)
2999 {
3000 	vm_page_t       mem;
3001 
3002 	if (vm_lopage_needed == FALSE) {
3003 		return vm_page_grab();
3004 	}
3005 
3006 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
3007 
3008 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3009 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3010 		assert(vm_lopage_free_count);
3011 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3012 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3013 
3014 		vm_lopage_free_count--;
3015 		vm_lopages_allocated_q++;
3016 
3017 		if (vm_lopage_free_count < vm_lopage_lowater) {
3018 			vm_lopage_refill = TRUE;
3019 		}
3020 
3021 		lck_mtx_unlock(&vm_page_queue_free_lock);
3022 
3023 #if CONFIG_BACKGROUND_QUEUE
3024 		vm_page_assign_background_state(mem);
3025 #endif
3026 	} else {
3027 		lck_mtx_unlock(&vm_page_queue_free_lock);
3028 
3029 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3030 			lck_mtx_lock_spin(&vm_page_queue_free_lock);
3031 			vm_lopages_allocated_cpm_failed++;
3032 			lck_mtx_unlock(&vm_page_queue_free_lock);
3033 
3034 			return VM_PAGE_NULL;
3035 		}
3036 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3037 
3038 		mem->vmp_busy = TRUE;
3039 
3040 		vm_page_lockspin_queues();
3041 
3042 		mem->vmp_gobbled = FALSE;
3043 		vm_page_gobble_count--;
3044 		vm_page_wire_count--;
3045 
3046 		vm_lopages_allocated_cpm_success++;
3047 		vm_page_unlock_queues();
3048 	}
3049 	assert(mem->vmp_busy);
3050 	assert(!mem->vmp_pmapped);
3051 	assert(!mem->vmp_wpmapped);
3052 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3053 
3054 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3055 
3056 	counter_inc(&vm_page_grab_count);
3057 	VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3058 
3059 	return mem;
3060 }
3061 
3062 /*
3063  *	vm_page_grab:
3064  *
3065  *	first try to grab a page from the per-cpu free list...
3066  *	this must be done while pre-emption is disabled... if
3067  *      a page is available, we're done...
3068  *	if no page is available, grab the vm_page_queue_free_lock
3069  *	and see if current number of free pages would allow us
3070  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3071  *	if there are pages available, disable preemption and
3072  *      recheck the state of the per-cpu free list... we could
3073  *	have been preempted and moved to a different cpu, or
3074  *      some other thread could have re-filled it... if still
3075  *	empty, figure out how many pages we can steal from the
3076  *	global free queue and move to the per-cpu queue...
3077  *	return 1 of these pages when done... only wake up the
3078  *      pageout_scan thread if we moved pages from the global
3079  *	list... no need for the wakeup if we've satisfied the
3080  *	request from the per-cpu queue.
3081  */
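
/*
 * Condensed sketch of the above (illustrative pseudocode, not compiled):
 *
 *	disable_preemption();
 *	if ((mem = per-cpu free_pages head) != NULL)
 *		return mem;			\* fast path *\
 *	enable_preemption();
 *	lock(vm_page_queue_free_lock);
 *	steal up to vm_free_magazine_refill_limit pages from the global
 *	colored queues into the per-cpu list, return the first one, and
 *	wake vm_pageout_scan only if the free count fell below free_min.
 */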
3082 
3083 #if CONFIG_SECLUDED_MEMORY
3084 vm_page_t vm_page_grab_secluded(void);
3085 #endif /* CONFIG_SECLUDED_MEMORY */
3086 
3087 static inline void
3088 vm_page_grab_diags(void);
3089 
3090 vm_page_t
3091 vm_page_grab(void)
3092 {
3093 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3094 }
3095 
3096 #if HIBERNATION
3097 boolean_t       hibernate_rebuild_needed = FALSE;
3098 #endif /* HIBERNATION */
3099 
3100 vm_page_t
3101 vm_page_grab_options(
3102 	int grab_options)
3103 {
3104 	vm_page_t       mem;
3105 
3106 	disable_preemption();
3107 
3108 	if ((mem = *PERCPU_GET(free_pages))) {
3109 return_page_from_cpu_list:
3110 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3111 
3112 #if HIBERNATION
3113 		if (hibernate_rebuild_needed) {
3114 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3115 		}
3116 #endif /* HIBERNATION */
3117 
3118 		vm_page_grab_diags();
3119 
3120 		vm_offset_t pcpu_base = current_percpu_base();
3121 		counter_inc_preemption_disabled(&vm_page_grab_count);
3122 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3123 		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3124 
3125 		enable_preemption();
3126 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3127 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3128 
3129 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3130 		assert(mem->vmp_tabled == FALSE);
3131 		assert(mem->vmp_object == 0);
3132 		assert(!mem->vmp_laundry);
3133 		ASSERT_PMAP_FREE(mem);
3134 		assert(mem->vmp_busy);
3135 		assert(!mem->vmp_pmapped);
3136 		assert(!mem->vmp_wpmapped);
3137 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3138 
3139 #if CONFIG_BACKGROUND_QUEUE
3140 		vm_page_assign_background_state(mem);
3141 #endif
3142 		return mem;
3143 	}
3144 	enable_preemption();
3145 
3146 
3147 	/*
3148 	 *	Optionally produce warnings if the wire or gobble
3149 	 *	counts exceed some threshold.
3150 	 */
3151 #if VM_PAGE_WIRE_COUNT_WARNING
3152 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3153 		printf("mk: vm_page_grab(): high wired page count of %d\n",
3154 		    vm_page_wire_count);
3155 	}
3156 #endif
3157 #if VM_PAGE_GOBBLE_COUNT_WARNING
3158 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3159 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3160 		    vm_page_gobble_count);
3161 	}
3162 #endif
3163 
3164 	/*
3165 	 * If free count is low and we have delayed pages from early boot,
3166 	 * get one of those instead.
3167 	 */
3168 	if (__improbable(vm_delayed_count > 0 &&
3169 	    vm_page_free_count <= vm_page_free_target &&
3170 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3171 		return mem;
3172 	}
3173 
3174 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
3175 
3176 	/*
3177 	 *	Only let privileged threads (involved in pageout)
3178 	 *	dip into the reserved pool.
3179 	 */
3180 	if ((vm_page_free_count < vm_page_free_reserved) &&
3181 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
3182 		/* no page for us in the free queue... */
3183 		lck_mtx_unlock(&vm_page_queue_free_lock);
3184 		mem = VM_PAGE_NULL;
3185 
3186 #if CONFIG_SECLUDED_MEMORY
3187 		/* ... but can we try and grab from the secluded queue? */
3188 		if (vm_page_secluded_count > 0 &&
3189 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3190 		    task_can_use_secluded_mem(current_task(), TRUE))) {
3191 			mem = vm_page_grab_secluded();
3192 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3193 				vm_page_secluded.grab_for_iokit++;
3194 				if (mem) {
3195 					vm_page_secluded.grab_for_iokit_success++;
3196 				}
3197 			}
3198 			if (mem) {
3199 				VM_CHECK_MEMORYSTATUS;
3200 
3201 				vm_page_grab_diags();
3202 				counter_inc(&vm_page_grab_count);
3203 				VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3204 
3205 				return mem;
3206 			}
3207 		}
3208 #else /* CONFIG_SECLUDED_MEMORY */
3209 		(void) grab_options;
3210 #endif /* CONFIG_SECLUDED_MEMORY */
3211 	} else {
3212 		vm_page_t        head;
3213 		vm_page_t        tail;
3214 		unsigned int     pages_to_steal;
3215 		unsigned int     color;
3216 		unsigned int clump_end, sub_count;
3217 
3218 		while (vm_page_free_count == 0) {
3219 			lck_mtx_unlock(&vm_page_queue_free_lock);
3220 			/*
3221 			 * must be a privileged thread to be
3222 			 * in this state since a non-privileged
3223 			 * thread would have bailed if we were
3224 			 * under the vm_page_free_reserved mark
3225 			 */
3226 			VM_PAGE_WAIT();
3227 			lck_mtx_lock_spin(&vm_page_queue_free_lock);
3228 		}
3229 
3230 		disable_preemption();
3231 
3232 		if ((mem = *PERCPU_GET(free_pages))) {
3233 			lck_mtx_unlock(&vm_page_queue_free_lock);
3234 
3235 			/*
3236 			 * we got preempted and moved to another processor
3237 			 * or we got preempted and someone else ran and filled the cache
3238 			 */
3239 			goto return_page_from_cpu_list;
3240 		}
3241 		if (vm_page_free_count <= vm_page_free_reserved) {
3242 			pages_to_steal = 1;
3243 		} else {
3244 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3245 				pages_to_steal = vm_free_magazine_refill_limit;
3246 			} else {
3247 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3248 			}
3249 		}
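		/*
		 * i.e. pages_to_steal = MIN(vm_free_magazine_refill_limit,
		 * vm_page_free_count - vm_page_free_reserved), with a floor
		 * of 1 for privileged threads dipping into the reserve.
		 */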
3250 		color = *PERCPU_GET(start_color);
3251 		head = tail = NULL;
3252 
3253 		vm_page_free_count -= pages_to_steal;
3254 		clump_end = sub_count = 0;
3255 
3256 		while (pages_to_steal--) {
3257 			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3258 				color = (color + 1) & vm_color_mask;
3259 			}
3260 #if defined(__x86_64__)
3261 			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3262 			    mem, clump_end);
3263 #else
3264 			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3265 			    mem, vmp_pageq);
3266 #endif
3267 
3268 			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3269 
3270 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3271 
3272 #if defined(__arm__) || defined(__arm64__)
3273 			color = (color + 1) & vm_color_mask;
3274 #else
3275 
3276 #if DEVELOPMENT || DEBUG
3277 
3278 			sub_count++;
3279 			if (clump_end) {
3280 				vm_clump_update_stats(sub_count);
3281 				sub_count = 0;
3282 				color = (color + 1) & vm_color_mask;
3283 			}
3284 #else
3285 			if (clump_end) {
3286 				color = (color + 1) & vm_color_mask;
3287 			}
3288 
3289 #endif /* if DEVELOPMENT || DEBUG */
3290 
3291 #endif  /* if defined(__arm__) || defined(__arm64__) */
3292 
3293 			if (head == NULL) {
3294 				head = mem;
3295 			} else {
3296 				tail->vmp_snext = mem;
3297 			}
3298 			tail = mem;
3299 
3300 			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3301 			assert(mem->vmp_tabled == FALSE);
3302 			assert(mem->vmp_object == 0);
3303 			assert(!mem->vmp_laundry);
3304 
3305 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3306 
3307 			ASSERT_PMAP_FREE(mem);
3308 			assert(mem->vmp_busy);
3309 			assert(!mem->vmp_pmapped);
3310 			assert(!mem->vmp_wpmapped);
3311 			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3312 		}
3313 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3314 		vm_clump_update_stats(sub_count);
3315 #endif
3316 		lck_mtx_unlock(&vm_page_queue_free_lock);
3317 
3318 #if HIBERNATION
3319 		if (hibernate_rebuild_needed) {
3320 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3321 		}
3322 #endif /* HIBERNATION */
3323 		vm_offset_t pcpu_base = current_percpu_base();
3324 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head->vmp_snext;
3325 		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3326 
3327 		/*
3328 		 * satisfy this request
3329 		 */
3330 		vm_page_grab_diags();
3331 		counter_inc_preemption_disabled(&vm_page_grab_count);
3332 		VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3333 		mem = head;
3334 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3335 
3336 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3337 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3338 
3339 		enable_preemption();
3340 	}
3341 	/*
3342 	 *	Decide if we should poke the pageout daemon.
3343 	 *	We do this if the free count is less than the low
3344 	 *	water mark. VM Pageout Scan will keep running till
3345 	 *	the free_count > free_target (& hence above free_min).
3346 	 *	This wakeup is to catch the possibility of the counts
3347 	 *	dropping between VM Pageout Scan parking and this check.
3348 	 *
3349 	 *	We don't have the counts locked ... if they change a little,
3350 	 *	it doesn't really matter.
3351 	 */
3352 	if (vm_page_free_count < vm_page_free_min) {
3353 		lck_mtx_lock(&vm_page_queue_free_lock);
3354 		if (vm_pageout_running == FALSE) {
3355 			lck_mtx_unlock(&vm_page_queue_free_lock);
3356 			thread_wakeup((event_t) &vm_page_free_wanted);
3357 		} else {
3358 			lck_mtx_unlock(&vm_page_queue_free_lock);
3359 		}
3360 	}
3361 
3362 	VM_CHECK_MEMORYSTATUS;
3363 
3364 	if (mem) {
3365 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
3366 
3367 #if CONFIG_BACKGROUND_QUEUE
3368 		vm_page_assign_background_state(mem);
3369 #endif
3370 	}
3371 	return mem;
3372 }
3373 
3374 #if CONFIG_SECLUDED_MEMORY
3375 vm_page_t
3376 vm_page_grab_secluded(void)
3377 {
3378 	vm_page_t       mem;
3379 	vm_object_t     object;
3380 	int             refmod_state;
3381 
3382 	if (vm_page_secluded_count == 0) {
3383 		/* no secluded pages to grab... */
3384 		return VM_PAGE_NULL;
3385 	}
3386 
3387 	/* secluded queue is protected by the VM page queue lock */
3388 	vm_page_lock_queues();
3389 
3390 	if (vm_page_secluded_count == 0) {
3391 		/* no secluded pages to grab... */
3392 		vm_page_unlock_queues();
3393 		return VM_PAGE_NULL;
3394 	}
3395 
3396 #if 00
3397 	/* can we grab from the secluded queue? */
3398 	if (vm_page_secluded_count > vm_page_secluded_target ||
3399 	    (vm_page_secluded_count > 0 &&
3400 	    task_can_use_secluded_mem(current_task(), TRUE))) {
3401 		/* OK */
3402 	} else {
3403 		/* can't grab from secluded queue... */
3404 		vm_page_unlock_queues();
3405 		return VM_PAGE_NULL;
3406 	}
3407 #endif
3408 
3409 	/* we can grab a page from secluded queue! */
3410 	assert((vm_page_secluded_count_free +
3411 	    vm_page_secluded_count_inuse) ==
3412 	    vm_page_secluded_count);
3413 	if (current_task()->task_can_use_secluded_mem) {
3414 		assert(num_tasks_can_use_secluded_mem > 0);
3415 	}
3416 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3417 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3418 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3419 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3420 	vm_page_queues_remove(mem, TRUE);
3421 
3422 	object = VM_PAGE_OBJECT(mem);
3423 
3424 	assert(!mem->vmp_fictitious);
3425 	assert(!VM_PAGE_WIRED(mem));
3426 	if (object == VM_OBJECT_NULL) {
3427 		/* free for grab! */
3428 		vm_page_unlock_queues();
3429 		vm_page_secluded.grab_success_free++;
3430 
3431 		assert(mem->vmp_busy);
3432 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3433 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3434 		assert(mem->vmp_pageq.next == 0);
3435 		assert(mem->vmp_pageq.prev == 0);
3436 		assert(mem->vmp_listq.next == 0);
3437 		assert(mem->vmp_listq.prev == 0);
3438 #if CONFIG_BACKGROUND_QUEUE
3439 		assert(mem->vmp_on_backgroundq == 0);
3440 		assert(mem->vmp_backgroundq.next == 0);
3441 		assert(mem->vmp_backgroundq.prev == 0);
3442 #endif /* CONFIG_BACKGROUND_QUEUE */
3443 		return mem;
3444 	}
3445 
3446 	assert(!object->internal);
3447 //	vm_page_pageable_external_count--;
3448 
3449 	if (!vm_object_lock_try(object)) {
3450 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
3451 		vm_page_secluded.grab_failure_locked++;
3452 reactivate_secluded_page:
3453 		vm_page_activate(mem);
3454 		vm_page_unlock_queues();
3455 		return VM_PAGE_NULL;
3456 	}
3457 	if (mem->vmp_busy ||
3458 	    mem->vmp_cleaning ||
3459 	    mem->vmp_laundry) {
3460 		/* can't steal page in this state... */
3461 		vm_object_unlock(object);
3462 		vm_page_secluded.grab_failure_state++;
3463 		goto reactivate_secluded_page;
3464 	}
3465 
3466 	mem->vmp_busy = TRUE;
3467 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3468 	if (refmod_state & VM_MEM_REFERENCED) {
3469 		mem->vmp_reference = TRUE;
3470 	}
3471 	if (refmod_state & VM_MEM_MODIFIED) {
3472 		SET_PAGE_DIRTY(mem, FALSE);
3473 	}
3474 	if (mem->vmp_dirty || mem->vmp_precious) {
3475 		/* can't grab a dirty page; re-activate */
3476 //		printf("SECLUDED: dirty page %p\n", mem);
3477 		PAGE_WAKEUP_DONE(mem);
3478 		vm_page_secluded.grab_failure_dirty++;
3479 		vm_object_unlock(object);
3480 		goto reactivate_secluded_page;
3481 	}
3482 	if (mem->vmp_reference) {
3483 		/* it's been used but we do need to grab a page... */
3484 	}
3485 
3486 	vm_page_unlock_queues();
3487 
3488 	/* finish what vm_page_free() would have done... */
3489 	vm_page_free_prepare_object(mem, TRUE);
3490 	vm_object_unlock(object);
3491 	object = VM_OBJECT_NULL;
3492 	if (vm_page_free_verify) {
3493 		ASSERT_PMAP_FREE(mem);
3494 	}
3495 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3496 	vm_page_secluded.grab_success_other++;
3497 
3498 	assert(mem->vmp_busy);
3499 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3500 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3501 	assert(mem->vmp_pageq.next == 0);
3502 	assert(mem->vmp_pageq.prev == 0);
3503 	assert(mem->vmp_listq.next == 0);
3504 	assert(mem->vmp_listq.prev == 0);
3505 #if CONFIG_BACKGROUND_QUEUE
3506 	assert(mem->vmp_on_backgroundq == 0);
3507 	assert(mem->vmp_backgroundq.next == 0);
3508 	assert(mem->vmp_backgroundq.prev == 0);
3509 #endif /* CONFIG_BACKGROUND_QUEUE */
3510 
3511 	return mem;
3512 }
3513 
3514 uint64_t
3515 vm_page_secluded_drain(void)
3516 {
3517 	vm_page_t local_freeq;
3518 	int local_freed;
3519 	uint64_t num_reclaimed;
3520 	unsigned int saved_secluded_count, saved_secluded_target;
3521 
3522 	num_reclaimed = 0;
3523 	local_freeq = NULL;
3524 	local_freed = 0;
3525 
3526 	vm_page_lock_queues();
3527 
3528 	saved_secluded_count = vm_page_secluded_count;
3529 	saved_secluded_target = vm_page_secluded_target;
3530 	vm_page_secluded_target = 0;
3531 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3532 	while (vm_page_secluded_count) {
3533 		vm_page_t secluded_page;
3534 
3535 		assert((vm_page_secluded_count_free +
3536 		    vm_page_secluded_count_inuse) ==
3537 		    vm_page_secluded_count);
3538 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3539 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3540 
3541 		vm_page_queues_remove(secluded_page, FALSE);
3542 		assert(!secluded_page->vmp_fictitious);
3543 		assert(!VM_PAGE_WIRED(secluded_page));
3544 
3545 		if (secluded_page->vmp_object == 0) {
3546 			/* transfer to free queue */
3547 			assert(secluded_page->vmp_busy);
3548 			secluded_page->vmp_snext = local_freeq;
3549 			local_freeq = secluded_page;
3550 			local_freed += 1;
3551 		} else {
3552 			/* transfer to head of active queue */
3553 			vm_page_enqueue_active(secluded_page, FALSE);
3554 			secluded_page = VM_PAGE_NULL;
3555 		}
3556 		num_reclaimed++;
3557 	}
3558 	vm_page_secluded_target = saved_secluded_target;
3559 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3560 
3561 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3562 
3563 	vm_page_unlock_queues();
3564 
3565 	if (local_freed) {
3566 		vm_page_free_list(local_freeq, TRUE);
3567 		local_freeq = NULL;
3568 		local_freed = 0;
3569 	}
3570 
3571 	return num_reclaimed;
3572 }
3573 #endif /* CONFIG_SECLUDED_MEMORY */
3574 
3575 
3576 static inline void
3577 vm_page_grab_diags()
3578 {
3579 #if DEVELOPMENT || DEBUG
3580 	task_t task = current_task_early();
3581 	if (task == NULL) {
3582 		return;
3583 	}
3584 
3585 	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3586 #endif /* DEVELOPMENT || DEBUG */
3587 }
3588 
3589 /*
3590  *	vm_page_release:
3591  *
3592  *	Return a page to the free list.
3593  */
3594 
3595 void
3596 vm_page_release(
3597 	vm_page_t       mem,
3598 	boolean_t       page_queues_locked)
3599 {
3600 	unsigned int    color;
3601 	int     need_wakeup = 0;
3602 	int     need_priv_wakeup = 0;
3603 #if CONFIG_SECLUDED_MEMORY
3604 	int     need_secluded_wakeup = 0;
3605 #endif /* CONFIG_SECLUDED_MEMORY */
3606 	event_t wakeup_event = NULL;
3607 
3608 	if (page_queues_locked) {
3609 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3610 	} else {
3611 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3612 	}
3613 
3614 	assert(!mem->vmp_private && !mem->vmp_fictitious);
3615 	if (vm_page_free_verify) {
3616 		ASSERT_PMAP_FREE(mem);
3617 	}
3618 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
3619 
3620 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3621 
3622 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
3623 
3624 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3625 	assert(mem->vmp_busy);
3626 	assert(!mem->vmp_laundry);
3627 	assert(mem->vmp_object == 0);
3628 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3629 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3630 #if CONFIG_BACKGROUND_QUEUE
3631 	assert(mem->vmp_backgroundq.next == 0 &&
3632 	    mem->vmp_backgroundq.prev == 0 &&
3633 	    mem->vmp_on_backgroundq == FALSE);
3634 #endif
3635 	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3636 	    vm_lopage_free_count < vm_lopage_free_limit &&
3637 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3638 		/*
3639 		 * this exists to support hardware controllers
3640 		 * incapable of generating DMAs with more than 32 bits
3641 		 * of address on platforms with physical memory > 4G...
3642 		 */
3643 		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3644 		vm_lopage_free_count++;
3645 
3646 		if (vm_lopage_free_count >= vm_lopage_free_limit) {
3647 			vm_lopage_refill = FALSE;
3648 		}
3649 
3650 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3651 		mem->vmp_lopage = TRUE;
3652 #if CONFIG_SECLUDED_MEMORY
3653 	} else if (vm_page_free_count > vm_page_free_reserved &&
3654 	    vm_page_secluded_count < vm_page_secluded_target &&
3655 	    num_tasks_can_use_secluded_mem == 0) {
3656 		/*
3657 		 * XXX FBDP TODO: also avoid refilling secluded queue
3658 		 * when some IOKit objects are already grabbing from it...
3659 		 */
3660 		if (!page_queues_locked) {
3661 			if (!vm_page_trylock_queues()) {
3662 				/* take locks in right order */
3663 				lck_mtx_unlock(&vm_page_queue_free_lock);
3664 				vm_page_lock_queues();
3665 				lck_mtx_lock_spin(&vm_page_queue_free_lock);
3666 			}
3667 		}
3668 		mem->vmp_lopage = FALSE;
3669 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3670 		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3671 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3672 		vm_page_secluded_count++;
3673 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3674 		vm_page_secluded_count_free++;
3675 		if (!page_queues_locked) {
3676 			vm_page_unlock_queues();
3677 		}
3678 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3679 		if (vm_page_free_wanted_secluded > 0) {
3680 			vm_page_free_wanted_secluded--;
3681 			need_secluded_wakeup = 1;
3682 		}
3683 #endif /* CONFIG_SECLUDED_MEMORY */
3684 	} else {
3685 		mem->vmp_lopage = FALSE;
3686 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3687 
3688 		color = VM_PAGE_GET_COLOR(mem);
3689 #if defined(__x86_64__)
3690 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3691 #else
3692 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3693 #endif
3694 		vm_page_free_count++;
3695 		/*
3696 		 *	Check if we should wake up someone waiting for page.
3697 		 *	But don't bother waking them unless they can allocate.
3698 		 *
3699 		 *	We wake up only one thread, to prevent starvation.
3700 		 *	Because the scheduling system handles wait queues FIFO,
3701 		 *	if we wake up all waiting threads, one greedy thread
3702 		 *	can starve multiple nice-guy threads.  When the threads
3703 		 *	all wake up, the greedy thread runs first, grabs the page,
3704 		 *	and waits for another page.  It will be the first to run
3705 		 *	when the next page is freed.
3706 		 *
3707 		 *	However, there is a slight danger here.
3708 		 *	The thread we wake might not use the free page.
3709 		 *	Then the other threads could wait indefinitely
3710 		 *	while the page goes unused.  To forestall this,
3711 		 *	the pageout daemon will keep making free pages
3712 		 *	as long as vm_page_free_wanted is non-zero.
3713 		 */
3714 
3715 		assert(vm_page_free_count > 0);
3716 		if (vm_page_free_wanted_privileged > 0) {
3717 			vm_page_free_wanted_privileged--;
3718 			need_priv_wakeup = 1;
3719 #if CONFIG_SECLUDED_MEMORY
3720 		} else if (vm_page_free_wanted_secluded > 0 &&
3721 		    vm_page_free_count > vm_page_free_reserved) {
3722 			vm_page_free_wanted_secluded--;
3723 			need_secluded_wakeup = 1;
3724 #endif /* CONFIG_SECLUDED_MEMORY */
3725 		} else if (vm_page_free_wanted > 0 &&
3726 		    vm_page_free_count > vm_page_free_reserved) {
3727 			vm_page_free_wanted--;
3728 			need_wakeup = 1;
3729 		}
3730 	}
3731 	vm_pageout_vminfo.vm_page_pages_freed++;
3732 
3733 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3734 
3735 	lck_mtx_unlock(&vm_page_queue_free_lock);
3736 
3737 	if (need_priv_wakeup) {
3738 		wakeup_event = &vm_page_free_wanted_privileged;
3739 	}
3740 #if CONFIG_SECLUDED_MEMORY
3741 	else if (need_secluded_wakeup) {
3742 		wakeup_event = &vm_page_free_wanted_secluded;
3743 	}
3744 #endif /* CONFIG_SECLUDED_MEMORY */
3745 	else if (need_wakeup) {
3746 		wakeup_event = &vm_page_free_count;
3747 	}
3748 
3749 	if (wakeup_event) {
3750 		if (vps_dynamic_priority_enabled == TRUE) {
3751 			thread_t thread_woken = NULL;
3752 			wakeup_one_with_inheritor((event_t) wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
3753 			/*
3754 			 * (80947592) if this is the last reference on this
3755 			 * thread, calling thread_deallocate() here
3756 			 * might take the tasks_threads_lock,
3757 			 * sadly thread_create_internal is doing several
3758 			 * allocations under this lock, which can result in
3759 			 * deadlocks with the pageout scan daemon.
3760 			 *
3761 			 * FIXME: we should disallow allocations under the
3762 			 * tasks_threads_lock, but that is a larger fix to make.
3763 			 */
3764 			thread_deallocate_safe(thread_woken);
3765 		} else {
3766 			thread_wakeup_one((event_t) wakeup_event);
3767 		}
3768 	}
3769 
3770 	VM_CHECK_MEMORYSTATUS;
3771 }
3772 
3773 /*
3774  * This version of vm_page_release() is used only at startup
3775  * when we are single-threaded and pages are being released
3776  * for the first time. Hence, no locking is done and unnecessary checks are skipped.
3777  * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
3778  */
3779 void
3780 vm_page_release_startup(
3781 	vm_page_t       mem)
3782 {
3783 	vm_page_queue_t queue_free;
3784 
3785 	if (vm_lopage_free_count < vm_lopage_free_limit &&
3786 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3787 		mem->vmp_lopage = TRUE;
3788 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3789 		vm_lopage_free_count++;
3790 		queue_free = &vm_lopage_queue_free;
3791 #if CONFIG_SECLUDED_MEMORY
3792 	} else if (vm_page_secluded_count < vm_page_secluded_target) {
3793 		mem->vmp_lopage = FALSE;
3794 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3795 		vm_page_secluded_count++;
3796 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3797 		vm_page_secluded_count_free++;
3798 		queue_free = &vm_page_queue_secluded;
3799 #endif /* CONFIG_SECLUDED_MEMORY */
3800 	} else {
3801 		mem->vmp_lopage = FALSE;
3802 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3803 		vm_page_free_count++;
3804 		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3805 	}
3806 	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3807 #if defined(__x86_64__)
3808 		vm_page_queue_enter_clump(queue_free, mem);
3809 #else
3810 		vm_page_queue_enter(queue_free, mem, vmp_pageq);
3811 #endif
3812 	} else {
3813 		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3814 	}
3815 }
3816 
3817 /*
3818  *	vm_page_wait:
3819  *
3820  *	Wait for a page to become available.
3821  *	If there are plenty of free pages, then we don't sleep.
3822  *
3823  *	Returns:
3824  *		TRUE:  There may be another page, try again
3825  *		FALSE: We were interrupted out of our wait, don't try again
3826  */
3827 
3828 boolean_t
3829 vm_page_wait(
3830 	int     interruptible )
3831 {
3832 	/*
3833 	 *	We can't use vm_page_free_reserved to make this
3834 	 *	determination.  Consider: some thread might
3835 	 *	need to allocate two pages.  The first allocation
3836 	 *	succeeds, the second fails.  After the first page is freed,
3837 	 *	a call to vm_page_wait must really block.
3838 	 */
3839 	kern_return_t   wait_result;
3840 	int             need_wakeup = 0;
3841 	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3842 	event_t         wait_event = NULL;
3843 
3844 	lck_mtx_lock_spin(&vm_page_queue_free_lock);
3845 
3846 	if (is_privileged && vm_page_free_count) {
3847 		lck_mtx_unlock(&vm_page_queue_free_lock);
3848 		return TRUE;
3849 	}
3850 
3851 	if (vm_page_free_count >= vm_page_free_target) {
3852 		lck_mtx_unlock(&vm_page_queue_free_lock);
3853 		return TRUE;
3854 	}
3855 
3856 	if (is_privileged) {
3857 		if (vm_page_free_wanted_privileged++ == 0) {
3858 			need_wakeup = 1;
3859 		}
3860 		wait_event = (event_t)&vm_page_free_wanted_privileged;
3861 #if CONFIG_SECLUDED_MEMORY
3862 	} else if (secluded_for_apps &&
3863 	    task_can_use_secluded_mem(current_task(), FALSE)) {
3864 #if 00
3865 		/* XXX FBDP: need pageq lock for this... */
3866 		/* XXX FBDP: might wait even if pages available, */
3867 		/* XXX FBDP: hopefully not for too long... */
3868 		if (vm_page_secluded_count > 0) {
3869 			lck_mtx_unlock(&vm_page_queue_free_lock);
3870 			return TRUE;
3871 		}
3872 #endif
3873 		if (vm_page_free_wanted_secluded++ == 0) {
3874 			need_wakeup = 1;
3875 		}
3876 		wait_event = (event_t)&vm_page_free_wanted_secluded;
3877 #endif /* CONFIG_SECLUDED_MEMORY */
3878 	} else {
3879 		if (vm_page_free_wanted++ == 0) {
3880 			need_wakeup = 1;
3881 		}
3882 		wait_event = (event_t)&vm_page_free_count;
3883 	}
3884 
3885 	/*
3886 	 * We don't do a vm_pageout_scan wakeup if we already have
3887 	 * some waiters because vm_pageout_scan checks for waiters
3888 	 * before it returns and does so behind the vm_page_queue_free_lock,
3889 	 * which we own when we bump the waiter counts.
3890 	 */
3891 
3892 	if (vps_dynamic_priority_enabled == TRUE) {
3893 		/*
3894 		 * We are waking up vm_pageout_scan here. If it needs
3895 		 * the vm_page_queue_free_lock before we unlock it
3896 		 * we'll end up just blocking and incur an extra
3897 		 * context switch. Could be a perf. issue.
3898 		 */
3899 
3900 		if (need_wakeup) {
3901 			thread_wakeup((event_t)&vm_page_free_wanted);
3902 		}
3903 
3904 		/*
3905 		 * LD: This event is going to get recorded every time because
3906 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
3907 		 * We just block in that routine.
3908 		 */
3909 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3910 		    vm_page_free_wanted_privileged,
3911 		    vm_page_free_wanted,
3912 #if CONFIG_SECLUDED_MEMORY
3913 		    vm_page_free_wanted_secluded,
3914 #else /* CONFIG_SECLUDED_MEMORY */
3915 		    0,
3916 #endif /* CONFIG_SECLUDED_MEMORY */
3917 		    0);
3918 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
3919 		    LCK_SLEEP_UNLOCK,
3920 		    wait_event,
3921 		    vm_pageout_scan_thread,
3922 		    interruptible,
3923 		    0);
3924 	} else {
3925 		wait_result = assert_wait(wait_event, interruptible);
3926 
3927 		lck_mtx_unlock(&vm_page_queue_free_lock);
3928 
3929 		if (need_wakeup) {
3930 			thread_wakeup((event_t)&vm_page_free_wanted);
3931 		}
3932 
3933 		if (wait_result == THREAD_WAITING) {
3934 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3935 			    vm_page_free_wanted_privileged,
3936 			    vm_page_free_wanted,
3937 #if CONFIG_SECLUDED_MEMORY
3938 			    vm_page_free_wanted_secluded,
3939 #else /* CONFIG_SECLUDED_MEMORY */
3940 			    0,
3941 #endif /* CONFIG_SECLUDED_MEMORY */
3942 			    0);
3943 			wait_result = thread_block(THREAD_CONTINUE_NULL);
3944 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
3945 			    VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
3946 		}
3947 	}
3948 
3949 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
3950 }
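
/*
 * Example (sketch, not from this file): callers typically pair
 * vm_page_grab() with vm_page_wait() in a retry loop, giving up
 * only if the wait was interrupted:
 *
 *	for (;;) {
 *		mem = vm_page_grab();
 *		if (mem != VM_PAGE_NULL)
 *			break;
 *		if (!vm_page_wait(interruptible))
 *			return VM_PAGE_NULL;	// interrupted; don't retry
 *	}
 */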
3951 
3952 /*
3953  *	vm_page_alloc:
3954  *
3955  *	Allocate and return a memory cell associated
3956  *	with this VM object/offset pair.
3957  *
3958  *	Object must be locked.
3959  */
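/*
 * Example (sketch, hypothetical caller): the object must be locked
 * exclusively across the call, since vm_page_alloc() inserts the new
 * page into the object at the given offset:
 *
 *	vm_object_lock(object);
 *	mem = vm_page_alloc(object, offset);
 *	vm_object_unlock(object);
 *	if (mem == VM_PAGE_NULL)
 *		...wait for memory or fail...
 */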
3960 
3961 vm_page_t
3962 vm_page_alloc(
3963 	vm_object_t             object,
3964 	vm_object_offset_t      offset)
3965 {
3966 	vm_page_t       mem;
3967 	int             grab_options;
3968 
3969 	vm_object_lock_assert_exclusive(object);
3970 	grab_options = 0;
3971 #if CONFIG_SECLUDED_MEMORY
3972 	if (object->can_grab_secluded) {
3973 		grab_options |= VM_PAGE_GRAB_SECLUDED;
3974 	}
3975 #endif /* CONFIG_SECLUDED_MEMORY */
3976 	mem = vm_page_grab_options(grab_options);
3977 	if (mem == VM_PAGE_NULL) {
3978 		return VM_PAGE_NULL;
3979 	}
3980 
3981 	vm_page_insert(mem, object, offset);
3982 
3983 	return mem;
3984 }
3985 
3986 /*
3987  *	vm_page_free_prepare:
3988  *
3989  *	Removes page from any queue it may be on
3990  *	and disassociates it from its VM object.
3991  *
3992  *	Object and page queues must be locked prior to entry.
3993  */
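/*
 * Note (added): the free path is split into a queues phase and an
 * object phase so that callers such as vm_page_free_unlocked() can
 * drop the page-queue lock between the two steps.
 */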
3994 static void
3995 vm_page_free_prepare(
3996 	vm_page_t       mem)
3997 {
3998 	vm_page_free_prepare_queues(mem);
3999 	vm_page_free_prepare_object(mem, TRUE);
4000 }
4001 
4002 
4003 void
4004 vm_page_free_prepare_queues(
4005 	vm_page_t       mem)
4006 {
4007 	vm_object_t     m_object;
4008 
4009 	VM_PAGE_CHECK(mem);
4010 
4011 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4012 	assert(!mem->vmp_cleaning);
4013 	m_object = VM_PAGE_OBJECT(mem);
4014 
4015 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4016 	if (m_object) {
4017 		vm_object_lock_assert_exclusive(m_object);
4018 	}
4019 	if (mem->vmp_laundry) {
4020 		/*
4021 		 * We may have to free a page while it's being laundered
4022 		 * if we lost its pager (due to a forced unmount, for example).
4023 		 * We need to call vm_pageout_steal_laundry() before removing
4024 		 * the page from its VM object, so that we can remove it
4025 		 * from its pageout queue and adjust the laundry accounting
4026 		 */
4027 		vm_pageout_steal_laundry(mem, TRUE);
4028 	}
4029 
4030 	vm_page_queues_remove(mem, TRUE);
4031 
4032 	if (VM_PAGE_WIRED(mem)) {
4033 		assert(mem->vmp_wire_count > 0);
4034 
4035 		if (m_object) {
4036 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4037 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4038 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4039 
4040 			assert(m_object->resident_page_count >=
4041 			    m_object->wired_page_count);
4042 
4043 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4044 				OSAddAtomic(+1, &vm_page_purgeable_count);
4045 				assert(vm_page_purgeable_wired_count > 0);
4046 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4047 			}
4048 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4049 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4050 			    m_object->vo_owner != TASK_NULL) {
4051 				task_t          owner;
4052 				int             ledger_idx_volatile;
4053 				int             ledger_idx_nonvolatile;
4054 				int             ledger_idx_volatile_compressed;
4055 				int             ledger_idx_nonvolatile_compressed;
4056 				boolean_t       do_footprint;
4057 
4058 				owner = VM_OBJECT_OWNER(m_object);
4059 				vm_object_ledger_tag_ledgers(
4060 					m_object,
4061 					&ledger_idx_volatile,
4062 					&ledger_idx_nonvolatile,
4063 					&ledger_idx_volatile_compressed,
4064 					&ledger_idx_nonvolatile_compressed,
4065 					&do_footprint);
4066 				/*
4067 				 * While wired, this page was accounted
4068 				 * as "non-volatile" but it should now
4069 				 * be accounted as "volatile".
4070 				 */
4071 				/* one less "non-volatile"... */
4072 				ledger_debit(owner->ledger,
4073 				    ledger_idx_nonvolatile,
4074 				    PAGE_SIZE);
4075 				if (do_footprint) {
4076 					/* ... and "phys_footprint" */
4077 					ledger_debit(owner->ledger,
4078 					    task_ledgers.phys_footprint,
4079 					    PAGE_SIZE);
4080 				}
4081 				/* one more "volatile" */
4082 				ledger_credit(owner->ledger,
4083 				    ledger_idx_volatile,
4084 				    PAGE_SIZE);
4085 			}
4086 		}
4087 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4088 			vm_page_wire_count--;
4089 		}
4090 
4091 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4092 		mem->vmp_wire_count = 0;
4093 		assert(!mem->vmp_gobbled);
4094 	} else if (mem->vmp_gobbled) {
4095 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4096 			vm_page_wire_count--;
4097 		}
4098 		vm_page_gobble_count--;
4099 	}
4100 }
4101 
4102 
4103 void
4104 vm_page_free_prepare_object(
4105 	vm_page_t       mem,
4106 	boolean_t       remove_from_hash)
4107 {
4108 	if (mem->vmp_tabled) {
4109 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4110 	}
4111 	PAGE_WAKEUP(mem);               /* clears wanted */
4112 
4113 	if (mem->vmp_private) {
4114 		mem->vmp_private = FALSE;
4115 		mem->vmp_fictitious = TRUE;
4116 		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4117 	}
4118 	if (!mem->vmp_fictitious) {
4119 		assert(mem->vmp_pageq.next == 0);
4120 		assert(mem->vmp_pageq.prev == 0);
4121 		assert(mem->vmp_listq.next == 0);
4122 		assert(mem->vmp_listq.prev == 0);
4123 #if CONFIG_BACKGROUND_QUEUE
4124 		assert(mem->vmp_backgroundq.next == 0);
4125 		assert(mem->vmp_backgroundq.prev == 0);
4126 #endif /* CONFIG_BACKGROUND_QUEUE */
4127 		assert(mem->vmp_next_m == 0);
4128 		ASSERT_PMAP_FREE(mem);
4129 		vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4130 	}
4131 }
4132 
4133 
4134 /*
4135  *	vm_page_free:
4136  *
4137  *	Returns the given page to the free list,
4138  *	disassociating it with any VM object.
4139  *
4140  *	Object and page queues must be locked prior to entry.
4141  */
4142 void
4143 vm_page_free(
4144 	vm_page_t       mem)
4145 {
4146 	vm_page_free_prepare(mem);
4147 
4148 	if (mem->vmp_fictitious) {
4149 		vm_page_release_fictitious(mem);
4150 	} else {
4151 		vm_page_release(mem,
4152 		    TRUE);             /* page queues are locked */
4153 	}
4154 }
4155 
4156 
4157 void
4158 vm_page_free_unlocked(
4159 	vm_page_t       mem,
4160 	boolean_t       remove_from_hash)
4161 {
4162 	vm_page_lockspin_queues();
4163 	vm_page_free_prepare_queues(mem);
4164 	vm_page_unlock_queues();
4165 
4166 	vm_page_free_prepare_object(mem, remove_from_hash);
4167 
4168 	if (mem->vmp_fictitious) {
4169 		vm_page_release_fictitious(mem);
4170 	} else {
4171 		vm_page_release(mem, FALSE); /* page queues are not locked */
4172 	}
4173 }
4174 
4175 
4176 /*
4177  * Free a list of pages.  The list can be up to several hundred pages,
4178  * as batched up by vm_pageout_scan().
4179  * The big win is not having to take the free list lock once
4180  * per page.
4181  *
4182  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4183  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4184  */
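/*
 * Example (sketch): callers accumulate pages on a local singly-linked
 * list through vmp_snext and then free the whole batch at once, as
 * vm_page_secluded_drain() does above:
 *
 *	mem->vmp_snext = local_freeq;
 *	local_freeq = mem;
 *	local_freed++;
 *	...
 *	vm_page_free_list(local_freeq, TRUE);
 */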
4185 void
4186 vm_page_free_list(
4187 	vm_page_t       freeq,
4188 	boolean_t       prepare_object)
4189 {
4190 	vm_page_t       mem;
4191 	vm_page_t       nxt;
4192 	vm_page_t       local_freeq;
4193 	int             pg_count;
4194 
4195 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4196 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4197 
4198 	while (freeq) {
4199 		pg_count = 0;
4200 		local_freeq = VM_PAGE_NULL;
4201 		mem = freeq;
4202 
4203 		/*
4204 		 * break up the processing into smaller chunks so
4205 		 * that we can 'pipeline' the pages onto the
4206 		 * free list w/o introducing too much
4207 		 * contention on the global free queue lock
4208 		 */
4209 		while (mem && pg_count < 64) {
4210 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4211 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4212 #if CONFIG_BACKGROUND_QUEUE
4213 			assert(mem->vmp_backgroundq.next == 0 &&
4214 			    mem->vmp_backgroundq.prev == 0 &&
4215 			    mem->vmp_on_backgroundq == FALSE);
4216 #endif
4217 			nxt = mem->vmp_snext;
4218 			mem->vmp_snext = NULL;
4219 			assert(mem->vmp_pageq.prev == 0);
4220 
4221 			if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) {
4222 				ASSERT_PMAP_FREE(mem);
4223 			}
4224 			if (prepare_object == TRUE) {
4225 				vm_page_free_prepare_object(mem, TRUE);
4226 			}
4227 
4228 			if (!mem->vmp_fictitious) {
4229 				assert(mem->vmp_busy);
4230 
4231 				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4232 				    vm_lopage_free_count < vm_lopage_free_limit &&
4233 				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4234 					vm_page_release(mem, FALSE); /* page queues are not locked */
4235 #if CONFIG_SECLUDED_MEMORY
4236 				} else if (vm_page_secluded_count < vm_page_secluded_target &&
4237 				    num_tasks_can_use_secluded_mem == 0) {
4238 					vm_page_release(mem,
4239 					    FALSE);             /* page queues are not locked */
4240 #endif /* CONFIG_SECLUDED_MEMORY */
4241 				} else {
4242 					/*
4243 					 * IMPORTANT: we can't set the page "free" here
4244 					 * because that would make the page eligible for
4245 					 * a physically-contiguous allocation (see
4246 					 * vm_page_find_contiguous()) right away (we don't
4247 					 * hold the vm_page_queue_free lock).  That would
4248 					 * cause trouble because the page is not actually
4249 					 * in the free queue yet...
4250 					 */
4251 					mem->vmp_snext = local_freeq;
4252 					local_freeq = mem;
4253 					pg_count++;
4254 
4255 					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4256 				}
4257 			} else {
4258 				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4259 				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4260 				vm_page_release_fictitious(mem);
4261 			}
4262 			mem = nxt;
4263 		}
4264 		freeq = mem;
4265 
4266 		if ((mem = local_freeq)) {
4267 			unsigned int    avail_free_count;
4268 			unsigned int    need_wakeup = 0;
4269 			unsigned int    need_priv_wakeup = 0;
4270 #if CONFIG_SECLUDED_MEMORY
4271 			unsigned int    need_wakeup_secluded = 0;
4272 #endif /* CONFIG_SECLUDED_MEMORY */
4273 			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4274 			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4275 
4276 			lck_mtx_lock_spin(&vm_page_queue_free_lock);
4277 
4278 			while (mem) {
4279 				int     color;
4280 
4281 				nxt = mem->vmp_snext;
4282 
4283 				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4284 				assert(mem->vmp_busy);
4285 				mem->vmp_lopage = FALSE;
4286 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4287 
4288 				color = VM_PAGE_GET_COLOR(mem);
4289 #if defined(__x86_64__)
4290 				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4291 #else
4292 				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4293 				    mem, vmp_pageq);
4294 #endif
4295 				mem = nxt;
4296 			}
4297 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4298 			vm_page_free_count += pg_count;
4299 			avail_free_count = vm_page_free_count;
4300 
4301 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4302 
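			/*
			 * Note (added): distribute the pages just freed to the
			 * waiter classes in priority order: privileged waiters
			 * first (they may consume reserved pages), then
			 * secluded, then normal waiters. Each class is later
			 * woken one thread per available page, or all at once
			 * if its wanted count has dropped to zero.
			 */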
4303 			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4304 				if (avail_free_count < vm_page_free_wanted_privileged) {
4305 					need_priv_wakeup = avail_free_count;
4306 					vm_page_free_wanted_privileged -= avail_free_count;
4307 					avail_free_count = 0;
4308 				} else {
4309 					need_priv_wakeup = vm_page_free_wanted_privileged;
4310 					avail_free_count -= vm_page_free_wanted_privileged;
4311 					vm_page_free_wanted_privileged = 0;
4312 				}
4313 			}
4314 #if CONFIG_SECLUDED_MEMORY
4315 			if (vm_page_free_wanted_secluded > 0 &&
4316 			    avail_free_count > vm_page_free_reserved) {
4317 				unsigned int available_pages;
4318 				available_pages = (avail_free_count -
4319 				    vm_page_free_reserved);
4320 				if (available_pages <
4321 				    vm_page_free_wanted_secluded) {
4322 					need_wakeup_secluded = available_pages;
4323 					vm_page_free_wanted_secluded -=
4324 					    available_pages;
4325 					avail_free_count -= available_pages;
4326 				} else {
4327 					need_wakeup_secluded =
4328 					    vm_page_free_wanted_secluded;
4329 					avail_free_count -=
4330 					    vm_page_free_wanted_secluded;
4331 					vm_page_free_wanted_secluded = 0;
4332 				}
4333 			}
4334 #endif /* CONFIG_SECLUDED_MEMORY */
4335 			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4336 				unsigned int  available_pages;
4337 
4338 				available_pages = avail_free_count - vm_page_free_reserved;
4339 
4340 				if (available_pages >= vm_page_free_wanted) {
4341 					need_wakeup = vm_page_free_wanted;
4342 					vm_page_free_wanted = 0;
4343 				} else {
4344 					need_wakeup = available_pages;
4345 					vm_page_free_wanted -= available_pages;
4346 				}
4347 			}
4348 			lck_mtx_unlock(&vm_page_queue_free_lock);
4349 
4350 			priv_wakeup_event = NULL;
4351 			secluded_wakeup_event = NULL;
4352 			normal_wakeup_event = NULL;
4353 
4354 			priv_wakeup_all = FALSE;
4355 			secluded_wakeup_all = FALSE;
4356 			normal_wakeup_all = FALSE;
4357 
4358 
4359 			if (need_priv_wakeup != 0) {
4360 				/*
4361 				 * There shouldn't be that many VM-privileged threads,
4362 				 * so let's wake them all up, even if we don't quite
4363 				 * have enough pages to satisfy them all.
4364 				 */
4365 				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4366 				priv_wakeup_all = TRUE;
4367 			}
4368 #if CONFIG_SECLUDED_MEMORY
4369 			if (need_wakeup_secluded != 0 &&
4370 			    vm_page_free_wanted_secluded == 0) {
4371 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4372 				secluded_wakeup_all = TRUE;
4373 				need_wakeup_secluded = 0;
4374 			} else {
4375 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4376 			}
4377 #endif /* CONFIG_SECLUDED_MEMORY */
4378 			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4379 				/*
4380 				 * We don't expect to have any more waiters
4381 				 * after this, so let's wake them all up at
4382 				 * once.
4383 				 */
4384 				normal_wakeup_event = (event_t) &vm_page_free_count;
4385 				normal_wakeup_all = TRUE;
4386 				need_wakeup = 0;
4387 			} else {
4388 				normal_wakeup_event = (event_t) &vm_page_free_count;
4389 			}
4390 
4391 			if (priv_wakeup_event ||
4392 #if CONFIG_SECLUDED_MEMORY
4393 			    secluded_wakeup_event ||
4394 #endif /* CONFIG_SECLUDED_MEMORY */
4395 			    normal_wakeup_event) {
4396 				if (vps_dynamic_priority_enabled == TRUE) {
4397 					thread_t thread_woken = NULL;
4398 
4399 					if (priv_wakeup_all == TRUE) {
4400 						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4401 					}
4402 
4403 #if CONFIG_SECLUDED_MEMORY
4404 					if (secluded_wakeup_all == TRUE) {
4405 						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4406 					}
4407 
4408 					while (need_wakeup_secluded-- != 0) {
4409 						/*
4410 						 * Wake up one waiter per page we just released.
4411 						 */
4412 						wakeup_one_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4413 						thread_deallocate(thread_woken);
4414 					}
4415 #endif /* CONFIG_SECLUDED_MEMORY */
4416 
4417 					if (normal_wakeup_all == TRUE) {
4418 						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4419 					}
4420 
4421 					while (need_wakeup-- != 0) {
4422 						/*
4423 						 * Wake up one waiter per page we just released.
4424 						 */
4425 						wakeup_one_with_inheritor(normal_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4426 						thread_deallocate(thread_woken);
4427 					}
4428 				} else {
4429 					/*
4430 					 * Non-priority-aware wakeups.
4431 					 */
4432 
4433 					if (priv_wakeup_all == TRUE) {
4434 						thread_wakeup(priv_wakeup_event);
4435 					}
4436 
4437 #if CONFIG_SECLUDED_MEMORY
4438 					if (secluded_wakeup_all == TRUE) {
4439 						thread_wakeup(secluded_wakeup_event);
4440 					}
4441 
4442 					while (need_wakeup_secluded-- != 0) {
4443 						/*
4444 						 * Wake up one waiter per page we just released.
4445 						 */
4446 						thread_wakeup_one(secluded_wakeup_event);
4447 					}
4448 
4449 #endif /* CONFIG_SECLUDED_MEMORY */
4450 					if (normal_wakeup_all == TRUE) {
4451 						thread_wakeup(normal_wakeup_event);
4452 					}
4453 
4454 					while (need_wakeup-- != 0) {
4455 						/*
4456 						 * Wake up one waiter per page we just released.
4457 						 */
4458 						thread_wakeup_one(normal_wakeup_event);
4459 					}
4460 				}
4461 			}
4462 
4463 			VM_CHECK_MEMORYSTATUS;
4464 		}
4465 	}
4466 }
4467 
4468 
4469 /*
4470  *	vm_page_wire:
4471  *
4472  *	Mark this page as wired down by yet
4473  *	another map, removing it from paging queues
4474  *	as necessary.
4475  *
4476  *	The page's object and the page queues must be locked.
4477  */
4478 
4479 
4480 void
4481 vm_page_wire(
4482 	vm_page_t mem,
4483 	vm_tag_t           tag,
4484 	boolean_t          check_memorystatus)
4485 {
4486 	vm_object_t     m_object;
4487 
4488 	m_object = VM_PAGE_OBJECT(mem);
4489 
4490 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
4491 
4492 	VM_PAGE_CHECK(mem);
4493 	if (m_object) {
4494 		vm_object_lock_assert_exclusive(m_object);
4495 	} else {
4496 		/*
4497 		 * In theory, the page should be in an object before it
4498 		 * gets wired, since we need to hold the object lock
4499 		 * to update some fields in the page structure.
4500 		 * However, some code (i386 pmap, for example) might want
4501 		 * to wire a page before it gets inserted into an object.
4502 		 * That's somewhat OK, as long as nobody else can get to
4503 		 * that page and update it at the same time.
4504 		 */
4505 	}
4506 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4507 	if (!VM_PAGE_WIRED(mem)) {
4508 		if (mem->vmp_laundry) {
4509 			vm_pageout_steal_laundry(mem, TRUE);
4510 		}
4511 
4512 		vm_page_queues_remove(mem, TRUE);
4513 
4514 		assert(mem->vmp_wire_count == 0);
4515 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
4516 
4517 		if (m_object) {
4518 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4519 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4520 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4521 
4522 			assert(m_object->resident_page_count >=
4523 			    m_object->wired_page_count);
4524 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4525 				assert(vm_page_purgeable_count > 0);
4526 				OSAddAtomic(-1, &vm_page_purgeable_count);
4527 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
4528 			}
4529 			if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4530 			    m_object->purgable == VM_PURGABLE_EMPTY) &&
4531 			    m_object->vo_owner != TASK_NULL) {
4532 				task_t          owner;
4533 				int             ledger_idx_volatile;
4534 				int             ledger_idx_nonvolatile;
4535 				int             ledger_idx_volatile_compressed;
4536 				int             ledger_idx_nonvolatile_compressed;
4537 				boolean_t       do_footprint;
4538 
4539 				owner = VM_OBJECT_OWNER(m_object);
4540 				vm_object_ledger_tag_ledgers(
4541 					m_object,
4542 					&ledger_idx_volatile,
4543 					&ledger_idx_nonvolatile,
4544 					&ledger_idx_volatile_compressed,
4545 					&ledger_idx_nonvolatile_compressed,
4546 					&do_footprint);
4547 				/* less volatile bytes */
4548 				ledger_debit(owner->ledger,
4549 				    ledger_idx_volatile,
4550 				    PAGE_SIZE);
4551 				/* more not-quite-volatile bytes */
4552 				ledger_credit(owner->ledger,
4553 				    ledger_idx_nonvolatile,
4554 				    PAGE_SIZE);
4555 				if (do_footprint) {
4556 					/* more footprint */
4557 					ledger_credit(owner->ledger,
4558 					    task_ledgers.phys_footprint,
4559 					    PAGE_SIZE);
4560 				}
4561 			}
4562 			if (m_object->all_reusable) {
4563 				/*
4564 				 * Wired pages are not counted as "re-usable"
4565 				 * in "all_reusable" VM objects, so nothing
4566 				 * to do here.
4567 				 */
4568 			} else if (mem->vmp_reusable) {
4569 				/*
4570 				 * This page is not "re-usable" when it's
4571 				 * wired, so adjust its state and the
4572 				 * accounting.
4573 				 */
4574 				vm_object_reuse_pages(m_object,
4575 				    mem->vmp_offset,
4576 				    mem->vmp_offset + PAGE_SIZE_64,
4577 				    FALSE);
4578 			}
4579 		}
4580 		assert(!mem->vmp_reusable);
4581 
4582 		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4583 			vm_page_wire_count++;
4584 		}
4585 		if (mem->vmp_gobbled) {
4586 			vm_page_gobble_count--;
4587 		}
4588 		mem->vmp_gobbled = FALSE;
4589 
4590 		if (check_memorystatus == TRUE) {
4591 			VM_CHECK_MEMORYSTATUS;
4592 		}
4593 	}
4594 	assert(!mem->vmp_gobbled);
4595 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4596 	mem->vmp_wire_count++;
4597 	if (__improbable(mem->vmp_wire_count == 0)) {
4598 		panic("vm_page_wire(%p): wire_count overflow", mem);
4599 	}
4600 	VM_PAGE_CHECK(mem);
4601 }
4602 
4603 /*
4604  *	vm_page_unwire:
4605  *
4606  *	Release one wiring of this page, potentially
4607  *	enabling it to be paged again.
4608  *
4609  *	The page's object and the page queues must be locked.
4610  */
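/*
 * Example (sketch, hypothetical caller): wiring and unwiring are
 * paired under the object and page-queue locks:
 *
 *	vm_object_lock(object);
 *	vm_page_lockspin_queues();
 *	vm_page_wire(mem, VM_KERN_MEMORY_NONE, TRUE);
 *	vm_page_unlock_queues();
 *	...use the page while it cannot be paged out...
 *	vm_page_lockspin_queues();
 *	vm_page_unwire(mem, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */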
4611 void
4612 vm_page_unwire(
4613 	vm_page_t       mem,
4614 	boolean_t       queueit)
4615 {
4616 	vm_object_t     m_object;
4617 
4618 	m_object = VM_PAGE_OBJECT(mem);
4619 
4620 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
4621 
4622 	VM_PAGE_CHECK(mem);
4623 	assert(VM_PAGE_WIRED(mem));
4624 	assert(mem->vmp_wire_count > 0);
4625 	assert(!mem->vmp_gobbled);
4626 	assert(m_object != VM_OBJECT_NULL);
4627 	vm_object_lock_assert_exclusive(m_object);
4628 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4629 	if (--mem->vmp_wire_count == 0) {
4630 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4631 
4632 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4633 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4634 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4635 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4636 			vm_page_wire_count--;
4637 		}
4638 
4639 		assert(m_object->resident_page_count >=
4640 		    m_object->wired_page_count);
4641 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4642 			OSAddAtomic(+1, &vm_page_purgeable_count);
4643 			assert(vm_page_purgeable_wired_count > 0);
4644 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4645 		}
4646 		if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4647 		    m_object->purgable == VM_PURGABLE_EMPTY) &&
4648 		    m_object->vo_owner != TASK_NULL) {
4649 			task_t          owner;
4650 			int             ledger_idx_volatile;
4651 			int             ledger_idx_nonvolatile;
4652 			int             ledger_idx_volatile_compressed;
4653 			int             ledger_idx_nonvolatile_compressed;
4654 			boolean_t       do_footprint;
4655 
4656 			owner = VM_OBJECT_OWNER(m_object);
4657 			vm_object_ledger_tag_ledgers(
4658 				m_object,
4659 				&ledger_idx_volatile,
4660 				&ledger_idx_nonvolatile,
4661 				&ledger_idx_volatile_compressed,
4662 				&ledger_idx_nonvolatile_compressed,
4663 				&do_footprint);
4664 			/* more volatile bytes */
4665 			ledger_credit(owner->ledger,
4666 			    ledger_idx_volatile,
4667 			    PAGE_SIZE);
4668 			/* less not-quite-volatile bytes */
4669 			ledger_debit(owner->ledger,
4670 			    ledger_idx_nonvolatile,
4671 			    PAGE_SIZE);
4672 			if (do_footprint) {
4673 				/* less footprint */
4674 				ledger_debit(owner->ledger,
4675 				    task_ledgers.phys_footprint,
4676 				    PAGE_SIZE);
4677 			}
4678 		}
4679 		assert(m_object != kernel_object);
4680 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4681 
4682 		if (queueit == TRUE) {
4683 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
4684 				vm_page_deactivate(mem);
4685 			} else {
4686 				vm_page_activate(mem);
4687 			}
4688 		}
4689 
4690 		VM_CHECK_MEMORYSTATUS;
4691 	}
4692 	VM_PAGE_CHECK(mem);
4693 }
4694 
4695 /*
4696  *	vm_page_deactivate:
4697  *
4698  *	Returns the given page to the inactive list,
4699  *	indicating that no physical maps have access
4700  *	to this page.  [Used by the physical mapping system.]
4701  *
4702  *	The page queues must be locked.
4703  */
4704 void
4705 vm_page_deactivate(
4706 	vm_page_t       m)
4707 {
4708 	vm_page_deactivate_internal(m, TRUE);
4709 }
4710 
4711 
4712 void
4713 vm_page_deactivate_internal(
4714 	vm_page_t       m,
4715 	boolean_t       clear_hw_reference)
4716 {
4717 	vm_object_t     m_object;
4718 
4719 	m_object = VM_PAGE_OBJECT(m);
4720 
4721 	VM_PAGE_CHECK(m);
4722 	assert(m_object != kernel_object);
4723 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4724 
4725 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
4726 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4727 	/*
4728 	 *	This page is no longer very interesting.  If it was
4729 	 *	interesting (active or inactive/referenced), then we
4730 	 *	clear the reference bit and (re)enter it in the
4731 	 *	inactive queue.  Note wired pages should not have
4732 	 *	their reference bit cleared.
4733 	 */
4734 	assert( !(m->vmp_absent && !m->vmp_unusual));
4735 
4736 	if (m->vmp_gobbled) {           /* can this happen? */
4737 		assert( !VM_PAGE_WIRED(m));
4738 
4739 		if (!m->vmp_private && !m->vmp_fictitious) {
4740 			vm_page_wire_count--;
4741 		}
4742 		vm_page_gobble_count--;
4743 		m->vmp_gobbled = FALSE;
4744 	}
4745 	/*
4746 	 * if this page is currently on the pageout queue, we can't do the
4747 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4748 	 * and we can't remove it manually since we would need the object lock
4749 	 * (which is not required here) to decrement the activity_in_progress
4750 	 * reference which is held on the object while the page is in the pageout queue...
4751 	 * just let the normal laundry processing proceed
4752 	 */
4753 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4754 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4755 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4756 	    VM_PAGE_WIRED(m)) {
4757 		return;
4758 	}
4759 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
4760 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4761 	}
4762 
4763 	m->vmp_reference = FALSE;
4764 	m->vmp_no_cache = FALSE;
4765 
4766 	if (!VM_PAGE_INACTIVE(m)) {
4767 		vm_page_queues_remove(m, FALSE);
4768 
4769 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
4770 		    m->vmp_dirty && m_object->internal &&
4771 		    (m_object->purgable == VM_PURGABLE_DENY ||
4772 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4773 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
4774 			vm_page_check_pageable_safe(m);
4775 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4776 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4777 			vm_page_throttled_count++;
4778 		} else {
4779 			if (m_object->named && m_object->ref_count == 1) {
4780 				vm_page_speculate(m, FALSE);
4781 #if DEVELOPMENT || DEBUG
4782 				vm_page_speculative_recreated++;
4783 #endif
4784 			} else {
4785 				vm_page_enqueue_inactive(m, FALSE);
4786 			}
4787 		}
4788 	}
4789 }
4790 
4791 /*
4792  * vm_page_enqueue_cleaned
4793  *
4794  * Put the page on the cleaned queue, mark it cleaned, etc.
4795  * Being on the cleaned queue (i.e. vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
4796  * does ** NOT ** guarantee that the page is clean!
4797  *
4798  * Call with the queues lock held.
4799  */
4800 
4801 void
4802 vm_page_enqueue_cleaned(vm_page_t m)
4803 {
4804 	vm_object_t     m_object;
4805 
4806 	m_object = VM_PAGE_OBJECT(m);
4807 
4808 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4809 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4810 	assert( !(m->vmp_absent && !m->vmp_unusual));
4811 
4812 	if (VM_PAGE_WIRED(m)) {
4813 		return;
4814 	}
4815 
4816 	if (m->vmp_gobbled) {
4817 		if (!m->vmp_private && !m->vmp_fictitious) {
4818 			vm_page_wire_count--;
4819 		}
4820 		vm_page_gobble_count--;
4821 		m->vmp_gobbled = FALSE;
4822 	}
4823 	/*
4824 	 * if this page is currently on the pageout queue, we can't do the
4825 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4826 	 * and we can't remove it manually since we would need the object lock
4827 	 * (which is not required here) to decrement the activity_in_progress
4828 	 * reference which is held on the object while the page is in the pageout queue...
4829 	 * just let the normal laundry processing proceed
4830 	 */
4831 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4832 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
4833 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4834 		return;
4835 	}
4836 	vm_page_queues_remove(m, FALSE);
4837 
4838 	vm_page_check_pageable_safe(m);
4839 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
4840 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
4841 	vm_page_cleaned_count++;
4842 
4843 	vm_page_inactive_count++;
4844 	if (m_object->internal) {
4845 		vm_page_pageable_internal_count++;
4846 	} else {
4847 		vm_page_pageable_external_count++;
4848 	}
4849 #if CONFIG_BACKGROUND_QUEUE
4850 	if (m->vmp_in_background) {
4851 		vm_page_add_to_backgroundq(m, TRUE);
4852 	}
4853 #endif
4854 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
4855 }
4856 
4857 /*
4858  *	vm_page_activate:
4859  *
4860  *	Put the specified page on the active list (if appropriate).
4861  *
4862  *	The page queues must be locked.
4863  */
4864 
4865 void
4866 vm_page_activate(
4867 	vm_page_t       m)
4868 {
4869 	vm_object_t     m_object;
4870 
4871 	m_object = VM_PAGE_OBJECT(m);
4872 
4873 	VM_PAGE_CHECK(m);
4874 #ifdef  FIXME_4778297
4875 	assert(m_object != kernel_object);
4876 #endif
4877 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4878 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4879 	assert( !(m->vmp_absent && !m->vmp_unusual));
4880 
4881 	if (m->vmp_gobbled) {
4882 		assert( !VM_PAGE_WIRED(m));
4883 		if (!m->vmp_private && !m->vmp_fictitious) {
4884 			vm_page_wire_count--;
4885 		}
4886 		vm_page_gobble_count--;
4887 		m->vmp_gobbled = FALSE;
4888 	}
4889 	/*
4890 	 * if this page is currently on the pageout queue, we can't do the
4891 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4892 	 * and we can't remove it manually since we would need the object lock
4893 	 * (which is not required here) to decrement the activity_in_progress
4894 	 * reference which is held on the object while the page is in the pageout queue...
4895 	 * just let the normal laundry processing proceed
4896 	 */
4897 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4898 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4899 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4900 		return;
4901 	}
4902 
4903 #if DEBUG
4904 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
4905 		panic("vm_page_activate: already active");
4906 	}
4907 #endif
4908 
4909 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
4910 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
4911 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
4912 	}
4913 
4914 	vm_page_queues_remove(m, FALSE);
4915 
4916 	if (!VM_PAGE_WIRED(m)) {
4917 		vm_page_check_pageable_safe(m);
4918 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
4919 		    m->vmp_dirty && m_object->internal &&
4920 		    (m_object->purgable == VM_PURGABLE_DENY ||
4921 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4922 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
4923 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4924 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4925 			vm_page_throttled_count++;
4926 		} else {
4927 #if CONFIG_SECLUDED_MEMORY
4928 			if (secluded_for_filecache &&
4929 			    vm_page_secluded_target != 0 &&
4930 			    num_tasks_can_use_secluded_mem == 0 &&
4931 			    m_object->eligible_for_secluded) {
4932 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
4933 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
4934 				vm_page_secluded_count++;
4935 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4936 				vm_page_secluded_count_inuse++;
4937 				assert(!m_object->internal);
4938 //				vm_page_pageable_external_count++;
4939 			} else
4940 #endif /* CONFIG_SECLUDED_MEMORY */
4941 			vm_page_enqueue_active(m, FALSE);
4942 		}
4943 		m->vmp_reference = TRUE;
4944 		m->vmp_no_cache = FALSE;
4945 	}
4946 	VM_PAGE_CHECK(m);
4947 }
4948 
4949 
4950 /*
4951  *      vm_page_speculate:
4952  *
4953  *      Put the specified page on the speculative list (if appropriate).
4954  *
4955  *      The page queues must be locked.
4956  */
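/*
 * Note (added): speculative pages live in a ring of age bins
 * (VM_PAGE_MIN_SPECULATIVE_AGE_Q .. VM_PAGE_MAX_SPECULATIVE_AGE_Q),
 * each covering vm_page_speculative_q_age_ms of wall-clock time.
 * When the current bin's deadline passes we advance to the next bin,
 * and any bin we overtake is drained into the AGED queue via
 * vm_page_speculate_ageit() for pageout_scan to reclaim.
 */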
4957 void
4958 vm_page_speculate(
4959 	vm_page_t       m,
4960 	boolean_t       new)
4961 {
4962 	struct vm_speculative_age_q     *aq;
4963 	vm_object_t     m_object;
4964 
4965 	m_object = VM_PAGE_OBJECT(m);
4966 
4967 	VM_PAGE_CHECK(m);
4968 	vm_page_check_pageable_safe(m);
4969 
4970 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4971 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4972 	assert( !(m->vmp_absent && !m->vmp_unusual));
4973 	assert(m_object->internal == FALSE);
4974 
4975 	/*
4976 	 * if this page is currently on the pageout queue, we can't do the
4977 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4978 	 * and we can't remove it manually since we would need the object lock
4979 	 * (which is not required here) to decrement the activity_in_progress
4980 	 * reference which is held on the object while the page is in the pageout queue...
4981 	 * just let the normal laundry processing proceed
4982 	 */
4983 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4984 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4985 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4986 		return;
4987 	}
4988 
4989 	vm_page_queues_remove(m, FALSE);
4990 
4991 	if (!VM_PAGE_WIRED(m)) {
4992 		mach_timespec_t         ts;
4993 		clock_sec_t sec;
4994 		clock_nsec_t nsec;
4995 
4996 		clock_get_system_nanotime(&sec, &nsec);
4997 		ts.tv_sec = (unsigned int) sec;
4998 		ts.tv_nsec = nsec;
4999 
5000 		if (vm_page_speculative_count == 0) {
5001 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5002 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5003 
5004 			aq = &vm_page_queue_speculative[speculative_age_index];
5005 
5006 			/*
5007 			 * set the timer to begin a new group
5008 			 */
5009 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5010 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5011 
5012 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5013 		} else {
5014 			aq = &vm_page_queue_speculative[speculative_age_index];
5015 
5016 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5017 				speculative_age_index++;
5018 
5019 				if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5020 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5021 				}
5022 				if (speculative_age_index == speculative_steal_index) {
5023 					speculative_steal_index = speculative_age_index + 1;
5024 
5025 					if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5026 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5027 					}
5028 				}
5029 				aq = &vm_page_queue_speculative[speculative_age_index];
5030 
5031 				if (!vm_page_queue_empty(&aq->age_q)) {
5032 					vm_page_speculate_ageit(aq);
5033 				}
5034 
5035 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5036 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5037 
5038 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5039 			}
5040 		}
5041 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5042 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5043 		vm_page_speculative_count++;
5044 		vm_page_pageable_external_count++;
5045 
5046 		if (new == TRUE) {
5047 			vm_object_lock_assert_exclusive(m_object);
5048 
5049 			m_object->pages_created++;
5050 #if DEVELOPMENT || DEBUG
5051 			vm_page_speculative_created++;
5052 #endif
5053 		}
5054 	}
5055 	VM_PAGE_CHECK(m);
5056 }
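
/*
 * Illustrative sketch (not compiled): how vm_page_speculate() above derives
 * an age bucket's expiration deadline from the vm_page_speculative_q_age_ms
 * tunable.  For example, with vm_page_speculative_q_age_ms == 1500 this
 * yields tv_sec == 1 and tv_nsec == 500000000, which is then added to the
 * current system time to form the bucket's deadline.
 */
#if 0
static void
example_set_bucket_deadline(struct vm_speculative_age_q *aq, mach_timespec_t now_ts)
{
	unsigned int ms = vm_pageout_state.vm_page_speculative_q_age_ms;

	aq->age_ts.tv_sec = ms / 1000;                           /* whole seconds */
	aq->age_ts.tv_nsec = (ms % 1000) * 1000 * NSEC_PER_USEC; /* remainder in nanoseconds */
	ADD_MACH_TIMESPEC(&aq->age_ts, &now_ts);                 /* deadline = now + age */
}
#endif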
5057 
5058 
5059 /*
5060  * move pages from the specified aging bin to
5061  * the speculative bin that pageout_scan claims from
5062  *
5063  *      The page queues must be locked.
5064  */
5065 void
5066 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5067 {
5068 	struct vm_speculative_age_q     *sq;
5069 	vm_page_t       t;
5070 
5071 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5072 
5073 	if (vm_page_queue_empty(&sq->age_q)) {
5074 		sq->age_q.next = aq->age_q.next;
5075 		sq->age_q.prev = aq->age_q.prev;
5076 
5077 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5078 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5079 
5080 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5081 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5082 	} else {
5083 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5084 		t->vmp_pageq.next = aq->age_q.next;
5085 
5086 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5087 		t->vmp_pageq.prev = sq->age_q.prev;
5088 
5089 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5090 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5091 
5092 		sq->age_q.prev = aq->age_q.prev;
5093 	}
5094 	vm_page_queue_init(&aq->age_q);
5095 }
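
/*
 * Illustrative sketch (not compiled): vm_page_speculate_ageit() performs an
 * O(1) splice of one circular queue onto the tail of another.  With plain
 * pointers the operation reduces to the following; the code above does the
 * same work through VM_PAGE_PACK_PTR()/VM_PAGE_UNPACK_PTR() because
 * vmp_pageq holds packed pointers.  As in the caller, aq is assumed
 * non-empty here.
 */
#if 0
struct example_qelem { struct example_qelem *next, *prev; };

static void
example_splice_tail(struct example_qelem *sq, struct example_qelem *aq)
{
	if (sq->next == sq) {             /* sq empty: adopt aq's entire chain */
		sq->next = aq->next;
		sq->prev = aq->prev;
		sq->next->prev = sq;
		sq->prev->next = sq;
	} else {                          /* append aq's chain after sq's tail */
		sq->prev->next = aq->next;
		aq->next->prev = sq->prev;
		aq->prev->next = sq;
		sq->prev = aq->prev;
	}
	aq->next = aq->prev = aq;         /* reinitialize aq as empty */
}
#endif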
5096 
5097 
5098 void
5099 vm_page_lru(
5100 	vm_page_t       m)
5101 {
5102 	VM_PAGE_CHECK(m);
5103 	assert(VM_PAGE_OBJECT(m) != kernel_object);
5104 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5105 
5106 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5107 
5108 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5109 		/*
5110 		 * we don't need to do all the other work that
5111 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5112 		 * bring along for the ride
5113 		 */
5114 		assert(!m->vmp_laundry);
5115 		assert(!m->vmp_private);
5116 
5117 		m->vmp_no_cache = FALSE;
5118 
5119 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5120 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5121 
5122 		return;
5123 	}
5124 	/*
5125 	 * if this page is currently on the pageout queue, we can't do the
5126 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5127 	 * and we can't remove it manually since we would need the object lock
5128 	 * (which is not required here) to decrement the activity_in_progress
5129 	 * reference which is held on the object while the page is in the pageout queue...
5130 	 * just let the normal laundry processing proceed
5131 	 */
5132 	if (m->vmp_laundry || m->vmp_private ||
5133 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5134 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5135 	    VM_PAGE_WIRED(m)) {
5136 		return;
5137 	}
5138 
5139 	m->vmp_no_cache = FALSE;
5140 
5141 	vm_page_queues_remove(m, FALSE);
5142 
5143 	vm_page_enqueue_inactive(m, FALSE);
5144 }
5145 
5146 
5147 void
5148 vm_page_reactivate_all_throttled(void)
5149 {
5150 	vm_page_t       first_throttled, last_throttled;
5151 	vm_page_t       first_active;
5152 	vm_page_t       m;
5153 	int             extra_active_count;
5154 	int             extra_internal_count, extra_external_count;
5155 	vm_object_t     m_object;
5156 
5157 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5158 		return;
5159 	}
5160 
5161 	extra_active_count = 0;
5162 	extra_internal_count = 0;
5163 	extra_external_count = 0;
5164 	vm_page_lock_queues();
5165 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5166 		/*
5167 		 * Switch "throttled" pages to "active".
5168 		 */
5169 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5170 			VM_PAGE_CHECK(m);
5171 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5172 
5173 			m_object = VM_PAGE_OBJECT(m);
5174 
5175 			extra_active_count++;
5176 			if (m_object->internal) {
5177 				extra_internal_count++;
5178 			} else {
5179 				extra_external_count++;
5180 			}
5181 
5182 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5183 			VM_PAGE_CHECK(m);
5184 #if CONFIG_BACKGROUND_QUEUE
5185 			if (m->vmp_in_background) {
5186 				vm_page_add_to_backgroundq(m, FALSE);
5187 			}
5188 #endif
5189 		}
5190 
5191 		/*
5192 		 * Transfer the entire throttled queue to the regular LRU page queues.
5193 		 * We insert it at the head of the active queue, so that these pages
5194 		 * get re-evaluated by the LRU algorithm first, since they've been
5195 		 * completely out of it until now.
5196 		 */
5197 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5198 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5199 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5200 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5201 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5202 		} else {
5203 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5204 		}
5205 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5206 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5207 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5208 
5209 #if DEBUG
5210 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5211 #endif
5212 		vm_page_queue_init(&vm_page_queue_throttled);
5213 		/*
5214 		 * Adjust the global page counts.
5215 		 */
5216 		vm_page_active_count += extra_active_count;
5217 		vm_page_pageable_internal_count += extra_internal_count;
5218 		vm_page_pageable_external_count += extra_external_count;
5219 		vm_page_throttled_count = 0;
5220 	}
5221 	assert(vm_page_throttled_count == 0);
5222 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5223 	vm_page_unlock_queues();
5224 }
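
/*
 * Worked example of the splice above: with throttled == [T1, T2] and
 * active == [A1], the constant-time pointer exchange yields
 * active == [T1, T2, A1].  first_throttled becomes the new head of the
 * active queue and last_throttled links to the old head, so the cost is
 * independent of how many pages were throttled.
 */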
5225 
5226 
5227 /*
5228  * move pages from the indicated local queue to the global active queue
5229  * it's ok to fail if we're below the hard limit and force == FALSE
5230  * the nolocks == TRUE case is to allow this function to be run on
5231  * the hibernate path
5232  */
5233 
5234 void
5235 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5236 {
5237 	struct vpl      *lq;
5238 	vm_page_t       first_local, last_local;
5239 	vm_page_t       first_active;
5240 	vm_page_t       m;
5241 	uint32_t        count = 0;
5242 
5243 	if (vm_page_local_q == NULL) {
5244 		return;
5245 	}
5246 
5247 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5248 
5249 	if (nolocks == FALSE) {
5250 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5251 			if (!vm_page_trylockspin_queues()) {
5252 				return;
5253 			}
5254 		} else {
5255 			vm_page_lockspin_queues();
5256 		}
5257 
5258 		VPL_LOCK(&lq->vpl_lock);
5259 	}
5260 	if (lq->vpl_count) {
5261 		/*
5262 		 * Switch "local" pages to "active".
5263 		 */
5264 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5265 
5266 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5267 			VM_PAGE_CHECK(m);
5268 			vm_page_check_pageable_safe(m);
5269 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5270 			assert(!m->vmp_fictitious);
5271 
5272 			if (m->vmp_local_id != lid) {
5273 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5274 			}
5275 
5276 			m->vmp_local_id = 0;
5277 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5278 			VM_PAGE_CHECK(m);
5279 #if CONFIG_BACKGROUND_QUEUE
5280 			if (m->vmp_in_background) {
5281 				vm_page_add_to_backgroundq(m, FALSE);
5282 			}
5283 #endif
5284 			count++;
5285 		}
5286 		if (count != lq->vpl_count) {
5287 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5288 		}
5289 
5290 		/*
5291 		 * Transfer the entire local queue to the regular LRU page queues.
5292 		 */
5293 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5294 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5295 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5296 
5297 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5298 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5299 		} else {
5300 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5301 		}
5302 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5303 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5304 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5305 
5306 		vm_page_queue_init(&lq->vpl_queue);
5307 		/*
5308 		 * Adjust the global page counts.
5309 		 */
5310 		vm_page_active_count += lq->vpl_count;
5311 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5312 		vm_page_pageable_external_count += lq->vpl_external_count;
5313 		lq->vpl_count = 0;
5314 		lq->vpl_internal_count = 0;
5315 		lq->vpl_external_count = 0;
5316 	}
5317 	assert(vm_page_queue_empty(&lq->vpl_queue));
5318 
5319 	if (nolocks == FALSE) {
5320 		VPL_UNLOCK(&lq->vpl_lock);
5321 
5322 		vm_page_balance_inactive(count / 4);
5323 		vm_page_unlock_queues();
5324 	}
5325 }
5326 
5327 /*
5328  *	vm_page_part_zero_fill:
5329  *
5330  *	Zero-fill a part of the page.
5331  */
5332 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5333 void
5334 vm_page_part_zero_fill(
5335 	vm_page_t       m,
5336 	vm_offset_t     m_pa,
5337 	vm_size_t       len)
5338 {
5339 #if 0
5340 	/*
5341 	 * we don't hold the page queue lock
5342 	 * so this check isn't safe to make
5343 	 */
5344 	VM_PAGE_CHECK(m);
5345 #endif
5346 
5347 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5348 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5349 #else
5350 	vm_page_t       tmp;
5351 	while (1) {
5352 		tmp = vm_page_grab();
5353 		if (tmp == VM_PAGE_NULL) {
5354 			vm_page_wait(THREAD_UNINT);
5355 			continue;
5356 		}
5357 		break;
5358 	}
5359 	vm_page_zero_fill(tmp);
5360 	if (m_pa != 0) {
5361 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
5362 	}
5363 	if ((m_pa + len) < PAGE_SIZE) {
5364 		vm_page_part_copy(m, m_pa + len, tmp,
5365 		    m_pa + len, PAGE_SIZE - (m_pa + len));
5366 	}
5367 	vm_page_copy(tmp, m);
5368 	VM_PAGE_FREE(tmp);
5369 #endif
5370 }
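
/*
 * Worked example for the fallback path above (assuming PAGE_SIZE == 4096):
 * with m_pa == 512 and len == 1024, tmp is fully zeroed, bytes [0, 512)
 * and [1536, 4096) are copied from m into tmp, and tmp is then copied back
 * over m.  The net effect is that m's bytes [512, 1536) are zeroed while
 * everything outside that range is preserved.
 */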
5371 
5372 /*
5373  *	vm_page_zero_fill:
5374  *
5375  *	Zero-fill the specified page.
5376  */
5377 void
5378 vm_page_zero_fill(
5379 	vm_page_t       m)
5380 {
5381 #if 0
5382 	/*
5383 	 * we don't hold the page queue lock
5384 	 * so this check isn't safe to make
5385 	 */
5386 	VM_PAGE_CHECK(m);
5387 #endif
5388 
5389 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
5390 	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5391 }
5392 
5393 /*
5394  *	vm_page_part_copy:
5395  *
5396  *	copy part of one page to another
5397  */
5398 
5399 void
5400 vm_page_part_copy(
5401 	vm_page_t       src_m,
5402 	vm_offset_t     src_pa,
5403 	vm_page_t       dst_m,
5404 	vm_offset_t     dst_pa,
5405 	vm_size_t       len)
5406 {
5407 #if 0
5408 	/*
5409 	 * we don't hold the page queue lock
5410 	 * so this check isn't safe to make
5411 	 */
5412 	VM_PAGE_CHECK(src_m);
5413 	VM_PAGE_CHECK(dst_m);
5414 #endif
5415 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5416 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5417 }
5418 
5419 /*
5420  *	vm_page_copy:
5421  *
5422  *	Copy one page to another
5423  */
5424 
5425 int vm_page_copy_cs_validations = 0;
5426 int vm_page_copy_cs_tainted = 0;
5427 
5428 void
5429 vm_page_copy(
5430 	vm_page_t       src_m,
5431 	vm_page_t       dest_m)
5432 {
5433 	vm_object_t     src_m_object;
5434 
5435 	src_m_object = VM_PAGE_OBJECT(src_m);
5436 
5437 #if 0
5438 	/*
5439 	 * we don't hold the page queue lock
5440 	 * so this check isn't safe to make
5441 	 */
5442 	VM_PAGE_CHECK(src_m);
5443 	VM_PAGE_CHECK(dest_m);
5444 #endif
5445 	vm_object_lock_assert_held(src_m_object);
5446 
5447 	if (src_m_object != VM_OBJECT_NULL &&
5448 	    src_m_object->code_signed) {
5449 		/*
5450 		 * We're copying a page from a code-signed object.
5451 		 * Whoever ends up mapping the copy page might care about
5452 		 * the original page's integrity, so let's validate the
5453 		 * source page now.
5454 		 */
5455 		vm_page_copy_cs_validations++;
5456 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5457 #if DEVELOPMENT || DEBUG
5458 		DTRACE_VM4(codesigned_copy,
5459 		    vm_object_t, src_m_object,
5460 		    vm_object_offset_t, src_m->vmp_offset,
5461 		    int, src_m->vmp_cs_validated,
5462 		    int, src_m->vmp_cs_tainted);
5463 #endif /* DEVELOPMENT || DEBUG */
5464 	}
5465 
5466 	/*
5467 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
5468 	 * the cs_validated bit.
5469 	 */
5470 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5471 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5472 	if (dest_m->vmp_cs_tainted) {
5473 		vm_page_copy_cs_tainted++;
5474 	}
5475 	dest_m->vmp_error = src_m->vmp_error; /* sliding src_m might have failed... */
5476 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5477 }
5478 
5479 #if MACH_ASSERT
5480 static void
5481 _vm_page_print(
5482 	vm_page_t       p)
5483 {
5484 	printf("vm_page %p: \n", p);
5485 	printf("  pageq: next=%p prev=%p\n",
5486 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5487 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5488 	printf("  listq: next=%p prev=%p\n",
5489 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5490 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5491 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5492 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5493 	printf("  wire_count=%u\n", p->vmp_wire_count);
5494 	printf("  q_state=%u\n", p->vmp_q_state);
5495 
5496 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
5497 	    (p->vmp_laundry ? "" : "!"),
5498 	    (p->vmp_reference ? "" : "!"),
5499 	    (p->vmp_gobbled ? "" : "!"),
5500 	    (p->vmp_private ? "" : "!"));
5501 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5502 	    (p->vmp_busy ? "" : "!"),
5503 	    (p->vmp_wanted ? "" : "!"),
5504 	    (p->vmp_tabled ? "" : "!"),
5505 	    (p->vmp_fictitious ? "" : "!"),
5506 	    (p->vmp_pmapped ? "" : "!"),
5507 	    (p->vmp_wpmapped ? "" : "!"));
5508 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5509 	    (p->vmp_free_when_done ? "" : "!"),
5510 	    (p->vmp_absent ? "" : "!"),
5511 	    (p->vmp_error ? "" : "!"),
5512 	    (p->vmp_dirty ? "" : "!"),
5513 	    (p->vmp_cleaning ? "" : "!"),
5514 	    (p->vmp_precious ? "" : "!"),
5515 	    (p->vmp_clustered ? "" : "!"));
5516 	printf("  %soverwriting, %srestart, %sunusual\n",
5517 	    (p->vmp_overwriting ? "" : "!"),
5518 	    (p->vmp_restart ? "" : "!"),
5519 	    (p->vmp_unusual ? "" : "!"));
5520 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5521 	    p->vmp_cs_validated,
5522 	    p->vmp_cs_tainted,
5523 	    p->vmp_cs_nx,
5524 	    (p->vmp_no_cache ? "" : "!"));
5525 
5526 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5527 }
5528 
5529 /*
5530  *	Check that the list of pages is ordered by
5531  *	ascending physical address and has no holes.
5532  */
5533 static int
5534 vm_page_verify_contiguous(
5535 	vm_page_t       pages,
5536 	unsigned int    npages)
5537 {
5538 	vm_page_t               m;
5539 	unsigned int            page_count;
5540 	vm_offset_t             prev_addr;
5541 
5542 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5543 	page_count = 1;
5544 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5545 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5546 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5547 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5548 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5549 			panic("vm_page_verify_contiguous:  not contiguous!");
5550 		}
5551 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5552 		++page_count;
5553 	}
5554 	if (page_count != npages) {
5555 		printf("pages %p actual count 0x%x but requested 0x%x\n",
5556 		    pages, page_count, npages);
5557 		panic("vm_page_verify_contiguous:  count error");
5558 	}
5559 	return 1;
5560 }
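
/*
 * Example of the invariant checked above: a list whose physical page
 * numbers are {100, 101, 102} with npages == 3 passes; {100, 102} panics
 * on the hole after 100; and {100, 101, 102} with npages == 4 panics on
 * the final count check.
 */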
5561 
5562 
5563 /*
5564  *	Check the free lists for proper length etc.
5565  */
5566 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5567 static unsigned int
5568 vm_page_verify_free_list(
5569 	vm_page_queue_head_t    *vm_page_queue,
5570 	unsigned int    color,
5571 	vm_page_t       look_for_page,
5572 	boolean_t       expect_page)
5573 {
5574 	unsigned int    npages;
5575 	vm_page_t       m;
5576 	vm_page_t       prev_m;
5577 	boolean_t       found_page;
5578 
5579 	if (!vm_page_verify_this_free_list_enabled) {
5580 		return 0;
5581 	}
5582 
5583 	found_page = FALSE;
5584 	npages = 0;
5585 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5586 
5587 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5588 		if (m == look_for_page) {
5589 			found_page = TRUE;
5590 		}
5591 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5592 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5593 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5594 		}
5595 		if (!m->vmp_busy) {
5596 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5597 			    color, npages, m);
5598 		}
5599 		if (color != (unsigned int) -1) {
5600 			if (VM_PAGE_GET_COLOR(m) != color) {
5601 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5602 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
5603 			}
5604 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5605 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5606 				    color, npages, m, m->vmp_q_state);
5607 			}
5608 		} else {
5609 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5610 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5611 				    npages, m, m->vmp_q_state);
5612 			}
5613 		}
5614 		++npages;
5615 		prev_m = m;
5616 	}
5617 	if (look_for_page != VM_PAGE_NULL) {
5618 		unsigned int other_color;
5619 
5620 		if (expect_page && !found_page) {
5621 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5622 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5623 			_vm_page_print(look_for_page);
5624 			for (other_color = 0;
5625 			    other_color < vm_colors;
5626 			    other_color++) {
5627 				if (other_color == color) {
5628 					continue;
5629 				}
5630 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5631 				    other_color, look_for_page, FALSE);
5632 			}
5633 			if (color == (unsigned int) -1) {
5634 				vm_page_verify_free_list(&vm_lopage_queue_free,
5635 				    (unsigned int) -1, look_for_page, FALSE);
5636 			}
5637 			panic("vm_page_verify_free_list(color=%u)", color);
5638 		}
5639 		if (!expect_page && found_page) {
5640 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5641 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5642 		}
5643 	}
5644 	return npages;
5645 }
5646 
5647 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5648 static void
5649 vm_page_verify_free_lists(void)
5650 {
5651 	unsigned int    color, npages, nlopages;
5652 	boolean_t       toggle = TRUE;
5653 
5654 	if (!vm_page_verify_all_free_lists_enabled) {
5655 		return;
5656 	}
5657 
5658 	npages = 0;
5659 
5660 	lck_mtx_lock(&vm_page_queue_free_lock);
5661 
5662 	if (vm_page_verify_this_free_list_enabled == TRUE) {
5663 		/*
5664 		 * This variable has been set globally for extra checking of
5665 		 * each free list Q. Since we didn't set it, we don't own it
5666 		 * and we shouldn't toggle it.
5667 		 */
5668 		toggle = FALSE;
5669 	}
5670 
5671 	if (toggle == TRUE) {
5672 		vm_page_verify_this_free_list_enabled = TRUE;
5673 	}
5674 
5675 	for (color = 0; color < vm_colors; color++) {
5676 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5677 		    color, VM_PAGE_NULL, FALSE);
5678 	}
5679 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5680 	    (unsigned int) -1,
5681 	    VM_PAGE_NULL, FALSE);
5682 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5683 		panic("vm_page_verify_free_lists:  "
5684 		    "npages %u free_count %d nlopages %u lo_free_count %u",
5685 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5686 	}
5687 
5688 	if (toggle == TRUE) {
5689 		vm_page_verify_this_free_list_enabled = FALSE;
5690 	}
5691 
5692 	lck_mtx_unlock(&vm_page_queue_free_lock);
5693 }
5694 
5695 #endif  /* MACH_ASSERT */
5696 
5697 
5698 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5699 
5700 /*
5701  *	CONTIGUOUS PAGE ALLOCATION
5702  *
5703  *	Find a region large enough to contain at least n pages
5704  *	of contiguous physical memory.
5705  *
5706  *	This is done by traversing the vm_page_t array in a linear fashion
5707  *	we assume that the vm_page_t array has the available physical pages in an
5708  *	ordered, ascending list... this is currently true of all our implementations
5709  *      and must remain so... there can be 'holes' in the array...  we also can
5710  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5711  *      which used to happen via 'vm_page_convert'... that function was no longer
5712  *      being called and was removed...
5713  *
5714  *	The basic flow consists of stabilizing some of the interesting state of
5715  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5716  *	sweep at the beginning of the array looking for pages that meet our criteria
5717  *	for a 'stealable' page... currently we are pretty conservative... if the page
5718  *	meets these criteria and is physically contiguous to the previous page in the 'run'
5719  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
5720  *	and start to develop a new run... if at this point we've already considered
5721  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5722  *	and mutex_pause (which will yield the processor), to keep the latency low w/r
5723  *	to other threads trying to acquire free pages (or move pages from q to q),
5724  *	and then continue from the spot we left off... we only make 1 pass through the
5725  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
5726  *      which steals the pages from the queues they're currently on... pages on the free
5727  *	queue can be stolen directly... pages that are on any of the other queues
5728  *	must be removed from the object they are tabled on... this requires taking the
5729  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5730  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
5731  *	dump the pages we've currently stolen back to the free list, and pick up our
5732  *	scan from the point where we aborted the 'current' run.
5733  *
5734  *
5735  *	Requirements:
5736  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
5737  *
5738  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5739  *
5740  * Algorithm: a simplified sketch of the run-building loop follows below.
5741  */
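
/*
 * Simplified sketch of the scan described above (not compiled; locking,
 * yielding and the stealing pass are omitted).  It illustrates only the
 * run-building logic: extend the current run while pages remain physically
 * contiguous and stealable, otherwise restart the run at the current index.
 * example_page_is_stealable() is a hypothetical stand-in for the stability
 * checks performed inline below.
 */
#if 0
static unsigned int
example_scan_for_run(unsigned int contig_pages)
{
	unsigned int page_idx, start_idx = 0, npages = 0;
	ppnum_t prevcontaddr = 0;

	for (page_idx = 0; page_idx < vm_pages_count && npages < contig_pages; page_idx++) {
		vm_page_t m = &vm_pages[page_idx];

		if (!example_page_is_stealable(m)) {
			npages = 0;                            /* reset the run */
		} else if (npages != 0 &&
		    VM_PAGE_GET_PHYS_PAGE(m) == prevcontaddr + 1) {
			npages++;                              /* extend the run */
		} else {
			start_idx = page_idx;                  /* start a new run */
			npages = 1;
		}
		prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
	}
	return (npages == contig_pages) ? start_idx : (unsigned int)-1;
}
#endif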
5742 
5743 #define MAX_CONSIDERED_BEFORE_YIELD     1000
5744 
5745 
5746 #define RESET_STATE_OF_RUN()    \
5747 	MACRO_BEGIN             \
5748 	prevcontaddr = -2;      \
5749 	start_pnum = -1;        \
5750 	free_considered = 0;    \
5751 	substitute_needed = 0;  \
5752 	npages = 0;             \
5753 	MACRO_END
5754 
5755 /*
5756  * Can we steal in-use (i.e. not free) pages when searching for
5757  * physically-contiguous pages ?
5758  */
5759 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
5760 
5761 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
5762 #if DEBUG
5763 int vm_page_find_contig_debug = 0;
5764 #endif
5765 
5766 static vm_page_t
5767 vm_page_find_contiguous(
5768 	unsigned int    contig_pages,
5769 	ppnum_t         max_pnum,
5770 	ppnum_t     pnum_mask,
5771 	boolean_t       wire,
5772 	int             flags)
5773 {
5774 	vm_page_t       m = NULL;
5775 	ppnum_t         prevcontaddr = 0;
5776 	ppnum_t         start_pnum = 0;
5777 	unsigned int    npages = 0, considered = 0, scanned = 0;
5778 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
5779 	unsigned int    idx_last_contig_page_found = 0;
5780 	int             free_considered = 0, free_available = 0;
5781 	int             substitute_needed = 0;
5782 	int             zone_gc_called = 0;
5783 	boolean_t       wrapped;
5784 	kern_return_t   kr;
5785 #if DEBUG
5786 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
5787 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
5788 #endif
5789 
5790 	int             yielded = 0;
5791 	int             dumped_run = 0;
5792 	int             stolen_pages = 0;
5793 	int             compressed_pages = 0;
5794 
5795 
5796 	if (contig_pages == 0) {
5797 		return VM_PAGE_NULL;
5798 	}
5799 
5800 full_scan_again:
5801 
5802 #if MACH_ASSERT
5803 	vm_page_verify_free_lists();
5804 #endif
5805 #if DEBUG
5806 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
5807 #endif
5808 	PAGE_REPLACEMENT_ALLOWED(TRUE);
5809 
5810 	/*
5811 	 * If there are still delayed pages, try to free up some that match.
5812 	 */
5813 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
5814 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
5815 	}
5816 
5817 	vm_page_lock_queues();
5818 	lck_mtx_lock(&vm_page_queue_free_lock);
5819 
5820 	RESET_STATE_OF_RUN();
5821 
5822 	scanned = 0;
5823 	considered = 0;
5824 	free_available = vm_page_free_count - vm_page_free_reserved;
5825 
5826 	wrapped = FALSE;
5827 
5828 	if (flags & KMA_LOMEM) {
5829 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
5830 	} else {
5831 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
5832 	}
5833 
5834 	orig_last_idx = idx_last_contig_page_found;
5835 	last_idx = orig_last_idx;
5836 
5837 	for (page_idx = last_idx, start_idx = last_idx;
5838 	    npages < contig_pages && page_idx < vm_pages_count;
5839 	    page_idx++) {
5840 retry:
5841 		if (wrapped &&
5842 		    npages == 0 &&
5843 		    page_idx >= orig_last_idx) {
5844 			/*
5845 			 * We're back where we started and we haven't
5846 			 * found any suitable contiguous range.  Let's
5847 			 * give up.
5848 			 */
5849 			break;
5850 		}
5851 		scanned++;
5852 		m = &vm_pages[page_idx];
5853 
5854 		assert(!m->vmp_fictitious);
5855 		assert(!m->vmp_private);
5856 
5857 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
5858 			/* no more low pages... */
5859 			break;
5860 		}
5861 		if ((npages == 0) && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
5862 			/*
5863 			 * not aligned
5864 			 */
5865 			RESET_STATE_OF_RUN();
5866 		} else if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
5867 		    m->vmp_laundry || m->vmp_wanted ||
5868 		    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5869 			/*
5870 			 * page is in a transient state
5871 			 * or a state we don't want to deal
5872 			 * with, so don't consider it which
5873 			 * means starting a new run
5874 			 */
5875 			RESET_STATE_OF_RUN();
5876 		} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5877 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5878 		    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5879 		    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5880 			/*
5881 			 * page needs to be on one of our queues (other than the pageout or special free queues)
5882 			 * or it needs to belong to the compressor pool (which is now indicated
5883 			 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
5884 			 * from the check for VM_PAGE_NOT_ON_Q)
5885 			 * in order for it to be stable behind the
5886 			 * locks we hold at this point...
5887 			 * if not, don't consider it which
5888 			 * means starting a new run
5889 			 */
5890 			RESET_STATE_OF_RUN();
5891 		} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) {
5892 			/*
5893 			 * pages on the free list are always 'busy'
5894 			 * so we couldn't test for 'busy' in the check
5895 			 * for the transient states... pages that are
5896 			 * 'free' are never 'tabled', so we also couldn't
5897 			 * test for 'tabled'.  So we check here to make
5898 			 * sure that a non-free page is not busy and is
5899 			 * tabled on an object...
5900 			 * if not, don't consider it which
5901 			 * means starting a new run
5902 			 */
5903 			RESET_STATE_OF_RUN();
5904 		} else {
5905 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
5906 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
5907 					RESET_STATE_OF_RUN();
5908 					goto did_consider;
5909 				} else {
5910 					npages = 1;
5911 					start_idx = page_idx;
5912 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
5913 				}
5914 			} else {
5915 				npages++;
5916 			}
5917 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
5918 
5919 			VM_PAGE_CHECK(m);
5920 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
5921 				free_considered++;
5922 			} else {
5923 				/*
5924 				 * This page is not free.
5925 				 * If we can't steal used pages,
5926 				 * we have to give up this run
5927 				 * and keep looking.
5928 				 * Otherwise, we might need to
5929 				 * move the contents of this page
5930 				 * into a substitute page.
5931 				 */
5932 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
5933 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
5934 					substitute_needed++;
5935 				}
5936 #else
5937 				RESET_STATE_OF_RUN();
5938 #endif
5939 			}
5940 
5941 			if ((free_considered + substitute_needed) > free_available) {
5942 				/*
5943 				 * if we let this run continue
5944 				 * we will end up dropping the vm_page_free_count
5945 				 * below the reserve limit... we need to abort
5946 				 * this run, but we can at least re-consider this
5947 				 * page... thus the jump back to 'retry'
5948 				 */
5949 				RESET_STATE_OF_RUN();
5950 
5951 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
5952 					considered++;
5953 					goto retry;
5954 				}
5955 				/*
5956 				 * free_available == 0
5957 				 * so can't consider any free pages... if
5958 				 * we went to retry in this case, we'd
5959 				 * get stuck looking at the same page
5960 				 * w/o making any forward progress
5961 				 * we also want to take this path if we've already
5962 				 * reached our limit that controls the lock latency
5963 				 */
5964 			}
5965 		}
5966 did_consider:
5967 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
5968 			PAGE_REPLACEMENT_ALLOWED(FALSE);
5969 
5970 			lck_mtx_unlock(&vm_page_queue_free_lock);
5971 			vm_page_unlock_queues();
5972 
5973 			mutex_pause(0);
5974 
5975 			PAGE_REPLACEMENT_ALLOWED(TRUE);
5976 
5977 			vm_page_lock_queues();
5978 			lck_mtx_lock(&vm_page_queue_free_lock);
5979 
5980 			RESET_STATE_OF_RUN();
5981 			/*
5982 			 * reset our free page limit since we
5983 			 * dropped the lock protecting the vm_page_free_queue
5984 			 */
5985 			free_available = vm_page_free_count - vm_page_free_reserved;
5986 			considered = 0;
5987 
5988 			yielded++;
5989 
5990 			goto retry;
5991 		}
5992 		considered++;
5993 	}
5994 	m = VM_PAGE_NULL;
5995 
5996 	if (npages != contig_pages) {
5997 		if (!wrapped) {
5998 			/*
5999 			 * We didn't find a contiguous range but we didn't
6000 			 * start from the very first page.
6001 			 * Start again from the very first page.
6002 			 */
6003 			RESET_STATE_OF_RUN();
6004 			if (flags & KMA_LOMEM) {
6005 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6006 			} else {
6007 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6008 			}
6009 			last_idx = 0;
6010 			page_idx = last_idx;
6011 			wrapped = TRUE;
6012 			goto retry;
6013 		}
6014 		lck_mtx_unlock(&vm_page_queue_free_lock);
6015 	} else {
6016 		vm_page_t       m1;
6017 		vm_page_t       m2;
6018 		unsigned int    cur_idx;
6019 		unsigned int    tmp_start_idx;
6020 		vm_object_t     locked_object = VM_OBJECT_NULL;
6021 		boolean_t       abort_run = FALSE;
6022 
6023 		assert(page_idx - start_idx == contig_pages);
6024 
6025 		tmp_start_idx = start_idx;
6026 
6027 		/*
6028 		 * first pass through to pull the free pages
6029 		 * off of the free queue so that in case we
6030 		 * need substitute pages, we won't grab any
6031 		 * of the free pages in the run... we'll clear
6032 		 * the 'free' bit in the 2nd pass, and even in
6033 		 * an abort_run case, we'll collect all of the
6034 		 * free pages in this run and return them to the free list
6035 		 */
6036 		while (start_idx < page_idx) {
6037 			m1 = &vm_pages[start_idx++];
6038 
6039 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6040 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6041 #endif
6042 
6043 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6044 				unsigned int color;
6045 
6046 				color = VM_PAGE_GET_COLOR(m1);
6047 #if MACH_ASSERT
6048 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6049 #endif
6050 				vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6051 
6052 				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6053 #if MACH_ASSERT
6054 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6055 #endif
6056 				/*
6057 				 * Clear the "free" bit so that this page
6058 				 * does not get considered for another
6059 				 * concurrent physically-contiguous allocation.
6060 				 */
6061 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6062 				assert(m1->vmp_busy);
6063 
6064 				vm_page_free_count--;
6065 			}
6066 		}
6067 		if (flags & KMA_LOMEM) {
6068 			vm_page_lomem_find_contiguous_last_idx = page_idx;
6069 		} else {
6070 			vm_page_find_contiguous_last_idx = page_idx;
6071 		}
6072 
6073 		/*
6074 		 * we can drop the free queue lock at this point since
6075 		 * we've pulled any 'free' candidates off of the list
6076 		 * we need it dropped so that we can do a vm_page_grab
6077 		 * when substituting for pmapped/dirty pages
6078 		 */
6079 		lck_mtx_unlock(&vm_page_queue_free_lock);
6080 
6081 		start_idx = tmp_start_idx;
6082 		cur_idx = page_idx - 1;
6083 
6084 		while (start_idx++ < page_idx) {
6085 			/*
6086 			 * must go through the list from back to front
6087 			 * so that the page list is created in the
6088 			 * correct order - low -> high phys addresses
6089 			 */
6090 			m1 = &vm_pages[cur_idx--];
6091 
6092 			if (m1->vmp_object == 0) {
6093 				/*
6094 				 * page has already been removed from
6095 				 * the free list in the 1st pass
6096 				 */
6097 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6098 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
6099 				assert(m1->vmp_busy);
6100 				assert(!m1->vmp_wanted);
6101 				assert(!m1->vmp_laundry);
6102 			} else {
6103 				vm_object_t object;
6104 				int refmod;
6105 				boolean_t disconnected, reusable;
6106 
6107 				if (abort_run == TRUE) {
6108 					continue;
6109 				}
6110 
6111 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6112 
6113 				object = VM_PAGE_OBJECT(m1);
6114 
6115 				if (object != locked_object) {
6116 					if (locked_object) {
6117 						vm_object_unlock(locked_object);
6118 						locked_object = VM_OBJECT_NULL;
6119 					}
6120 					if (vm_object_lock_try(object)) {
6121 						locked_object = object;
6122 					}
6123 				}
6124 				if (locked_object == VM_OBJECT_NULL ||
6125 				    (VM_PAGE_WIRED(m1) || m1->vmp_gobbled ||
6126 				    m1->vmp_laundry || m1->vmp_wanted ||
6127 				    m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) ||
6128 				    (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6129 					if (locked_object) {
6130 						vm_object_unlock(locked_object);
6131 						locked_object = VM_OBJECT_NULL;
6132 					}
6133 					tmp_start_idx = cur_idx;
6134 					abort_run = TRUE;
6135 					continue;
6136 				}
6137 
6138 				disconnected = FALSE;
6139 				reusable = FALSE;
6140 
6141 				if ((m1->vmp_reusable ||
6142 				    object->all_reusable) &&
6143 				    (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
6144 				    !m1->vmp_dirty &&
6145 				    !m1->vmp_reference) {
6146 					/* reusable page... */
6147 					refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6148 					disconnected = TRUE;
6149 					if (refmod == 0) {
6150 						/*
6151 						 * ... not reused: can steal
6152 						 * without relocating contents.
6153 						 */
6154 						reusable = TRUE;
6155 					}
6156 				}
6157 
6158 				if ((m1->vmp_pmapped &&
6159 				    !reusable) ||
6160 				    m1->vmp_dirty ||
6161 				    m1->vmp_precious) {
6162 					vm_object_offset_t offset;
6163 
6164 					m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6165 
6166 					if (m2 == VM_PAGE_NULL) {
6167 						if (locked_object) {
6168 							vm_object_unlock(locked_object);
6169 							locked_object = VM_OBJECT_NULL;
6170 						}
6171 						tmp_start_idx = cur_idx;
6172 						abort_run = TRUE;
6173 						continue;
6174 					}
6175 					if (!disconnected) {
6176 						if (m1->vmp_pmapped) {
6177 							refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6178 						} else {
6179 							refmod = 0;
6180 						}
6181 					}
6182 
6183 					/* copy the page's contents */
6184 					pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6185 					/* copy the page's state */
6186 					assert(!VM_PAGE_WIRED(m1));
6187 					assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6188 					assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6189 					assert(!m1->vmp_laundry);
6190 					m2->vmp_reference       = m1->vmp_reference;
6191 					assert(!m1->vmp_gobbled);
6192 					assert(!m1->vmp_private);
6193 					m2->vmp_no_cache        = m1->vmp_no_cache;
6194 					m2->vmp_xpmapped        = 0;
6195 					assert(!m1->vmp_busy);
6196 					assert(!m1->vmp_wanted);
6197 					assert(!m1->vmp_fictitious);
6198 					m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6199 					m2->vmp_wpmapped        = m1->vmp_wpmapped;
6200 					assert(!m1->vmp_free_when_done);
6201 					m2->vmp_absent  = m1->vmp_absent;
6202 					m2->vmp_error   = m1->vmp_error;
6203 					m2->vmp_dirty   = m1->vmp_dirty;
6204 					assert(!m1->vmp_cleaning);
6205 					m2->vmp_precious        = m1->vmp_precious;
6206 					m2->vmp_clustered       = m1->vmp_clustered;
6207 					assert(!m1->vmp_overwriting);
6208 					m2->vmp_restart = m1->vmp_restart;
6209 					m2->vmp_unusual = m1->vmp_unusual;
6210 					m2->vmp_cs_validated = m1->vmp_cs_validated;
6211 					m2->vmp_cs_tainted      = m1->vmp_cs_tainted;
6212 					m2->vmp_cs_nx   = m1->vmp_cs_nx;
6213 
6214 					/*
6215 					 * If m1 had really been reusable,
6216 					 * we would have just stolen it, so
6217 					 * let's not propagate its "reusable"
6218 					 * bit and assert that m2 is not
6219 					 * marked as "reusable".
6220 					 */
6221 					// m2->vmp_reusable	= m1->vmp_reusable;
6222 					assert(!m2->vmp_reusable);
6223 
6224 					// assert(!m1->vmp_lopage);
6225 
6226 					if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6227 						m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6228 					}
6229 
6230 					/*
6231 					 * page may need to be flushed if
6232 					 * it is marshalled into a UPL
6233 					 * that is going to be used by a device
6234 					 * that doesn't support coherency
6235 					 */
6236 					m2->vmp_written_by_kernel = TRUE;
6237 
6238 					/*
6239 					 * make sure we clear the ref/mod state
6240 					 * from the pmap layer... else we risk
6241 					 * inheriting state from the last time
6242 					 * this page was used...
6243 					 */
6244 					pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6245 
6246 					if (refmod & VM_MEM_REFERENCED) {
6247 						m2->vmp_reference = TRUE;
6248 					}
6249 					if (refmod & VM_MEM_MODIFIED) {
6250 						SET_PAGE_DIRTY(m2, TRUE);
6251 					}
6252 					offset = m1->vmp_offset;
6253 
6254 					/*
6255 					 * completely cleans up the state
6256 					 * of the page so that it is ready
6257 					 * to be put onto the free list, or
6258 					 * for this purpose it looks like it
6259 					 * just came off of the free list
6260 					 */
6261 					vm_page_free_prepare(m1);
6262 
6263 					/*
6264 					 * now put the substitute page
6265 					 * on the object
6266 					 */
6267 					vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
6268 
6269 					if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6270 						m2->vmp_pmapped = TRUE;
6271 						m2->vmp_wpmapped = TRUE;
6272 
6273 						PMAP_ENTER(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6274 						    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr);
6275 
6276 						assert(kr == KERN_SUCCESS);
6277 
6278 						compressed_pages++;
6279 					} else {
6280 						if (m2->vmp_reference) {
6281 							vm_page_activate(m2);
6282 						} else {
6283 							vm_page_deactivate(m2);
6284 						}
6285 					}
6286 					PAGE_WAKEUP_DONE(m2);
6287 				} else {
6288 					assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6289 
6290 					/*
6291 					 * completely cleans up the state
6292 					 * of the page so that it is ready
6293 					 * to be put onto the free list, or
6294 					 * for this purpose it looks like it
6295 					 * just came off of the free list
6296 					 */
6297 					vm_page_free_prepare(m1);
6298 				}
6299 
6300 				stolen_pages++;
6301 			}
6302 #if CONFIG_BACKGROUND_QUEUE
6303 			vm_page_assign_background_state(m1);
6304 #endif
6305 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6306 			m1->vmp_snext = m;
6307 			m = m1;
6308 		}
6309 		if (locked_object) {
6310 			vm_object_unlock(locked_object);
6311 			locked_object = VM_OBJECT_NULL;
6312 		}
6313 
6314 		if (abort_run == TRUE) {
6315 			/*
6316 			 * want the index of the last
6317 			 * page in this run that was
6318 			 * successfully 'stolen', so back
6319 			 * it up 1 for the auto-decrement on use
6320 			 * and 1 more to bump back over this page
6321 			 */
6322 			page_idx = tmp_start_idx + 2;
6323 			if (page_idx >= vm_pages_count) {
6324 				if (wrapped) {
6325 					if (m != VM_PAGE_NULL) {
6326 						vm_page_unlock_queues();
6327 						vm_page_free_list(m, FALSE);
6328 						vm_page_lock_queues();
6329 						m = VM_PAGE_NULL;
6330 					}
6331 					dumped_run++;
6332 					goto done_scanning;
6333 				}
6334 				page_idx = last_idx = 0;
6335 				wrapped = TRUE;
6336 			}
6337 			abort_run = FALSE;
6338 
6339 			/*
6340 			 * We didn't find a contiguous range but we didn't
6341 			 * start from the very first page.
6342 			 * Start again from the very first page.
6343 			 */
6344 			RESET_STATE_OF_RUN();
6345 
6346 			if (flags & KMA_LOMEM) {
6347 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
6348 			} else {
6349 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6350 			}
6351 
6352 			last_idx = page_idx;
6353 
6354 			if (m != VM_PAGE_NULL) {
6355 				vm_page_unlock_queues();
6356 				vm_page_free_list(m, FALSE);
6357 				vm_page_lock_queues();
6358 				m = VM_PAGE_NULL;
6359 			}
6360 			dumped_run++;
6361 
6362 			lck_mtx_lock(&vm_page_queue_free_lock);
6363 			/*
6364 			 * reset our free page limit since we
6365 			 * dropped the lock protecting the vm_page_free_queue
6366 			 */
6367 			free_available = vm_page_free_count - vm_page_free_reserved;
6368 			goto retry;
6369 		}
6370 
6371 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6372 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6373 			assert(m1->vmp_wire_count == 0);
6374 
6375 			if (wire == TRUE) {
6376 				m1->vmp_wire_count++;
6377 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
6378 			} else {
6379 				m1->vmp_gobbled = TRUE;
6380 			}
6381 		}
6382 		if (wire == FALSE) {
6383 			vm_page_gobble_count += npages;
6384 		}
6385 
6386 		/*
6387 		 * gobbled pages are also counted as wired pages
6388 		 */
6389 		vm_page_wire_count += npages;
6390 
6391 		assert(vm_page_verify_contiguous(m, npages));
6392 	}
6393 done_scanning:
6394 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6395 
6396 	vm_page_unlock_queues();
6397 
6398 #if DEBUG
6399 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6400 
6401 	tv_end_sec -= tv_start_sec;
6402 	if (tv_end_usec < tv_start_usec) {
6403 		tv_end_sec--;
6404 		tv_end_usec += 1000000;
6405 	}
6406 	tv_end_usec -= tv_start_usec;
6407 	if (tv_end_usec >= 1000000) {
6408 		tv_end_sec++;
6409 		tv_end_usec -= 1000000;
6410 	}
6411 	if (vm_page_find_contig_debug) {
6412 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
6413 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6414 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
6415 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6416 	}
6417 
6418 #endif
6419 #if MACH_ASSERT
6420 	vm_page_verify_free_lists();
6421 #endif
6422 	if (m == NULL && zone_gc_called < 2) {
6423 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6424 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6425 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6426 
6427 		if (consider_buffer_cache_collect != NULL) {
6428 			(void)(*consider_buffer_cache_collect)(1);
6429 		}
6430 
6431 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6432 
6433 		zone_gc_called++;
6434 
6435 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6436 		goto full_scan_again;
6437 	}
6438 
6439 	return m;
6440 }
6441 
6442 /*
6443  *	Allocate a list of contiguous, wired pages.
6444  */
6445 kern_return_t
6446 cpm_allocate(
6447 	vm_size_t       size,
6448 	vm_page_t       *list,
6449 	ppnum_t         max_pnum,
6450 	ppnum_t         pnum_mask,
6451 	boolean_t       wire,
6452 	int             flags)
6453 {
6454 	vm_page_t               pages;
6455 	unsigned int            npages;
6456 
6457 	if (size % PAGE_SIZE != 0) {
6458 		return KERN_INVALID_ARGUMENT;
6459 	}
6460 
6461 	npages = (unsigned int) (size / PAGE_SIZE);
6462 	if (npages != size / PAGE_SIZE) {
6463 		/* 32-bit overflow */
6464 		return KERN_INVALID_ARGUMENT;
6465 	}
6466 
6467 	/*
6468 	 *	Obtain a pointer to a subset of the free
6469 	 *	list large enough to satisfy the request;
6470 	 *	the region will be physically contiguous.
6471 	 */
6472 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6473 
6474 	if (pages == VM_PAGE_NULL) {
6475 		return KERN_NO_SPACE;
6476 	}
6477 	/*
6478 	 * determine need for wakeups
6479 	 */
6480 	if (vm_page_free_count < vm_page_free_min) {
6481 		lck_mtx_lock(&vm_page_queue_free_lock);
6482 		if (vm_pageout_running == FALSE) {
6483 			lck_mtx_unlock(&vm_page_queue_free_lock);
6484 			thread_wakeup((event_t) &vm_page_free_wanted);
6485 		} else {
6486 			lck_mtx_unlock(&vm_page_queue_free_lock);
6487 		}
6488 	}
6489 
6490 	VM_CHECK_MEMORYSTATUS;
6491 
6492 	/*
6493 	 *	The CPM pages should now be available and
6494 	 *	ordered by ascending physical address.
6495 	 */
6496 	assert(vm_page_verify_contiguous(pages, npages));
6497 
6498 	*list = pages;
6499 	return KERN_SUCCESS;
6500 }
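
/*
 * Illustrative usage sketch (not compiled): a caller needing four
 * physically contiguous, wired pages anywhere in physical memory.
 * Passing 0 for max_pnum places no ceiling on the physical page number,
 * and passing 0 for pnum_mask imposes no alignment constraint.
 */
#if 0
static kern_return_t
example_grab_contiguous_run(vm_page_t *out_pages)
{
	return cpm_allocate(4 * PAGE_SIZE, out_pages,
	    0 /* max_pnum */, 0 /* pnum_mask */,
	    TRUE /* wire */, 0 /* flags */);
}
#endif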
6501 
6502 
6503 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6504 
6505 /*
6506  * when working on a 'run' of pages, it is necessary to hold
6507  * the vm_page_queue_lock (a hot global lock) for certain operations
6508  * on the page... however, the majority of the work can be done
6509  * while merely holding the object lock... in fact there are certain
6510  * collections of pages that don't require any work brokered by the
6511  * vm_page_queue_lock... to mitigate the time spent behind the global
6512  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6513  * while doing all of the work that doesn't require the vm_page_queue_lock...
6514  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6515  * necessary work for each page... we will grab the busy bit on the page
6516  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6517  * if it can't immediately take the vm_page_queue_lock in order to compete
6518  * for the locks in the same order that vm_pageout_scan takes them.
6519  * the operation names are modeled after the names of the routines
6520  * they stand in for, to keep the changes obvious relative to the
6521  * original loop
6522  */
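
/*
 * Illustrative sketch (not compiled) of the two-pass pattern described
 * above: batch per-page work items while holding only the object lock,
 * then flush them with one vm_page_do_delayed_work() call, which takes
 * the vm_page_queue_lock once per batch (and may temporarily drop and
 * retake the object lock).  The batch-filling logic and the choice of
 * DW_vm_page_activate here are hypothetical.
 */
#if 0
static void
example_activate_resident_pages(vm_object_t object, vm_tag_t tag)
{
	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work *dwp = &dw_array[0];
	int dw_count = 0;
	vm_page_t m;

	vm_object_lock(object);

	vm_page_queue_iterate(&object->memq, m, vmp_listq) {
		dwp->dw_m = m;
		dwp->dw_mask = DW_vm_page_activate;
		dwp++;
		dw_count++;

		if (dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
	}
	vm_object_unlock(object);
}
#endif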
6523 
6524 void
6525 vm_page_do_delayed_work(
6526 	vm_object_t     object,
6527 	vm_tag_t        tag,
6528 	struct vm_page_delayed_work *dwp,
6529 	int             dw_count)
6530 {
6531 	int             j;
6532 	vm_page_t       m;
6533 	vm_page_t       local_free_q = VM_PAGE_NULL;
6534 
6535 	/*
6536 	 * pageout_scan takes the vm_page_lock_queues first
6537 	 * then tries for the object lock... to avoid what
6538 	 * is effectively a lock inversion, we'll go to the
6539 	 * trouble of taking them in that same order... otherwise
6540 	 * if this object contains the majority of the pages resident
6541 	 * in the UBC (or a small set of large objects actively being
6542 	 * worked on contain the majority of the pages), we could
6543 	 * cause the pageout_scan thread to 'starve' in its attempt
6544 	 * to find pages to move to the free queue, since it has to
6545 	 * successfully acquire the object lock of any candidate page
6546 	 * before it can steal/clean it.
6547 	 */
6548 	if (!vm_page_trylockspin_queues()) {
6549 		vm_object_unlock(object);
6550 
6551 		/*
6552 		 * "Turnstile enabled vm_pageout_scan" can be runnable
6553 		 * for a very long time without getting on a core.
6554 		 * If this is a higher priority thread, it could end up
6555 		 * waiting here for a very long time out of deference to
6556 		 * pageout_scan wanting this object, with VPS doing a
6557 		 * mutex_pause(0) on each iteration.
6558 		 * So we cap the number of yields in the vm_object_lock_avoid()
6559 		 * case to a single mutex_pause(0) which will give vm_pageout_scan
6560 		 * 10us to run and grab the object if needed.
6561 		 */
6562 		vm_page_lockspin_queues();
6563 
6564 		for (j = 0;; j++) {
6565 			if ((!vm_object_lock_avoid(object) ||
6566 			    (vps_dynamic_priority_enabled && (j > 0))) &&
6567 			    _vm_object_lock_try(object)) {
6568 				break;
6569 			}
6570 			vm_page_unlock_queues();
6571 			mutex_pause(j);
6572 			vm_page_lockspin_queues();
6573 		}
6574 	}
6575 	for (j = 0; j < dw_count; j++, dwp++) {
6576 		m = dwp->dw_m;
6577 
6578 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6579 			vm_pageout_throttle_up(m);
6580 		}
6581 #if CONFIG_PHANTOM_CACHE
6582 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6583 			vm_phantom_cache_update(m);
6584 		}
6585 #endif
6586 		if (dwp->dw_mask & DW_vm_page_wire) {
6587 			vm_page_wire(m, tag, FALSE);
6588 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
6589 			boolean_t       queueit;
6590 
6591 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6592 
6593 			vm_page_unwire(m, queueit);
6594 		}
6595 		if (dwp->dw_mask & DW_vm_page_free) {
6596 			vm_page_free_prepare_queues(m);
6597 
6598 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6599 			/*
6600 			 * Add this page to our list of reclaimed pages,
6601 			 * to be freed later.
6602 			 */
6603 			m->vmp_snext = local_free_q;
6604 			local_free_q = m;
6605 		} else {
6606 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6607 				vm_page_deactivate_internal(m, FALSE);
6608 			} else if (dwp->dw_mask & DW_vm_page_activate) {
6609 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6610 					vm_page_activate(m);
6611 				}
6612 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
6613 				vm_page_speculate(m, TRUE);
6614 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
6615 				/*
6616 				 * if we didn't hold the object lock and did this,
6617 				 * we might disconnect the page, then someone might
6618 				 * soft fault it back in, then we would put it on the
6619 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
6620 				 * page on that queue, which we don't want
6621 				 */
6622 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6623 
6624 				if ((refmod_state & VM_MEM_REFERENCED)) {
6625 					/*
6626 					 * this page has been touched since it got cleaned; let's activate it
6627 					 * if it hasn't already been
6628 					 */
6629 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6630 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6631 
6632 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6633 						vm_page_activate(m);
6634 					}
6635 				} else {
6636 					m->vmp_reference = FALSE;
6637 					vm_page_enqueue_cleaned(m);
6638 				}
6639 			} else if (dwp->dw_mask & DW_vm_page_lru) {
6640 				vm_page_lru(m);
6641 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6642 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6643 					vm_page_queues_remove(m, TRUE);
6644 				}
6645 			}
6646 			if (dwp->dw_mask & DW_set_reference) {
6647 				m->vmp_reference = TRUE;
6648 			} else if (dwp->dw_mask & DW_clear_reference) {
6649 				m->vmp_reference = FALSE;
6650 			}
6651 
6652 			if (dwp->dw_mask & DW_move_page) {
6653 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6654 					vm_page_queues_remove(m, FALSE);
6655 
6656 					assert(VM_PAGE_OBJECT(m) != kernel_object);
6657 
6658 					vm_page_enqueue_inactive(m, FALSE);
6659 				}
6660 			}
6661 			if (dwp->dw_mask & DW_clear_busy) {
6662 				m->vmp_busy = FALSE;
6663 			}
6664 
6665 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6666 				PAGE_WAKEUP(m);
6667 			}
6668 		}
6669 	}
6670 	vm_page_unlock_queues();
6671 
6672 	if (local_free_q) {
6673 		vm_page_free_list(local_free_q, TRUE);
6674 	}
6675 
6676 	VM_CHECK_MEMORYSTATUS;
6677 }
6678 
6679 kern_return_t
6680 vm_page_alloc_list(
6681 	int         page_count,
6682 	kma_flags_t flags,
6683 	vm_page_t  *list)
6684 {
6685 	vm_page_t       page_list = VM_PAGE_NULL;
6686 	vm_page_t       mem;
6687 	kern_return_t   kr = KERN_SUCCESS;
6688 	int             page_grab_count = 0;
6689 	mach_vm_size_t  map_size = ptoa_64(page_count);
6690 #if DEVELOPMENT || DEBUG
6691 	task_t          task = current_task_early();
6692 #endif /* DEVELOPMENT || DEBUG */
6693 
6694 	for (int i = 0; i < page_count; i++) {
6695 		for (;;) {
6696 			if (flags & KMA_LOMEM) {
6697 				mem = vm_page_grablo();
6698 			} else {
6699 				mem = vm_page_grab();
6700 			}
6701 
6702 			if (mem != VM_PAGE_NULL) {
6703 				break;
6704 			}
6705 
6706 			if (flags & KMA_NOPAGEWAIT) {
6707 				kr = KERN_RESOURCE_SHORTAGE;
6708 				goto out;
6709 			}
6710 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
6711 				kr = KERN_RESOURCE_SHORTAGE;
6712 				goto out;
6713 			}
6714 
6715 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
6716 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
6717 
6718 			uint64_t unavailable = (vm_page_wire_count + vm_page_free_target) * PAGE_SIZE;
6719 			if (unavailable > max_mem || map_size > (max_mem - unavailable)) {
6720 				kr = KERN_RESOURCE_SHORTAGE;
6721 				goto out;
6722 			}
6723 			VM_PAGE_WAIT();
6724 		}
6725 
6726 		page_grab_count++;
6727 		mem->vmp_snext = page_list;
6728 		page_list = mem;
6729 	}
6730 
6731 	if (KMA_ZERO & flags) {
6732 		for (mem = page_list; mem; mem = mem->vmp_snext) {
6733 			vm_page_zero_fill(mem);
6734 		}
6735 	}
6736 
6737 out:
6738 #if DEBUG || DEVELOPMENT
6739 	if (task != NULL) {
6740 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
6741 	}
6742 #endif
6743 
6744 	if (kr == KERN_SUCCESS) {
6745 		*list = page_list;
6746 	} else {
6747 		vm_page_free_list(page_list, FALSE);
6748 	}
6749 
6750 	return kr;
6751 }
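/*
 * Illustrative sketch (compiled out) of how a caller might drive
 * vm_page_alloc_list() and the accessors that follow: grab a batch of
 * zero-filled pages, walk the vmp_snext chain, and hand them back.
 * "example_grab_pages" is a hypothetical name, not an XNU interface.
 */
#if 0
static kern_return_t
example_grab_pages(int npages)
{
	vm_page_t       list, p;
	kern_return_t   kr;

	/* KMA_ZERO asks vm_page_alloc_list() to zero-fill every page */
	kr = vm_page_alloc_list(npages, KMA_ZERO, &list);
	if (kr != KERN_SUCCESS) {
		return kr;      /* e.g. KERN_RESOURCE_SHORTAGE */
	}

	for (p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
		/* each page is resident, unwired, and not yet in any object */
	}

	vm_page_free_list(list, FALSE);         /* return the whole chain */
	return KERN_SUCCESS;
}
#endif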
6752 
6753 void
6754 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
6755 {
6756 	page->vmp_offset = offset;
6757 }
6758 
6759 vm_page_t
6760 vm_page_get_next(vm_page_t page)
6761 {
6762 	return page->vmp_snext;
6763 }
6764 
6765 vm_object_offset_t
6766 vm_page_get_offset(vm_page_t page)
6767 {
6768 	return page->vmp_offset;
6769 }
6770 
6771 ppnum_t
6772 vm_page_get_phys_page(vm_page_t page)
6773 {
6774 	return VM_PAGE_GET_PHYS_PAGE(page);
6775 }
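/*
 * The accessors above are deliberately trivial: they let code outside
 * this module, which may only see vm_page_t as an opaque pointer, read
 * (or, for the offset, set) the fields it needs without the struct
 * vm_page definition.  (Inferred from their shape; no caller is visible
 * in this file.)
 */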
6776 
6777 
6778 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6779 
6780 #if HIBERNATION
6781 
6782 static vm_page_t hibernate_gobble_queue;
6783 
6784 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
6785 static int  hibernate_flush_dirty_pages(int);
6786 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
6787 
6788 void hibernate_flush_wait(void);
6789 void hibernate_mark_in_progress(void);
6790 void hibernate_clear_in_progress(void);
6791 
6792 void            hibernate_free_range(int, int);
6793 void            hibernate_hash_insert_page(vm_page_t);
6794 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
6795 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
6796 ppnum_t         hibernate_lookup_paddr(unsigned int);
6797 
6798 struct hibernate_statistics {
6799 	int hibernate_considered;
6800 	int hibernate_reentered_on_q;
6801 	int hibernate_found_dirty;
6802 	int hibernate_skipped_cleaning;
6803 	int hibernate_skipped_transient;
6804 	int hibernate_skipped_precious;
6805 	int hibernate_skipped_external;
6806 	int hibernate_queue_nolock;
6807 	int hibernate_queue_paused;
6808 	int hibernate_throttled;
6809 	int hibernate_throttle_timeout;
6810 	int hibernate_drained;
6811 	int hibernate_drain_timeout;
6812 	int cd_lock_failed;
6813 	int cd_found_precious;
6814 	int cd_found_wired;
6815 	int cd_found_busy;
6816 	int cd_found_unusual;
6817 	int cd_found_cleaning;
6818 	int cd_found_laundry;
6819 	int cd_found_dirty;
6820 	int cd_found_xpmapped;
6821 	int cd_skipped_xpmapped;
6822 	int cd_local_free;
6823 	int cd_total_free;
6824 	int cd_vm_page_wire_count;
6825 	int cd_vm_struct_pages_unneeded;
6826 	int cd_pages;
6827 	int cd_discarded;
6828 	int cd_count_wire;
6829 } hibernate_stats;
6830 
6831 
6832 /*
6833  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
6834  * so that we don't overrun the estimated image size, which would
6835  * result in a hibernation failure.
6836  *
6837  * We use a size value instead of pages because we don't want to take up more space
6838  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
6839  * to have that additional space available.
6840  *
6841  * Since this was set at 40000 pages on X86 we are going to use 160MB as our
6842  * xpmapped size.
6843  */
6844 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
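/*
 * Worked example of the clamp: 160MB / 4KB pages = 40960 pages, in line
 * with the historical x86 setting of 40000, while the same byte budget
 * on a 16KB-page system yields 10240 pages.
 */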
6845 
6846 
6847 static int
6848 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
6849 {
6850 	wait_result_t   wait_result;
6851 
6852 	vm_page_lock_queues();
6853 
6854 	while (!vm_page_queue_empty(&q->pgo_pending)) {
6855 		q->pgo_draining = TRUE;
6856 
6857 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
6858 
6859 		vm_page_unlock_queues();
6860 
6861 		wait_result = thread_block(THREAD_CONTINUE_NULL);
6862 
6863 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
6864 			hibernate_stats.hibernate_drain_timeout++;
6865 
6866 			if (q == &vm_pageout_queue_external) {
6867 				return 0;
6868 			}
6869 
6870 			return 1;
6871 		}
6872 		vm_page_lock_queues();
6873 
6874 		hibernate_stats.hibernate_drained++;
6875 	}
6876 	vm_page_unlock_queues();
6877 
6878 	return 0;
6879 }
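/*
 * The loop above is a simple drain handshake: setting pgo_draining asks
 * the pageout thread to post wakeups on (event_t)(&q->pgo_laundry + 1)
 * as the queue empties, and the 5000ms assert_wait_timeout bounds how
 * long we wait for any single wakeup.  A timeout on the external queue
 * is tolerated (return 0); on any other queue it fails the drain.
 */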
6880 
6881 
6882 boolean_t hibernate_skip_external = FALSE;
6883 
6884 static int
6885 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
6886 {
6887 	vm_page_t       m;
6888 	vm_object_t     l_object = NULL;
6889 	vm_object_t     m_object = NULL;
6890 	int             refmod_state = 0;
6891 	int             try_failed_count = 0;
6892 	int             retval = 0;
6893 	int             current_run = 0;
6894 	struct  vm_pageout_queue *iq;
6895 	struct  vm_pageout_queue *eq;
6896 	struct  vm_pageout_queue *tq;
6897 
6898 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
6899 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
6900 
6901 	iq = &vm_pageout_queue_internal;
6902 	eq = &vm_pageout_queue_external;
6903 
6904 	vm_page_lock_queues();
6905 
6906 	while (qcount && !vm_page_queue_empty(q)) {
6907 		if (current_run++ == 1000) {
6908 			if (hibernate_should_abort()) {
6909 				retval = 1;
6910 				break;
6911 			}
6912 			current_run = 0;
6913 		}
6914 
6915 		m = (vm_page_t) vm_page_queue_first(q);
6916 		m_object = VM_PAGE_OBJECT(m);
6917 
6918 		/*
6919 		 * check to see if we currently are working
6920 		 * with the same object... if so, we've
6921 		 * already got the lock
6922 		 */
6923 		if (m_object != l_object) {
6924 			/*
6925 			 * the object associated with candidate page is
6926 			 * different from the one we were just working
6927 			 * with... dump the lock if we still own it
6928 			 */
6929 			if (l_object != NULL) {
6930 				vm_object_unlock(l_object);
6931 				l_object = NULL;
6932 			}
6933 			/*
6934 			 * Try to lock object; since we've alread got the
6935 			 * Try to lock object; since we've already got the
6936 			 * if the 'try' fails, we need to do a mutex_pause
6937 			 * to allow the owner of the object lock a chance to
6938 			 * run...
6939 			 */
6940 			if (!vm_object_lock_try_scan(m_object)) {
6941 				if (try_failed_count > 20) {
6942 					hibernate_stats.hibernate_queue_nolock++;
6943 
6944 					goto reenter_pg_on_q;
6945 				}
6946 
6947 				vm_page_unlock_queues();
6948 				mutex_pause(try_failed_count++);
6949 				vm_page_lock_queues();
6950 
6951 				hibernate_stats.hibernate_queue_paused++;
6952 				continue;
6953 			} else {
6954 				l_object = m_object;
6955 			}
6956 		}
6957 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || m->vmp_error) {
6958 			/*
6959 			 * page is not to be cleaned
6960 			 * put it back on the head of its queue
6961 			 */
6962 			if (m->vmp_cleaning) {
6963 				hibernate_stats.hibernate_skipped_cleaning++;
6964 			} else {
6965 				hibernate_stats.hibernate_skipped_transient++;
6966 			}
6967 
6968 			goto reenter_pg_on_q;
6969 		}
6970 		if (m_object->copy == VM_OBJECT_NULL) {
6971 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
6972 				/*
6973 				 * let the normal hibernate image path
6974 				 * deal with these
6975 				 */
6976 				goto reenter_pg_on_q;
6977 			}
6978 		}
6979 		if (!m->vmp_dirty && m->vmp_pmapped) {
6980 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
6981 
6982 			if ((refmod_state & VM_MEM_MODIFIED)) {
6983 				SET_PAGE_DIRTY(m, FALSE);
6984 			}
6985 		} else {
6986 			refmod_state = 0;
6987 		}
6988 
6989 		if (!m->vmp_dirty) {
6990 			/*
6991 			 * page is not to be cleaned
6992 			 * put it back on the head of its queue
6993 			 */
6994 			if (m->vmp_precious) {
6995 				hibernate_stats.hibernate_skipped_precious++;
6996 			}
6997 
6998 			goto reenter_pg_on_q;
6999 		}
7000 
7001 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7002 			hibernate_stats.hibernate_skipped_external++;
7003 
7004 			goto reenter_pg_on_q;
7005 		}
7006 		tq = NULL;
7007 
7008 		if (m_object->internal) {
7009 			if (VM_PAGE_Q_THROTTLED(iq)) {
7010 				tq = iq;
7011 			}
7012 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7013 			tq = eq;
7014 		}
7015 
7016 		if (tq != NULL) {
7017 			wait_result_t   wait_result;
7018 			int             wait_count = 5;
7019 
7020 			if (l_object != NULL) {
7021 				vm_object_unlock(l_object);
7022 				l_object = NULL;
7023 			}
7024 
7025 			while (retval == 0) {
7026 				tq->pgo_throttled = TRUE;
7027 
7028 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7029 
7030 				vm_page_unlock_queues();
7031 
7032 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7033 
7034 				vm_page_lock_queues();
7035 
7036 				if (wait_result != THREAD_TIMED_OUT) {
7037 					break;
7038 				}
7039 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7040 					break;
7041 				}
7042 
7043 				if (hibernate_should_abort()) {
7044 					retval = 1;
7045 				}
7046 
7047 				if (--wait_count == 0) {
7048 					hibernate_stats.hibernate_throttle_timeout++;
7049 
7050 					if (tq == eq) {
7051 						hibernate_skip_external = TRUE;
7052 						break;
7053 					}
7054 					retval = 1;
7055 				}
7056 			}
7057 			if (retval) {
7058 				break;
7059 			}
7060 
7061 			hibernate_stats.hibernate_throttled++;
7062 
7063 			continue;
7064 		}
7065 		/*
7066 		 * we've already factored out pages in the laundry which
7067 		 * means this page can't be on the pageout queue so it's
7068 		 * safe to do the vm_page_queues_remove
7069 		 */
7070 		vm_page_queues_remove(m, TRUE);
7071 
7072 		if (m_object->internal == TRUE) {
7073 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7074 		}
7075 
7076 		vm_pageout_cluster(m);
7077 
7078 		hibernate_stats.hibernate_found_dirty++;
7079 
7080 		goto next_pg;
7081 
7082 reenter_pg_on_q:
7083 		vm_page_queue_remove(q, m, vmp_pageq);
7084 		vm_page_queue_enter(q, m, vmp_pageq);
7085 
7086 		hibernate_stats.hibernate_reentered_on_q++;
7087 next_pg:
7088 		hibernate_stats.hibernate_considered++;
7089 
7090 		qcount--;
7091 		try_failed_count = 0;
7092 	}
7093 	if (l_object != NULL) {
7094 		vm_object_unlock(l_object);
7095 		l_object = NULL;
7096 	}
7097 
7098 	vm_page_unlock_queues();
7099 
7100 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7101 
7102 	return retval;
7103 }
7104 
7105 
7106 static int
7107 hibernate_flush_dirty_pages(int pass)
7108 {
7109 	struct vm_speculative_age_q     *aq;
7110 	uint32_t        i;
7111 
7112 	if (vm_page_local_q) {
7113 		zpercpu_foreach_cpu(lid) {
7114 			vm_page_reactivate_local(lid, TRUE, FALSE);
7115 		}
7116 	}
7117 
7118 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7119 		int             qcount;
7120 		vm_page_t       m;
7121 
7122 		aq = &vm_page_queue_speculative[i];
7123 
7124 		if (vm_page_queue_empty(&aq->age_q)) {
7125 			continue;
7126 		}
7127 		qcount = 0;
7128 
7129 		vm_page_lockspin_queues();
7130 
7131 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7132 			qcount++;
7133 		}
7134 		vm_page_unlock_queues();
7135 
7136 		if (qcount) {
7137 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7138 				return 1;
7139 			}
7140 		}
7141 	}
7142 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7143 		return 1;
7144 	}
7145 	/* XXX FBDP TODO: flush secluded queue */
7146 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7147 		return 1;
7148 	}
7149 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7150 		return 1;
7151 	}
7152 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7153 		return 1;
7154 	}
7155 
7156 	if (pass == 1) {
7157 		vm_compressor_record_warmup_start();
7158 	}
7159 
7160 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7161 		if (pass == 1) {
7162 			vm_compressor_record_warmup_end();
7163 		}
7164 		return 1;
7165 	}
7166 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7167 		if (pass == 1) {
7168 			vm_compressor_record_warmup_end();
7169 		}
7170 		return 1;
7171 	}
7172 	if (pass == 1) {
7173 		vm_compressor_record_warmup_end();
7174 	}
7175 
7176 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7177 		return 1;
7178 	}
7179 
7180 	return 0;
7181 }
7182 
7183 
7184 void
7185 hibernate_reset_stats()
7186 {
7187 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7188 }
7189 
7190 
7191 int
7192 hibernate_flush_memory()
7193 {
7194 	int     retval;
7195 
7196 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7197 
7198 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7199 
7200 	hibernate_cleaning_in_progress = TRUE;
7201 	hibernate_skip_external = FALSE;
7202 
7203 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7204 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7205 
7206 		vm_compressor_flush();
7207 
7208 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7209 
7210 		if (consider_buffer_cache_collect != NULL) {
7211 			unsigned int orig_wire_count;
7212 
7213 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7214 			orig_wire_count = vm_page_wire_count;
7215 
7216 			(void)(*consider_buffer_cache_collect)(1);
7217 			zone_gc(ZONE_GC_DRAIN);
7218 
7219 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7220 
7221 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7222 		}
7223 	}
7224 	hibernate_cleaning_in_progress = FALSE;
7225 
7226 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7227 
7228 	if (retval) {
7229 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7230 	}
7231 
7232 
7233 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7234 	    hibernate_stats.hibernate_considered,
7235 	    hibernate_stats.hibernate_reentered_on_q,
7236 	    hibernate_stats.hibernate_found_dirty);
7237 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7238 	    hibernate_stats.hibernate_skipped_cleaning,
7239 	    hibernate_stats.hibernate_skipped_transient,
7240 	    hibernate_stats.hibernate_skipped_precious,
7241 	    hibernate_stats.hibernate_skipped_external,
7242 	    hibernate_stats.hibernate_queue_nolock);
7243 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7244 	    hibernate_stats.hibernate_queue_paused,
7245 	    hibernate_stats.hibernate_throttled,
7246 	    hibernate_stats.hibernate_throttle_timeout,
7247 	    hibernate_stats.hibernate_drained,
7248 	    hibernate_stats.hibernate_drain_timeout);
7249 
7250 	return retval;
7251 }
7252 
7253 
7254 static void
7255 hibernate_page_list_zero(hibernate_page_list_t *list)
7256 {
7257 	uint32_t             bank;
7258 	hibernate_bitmap_t * bitmap;
7259 
7260 	bitmap = &list->bank_bitmap[0];
7261 	for (bank = 0; bank < list->bank_count; bank++) {
7262 		uint32_t last_bit;
7263 
7264 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7265 		// set out-of-bound bits at end of bitmap.
7266 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7267 		if (last_bit) {
7268 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7269 		}
7270 
7271 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7272 	}
7273 }
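/*
 * Example of the out-of-bounds marking above: a bank spanning 35 pages
 * needs two 32-bit bitmap words and last_bit = 35 & 31 = 3, so the last
 * word is preset to 0xFFFFFFFF >> 3 = 0x1FFFFFFF.  Those low 29 bit
 * positions correspond to no real page; marking them "not to be saved"
 * up front spares the save path from range-checking the final word.
 */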
7274 
7275 void
7276 hibernate_free_gobble_pages(void)
7277 {
7278 	vm_page_t m, next;
7279 	uint32_t  count = 0;
7280 
7281 	m = (vm_page_t) hibernate_gobble_queue;
7282 	while (m) {
7283 		next = m->vmp_snext;
7284 		vm_page_free(m);
7285 		count++;
7286 		m = next;
7287 	}
7288 	hibernate_gobble_queue = VM_PAGE_NULL;
7289 
7290 	if (count) {
7291 		HIBLOG("Freed %d pages\n", count);
7292 	}
7293 }
7294 
7295 static boolean_t
7296 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7297 {
7298 	vm_object_t object = NULL;
7299 	int                  refmod_state;
7300 	boolean_t            discard = FALSE;
7301 
7302 	do {
7303 		if (m->vmp_private) {
7304 			panic("hibernate_consider_discard: private");
7305 		}
7306 
7307 		object = VM_PAGE_OBJECT(m);
7308 
7309 		if (!vm_object_lock_try(object)) {
7310 			object = NULL;
7311 			if (!preflight) {
7312 				hibernate_stats.cd_lock_failed++;
7313 			}
7314 			break;
7315 		}
7316 		if (VM_PAGE_WIRED(m)) {
7317 			if (!preflight) {
7318 				hibernate_stats.cd_found_wired++;
7319 			}
7320 			break;
7321 		}
7322 		if (m->vmp_precious) {
7323 			if (!preflight) {
7324 				hibernate_stats.cd_found_precious++;
7325 			}
7326 			break;
7327 		}
7328 		if (m->vmp_busy || !object->alive) {
7329 			/*
7330 			 *	Somebody is playing with this page.
7331 			 */
7332 			if (!preflight) {
7333 				hibernate_stats.cd_found_busy++;
7334 			}
7335 			break;
7336 		}
7337 		if (m->vmp_absent || m->vmp_unusual || m->vmp_error) {
7338 			/*
7339 			 * If it's unusual in anyway, ignore it
7340 			 * If it's unusual in any way, ignore it
7341 			if (!preflight) {
7342 				hibernate_stats.cd_found_unusual++;
7343 			}
7344 			break;
7345 		}
7346 		if (m->vmp_cleaning) {
7347 			if (!preflight) {
7348 				hibernate_stats.cd_found_cleaning++;
7349 			}
7350 			break;
7351 		}
7352 		if (m->vmp_laundry) {
7353 			if (!preflight) {
7354 				hibernate_stats.cd_found_laundry++;
7355 			}
7356 			break;
7357 		}
7358 		if (!m->vmp_dirty) {
7359 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7360 
7361 			if (refmod_state & VM_MEM_REFERENCED) {
7362 				m->vmp_reference = TRUE;
7363 			}
7364 			if (refmod_state & VM_MEM_MODIFIED) {
7365 				SET_PAGE_DIRTY(m, FALSE);
7366 			}
7367 		}
7368 
7369 		/*
7370 		 * If it's clean or purgeable we can discard the page on wakeup.
7371 		 */
7372 		discard = (!m->vmp_dirty)
7373 		    || (VM_PURGABLE_VOLATILE == object->purgable)
7374 		    || (VM_PURGABLE_EMPTY == object->purgable);
7375 
7376 
7377 		if (discard == FALSE) {
7378 			if (!preflight) {
7379 				hibernate_stats.cd_found_dirty++;
7380 			}
7381 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7382 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7383 				if (!preflight) {
7384 					hibernate_stats.cd_found_xpmapped++;
7385 				}
7386 				discard = FALSE;
7387 			} else {
7388 				if (!preflight) {
7389 					hibernate_stats.cd_skipped_xpmapped++;
7390 				}
7391 			}
7392 		}
7393 	} while (FALSE);
7394 
7395 	if (object) {
7396 		vm_object_unlock(object);
7397 	}
7398 
7399 	return discard;
7400 }
7401 
7402 
7403 static void
7404 hibernate_discard_page(vm_page_t m)
7405 {
7406 	vm_object_t m_object;
7407 
7408 	if (m->vmp_absent || m->vmp_unusual || m->vmp_error) {
7409 		/*
7410 	 * If it's unusual in any way, ignore
7411 		 */
7412 		return;
7413 	}
7414 
7415 	m_object = VM_PAGE_OBJECT(m);
7416 
7417 #if MACH_ASSERT || DEBUG
7418 	if (!vm_object_lock_try(m_object)) {
7419 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7420 	}
7421 #else
7422 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
7423 	 *  makes sure these locks are uncontended before sleep */
7424 #endif /* MACH_ASSERT || DEBUG */
7425 
7426 	if (m->vmp_pmapped == TRUE) {
7427 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7428 	}
7429 
7430 	if (m->vmp_laundry) {
7431 		panic("hibernate_discard_page(%p) laundry", m);
7432 	}
7433 	if (m->vmp_private) {
7434 		panic("hibernate_discard_page(%p) private", m);
7435 	}
7436 	if (m->vmp_fictitious) {
7437 		panic("hibernate_discard_page(%p) fictitious", m);
7438 	}
7439 
7440 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7441 		/* object should be on a queue */
7442 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7443 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7444 		assert(old_queue);
7445 		if (m_object->purgeable_when_ripe) {
7446 			vm_purgeable_token_delete_first(old_queue);
7447 		}
7448 		vm_object_lock_assert_exclusive(m_object);
7449 		m_object->purgable = VM_PURGABLE_EMPTY;
7450 
7451 		/*
7452 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
7453 		 * accounted in the "volatile" ledger, so no change here.
7454 		 * We have to update vm_page_purgeable_count, though, since we're
7455 		 * effectively purging this object.
7456 		 */
7457 		unsigned int delta;
7458 		assert(m_object->resident_page_count >= m_object->wired_page_count);
7459 		delta = (m_object->resident_page_count - m_object->wired_page_count);
7460 		assert(vm_page_purgeable_count >= delta);
7461 		assert(delta > 0);
7462 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7463 	}
7464 
7465 	vm_page_free(m);
7466 
7467 #if MACH_ASSERT || DEBUG
7468 	vm_object_unlock(m_object);
7469 #endif  /* MACH_ASSERT || DEBUG */
7470 }
7471 
7472 /*
7473  *  Grab locks for hibernate_page_list_setall()
7474  */
7475 void
7476 hibernate_vm_lock_queues(void)
7477 {
7478 	vm_object_lock(compressor_object);
7479 	vm_page_lock_queues();
7480 	lck_mtx_lock(&vm_page_queue_free_lock);
7481 	lck_mtx_lock(&vm_purgeable_queue_lock);
7482 
7483 	if (vm_page_local_q) {
7484 		zpercpu_foreach(lq, vm_page_local_q) {
7485 			VPL_LOCK(&lq->vpl_lock);
7486 		}
7487 	}
7488 }
7489 
7490 void
7491 hibernate_vm_unlock_queues(void)
7492 {
7493 	if (vm_page_local_q) {
7494 		zpercpu_foreach(lq, vm_page_local_q) {
7495 			VPL_UNLOCK(&lq->vpl_lock);
7496 		}
7497 	}
7498 	lck_mtx_unlock(&vm_purgeable_queue_lock);
7499 	lck_mtx_unlock(&vm_page_queue_free_lock);
7500 	vm_page_unlock_queues();
7501 	vm_object_unlock(compressor_object);
7502 }
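/*
 * Note the symmetry of the two routines above: the locks are taken in
 * one fixed order (compressor object, page queues, free-page mutex,
 * purgeable-queue mutex, then each per-CPU local queue lock) and are
 * dropped in exactly the reverse order, the standard discipline for
 * avoiding lock-order deadlocks.
 */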
7503 
7504 /*
7505  *  Bits zero in the bitmaps => page needs to be saved. All pages default to being saved;
7506  *  pages known to the VM not to need saving are subtracted.
7507  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
7508  */
7509 
7510 void
7511 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7512     hibernate_page_list_t * page_list_wired,
7513     hibernate_page_list_t * page_list_pal,
7514     boolean_t preflight,
7515     boolean_t will_discard,
7516     uint32_t * pagesOut)
7517 {
7518 	uint64_t start, end, nsec;
7519 	vm_page_t m;
7520 	vm_page_t next;
7521 	uint32_t pages = page_list->page_count;
7522 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7523 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7524 	uint32_t count_wire = pages;
7525 	uint32_t count_discard_active    = 0;
7526 	uint32_t count_discard_inactive  = 0;
7527 	uint32_t count_discard_cleaned   = 0;
7528 	uint32_t count_discard_purgeable = 0;
7529 	uint32_t count_discard_speculative = 0;
7530 	uint32_t count_discard_vm_struct_pages = 0;
7531 	uint32_t i;
7532 	uint32_t             bank;
7533 	hibernate_bitmap_t * bitmap;
7534 	hibernate_bitmap_t * bitmap_wired;
7535 	boolean_t                    discard_all;
7536 	boolean_t            discard;
7537 
7538 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7539 
7540 	if (preflight) {
7541 		page_list       = NULL;
7542 		page_list_wired = NULL;
7543 		page_list_pal   = NULL;
7544 		discard_all     = FALSE;
7545 	} else {
7546 		discard_all     = will_discard;
7547 	}
7548 
7549 #if MACH_ASSERT || DEBUG
7550 	if (!preflight) {
7551 		assert(hibernate_vm_locks_are_safe());
7552 		vm_page_lock_queues();
7553 		if (vm_page_local_q) {
7554 			zpercpu_foreach(lq, vm_page_local_q) {
7555 				VPL_LOCK(&lq->vpl_lock);
7556 			}
7557 		}
7558 	}
7559 #endif  /* MACH_ASSERT || DEBUG */
7560 
7561 
7562 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7563 
7564 	clock_get_uptime(&start);
7565 
7566 	if (!preflight) {
7567 		hibernate_page_list_zero(page_list);
7568 		hibernate_page_list_zero(page_list_wired);
7569 		hibernate_page_list_zero(page_list_pal);
7570 
7571 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7572 		hibernate_stats.cd_pages = pages;
7573 	}
7574 
7575 	if (vm_page_local_q) {
7576 		zpercpu_foreach_cpu(lid) {
7577 			vm_page_reactivate_local(lid, TRUE, !preflight);
7578 		}
7579 	}
7580 
7581 	if (preflight) {
7582 		vm_object_lock(compressor_object);
7583 		vm_page_lock_queues();
7584 		lck_mtx_lock(&vm_page_queue_free_lock);
7585 	}
7586 
7587 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7588 
7589 	hibernation_vmqueues_inspection = TRUE;
7590 
7591 	m = (vm_page_t) hibernate_gobble_queue;
7592 	while (m) {
7593 		pages--;
7594 		count_wire--;
7595 		if (!preflight) {
7596 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7597 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7598 		}
7599 		m = m->vmp_snext;
7600 	}
7601 
7602 	if (!preflight) {
7603 		percpu_foreach(free_pages_head, free_pages) {
7604 			for (m = *free_pages_head; m; m = m->vmp_snext) {
7605 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7606 
7607 				pages--;
7608 				count_wire--;
7609 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7610 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7611 
7612 				hibernate_stats.cd_local_free++;
7613 				hibernate_stats.cd_total_free++;
7614 			}
7615 		}
7616 	}
7617 
7618 	for (i = 0; i < vm_colors; i++) {
7619 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7620 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7621 
7622 			pages--;
7623 			count_wire--;
7624 			if (!preflight) {
7625 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7626 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7627 
7628 				hibernate_stats.cd_total_free++;
7629 			}
7630 		}
7631 	}
7632 
7633 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7634 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7635 
7636 		pages--;
7637 		count_wire--;
7638 		if (!preflight) {
7639 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7640 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7641 
7642 			hibernate_stats.cd_total_free++;
7643 		}
7644 	}
7645 
7646 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
7647 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
7648 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
7649 
7650 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7651 		discard = FALSE;
7652 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
7653 		    && hibernate_consider_discard(m, preflight)) {
7654 			if (!preflight) {
7655 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7656 			}
7657 			count_discard_inactive++;
7658 			discard = discard_all;
7659 		} else {
7660 			count_throttled++;
7661 		}
7662 		count_wire--;
7663 		if (!preflight) {
7664 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7665 		}
7666 
7667 		if (discard) {
7668 			hibernate_discard_page(m);
7669 		}
7670 		m = next;
7671 	}
7672 
7673 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
7674 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7675 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7676 
7677 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7678 		discard = FALSE;
7679 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7680 		    hibernate_consider_discard(m, preflight)) {
7681 			if (!preflight) {
7682 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7683 			}
7684 			if (m->vmp_dirty) {
7685 				count_discard_purgeable++;
7686 			} else {
7687 				count_discard_inactive++;
7688 			}
7689 			discard = discard_all;
7690 		} else {
7691 			count_anonymous++;
7692 		}
7693 		count_wire--;
7694 		if (!preflight) {
7695 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7696 		}
7697 		if (discard) {
7698 			hibernate_discard_page(m);
7699 		}
7700 		m = next;
7701 	}
7702 
7703 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7704 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7705 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7706 
7707 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7708 		discard = FALSE;
7709 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7710 		    hibernate_consider_discard(m, preflight)) {
7711 			if (!preflight) {
7712 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7713 			}
7714 			if (m->vmp_dirty) {
7715 				count_discard_purgeable++;
7716 			} else {
7717 				count_discard_cleaned++;
7718 			}
7719 			discard = discard_all;
7720 		} else {
7721 			count_cleaned++;
7722 		}
7723 		count_wire--;
7724 		if (!preflight) {
7725 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7726 		}
7727 		if (discard) {
7728 			hibernate_discard_page(m);
7729 		}
7730 		m = next;
7731 	}
7732 
7733 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7734 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7735 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7736 
7737 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7738 		discard = FALSE;
7739 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
7740 		    hibernate_consider_discard(m, preflight)) {
7741 			if (!preflight) {
7742 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7743 			}
7744 			if (m->vmp_dirty) {
7745 				count_discard_purgeable++;
7746 			} else {
7747 				count_discard_active++;
7748 			}
7749 			discard = discard_all;
7750 		} else {
7751 			count_active++;
7752 		}
7753 		count_wire--;
7754 		if (!preflight) {
7755 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7756 		}
7757 		if (discard) {
7758 			hibernate_discard_page(m);
7759 		}
7760 		m = next;
7761 	}
7762 
7763 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7764 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7765 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7766 
7767 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7768 		discard = FALSE;
7769 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7770 		    hibernate_consider_discard(m, preflight)) {
7771 			if (!preflight) {
7772 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7773 			}
7774 			if (m->vmp_dirty) {
7775 				count_discard_purgeable++;
7776 			} else {
7777 				count_discard_inactive++;
7778 			}
7779 			discard = discard_all;
7780 		} else {
7781 			count_inactive++;
7782 		}
7783 		count_wire--;
7784 		if (!preflight) {
7785 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7786 		}
7787 		if (discard) {
7788 			hibernate_discard_page(m);
7789 		}
7790 		m = next;
7791 	}
7792 	/* XXX FBDP TODO: secluded queue */
7793 
7794 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7795 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7796 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7797 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
7798 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7799 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
7800 
7801 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7802 			discard = FALSE;
7803 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7804 			    hibernate_consider_discard(m, preflight)) {
7805 				if (!preflight) {
7806 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7807 				}
7808 				count_discard_speculative++;
7809 				discard = discard_all;
7810 			} else {
7811 				count_speculative++;
7812 			}
7813 			count_wire--;
7814 			if (!preflight) {
7815 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7816 			}
7817 			if (discard) {
7818 				hibernate_discard_page(m);
7819 			}
7820 			m = next;
7821 		}
7822 	}
7823 
7824 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
7825 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
7826 
7827 		count_compressor++;
7828 		count_wire--;
7829 		if (!preflight) {
7830 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7831 		}
7832 	}
7833 
7834 	if (preflight == FALSE && discard_all == TRUE) {
7835 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
7836 
7837 		HIBLOG("hibernate_teardown started\n");
7838 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
7839 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
7840 
7841 		pages -= count_discard_vm_struct_pages;
7842 		count_wire -= count_discard_vm_struct_pages;
7843 
7844 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
7845 
7846 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
7847 	}
7848 
7849 	if (!preflight) {
7850 		// pull wired from hibernate_bitmap
7851 		bitmap = &page_list->bank_bitmap[0];
7852 		bitmap_wired = &page_list_wired->bank_bitmap[0];
7853 		for (bank = 0; bank < page_list->bank_count; bank++) {
7854 			for (i = 0; i < bitmap->bitmapwords; i++) {
7855 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
7856 			}
7857 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
7858 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
7859 		}
7860 	}
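	/*
	 * Example of the merge above (recall a ZERO bit means "save this
	 * page"): page_list 0 / wired 1 stays 0, so the page is saved as
	 * pageable; page_list 0 / wired 0 becomes 1, i.e. the page drops
	 * out of the pageable list because the wired list already saves it.
	 */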
7861 
7862 	// machine dependent adjustments
7863 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
7864 
7865 	if (!preflight) {
7866 		hibernate_stats.cd_count_wire = count_wire;
7867 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
7868 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
7869 	}
7870 
7871 	clock_get_uptime(&end);
7872 	absolutetime_to_nanoseconds(end - start, &nsec);
7873 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
7874 
7875 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d\n",
7876 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
7877 	    discard_all ? "did" : "could",
7878 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
7879 
7880 	if (hibernate_stats.cd_skipped_xpmapped) {
7881 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
7882 	}
7883 
7884 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned;
7885 
7886 	if (preflight && will_discard) {
7887 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
7888 		/*
7889 		 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT xpmapped pages in the hibernation image
7890 		 * even if these are clean and so we need to size the hibernation image accordingly.
7891 		 *
7892 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
7893 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
7894 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
7895 		 * clean xpmapped pages.
7896 		 *
7897 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
7898 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase.
7899 		 */
7900 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
7901 	}
7902 
7903 	hibernation_vmqueues_inspection = FALSE;
7904 
7905 #if MACH_ASSERT || DEBUG
7906 	if (!preflight) {
7907 		if (vm_page_local_q) {
7908 			zpercpu_foreach(lq, vm_page_local_q) {
7909 				VPL_UNLOCK(&lq->vpl_lock);
7910 			}
7911 		}
7912 		vm_page_unlock_queues();
7913 	}
7914 #endif  /* MACH_ASSERT || DEBUG */
7915 
7916 	if (preflight) {
7917 		lck_mtx_unlock(&vm_page_queue_free_lock);
7918 		vm_page_unlock_queues();
7919 		vm_object_unlock(compressor_object);
7920 	}
7921 
7922 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
7923 }
7924 
7925 void
7926 hibernate_page_list_discard(hibernate_page_list_t * page_list)
7927 {
7928 	uint64_t  start, end, nsec;
7929 	vm_page_t m;
7930 	vm_page_t next;
7931 	uint32_t  i;
7932 	uint32_t  count_discard_active    = 0;
7933 	uint32_t  count_discard_inactive  = 0;
7934 	uint32_t  count_discard_purgeable = 0;
7935 	uint32_t  count_discard_cleaned   = 0;
7936 	uint32_t  count_discard_speculative = 0;
7937 
7938 
7939 #if MACH_ASSERT || DEBUG
7940 	vm_page_lock_queues();
7941 	if (vm_page_local_q) {
7942 		zpercpu_foreach(lq, vm_page_local_q) {
7943 			VPL_LOCK(&lq->vpl_lock);
7944 		}
7945 	}
7946 #endif  /* MACH_ASSERT || DEBUG */
7947 
7948 	clock_get_uptime(&start);
7949 
7950 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
7951 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7952 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7953 
7954 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7955 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7956 			if (m->vmp_dirty) {
7957 				count_discard_purgeable++;
7958 			} else {
7959 				count_discard_inactive++;
7960 			}
7961 			hibernate_discard_page(m);
7962 		}
7963 		m = next;
7964 	}
7965 
7966 	for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7967 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7968 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7969 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
7970 
7971 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7972 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7973 				count_discard_speculative++;
7974 				hibernate_discard_page(m);
7975 			}
7976 			m = next;
7977 		}
7978 	}
7979 
7980 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7981 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7982 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7983 
7984 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7985 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
7986 			if (m->vmp_dirty) {
7987 				count_discard_purgeable++;
7988 			} else {
7989 				count_discard_inactive++;
7990 			}
7991 			hibernate_discard_page(m);
7992 		}
7993 		m = next;
7994 	}
7995 	/* XXX FBDP TODO: secluded queue */
7996 
7997 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7998 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7999 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8000 
8001 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8002 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8003 			if (m->vmp_dirty) {
8004 				count_discard_purgeable++;
8005 			} else {
8006 				count_discard_active++;
8007 			}
8008 			hibernate_discard_page(m);
8009 		}
8010 		m = next;
8011 	}
8012 
8013 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8014 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8015 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8016 
8017 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8018 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8019 			if (m->vmp_dirty) {
8020 				count_discard_purgeable++;
8021 			} else {
8022 				count_discard_cleaned++;
8023 			}
8024 			hibernate_discard_page(m);
8025 		}
8026 		m = next;
8027 	}
8028 
8029 #if MACH_ASSERT || DEBUG
8030 	if (vm_page_local_q) {
8031 		zpercpu_foreach(lq, vm_page_local_q) {
8032 			VPL_UNLOCK(&lq->vpl_lock);
8033 		}
8034 	}
8035 	vm_page_unlock_queues();
8036 #endif  /* MACH_ASSERT || DEBUG */
8037 
8038 	clock_get_uptime(&end);
8039 	absolutetime_to_nanoseconds(end - start, &nsec);
8040 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8041 	    nsec / 1000000ULL,
8042 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8043 }
8044 
8045 boolean_t       hibernate_paddr_map_inited = FALSE;
8046 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8047 vm_page_t       hibernate_rebuild_hash_list = NULL;
8048 
8049 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8050 unsigned int    hibernate_teardown_found_created_pages = 0;
8051 unsigned int    hibernate_teardown_found_free_pages = 0;
8052 unsigned int    hibernate_teardown_vm_page_free_count;
8053 
8054 
8055 struct ppnum_mapping {
8056 	struct ppnum_mapping    *ppnm_next;
8057 	ppnum_t                 ppnm_base_paddr;
8058 	unsigned int            ppnm_sindx;
8059 	unsigned int            ppnm_eindx;
8060 };
8061 
8062 struct ppnum_mapping    *ppnm_head;
8063 struct ppnum_mapping    *ppnm_last_found = NULL;
8064 
8065 
8066 void
8067 hibernate_create_paddr_map(void)
8068 {
8069 	unsigned int    i;
8070 	ppnum_t         next_ppnum_in_run = 0;
8071 	struct ppnum_mapping *ppnm = NULL;
8072 
8073 	if (hibernate_paddr_map_inited == FALSE) {
8074 		for (i = 0; i < vm_pages_count; i++) {
8075 			if (ppnm) {
8076 				ppnm->ppnm_eindx = i;
8077 			}
8078 
8079 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8080 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
8081 
8082 				ppnm->ppnm_next = ppnm_head;
8083 				ppnm_head = ppnm;
8084 
8085 				ppnm->ppnm_sindx = i;
8086 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8087 			}
8088 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8089 		}
8090 		ppnm->ppnm_eindx = vm_pages_count;
8091 
8092 		hibernate_paddr_map_inited = TRUE;
8093 	}
8094 }
8095 
8096 ppnum_t
8097 hibernate_lookup_paddr(unsigned int indx)
8098 {
8099 	struct ppnum_mapping *ppnm = NULL;
8100 
8101 	ppnm = ppnm_last_found;
8102 
8103 	if (ppnm) {
8104 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8105 			goto done;
8106 		}
8107 	}
8108 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8109 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8110 			ppnm_last_found = ppnm;
8111 			break;
8112 		}
8113 	}
8114 	if (ppnm == NULL) {
8115 		panic("hibernate_lookup_paddr of %d failed", indx);
8116 	}
8117 done:
8118 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8119 }
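/*
 * Worked example of the run-length lookup above, with two hypothetical
 * runs:
 *	{ ppnm_sindx 0,   ppnm_eindx 100, ppnm_base_paddr 0x10  }
 *	{ ppnm_sindx 100, ppnm_eindx 140, ppnm_base_paddr 0x200 }
 * hibernate_lookup_paddr(105) matches the second run and returns
 * 0x200 + (105 - 100) = 0x205.  ppnm_last_found caches the most recent
 * hit because successive lookups tend to land in the same run.
 */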
8120 
8121 
8122 uint32_t
8123 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8124 {
8125 	addr64_t        saddr_aligned;
8126 	addr64_t        eaddr_aligned;
8127 	addr64_t        addr;
8128 	ppnum_t         paddr;
8129 	unsigned int    mark_as_unneeded_pages = 0;
8130 
8131 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8132 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
8133 
8134 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8135 		paddr = pmap_find_phys(kernel_pmap, addr);
8136 
8137 		assert(paddr);
8138 
8139 		hibernate_page_bitset(page_list, TRUE, paddr);
8140 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
8141 
8142 		mark_as_unneeded_pages++;
8143 	}
8144 	return mark_as_unneeded_pages;
8145 }
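/*
 * The rounding above is deliberately conservative: saddr is rounded up
 * and eaddr rounded down, so only pages lying wholly inside
 * [saddr, eaddr) are marked unneeded.  With 4KB pages, for example,
 * (0x1100, 0x4100) aligns to [0x2000, 0x4000) and marks exactly two
 * pages; the partial pages at either end are still saved.
 */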
8146 
8147 
8148 void
8149 hibernate_hash_insert_page(vm_page_t mem)
8150 {
8151 	vm_page_bucket_t *bucket;
8152 	int             hash_id;
8153 	vm_object_t     m_object;
8154 
8155 	m_object = VM_PAGE_OBJECT(mem);
8156 
8157 	assert(mem->vmp_hashed);
8158 	assert(m_object);
8159 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
8160 
8161 	/*
8162 	 *	Insert it into the object/offset hash table
8163 	 */
8164 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
8165 	bucket = &vm_page_buckets[hash_id];
8166 
8167 	mem->vmp_next_m = bucket->page_list;
8168 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
8169 }
8170 
8171 
8172 void
8173 hibernate_free_range(int sindx, int eindx)
8174 {
8175 	vm_page_t       mem;
8176 	unsigned int    color;
8177 
8178 	while (sindx < eindx) {
8179 		mem = &vm_pages[sindx];
8180 
8181 		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8182 
8183 		mem->vmp_lopage = FALSE;
8184 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8185 
8186 		color = VM_PAGE_GET_COLOR(mem);
8187 #if defined(__x86_64__)
8188 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8189 #else
8190 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8191 #endif
8192 		vm_page_free_count++;
8193 
8194 		sindx++;
8195 	}
8196 }
8197 
8198 void
8199 hibernate_rebuild_vm_structs(void)
8200 {
8201 	int             i, cindx, sindx, eindx;
8202 	vm_page_t       mem, tmem, mem_next;
8203 	AbsoluteTime    startTime, endTime;
8204 	uint64_t        nsec;
8205 
8206 	if (hibernate_rebuild_needed == FALSE) {
8207 		return;
8208 	}
8209 
8210 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8211 	HIBLOG("hibernate_rebuild started\n");
8212 
8213 	clock_get_uptime(&startTime);
8214 
8215 	pal_hib_rebuild_pmap_structs();
8216 
8217 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8218 	eindx = vm_pages_count;
8219 
8220 	/*
8221 	 * Mark all the vm_pages[] that have not been initialized yet as being
8222 	 * transient. This is needed to ensure that the buddy page search is correct.
8223 	 * Without this, random data in these vm_pages[] can trip up the buddy search.
8224 	 */
8225 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8226 		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8227 	}
8228 
8229 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8230 		mem = &vm_pages[cindx];
8231 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8232 		/*
8233 		 * hibernate_teardown_vm_structs recorded, in vmp_next_m, the
8234 		 * location where this vm_page_t originally lived and must be restored.
8235 		 */
8236 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8237 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8238 
8239 		sindx = (int)(tmem - &vm_pages[0]);
8240 
8241 		if (mem != tmem) {
8242 			/*
8243 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8244 			 * so move it back to its real location
8245 			 */
8246 			*tmem = *mem;
8247 			mem = tmem;
8248 		}
8249 		if (mem->vmp_hashed) {
8250 			hibernate_hash_insert_page(mem);
8251 		}
8252 		/*
8253 		 * the 'hole' between this vm_page_t and the previous
8254 		 * vm_page_t we moved needs to be initialized as
8255 		 * a range of free vm_page_t's
8256 		 */
8257 		hibernate_free_range(sindx + 1, eindx);
8258 
8259 		eindx = sindx;
8260 	}
8261 	if (sindx) {
8262 		hibernate_free_range(0, sindx);
8263 	}
8264 
8265 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8266 
8267 	/*
8268 	 * process the list of vm_page_t's that were entered in the hash,
8269 	 * but were not located in the vm_pages array... these are
8270 	 * vm_page_t's that were created on the fly (i.e. fictitious)
8271 	 */
8272 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8273 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8274 
8275 		mem->vmp_next_m = 0;
8276 		hibernate_hash_insert_page(mem);
8277 	}
8278 	hibernate_rebuild_hash_list = NULL;
8279 
8280 	clock_get_uptime(&endTime);
8281 	SUB_ABSOLUTETIME(&endTime, &startTime);
8282 	absolutetime_to_nanoseconds(endTime, &nsec);
8283 
8284 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8285 
8286 	hibernate_rebuild_needed = FALSE;
8287 
8288 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8289 }
8290 
8291 uint32_t
8292 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8293 {
8294 	unsigned int    i;
8295 	unsigned int    compact_target_indx;
8296 	vm_page_t       mem, mem_next;
8297 	vm_page_bucket_t *bucket;
8298 	unsigned int    mark_as_unneeded_pages = 0;
8299 	unsigned int    unneeded_vm_page_bucket_pages = 0;
8300 	unsigned int    unneeded_vm_pages_pages = 0;
8301 	unsigned int    unneeded_pmap_pages = 0;
8302 	addr64_t        start_of_unneeded = 0;
8303 	addr64_t        end_of_unneeded = 0;
8304 
8305 
8306 	if (hibernate_should_abort()) {
8307 		return 0;
8308 	}
8309 
8310 	hibernate_rebuild_needed = TRUE;
8311 
8312 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8313 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8314 	    vm_page_cleaned_count, compressor_object->resident_page_count);
8315 
8316 	for (i = 0; i < vm_page_bucket_count; i++) {
8317 		bucket = &vm_page_buckets[i];
8318 
8319 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8320 			assert(mem->vmp_hashed);
8321 
8322 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8323 
8324 			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8325 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8326 				hibernate_rebuild_hash_list = mem;
8327 			}
8328 		}
8329 	}
8330 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8331 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8332 
8333 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
8334 
8335 	compact_target_indx = 0;
8336 
8337 	for (i = 0; i < vm_pages_count; i++) {
8338 		mem = &vm_pages[i];
8339 
8340 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8341 			unsigned int color;
8342 
8343 			assert(mem->vmp_busy);
8344 			assert(!mem->vmp_lopage);
8345 
8346 			color = VM_PAGE_GET_COLOR(mem);
8347 
8348 			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8349 
8350 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8351 
8352 			vm_page_free_count--;
8353 
8354 			hibernate_teardown_found_free_pages++;
8355 
8356 			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8357 				compact_target_indx = i;
8358 			}
8359 		} else {
8360 			/*
8361 			 * record this vm_page_t's original location
8362 			 * we need this even if it doesn't get moved
8363 			 * as an indicator to the rebuild function that
8364 			 * we don't have to move it
8365 			 */
8366 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8367 
8368 			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8369 				/*
8370 				 * we've got a hole to fill, so
8371 				 * move this vm_page_t to its new home
8372 				 */
8373 				vm_pages[compact_target_indx] = *mem;
8374 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8375 
8376 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8377 				compact_target_indx++;
8378 			} else {
8379 				hibernate_teardown_last_valid_compact_indx = i;
8380 			}
8381 		}
8382 	}
8383 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8384 	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8385 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
8386 
8387 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8388 
8389 	if (start_of_unneeded) {
8390 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8391 		mark_as_unneeded_pages += unneeded_pmap_pages;
8392 	}
8393 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8394 
8395 	return mark_as_unneeded_pages;
8396 }
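/*
 * Sketch of the compaction above for a five-slot vm_pages[] where F is
 * a free page and U is in use:
 *
 *	before:  [U0][F ][U2][F ][U4]
 *	after:   [U0][U2][U4][F ][F ]	last_valid_compact_indx = 2
 *
 * Every in-use entry first records its original slot in vmp_next_m
 * (so U2, copied from slot 2 into slot 1, still remembers slot 2);
 * hibernate_rebuild_vm_structs() consumes exactly that breadcrumb to
 * undo the moves on wake, and everything past the last valid index is
 * marked as unneeded for the image.
 */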
8397 
8398 
8399 #endif /* HIBERNATION */
8400 
8401 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8402 
8403 #include <mach_vm_debug.h>
8404 #if     MACH_VM_DEBUG
8405 
8406 #include <mach_debug/hash_info.h>
8407 #include <vm/vm_debug.h>
8408 
8409 /*
8410  *	Routine:	vm_page_info
8411  *	Purpose:
8412  *		Return information about the global VP table.
8413  *		Fills the buffer with as much information as possible
8414  *		and returns the desired size of the buffer.
8415  *	Conditions:
8416  *		Nothing locked.  The caller should provide
8417  *		possibly-pageable memory.
8418  */
8419 
8420 unsigned int
8421 vm_page_info(
8422 	hash_info_bucket_t *info,
8423 	unsigned int count)
8424 {
8425 	unsigned int i;
8426 	lck_spin_t      *bucket_lock;
8427 
8428 	if (vm_page_bucket_count < count) {
8429 		count = vm_page_bucket_count;
8430 	}
8431 
8432 	for (i = 0; i < count; i++) {
8433 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
8434 		unsigned int bucket_count = 0;
8435 		vm_page_t m;
8436 
8437 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8438 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8439 
8440 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8441 		    m != VM_PAGE_NULL;
8442 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8443 			bucket_count++;
8444 		}
8445 
8446 		lck_spin_unlock(bucket_lock);
8447 
8448 		/* don't touch pageable memory while holding locks */
8449 		info[i].hib_count = bucket_count;
8450 	}
8451 
8452 	return vm_page_bucket_count;
8453 }
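
/*
 * Editor's illustrative caller sketch (hypothetical, not in the original
 * source): size the buffer with a zero-length probe, then fill it.
 */
#if 0
	hash_info_bucket_t *buf;
	unsigned int n;

	n = vm_page_info(NULL, 0);	/* count == 0, so this only returns the bucket count */
	buf = kalloc_data(n * sizeof(*buf), Z_WAITOK);
	(void) vm_page_info(buf, n);	/* fills buf[i].hib_count for each bucket */
	kfree_data(buf, n * sizeof(*buf));
#endif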
8454 #endif  /* MACH_VM_DEBUG */
8455 
8456 #if VM_PAGE_BUCKETS_CHECK
8457 void
8458 vm_page_buckets_check(void)
8459 {
8460 	unsigned int i;
8461 	vm_page_t p;
8462 	unsigned int p_hash;
8463 	vm_page_bucket_t *bucket;
8464 	lck_spin_t      *bucket_lock;
8465 
8466 	if (!vm_page_buckets_check_ready) {
8467 		return;
8468 	}
8469 
8470 #if HIBERNATION
8471 	if (hibernate_rebuild_needed ||
8472 	    hibernate_rebuild_hash_list) {
8473 		panic("BUCKET_CHECK: hibernation in progress: "
8474 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
8475 		    hibernate_rebuild_needed,
8476 		    hibernate_rebuild_hash_list);
8477 	}
8478 #endif /* HIBERNATION */
8479 
8480 #if VM_PAGE_FAKE_BUCKETS
8481 	char *cp;
8482 	for (cp = (char *) vm_page_fake_buckets_start;
8483 	    cp < (char *) vm_page_fake_buckets_end;
8484 	    cp++) {
8485 		if (*cp != 0x5a) {
8486 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
8487 			    "[0x%llx:0x%llx]\n",
8488 			    cp,
8489 			    (uint64_t) vm_page_fake_buckets_start,
8490 			    (uint64_t) vm_page_fake_buckets_end);
8491 		}
8492 	}
8493 #endif /* VM_PAGE_FAKE_BUCKETS */
8494 
8495 	for (i = 0; i < vm_page_bucket_count; i++) {
8496 		vm_object_t     p_object;
8497 
8498 		bucket = &vm_page_buckets[i];
8499 		if (!bucket->page_list) {
8500 			continue;
8501 		}
8502 
8503 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8504 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8505 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8506 
8507 		while (p != VM_PAGE_NULL) {
8508 			p_object = VM_PAGE_OBJECT(p);
8509 			/* compute the hash up front so the panic below reports a defined value */
8510 			p_hash = vm_page_hash(p_object, p->vmp_offset);
8511 			if (!p->vmp_hashed) {
8512 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8513 				    "hash %d in bucket %d at %p "
8514 				    "is not hashed\n",
8515 				    p, p_object, p->vmp_offset,
8516 				    p_hash, i, bucket);
8517 			}
8518 			if (p_hash != i) {
8519 				panic("BUCKET_CHECK: corruption in bucket %d "
8520 				    "at %p: page %p object %p offset 0x%llx "
8521 				    "hash %d\n",
8522 				    i, bucket, p, p_object, p->vmp_offset,
8523 				    p_hash);
8524 			}
8525 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8526 		}
8527 		lck_spin_unlock(bucket_lock);
8528 	}
8529 
8530 //	printf("BUCKET_CHECK: checked buckets\n");
8531 }
8532 #endif /* VM_PAGE_BUCKETS_CHECK */
8533 
8534 /*
8535  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8536  * local queues if they exist... it's the only spot in the system where we add pages
8537  * to those queues...  once on those queues, those pages can only move to one of the
8538  * global page queues or the free queues... they NEVER move from local q to local q.
8539  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8540  * the global vm_page_queue_lock at this point...  we still need to take the local lock
8541  * in case this operation is being run on a different CPU than the one the local queue
8542  * belongs to, but we don't have to worry about the page moving to a global queue or
8543  * becoming wired while we're grabbing the local lock since those operations would
8544  * require the global vm_page_queue_lock to be held, and we already own it.
8545  *
8546  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8547  * 'wired' and local are ALWAYS mutually exclusive conditions.
8548  */
8549 
8550 #if CONFIG_BACKGROUND_QUEUE
8551 void
8552 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_backgroundq)
8553 #else
8554 void
8555 vm_page_queues_remove(vm_page_t mem, boolean_t __unused remove_from_backgroundq)
8556 #endif
8557 {
8558 	boolean_t       was_pageable = TRUE;
8559 	vm_object_t     m_object;
8560 
8561 	m_object = VM_PAGE_OBJECT(mem);
8562 
8563 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8564 
8565 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8566 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8567 #if CONFIG_BACKGROUND_QUEUE
8568 		if (remove_from_backgroundq == TRUE) {
8569 			vm_page_remove_from_backgroundq(mem);
8570 		}
8571 		if (mem->vmp_on_backgroundq) {
8572 			assert(mem->vmp_backgroundq.next != 0);
8573 			assert(mem->vmp_backgroundq.prev != 0);
8574 		} else {
8575 			assert(mem->vmp_backgroundq.next == 0);
8576 			assert(mem->vmp_backgroundq.prev == 0);
8577 		}
8578 #endif /* CONFIG_BACKGROUND_QUEUE */
8579 		return;
8580 	}
8581 
8582 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8583 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8584 #if CONFIG_BACKGROUND_QUEUE
8585 		assert(mem->vmp_backgroundq.next == 0 &&
8586 		    mem->vmp_backgroundq.prev == 0 &&
8587 		    mem->vmp_on_backgroundq == FALSE);
8588 #endif
8589 		return;
8590 	}
8591 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
8592 		/*
8593 		 * we might put these pages on a list for debugging purposes;
8594 		 * if we do, we'll need to remove this assert
8595 		 */
8596 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8597 #if CONFIG_BACKGROUND_QUEUE
8598 		assert(mem->vmp_backgroundq.next == 0 &&
8599 		    mem->vmp_backgroundq.prev == 0 &&
8600 		    mem->vmp_on_backgroundq == FALSE);
8601 #endif
8602 		return;
8603 	}
8604 
8605 	assert(m_object != compressor_object);
8606 	assert(m_object != kernel_object);
8607 	assert(m_object != vm_submap_object);
8608 	assert(!mem->vmp_fictitious);
8609 
8610 	switch (mem->vmp_q_state) {
8611 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
8612 	{
8613 		struct vpl      *lq;
8614 
8615 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
8616 		VPL_LOCK(&lq->vpl_lock);
8617 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
8618 		mem->vmp_local_id = 0;
8619 		lq->vpl_count--;
8620 		if (m_object->internal) {
8621 			lq->vpl_internal_count--;
8622 		} else {
8623 			lq->vpl_external_count--;
8624 		}
8625 		VPL_UNLOCK(&lq->vpl_lock);
8626 		was_pageable = FALSE;
8627 		break;
8628 	}
8629 	case VM_PAGE_ON_ACTIVE_Q:
8630 	{
8631 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
8632 		vm_page_active_count--;
8633 		break;
8634 	}
8635 
8636 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
8637 	{
8638 		assert(m_object->internal == TRUE);
8639 
8640 		vm_page_inactive_count--;
8641 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
8642 		vm_page_anonymous_count--;
8643 
8644 		vm_purgeable_q_advance_all();
8645 		vm_page_balance_inactive(3);
8646 		break;
8647 	}
8648 
8649 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
8650 	{
8651 		assert(m_object->internal == FALSE);
8652 
8653 		vm_page_inactive_count--;
8654 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
8655 		vm_purgeable_q_advance_all();
8656 		vm_page_balance_inactive(3);
8657 		break;
8658 	}
8659 
8660 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
8661 	{
8662 		assert(m_object->internal == FALSE);
8663 
8664 		vm_page_inactive_count--;
8665 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
8666 		vm_page_cleaned_count--;
8667 		vm_page_balance_inactive(3);
8668 		break;
8669 	}
8670 
8671 	case VM_PAGE_ON_THROTTLED_Q:
8672 	{
8673 		assert(m_object->internal == TRUE);
8674 
8675 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
8676 		vm_page_throttled_count--;
8677 		was_pageable = FALSE;
8678 		break;
8679 	}
8680 
8681 	case VM_PAGE_ON_SPECULATIVE_Q:
8682 	{
8683 		assert(m_object->internal == FALSE);
8684 
8685 		vm_page_remque(&mem->vmp_pageq);
8686 		vm_page_speculative_count--;
8687 		vm_page_balance_inactive(3);
8688 		break;
8689 	}
8690 
8691 #if CONFIG_SECLUDED_MEMORY
8692 	case VM_PAGE_ON_SECLUDED_Q:
8693 	{
8694 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
8695 		vm_page_secluded_count--;
8696 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
8697 		if (m_object == VM_OBJECT_NULL) {
8698 			vm_page_secluded_count_free--;
8699 			was_pageable = FALSE;
8700 		} else {
8701 			assert(!m_object->internal);
8702 			vm_page_secluded_count_inuse--;
8703 			was_pageable = FALSE;
8704 //			was_pageable = TRUE;
8705 		}
8706 		break;
8707 	}
8708 #endif /* CONFIG_SECLUDED_MEMORY */
8709 
8710 	default:
8711 	{
8712 		/*
8713 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
8714 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
8715 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
8716 		 *              either first remove it (it needs both the page queues lock and the object lock to do
8717 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
8718 		 *
8719 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
8720 		 *	or any of the undefined states
8721 		 */
8722 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
8723 		break;
8724 	}
8725 	}
8726 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8727 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
8728 
8729 #if CONFIG_BACKGROUND_QUEUE
8730 	if (remove_from_backgroundq == TRUE) {
8731 		vm_page_remove_from_backgroundq(mem);
8732 	}
8733 #endif
8734 	if (was_pageable) {
8735 		if (m_object->internal) {
8736 			vm_page_pageable_internal_count--;
8737 		} else {
8738 			vm_page_pageable_external_count--;
8739 		}
8740 	}
8741 }
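
/*
 * Editor's illustrative usage sketch (hypothetical, not in the original
 * source): callers must already hold the global page queues lock, per the
 * LCK_MTX_ASSERT at the top of vm_page_queues_remove().
 */
#if 0
	vm_page_lock_queues();
	vm_page_queues_remove(mem, TRUE);	/* mem is now VM_PAGE_NOT_ON_Q */
	vm_page_unlock_queues();
#endif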
8742 
8743 void
8744 vm_page_remove_internal(vm_page_t page)
8745 {
8746 	vm_object_t __object = VM_PAGE_OBJECT(page);
8747 	if (page == __object->memq_hint) {
8748 		vm_page_t       __new_hint;
8749 		vm_page_queue_entry_t   __qe;
8750 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
8751 		if (vm_page_queue_end(&__object->memq, __qe)) {
8752 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
8753 			if (vm_page_queue_end(&__object->memq, __qe)) {
8754 				__qe = NULL;
8755 			}
8756 		}
8757 		__new_hint = (vm_page_t)((uintptr_t) __qe);
8758 		__object->memq_hint = __new_hint;
8759 	}
8760 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
8761 #if CONFIG_SECLUDED_MEMORY
8762 	if (__object->eligible_for_secluded) {
8763 		vm_page_secluded.eligible_for_secluded--;
8764 	}
8765 #endif /* CONFIG_SECLUDED_MEMORY */
8766 }
8767 
8768 void
8769 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
8770 {
8771 	vm_object_t     m_object;
8772 
8773 	m_object = VM_PAGE_OBJECT(mem);
8774 
8775 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8776 	assert(!mem->vmp_fictitious);
8777 	assert(!mem->vmp_laundry);
8778 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8779 	vm_page_check_pageable_safe(mem);
8780 
8781 	if (m_object->internal) {
8782 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
8783 
8784 		if (first == TRUE) {
8785 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
8786 		} else {
8787 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
8788 		}
8789 
8790 		vm_page_anonymous_count++;
8791 		vm_page_pageable_internal_count++;
8792 	} else {
8793 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
8794 
8795 		if (first == TRUE) {
8796 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
8797 		} else {
8798 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
8799 		}
8800 
8801 		vm_page_pageable_external_count++;
8802 	}
8803 	vm_page_inactive_count++;
8804 	token_new_pagecount++;
8805 
8806 #if CONFIG_BACKGROUND_QUEUE
8807 	if (mem->vmp_in_background) {
8808 		vm_page_add_to_backgroundq(mem, FALSE);
8809 	}
8810 #endif
8811 }
8812 
8813 void
8814 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
8815 {
8816 	vm_object_t     m_object;
8817 
8818 	m_object = VM_PAGE_OBJECT(mem);
8819 
8820 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8821 	assert(!mem->vmp_fictitious);
8822 	assert(!mem->vmp_laundry);
8823 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8824 	vm_page_check_pageable_safe(mem);
8825 
8826 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
8827 	if (first == TRUE) {
8828 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
8829 	} else {
8830 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
8831 	}
8832 	vm_page_active_count++;
8833 
8834 	if (m_object->internal) {
8835 		vm_page_pageable_internal_count++;
8836 	} else {
8837 		vm_page_pageable_external_count++;
8838 	}
8839 
8840 #if CONFIG_BACKGROUND_QUEUE
8841 	if (mem->vmp_in_background) {
8842 		vm_page_add_to_backgroundq(mem, FALSE);
8843 	}
8844 #endif
8845 	vm_page_balance_inactive(3);
8846 }
8847 
8848 /*
8849  * Pages from special kernel objects shouldn't
8850  * be placed on pageable queues.
8851  */
8852 void
8853 vm_page_check_pageable_safe(vm_page_t page)
8854 {
8855 	vm_object_t     page_object;
8856 
8857 	page_object = VM_PAGE_OBJECT(page);
8858 
8859 	if (page_object == kernel_object) {
8860 		panic("vm_page_check_pageable_safe: trying to add page " \
8861 		    "from kernel object (%p) to pageable queue", kernel_object);
8862 	}
8863 
8864 	if (page_object == compressor_object) {
8865 		panic("vm_page_check_pageable_safe: trying to add page " \
8866 		    "from compressor object (%p) to pageable queue", compressor_object);
8867 	}
8868 
8869 	if (page_object == vm_submap_object) {
8870 		panic("vm_page_check_pageable_safe: trying to add page " \
8871 		    "from submap object (%p) to pageable queue", vm_submap_object);
8872 	}
8873 }
8874 
8875 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
8876 * wired page diagnose
8877 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8878 
8879 #include <libkern/OSKextLibPrivate.h>
8880 
8881 #define KA_SIZE(namelen, subtotalscount)        \
8882 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
8883 
8884 #define KA_NAME(alloc)  \
8885 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
8886 
8887 #define KA_NAME_LEN(alloc)      \
8888     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
8889 
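/*
 * Editor's note (illustrative, not in the original source): a site is one
 * variable-length allocation laid out as
 *
 *     struct vm_allocation_site | subtotals[subtotalscount] | name | NUL
 *
 * so, e.g., for namelen == 7 and subtotalscount == 2:
 *
 *     KA_SIZE(7, 2) == sizeof(struct vm_allocation_site)
 *                      + 2 * sizeof(struct vm_allocation_total)
 *                      + 7 + 1
 *
 * and KA_NAME() points at the byte just past the subtotals array.
 */
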
8890 vm_tag_t
8891 vm_tag_bt(void)
8892 {
8893 	uintptr_t* frameptr;
8894 	uintptr_t* frameptr_next;
8895 	uintptr_t retaddr;
8896 	uintptr_t kstackb, kstackt;
8897 	const vm_allocation_site_t * site;
8898 	thread_t cthread;
8899 	kern_allocation_name_t name;
8900 
8901 	cthread = current_thread();
8902 	if (__improbable(cthread == NULL)) {
8903 		return VM_KERN_MEMORY_OSFMK;
8904 	}
8905 
8906 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
8907 		if (!name->tag) {
8908 			vm_tag_alloc(name);
8909 		}
8910 		return name->tag;
8911 	}
8912 
8913 	kstackb = cthread->kernel_stack;
8914 	kstackt = kstackb + kernel_stack_size;
8915 
8916 	/* Load stack frame pointer (EBP on x86) into frameptr */
8917 	frameptr = __builtin_frame_address(0);
8918 	site = NULL;
8919 	while (frameptr != NULL) {
8920 		/* Verify thread stack bounds */
8921 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
8922 			break;
8923 		}
8924 
8925 		/* Next frame pointer is pointed to by the previous one */
8926 		frameptr_next = (uintptr_t*) *frameptr;
8927 
8928 		/* Pull return address from one spot above the frame pointer */
8929 		retaddr = *(frameptr + 1);
8930 
8931 #if defined(HAS_APPLE_PAC)
8932 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
8933 #endif
8934 
8935 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
8936 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
8937 			site = OSKextGetAllocationSiteForCaller(retaddr);
8938 			break;
8939 		}
8940 		frameptr = frameptr_next;
8941 	}
8942 
8943 	return site ? site->tag : VM_KERN_MEMORY_NONE;
8944 }
8945 
8946 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
8947 
8948 void
8949 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
8950 {
8951 	vm_tag_t tag;
8952 	uint64_t avail;
8953 	uint32_t idx;
8954 	vm_allocation_site_t * prev;
8955 
8956 	if (site->tag) {
8957 		return;
8958 	}
8959 
8960 	idx = 0;
8961 	while (TRUE) {
8962 		avail = free_tag_bits[idx];
8963 		if (avail) {
8964 			tag = (vm_tag_t)__builtin_clzll(avail);
8965 			avail &= ~(1ULL << (63 - tag));
8966 			free_tag_bits[idx] = avail;
8967 			tag += (idx << 6);
8968 			break;
8969 		}
8970 		idx++;
8971 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
8972 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
8973 				prev = vm_allocation_sites[idx];
8974 				if (!prev) {
8975 					continue;
8976 				}
8977 				if (!KA_NAME_LEN(prev)) {
8978 					continue;
8979 				}
8980 				if (!prev->tag) {
8981 					continue;
8982 				}
8983 				if (prev->total) {
8984 					continue;
8985 				}
8986 				if (1 != prev->refcount) {
8987 					continue;
8988 				}
8989 
8990 				assert(idx == prev->tag);
8991 				tag = (vm_tag_t)idx;
8992 				prev->tag = VM_KERN_MEMORY_NONE;
8993 				*releasesiteP = prev;
8994 				break;
8995 			}
8996 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
8997 				tag = VM_KERN_MEMORY_ANY;
8998 			}
8999 			break;
9000 		}
9001 	}
9002 	site->tag = tag;
9003 
9004 	OSAddAtomic16(1, &site->refcount);
9005 
9006 	if (VM_KERN_MEMORY_ANY != tag) {
9007 		vm_allocation_sites[tag] = site;
9008 	}
9009 
9010 	if (tag > vm_allocation_tag_highest) {
9011 		vm_allocation_tag_highest = tag;
9012 	}
9013 }
9014 
9015 static void
9016 vm_tag_free_locked(vm_tag_t tag)
9017 {
9018 	uint64_t avail;
9019 	uint32_t idx;
9020 	uint64_t bit;
9021 
9022 	if (VM_KERN_MEMORY_ANY == tag) {
9023 		return;
9024 	}
9025 
9026 	idx = (tag >> 6);
9027 	avail = free_tag_bits[idx];
9028 	tag &= 63;
9029 	bit = (1ULL << (63 - tag));
9030 	assert(!(avail & bit));
9031 	free_tag_bits[idx] = (avail | bit);
9032 }
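
/*
 * Editor's illustrative sketch (not part of the original source): the
 * free-tag bitmap stores tag t in word t >> 6 at bit (63 - (t & 63)), so
 * __builtin_clzll() on a nonzero word yields the lowest free tag in that
 * word.  A self-contained userspace model of the same encoding:
 */
#if 0
#include <stdint.h>

static uint64_t demo_bits[4];		/* covers tags 0..255 */

static void
demo_free(uint32_t t)
{
	demo_bits[t >> 6] |= 1ULL << (63 - (t & 63));
}

static uint32_t
demo_alloc(void)			/* returns ~0u when no tag is free */
{
	for (uint32_t idx = 0; idx < 4; idx++) {
		if (demo_bits[idx]) {
			uint32_t t = (uint32_t)__builtin_clzll(demo_bits[idx]);
			demo_bits[idx] &= ~(1ULL << (63 - t));
			return t + (idx << 6);	/* same math as vm_tag_alloc_locked() */
		}
	}
	return ~0u;
}
#endif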
9033 
9034 static void
9035 vm_tag_init(void)
9036 {
9037 	vm_tag_t tag;
9038 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9039 		vm_tag_free_locked(tag);
9040 	}
9041 
9042 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9043 		vm_tag_free_locked(tag);
9044 	}
9045 }
9046 
9047 vm_tag_t
9048 vm_tag_alloc(vm_allocation_site_t * site)
9049 {
9050 	vm_tag_t tag;
9051 	vm_allocation_site_t * releasesite;
9052 
9053 	if (VM_TAG_BT & site->flags) {
9054 		tag = vm_tag_bt();
9055 		if (VM_KERN_MEMORY_NONE != tag) {
9056 			return tag;
9057 		}
9058 	}
9059 
9060 	if (!site->tag) {
9061 		releasesite = NULL;
9062 		lck_spin_lock(&vm_allocation_sites_lock);
9063 		vm_tag_alloc_locked(site, &releasesite);
9064 		lck_spin_unlock(&vm_allocation_sites_lock);
9065 		if (releasesite) {
9066 			kern_allocation_name_release(releasesite);
9067 		}
9068 	}
9069 
9070 	return site->tag;
9071 }
9072 
9073 void
9074 vm_tag_update_size(vm_tag_t tag, int64_t delta)
9075 {
9076 	vm_allocation_site_t * allocation;
9077 	uint64_t prior;
9078 
9079 	assert(VM_KERN_MEMORY_NONE != tag);
9080 	assert(tag < VM_MAX_TAG_VALUE);
9081 
9082 	allocation = vm_allocation_sites[tag];
9083 	assert(allocation);
9084 
9085 	if (delta < 0) {
9086 		assertf(allocation->total >= ((uint64_t)-delta), "tag %d, site %p", tag, allocation);
9087 	}
9088 	prior = OSAddAtomic64(delta, &allocation->total);
9089 
9090 #if DEBUG || DEVELOPMENT
9091 
9092 	uint64_t new, peak;
9093 	new = prior + delta;
9094 	do {
9095 		peak = allocation->peak;
9096 		if (new <= peak) {
9097 			break;
9098 		}
9099 	} while (!OSCompareAndSwap64(peak, new, &allocation->peak));
9100 
9101 #endif /* DEBUG || DEVELOPMENT */
9102 
9103 	if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9104 		return;
9105 	}
9106 
9107 	if (!prior && !allocation->tag) {
9108 		vm_tag_alloc(allocation);
9109 	}
9110 }
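
/*
 * Editor's illustrative sketch (not part of the original source): the
 * DEBUG/DEVELOPMENT peak tracking above is the classic lock-free
 * "monotonic max" via compare-and-swap, restated on its own:
 */
#if 0
static void
update_peak(volatile uint64_t *peak, uint64_t new_total)
{
	uint64_t old;

	do {
		old = *peak;
		if (new_total <= old) {
			return;		/* another thread already recorded a higher peak */
		}
	} while (!OSCompareAndSwap64(old, new_total, peak));
}
#endif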
9111 
9112 void
9113 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
9114 {
9115 	uint64_t prior;
9116 
9117 	if (delta < 0) {
9118 		assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
9119 	}
9120 	prior = OSAddAtomic64(delta, &allocation->total);
9121 
9122 #if DEBUG || DEVELOPMENT
9123 
9124 	uint64_t new, peak;
9125 	new = prior + delta;
9126 	do {
9127 		peak = allocation->peak;
9128 		if (new <= peak) {
9129 			break;
9130 		}
9131 	} while (!OSCompareAndSwap64(peak, new, &allocation->peak));
9132 
9133 #endif /* DEBUG || DEVELOPMENT */
9134 
9135 	if (!prior && !allocation->tag) {
9136 		vm_tag_alloc(allocation);
9137 	}
9138 }
9139 
9140 #if VM_TAG_SIZECLASSES
9141 
9142 void
9143 vm_allocation_zones_init(void)
9144 {
9145 	kern_return_t ret;
9146 	vm_offset_t       addr;
9147 	vm_size_t     size;
9148 
9149 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9150 	    + 4 * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9151 
9152 	ret = kernel_memory_allocate(kernel_map,
9153 	    &addr, round_page(size), 0,
9154 	    KMA_ZERO, VM_KERN_MEMORY_DIAG);
9155 	assert(KERN_SUCCESS == ret);
9156 
9157 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9158 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9159 
9160 	// prepopulate VM_KERN_MEMORY_DIAG and the three VM_KERN_MEMORY_KALLOC* tags
9161 	// so allocations in vm_tag_update_zone_size() won't recurse
9162 	vm_allocation_zone_totals[VM_KERN_MEMORY_DIAG]   = (vm_allocation_zone_total_t *) addr;
9163 	addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9164 	vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC] = (vm_allocation_zone_total_t *) addr;
9165 	addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9166 	vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC_DATA] = (vm_allocation_zone_total_t *) addr;
9167 	addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9168 	vm_allocation_zone_totals[VM_KERN_MEMORY_KALLOC_TYPE] = (vm_allocation_zone_total_t *) addr;
9169 }
9170 
9171 __attribute__((noinline))
9172 static vm_tag_t
9173 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9174 {
9175 	vm_allocation_zone_total_t *stats;
9176 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9177 
9178 	stats = kalloc_data(size,
9179 	    Z_VM_TAG(VM_KERN_MEMORY_DIAG) | Z_ZERO | flags);
9180 	if (!stats) {
9181 		return VM_KERN_MEMORY_NONE;
9182 	}
9183 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9184 		kfree_data(stats, size);
9185 	}
9186 	return tag;
9187 }
9188 
9189 vm_tag_t
9190 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags)
9191 {
9192 	assert(VM_KERN_MEMORY_NONE != tag);
9193 	assert(tag < VM_MAX_TAG_VALUE);
9194 
9195 	if (zidx >= VM_TAG_SIZECLASSES) {
9196 		return VM_KERN_MEMORY_NONE;
9197 	}
9198 
9199 	if (__probable(vm_allocation_zone_totals[tag])) {
9200 		return tag;
9201 	}
9202 	return vm_tag_zone_stats_alloc(tag, zflags);
9203 }
9204 
9205 void
9206 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9207 {
9208 	vm_allocation_zone_total_t *stats;
9209 	vm_size_t value;
9210 
9211 	assert(VM_KERN_MEMORY_NONE != tag);
9212 	assert(tag < VM_MAX_TAG_VALUE);
9213 
9214 	if (zidx >= VM_TAG_SIZECLASSES) {
9215 		return;
9216 	}
9217 
9218 	stats = vm_allocation_zone_totals[tag];
9219 	assert(stats);
9220 	stats += zidx;
9221 
9222 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9223 	if (delta < 0) {
9224 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9225 		return;
9226 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9227 		os_atomic_max(&stats->vazt_peak, value, relaxed);
9228 	}
9229 }
9230 
9231 #endif /* VM_TAG_SIZECLASSES */
9232 
9233 void
9234 kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
9235 {
9236 	kern_allocation_name_t other;
9237 	struct vm_allocation_total * total;
9238 	uint32_t subidx;
9239 
9240 	subidx = 0;
9241 	assert(VM_KERN_MEMORY_NONE != subtag);
9242 	lck_spin_lock(&vm_allocation_sites_lock);
9243 	for (; subidx < allocation->subtotalscount; subidx++) {
9244 		if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
9245 			allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
9246 			break;
9247 		}
9248 		if (subtag == allocation->subtotals[subidx].tag) {
9249 			break;
9250 		}
9251 	}
9252 	lck_spin_unlock(&vm_allocation_sites_lock);
9253 	assert(subidx < allocation->subtotalscount);
9254 	if (subidx >= allocation->subtotalscount) {
9255 		return;
9256 	}
9257 
9258 	total = &allocation->subtotals[subidx];
9259 	other = vm_allocation_sites[subtag];
9260 	assert(other);
9261 
9262 	if (delta < 0) {
9263 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9264 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9265 	}
9266 	OSAddAtomic64(delta, &other->mapped);
9267 	OSAddAtomic64(delta, &total->total);
9268 }
9269 
9270 const char *
9271 kern_allocation_get_name(kern_allocation_name_t allocation)
9272 {
9273 	return KA_NAME(allocation);
9274 }
9275 
9276 kern_allocation_name_t
9277 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9278 {
9279 	kern_allocation_name_t allocation;
9280 	uint16_t namelen;
9281 
9282 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9283 
9284 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9285 	allocation->refcount       = 1;
9286 	allocation->subtotalscount = subtotalscount;
9287 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9288 	strlcpy(KA_NAME(allocation), name, namelen + 1);
9289 
9290 	return allocation;
9291 }
9292 
9293 void
9294 kern_allocation_name_release(kern_allocation_name_t allocation)
9295 {
9296 	assert(allocation->refcount > 0);
9297 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9298 		kfree_data(allocation,
9299 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9300 	}
9301 }
9302 
9303 vm_tag_t
9304 kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
9305 {
9306 	return vm_tag_alloc(allocation);
9307 }
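
/*
 * Editor's illustrative usage sketch (hypothetical names, not in the
 * original source): create a named accounting bucket, charge pages to it,
 * then drop the reference.
 */
#if 0
	kern_allocation_name_t name;

	name = kern_allocation_name_allocate("com.example.demo", 0);
	(void) kern_allocation_name_get_vm_tag(name);		/* assigns a vm_tag_t lazily */
	kern_allocation_update_size(name, (int64_t)ptoa_64(4));	/* charge 4 pages */
	kern_allocation_update_size(name, -(int64_t)ptoa_64(4));	/* ... and release them */
	kern_allocation_name_release(name);
#endif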
9308 
9309 #if !VM_TAG_ACTIVE_UPDATE
9310 static void
9311 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9312 {
9313 	if (!object->wired_page_count) {
9314 		return;
9315 	}
9316 	if (object != kernel_object) {
9317 		assert(object->wire_tag < num_info);
9318 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9319 	}
9320 }
9321 
9322 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9323     unsigned int num_info, vm_object_t object);
9324 
9325 static void
9326 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9327     vm_page_iterate_proc proc, purgeable_q_t queue,
9328     int group)
9329 {
9330 	vm_object_t object;
9331 
9332 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
9333 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
9334 	    object = (vm_object_t) queue_next(&object->objq)) {
9335 		proc(info, num_info, object);
9336 	}
9337 }
9338 
9339 static void
9340 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9341     vm_page_iterate_proc proc)
9342 {
9343 	vm_object_t     object;
9344 
9345 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9346 	queue_iterate(&vm_objects_wired,
9347 	    object,
9348 	    vm_object_t,
9349 	    wired_objq)
9350 	{
9351 		proc(info, num_info, object);
9352 	}
9353 	lck_spin_unlock(&vm_objects_wired_lock);
9354 }
9355 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9356 
9357 static uint64_t
9358 process_account(mach_memory_info_t * info, unsigned int num_info,
9359     uint64_t zones_collectable_bytes, boolean_t iterated)
9360 {
9361 	size_t                 namelen;
9362 	unsigned int           idx, count, nextinfo;
9363 	vm_allocation_site_t * site;
9364 	lck_spin_lock(&vm_allocation_sites_lock);
9365 
9366 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9367 		site = vm_allocation_sites[idx];
9368 		if (!site) {
9369 			continue;
9370 		}
9371 		info[idx].mapped = site->mapped;
9372 		info[idx].tag    = site->tag;
9373 		if (!iterated) {
9374 			info[idx].size = site->total;
9375 #if DEBUG || DEVELOPMENT
9376 			info[idx].peak = site->peak;
9377 #endif /* DEBUG || DEVELOPMENT */
9378 		} else {
9379 			if (!site->subtotalscount && (site->total != info[idx].size)) {
9380 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9381 				info[idx].size = site->total;
9382 			}
9383 		}
9384 		info[idx].flags |= VM_KERN_SITE_WIRED;
9385 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9386 			info[idx].site   = idx;
9387 			info[idx].flags |= VM_KERN_SITE_TAG;
9388 			if (VM_KERN_MEMORY_ZONE == idx) {
9389 				info[idx].flags |= VM_KERN_SITE_HIDE;
9390 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
9391 				info[idx].collectable_bytes = zones_collectable_bytes;
9392 			}
9393 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9394 			info[idx].site   = 0;
9395 			info[idx].flags |= VM_KERN_SITE_NAMED;
9396 			if (namelen > sizeof(info[idx].name)) {
9397 				namelen = sizeof(info[idx].name);
9398 			}
9399 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9400 		} else if (VM_TAG_KMOD & site->flags) {
9401 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
9402 			info[idx].flags |= VM_KERN_SITE_KMOD;
9403 		} else {
9404 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
9405 			info[idx].flags |= VM_KERN_SITE_KERNEL;
9406 		}
9407 	}
9408 
9409 	nextinfo = (vm_allocation_tag_highest + 1);
9410 	count    = nextinfo;
9411 	if (count >= num_info) {
9412 		count = num_info;
9413 	}
9414 
9415 	for (idx = 0; idx < count; idx++) {
9416 		site = vm_allocation_sites[idx];
9417 		if (!site) {
9418 			continue;
9419 		}
9420 #if VM_TAG_SIZECLASSES
9421 		vm_allocation_zone_total_t * zone;
9422 		unsigned int                 zidx;
9423 
9424 		if (vm_allocation_zone_totals
9425 		    && (zone = vm_allocation_zone_totals[idx])
9426 		    && (nextinfo < num_info)) {
9427 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9428 				if (!zone[zidx].vazt_peak) {
9429 					continue;
9430 				}
9431 				info[nextinfo]        = info[idx];
9432 				info[nextinfo].zone   = (uint16_t)zone_index_from_tag_index(zidx);
9433 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
9434 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
9435 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
9436 				info[nextinfo].size   = zone[zidx].vazt_total;
9437 				info[nextinfo].peak   = zone[zidx].vazt_peak;
9438 				info[nextinfo].mapped = 0;
9439 				nextinfo++;
9440 			}
9441 		}
9442 #endif /* VM_TAG_SIZECLASSES */
9443 		if (site->subtotalscount) {
9444 			uint64_t mapped, mapcost, take;
9445 			uint32_t sub;
9446 			vm_tag_t alloctag;
9447 
9448 			info[idx].size = site->total;
9449 			mapped = info[idx].size;
9450 			info[idx].mapped = mapped;
9451 			mapcost = 0;
9452 			for (sub = 0; sub < site->subtotalscount; sub++) {
9453 				alloctag = site->subtotals[sub].tag;
9454 				assert(alloctag < num_info);
9455 				if (info[alloctag].name[0]) {
9456 					continue;
9457 				}
9458 				take = site->subtotals[sub].total;
9459 				if (take > info[alloctag].size) {
9460 					take = info[alloctag].size;
9461 				}
9462 				if (take > mapped) {
9463 					take = mapped;
9464 				}
9465 				info[alloctag].mapped  -= take;
9466 				info[alloctag].size    -= take;
9467 				mapped                 -= take;
9468 				mapcost                += take;
9469 			}
9470 			info[idx].size = mapcost;
9471 		}
9472 	}
9473 	lck_spin_unlock(&vm_allocation_sites_lock);
9474 
9475 	return 0;
9476 }
9477 
9478 uint32_t
9479 vm_page_diagnose_estimate(void)
9480 {
9481 	vm_allocation_site_t * site;
9482 	uint32_t               count = zone_view_count;
9483 	uint32_t               idx;
9484 
9485 	lck_spin_lock(&vm_allocation_sites_lock);
9486 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9487 		site = vm_allocation_sites[idx];
9488 		if (!site) {
9489 			continue;
9490 		}
9491 		count++;
9492 #if VM_TAG_SIZECLASSES
9493 		if (vm_allocation_zone_totals) {
9494 			vm_allocation_zone_total_t * zone;
9495 			zone = vm_allocation_zone_totals[idx];
9496 			if (!zone) {
9497 				continue;
9498 			}
9499 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9500 				count += (zone[zidx].vazt_peak != 0);
9501 			}
9502 		}
9503 #endif
9504 	}
9505 	lck_spin_unlock(&vm_allocation_sites_lock);
9506 
9507 	/* some slop for new tags created */
9508 	count += 8;
9509 	count += VM_KERN_COUNTER_COUNT;
9510 
9511 	return count;
9512 }
9513 
9514 static void
9515 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9516     bool percpu)
9517 {
9518 	zpercpu_foreach(zs, zstats) {
9519 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9520 	}
9521 	if (percpu) {
9522 		info->size *= zpercpu_count();
9523 	}
9524 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9525 }
9526 
9527 static void
9528 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
9529 {
9530 	vm_page_diagnose_zone_stats(info, z->z_stats, z->z_percpu);
9531 	snprintf(info->name, sizeof(info->name),
9532 	    "%s%s[raw]", zone_heap_name(z), z->z_name);
9533 }
9534 
9535 static int
9536 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
9537 {
9538 	struct kheap_zones *zones = kheap->kh_zones;
9539 	int i = 0;
9540 
9541 	for (; i < zones->max_k_zone; i++) {
9542 		vm_page_diagnose_zone(info + i, zones->k_zone[i]);
9543 	}
9544 
9545 	for (kalloc_heap_t kh = zones->views; kh; kh = kh->kh_next, i++) {
9546 		vm_page_diagnose_zone_stats(info + i, kh->kh_stats, false);
9547 		snprintf(info[i].name, sizeof(info[i].name),
9548 		    "%skalloc[%s]", kheap->kh_name, kh->kh_name);
9549 	}
9550 
9551 	return i;
9552 }
9553 
9554 kern_return_t
9555 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
9556 {
9557 	uint64_t                 wired_size;
9558 	uint64_t                 wired_managed_size;
9559 	uint64_t                 wired_reserved_size;
9560 	boolean_t                iterate;
9561 	mach_memory_info_t     * counts;
9562 	uint32_t                 i;
9563 
9564 	bzero(info, num_info * sizeof(mach_memory_info_t));
9565 
9566 	if (!vm_page_wire_count_initial) {
9567 		return KERN_ABORTED;
9568 	}
9569 
9570 #if !XNU_TARGET_OS_OSX
9571 	wired_size          = ptoa_64(vm_page_wire_count);
9572 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
9573 #else /* !XNU_TARGET_OS_OSX */
9574 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
9575 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
9576 #endif /* !XNU_TARGET_OS_OSX */
9577 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
9578 
9579 	wired_size += booter_size;
9580 
9581 	assert(num_info >= VM_KERN_COUNTER_COUNT);
9582 	num_info -= VM_KERN_COUNTER_COUNT;
9583 	counts = &info[num_info];
9584 
9585 #define SET_COUNT(xcount, xsize, xflags)                        \
9586     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
9587     counts[xcount].site  = (xcount);                            \
9588     counts[xcount].size  = (xsize);                                 \
9589     counts[xcount].mapped  = (xsize);                           \
9590     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
9591 
9592 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
9593 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
9594 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
9595 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
9596 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
9597 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
9598 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
9599 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
9600 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
9601 
9602 #define SET_MAP(xcount, xsize, xfree, xlargest) \
9603     counts[xcount].site    = (xcount);                  \
9604     counts[xcount].size    = (xsize);                   \
9605     counts[xcount].mapped  = (xsize);                   \
9606     counts[xcount].free    = (xfree);                   \
9607     counts[xcount].largest = (xlargest);                \
9608     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
9609 
9610 	vm_map_size_t map_size, map_free, map_largest;
9611 
9612 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
9613 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
9614 
9615 	vm_map_sizes(kalloc_large_map_get(), &map_size, &map_free, &map_largest);
9616 	SET_MAP(VM_KERN_COUNT_MAP_KALLOC_LARGE, map_size, map_free, map_largest);
9617 
9618 	vm_map_sizes(kernel_data_map_get(), &map_size, &map_free, &map_largest);
9619 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL_DATA, map_size, map_free, map_largest);
9620 
9621 	vm_map_sizes(kalloc_large_data_map_get(), &map_size, &map_free, &map_largest);
9622 	SET_MAP(VM_KERN_COUNT_MAP_KALLOC_LARGE_DATA, map_size, map_free, map_largest);
9623 
9624 	zone_map_sizes(&map_size, &map_free, &map_largest);
9625 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
9626 
9627 	assert(num_info >= zone_view_count);
9628 	num_info -= zone_view_count;
9629 	counts = &info[num_info];
9630 	i = 0;
9631 
9632 	i += vm_page_diagnose_heap(counts + i, KHEAP_DEFAULT);
9633 	if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
9634 		i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
9635 	}
9636 	if (KHEAP_KEXT->kh_heap_id == KHEAP_ID_KEXT) {
9637 		i += vm_page_diagnose_heap(counts + i, KHEAP_KEXT);
9638 	}
9639 	assert(i <= zone_view_count);
9640 
9641 	zone_index_foreach(zidx) {
9642 		zone_t z = &zone_array[zidx];
9643 		zone_security_flags_t zsflags = zone_security_array[zidx];
9644 		zone_view_t zv = z->z_views;
9645 
9646 		if (zv == NULL) {
9647 			continue;
9648 		}
9649 
9650 		zone_stats_t zv_stats_head = z->z_stats;
9651 		bool has_raw_view = false;
9652 
9653 		for (; zv; zv = zv->zv_next) {
9654 			/*
9655 			 * kalloc_types that allocate from the same zone are linked
9656 			 * as views. Only print the ones that have their own stats.
9657 			 */
9658 			if (zv->zv_stats == zv_stats_head) {
9659 				continue;
9660 			}
9661 			has_raw_view = true;
9662 			vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
9663 			    z->z_percpu);
9664 			snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
9665 			    zone_heap_name(z), z->z_name, zv->zv_name);
9666 			i++;
9667 			assert(i <= zone_view_count);
9668 		}
9669 
9670 		/*
9671 		 * Print raw views for non-kalloc or kalloc_type zones
9672 		 */
9673 		bool kalloc_type = zsflags.z_kalloc_type;
9674 		if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
9675 		    (kalloc_type && has_raw_view)) {
9676 			vm_page_diagnose_zone(counts + i, z);
9677 			i++;
9678 			assert(i <= zone_view_count);
9679 		}
9680 	}
9681 
9682 	iterate = !VM_TAG_ACTIVE_UPDATE;
9683 	if (iterate) {
9684 		enum                       { kMaxKernelDepth = 1 };
9685 		vm_map_t                     maps[kMaxKernelDepth];
9686 		vm_map_entry_t               entries[kMaxKernelDepth];
9687 		vm_map_t                     map;
9688 		vm_map_entry_t               entry;
9689 		vm_object_offset_t           offset;
9690 		vm_page_t                    page;
9691 		int                          stackIdx, count;
9692 
9693 #if !VM_TAG_ACTIVE_UPDATE
9694 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
9695 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9696 
9697 		map = kernel_map;
9698 		stackIdx = 0;
9699 		while (map) {
9700 			vm_map_lock(map);
9701 			for (entry = map->hdr.links.next; map; entry = entry->links.next) {
9702 				if (entry->is_sub_map) {
9703 					assert(stackIdx < kMaxKernelDepth);
9704 					maps[stackIdx] = map;
9705 					entries[stackIdx] = entry;
9706 					stackIdx++;
9707 					map = VME_SUBMAP(entry);
9708 					entry = NULL;
9709 					break;
9710 				}
9711 				if (VME_OBJECT(entry) == kernel_object) {
9712 					count = 0;
9713 					vm_object_lock(VME_OBJECT(entry));
9714 					for (offset = entry->links.start; offset < entry->links.end; offset += page_size) {
9715 						page = vm_page_lookup(VME_OBJECT(entry), offset);
9716 						if (page && VM_PAGE_WIRED(page)) {
9717 							count++;
9718 						}
9719 					}
9720 					vm_object_unlock(VME_OBJECT(entry));
9721 
9722 					if (count) {
9723 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
9724 						assert(VME_ALIAS(entry) < num_info);
9725 						info[VME_ALIAS(entry)].size += ptoa_64(count);
9726 					}
9727 				}
9728 				while (map && (entry == vm_map_last_entry(map))) {
9729 					vm_map_unlock(map);
9730 					if (!stackIdx) {
9731 						map = NULL;
9732 					} else {
9733 						--stackIdx;
9734 						map = maps[stackIdx];
9735 						entry = entries[stackIdx];
9736 					}
9737 				}
9738 			}
9739 		}
9740 	}
9741 
9742 	process_account(info, num_info, zones_collectable_bytes, iterate);
9743 
9744 	return KERN_SUCCESS;
9745 }
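
/*
 * Editor's illustrative caller sketch (hypothetical, not in the original
 * source): size the info array with vm_page_diagnose_estimate() before
 * calling vm_page_diagnose().
 */
#if 0
	unsigned int num = vm_page_diagnose_estimate();
	mach_memory_info_t *mi = kalloc_data(num * sizeof(*mi), Z_WAITOK);

	if (mi != NULL && vm_page_diagnose(mi, num, 0) == KERN_SUCCESS) {
		/* mi[0..num) now holds per-tag sizes plus the VM_KERN_COUNT_* counters */
	}
	kfree_data(mi, num * sizeof(*mi));
#endif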
9746 
9747 #if DEBUG || DEVELOPMENT
9748 
9749 kern_return_t
9750 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
9751 {
9752 	kern_return_t  ret;
9753 	vm_size_t      zsize;
9754 	vm_map_t       map;
9755 	vm_map_entry_t entry;
9756 
9757 	zsize = zone_element_info((void *) addr, tag);
9758 	if (zsize) {
9759 		*zone_size = *size = zsize;
9760 		return KERN_SUCCESS;
9761 	}
9762 
9763 	*zone_size = 0;
9764 	ret = KERN_INVALID_ADDRESS;
9765 	for (map = kernel_map; map;) {
9766 		vm_map_lock(map);
9767 		if (!vm_map_lookup_entry(map, addr, &entry)) {
9768 			break;
9769 		}
9770 		if (entry->is_sub_map) {
9771 			if (map != kernel_map) {
9772 				break;
9773 			}
9774 			map = VME_SUBMAP(entry);
9775 			continue;
9776 		}
9777 		if (entry->vme_start != addr) {
9778 			break;
9779 		}
9780 		*tag = (vm_tag_t)VME_ALIAS(entry);
9781 		*size = (entry->vme_end - addr);
9782 		ret = KERN_SUCCESS;
9783 		break;
9784 	}
9785 	if (map != kernel_map) {
9786 		vm_map_unlock(map);
9787 	}
9788 	vm_map_unlock(kernel_map);
9789 
9790 	return ret;
9791 }
9792 
9793 #endif /* DEBUG || DEVELOPMENT */
9794 
9795 uint32_t
9796 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
9797 {
9798 	vm_allocation_site_t * site;
9799 	uint32_t               kmodId;
9800 
9801 	kmodId = 0;
9802 	lck_spin_lock(&vm_allocation_sites_lock);
9803 	if ((site = vm_allocation_sites[tag])) {
9804 		if (VM_TAG_KMOD & site->flags) {
9805 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
9806 		}
9807 	}
9808 	lck_spin_unlock(&vm_allocation_sites_lock);
9809 
9810 	return kmodId;
9811 }
9812 
9813 
9814 #if CONFIG_SECLUDED_MEMORY
9815 /*
9816  * Note that there's no locking around other accesses to vm_page_secluded_target.
9817  * That should be OK, since these are the only places where it can be changed after
9818  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
9819  * but will eventually get the correct value. This brief mismatch is OK as pageout
9820  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
9821  * over time.
9822  */
9823 unsigned int vm_page_secluded_suppress_cnt = 0;
9824 unsigned int vm_page_secluded_save_target;
9825 
9826 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
9827 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
9828 
9829 void
9830 start_secluded_suppression(task_t task)
9831 {
9832 	if (task->task_suppressed_secluded) {
9833 		return;
9834 	}
9835 	lck_spin_lock(&secluded_suppress_slock);
9836 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
9837 		task->task_suppressed_secluded = TRUE;
9838 		vm_page_secluded_save_target = vm_page_secluded_target;
9839 		vm_page_secluded_target = 0;
9840 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9841 	}
9842 	lck_spin_unlock(&secluded_suppress_slock);
9843 }
9844 
9845 void
9846 stop_secluded_suppression(task_t task)
9847 {
9848 	lck_spin_lock(&secluded_suppress_slock);
9849 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
9850 		task->task_suppressed_secluded = FALSE;
9851 		vm_page_secluded_target = vm_page_secluded_save_target;
9852 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9853 	}
9854 	lck_spin_unlock(&secluded_suppress_slock);
9855 }
9856 
9857 #endif /* CONFIG_SECLUDED_MEMORY */
9858 
9859 /*
9860  * Move the list of retired pages from vm_page_queue_retired to
9861  * their final resting place in retired_pages_object.
9862  */
9863 void
9864 vm_retire_boot_pages(void)
9865 {
9866 #if defined(__arm64__)
9867 	vm_page_t p;
9868 
9869 	vm_object_lock(retired_pages_object);
9870 	while (!vm_page_queue_empty(&vm_page_queue_retired)) {
9871 		vm_page_queue_remove_first(&vm_page_queue_retired, p, vmp_pageq);
9872 		assert(p != NULL);
9873 		vm_page_lock_queues();
9874 		p->vmp_q_state = VM_PAGE_IS_WIRED;
9875 		p->vmp_wire_count++;
9876 		vm_page_unlock_queues();
9877 		vm_page_insert_wired(p, retired_pages_object, ptoa(VM_PAGE_GET_PHYS_PAGE(p)), VM_KERN_MEMORY_RETIRED);
9878 		vm_object_unlock(retired_pages_object);
9879 		pmap_retire_page(VM_PAGE_GET_PHYS_PAGE(p));
9880 		vm_object_lock(retired_pages_object);
9881 	}
9882 	vm_object_unlock(retired_pages_object);
9883 #endif /* defined(__arm64__) */
9884 }
9885 
9886 /*
9887  * Returns the current number of retired pages, used for sysctl.
9888  */
9889 uint32_t
9890 vm_retired_pages_count(void)
9891 {
9892 	return retired_pages_object->resident_page_count;
9893 }
9894 
9895