xref: /xnu-11215.61.5/osfmk/vm/vm_resident.c (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68 
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/ecc.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_init_xnu.h>
85 #include <vm/vm_map_internal.h>
86 #include <vm/vm_page_internal.h>
87 #include <vm/vm_pageout_internal.h>
88 #include <vm/vm_kern_xnu.h>                 /* kmem_alloc() */
89 #include <vm/vm_compressor_pager_internal.h>
90 #include <kern/misc_protos.h>
91 #include <mach_debug/zone_info.h>
92 #include <vm/cpm_internal.h>
93 #include <pexpert/pexpert.h>
94 #include <pexpert/device_tree.h>
95 #include <san/kasan.h>
96 #include <os/log.h>
97 
98 #include <vm/vm_protos_internal.h>
99 #include <vm/memory_object.h>
100 #include <vm/vm_purgeable_internal.h>
101 #include <vm/vm_compressor_internal.h>
102 #include <vm/vm_iokit.h>
103 #include <vm/vm_object_internal.h>
104 #if defined (__x86_64__)
105 #include <i386/misc_protos.h>
106 #endif
107 
108 #if CONFIG_SPTM
109 #include <arm64/sptm/sptm.h>
110 #endif
111 
112 #if CONFIG_PHANTOM_CACHE
113 #include <vm/vm_phantom_cache_internal.h>
114 #endif
115 
116 #if HIBERNATION
117 #include <IOKit/IOHibernatePrivate.h>
118 #include <machine/pal_hibernate.h>
119 #endif /* HIBERNATION */
120 
121 #include <sys/kdebug.h>
122 
123 #if defined(HAS_APPLE_PAC)
124 #include <ptrauth.h>
125 #endif
126 #if defined(__arm64__)
127 #include <arm/cpu_internal.h>
128 #endif /* defined(__arm64__) */
129 
130 #if MACH_ASSERT
131 
132 TUNABLE(bool, vm_check_refs_on_free, "vm_check_refs_on_free", true);
133 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
134 
135 #else /* MACH_ASSERT */
136 
137 #define ASSERT_PMAP_FREE(mem) /* nothing */
138 
139 #endif /* MACH_ASSERT */
140 
141 extern boolean_t vm_pageout_running;
142 extern thread_t  vm_pageout_scan_thread;
143 extern bool vps_dynamic_priority_enabled;
144 
145 char    vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
146 char    vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
147 char    vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
148 char    vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
149 
150 #if CONFIG_SECLUDED_MEMORY
151 struct vm_page_secluded_data vm_page_secluded;
152 #endif /* CONFIG_SECLUDED_MEMORY */
153 
154 #if DEVELOPMENT || DEBUG
155 extern struct memory_object_pager_ops shared_region_pager_ops;
156 unsigned int shared_region_pagers_resident_count = 0;
157 unsigned int shared_region_pagers_resident_peak = 0;
158 #endif /* DEVELOPMENT || DEBUG */
159 
160 
161 
162 int             PERCPU_DATA(start_color);
163 vm_page_t       PERCPU_DATA(free_pages);
164 boolean_t       hibernate_cleaning_in_progress = FALSE;
165 
166 uint32_t        vm_lopage_free_count = 0;
167 uint32_t        vm_lopage_free_limit = 0;
168 uint32_t        vm_lopage_lowater    = 0;
169 boolean_t       vm_lopage_refill = FALSE;
170 boolean_t       vm_lopage_needed = FALSE;
171 
172 int             speculative_age_index = 0;
173 int             speculative_steal_index = 0;
174 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];
175 
176 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
177                                                           * Updated and checked behind the vm_page_queues_lock. */
178 
179 static void             vm_page_free_prepare(vm_page_t  page);
180 static vm_page_t        vm_page_grab_fictitious_common(ppnum_t, boolean_t);
181 
182 static void vm_tag_init(void);
183 
184 /* for debugging purposes */
185 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
186     VM_PAGE_PACKED_FROM_ARRAY;
187 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
188     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
189 
190 /*
191  *	Associated with each page of user-allocatable memory is a
192  *	page structure.
193  */
194 
195 /*
196  *	These variables record the values returned by vm_page_bootstrap,
197  *	for debugging purposes.  The implementation of pmap_steal_memory
198  *	and pmap_startup here also uses them internally.
199  */
200 
201 vm_offset_t virtual_space_start;
202 vm_offset_t virtual_space_end;
203 uint32_t        vm_page_pages;
204 
205 /*
206  *	The vm_page_lookup() routine, which provides for fast
207  *	(virtual memory object, offset) to page lookup, employs
208  *	the following hash table.  The vm_page_{insert,remove}
209  *	routines install and remove associations in the table.
210  *	[This table is often called the virtual-to-physical,
211  *	or VP, table.]
212  */
213 typedef struct {
214 	vm_page_packed_t page_list;
215 #if     MACH_PAGE_HASH_STATS
216 	int             cur_count;              /* current count */
217 	int             hi_count;               /* high water mark */
218 #endif /* MACH_PAGE_HASH_STATS */
219 } vm_page_bucket_t;
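/*
 * Illustrative sketch (not a verbatim copy of the code in this file):
 * conceptually, vm_page_lookup() and vm_page_insert() hash the
 * (object, offset) pair to one of these buckets and then walk or update
 * the packed singly-linked page_list under that bucket's spin lock,
 * roughly:
 *
 *	bucket = &vm_page_buckets[vm_page_hash(object, offset)];
 *	bucket_lock = &vm_page_bucket_locks[hash / BUCKETS_PER_LOCK];
 *	lck_spin_lock(bucket_lock);
 *	for (mem = (vm_page_t)VM_PAGE_UNPACK_PTR(bucket->page_list);
 *	     mem != VM_PAGE_NULL;
 *	     mem = (vm_page_t)VM_PAGE_UNPACK_PTR(mem->vmp_next_m)) {
 *		if (VM_PAGE_OBJECT(mem) == object && mem->vmp_offset == offset) {
 *			break;
 *		}
 *	}
 *	lck_spin_unlock(bucket_lock);
 *
 * The actual hash macro and list-walking code live elsewhere in this file;
 * the sketch only shows the bucket/lock relationship.
 */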
220 
221 
222 #define BUCKETS_PER_LOCK        16
223 
224 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
225 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
226 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
227 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
228 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
229 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
230 
231 #ifndef VM_TAG_ACTIVE_UPDATE
232 #error VM_TAG_ACTIVE_UPDATE
233 #endif
234 #ifndef VM_TAG_SIZECLASSES
235 #error VM_TAG_SIZECLASSES
236 #endif
237 
238 /* for debugging */
239 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
240 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
241 
242 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
243 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
244 #if VM_TAG_SIZECLASSES
245 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
246 #endif /* VM_TAG_SIZECLASSES */
247 
248 vm_tag_t vm_allocation_tag_highest;
249 
250 #if VM_PAGE_BUCKETS_CHECK
251 boolean_t vm_page_buckets_check_ready = FALSE;
252 #if VM_PAGE_FAKE_BUCKETS
253 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
254 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
255 #endif /* VM_PAGE_FAKE_BUCKETS */
256 #endif /* VM_PAGE_BUCKETS_CHECK */
257 
258 #if     MACH_PAGE_HASH_STATS
259 /* This routine is only for debug.  It is intended to be called by
260  * hand by a developer using a kernel debugger.  This routine prints
261  * out vm_page_hash table statistics to the kernel debug console.
262  */
263 void
264 hash_debug(void)
265 {
266 	int     i;
267 	int     numbuckets = 0;
268 	int     highsum = 0;
269 	int     maxdepth = 0;
270 
271 	for (i = 0; i < vm_page_bucket_count; i++) {
272 		if (vm_page_buckets[i].hi_count) {
273 			numbuckets++;
274 			highsum += vm_page_buckets[i].hi_count;
275 			if (vm_page_buckets[i].hi_count > maxdepth) {
276 				maxdepth = vm_page_buckets[i].hi_count;
277 			}
278 		}
279 	}
280 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
281 	printf("Number used buckets:     %d = %d%%\n",
282 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
283 	printf("Number unused buckets:   %d = %d%%\n",
284 	    vm_page_bucket_count - numbuckets,
285 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
286 	printf("Sum of bucket max depth: %d\n", highsum);
287 	printf("Average bucket depth:    %d.%2d\n",
288 	    highsum / vm_page_bucket_count,
289 	    highsum % vm_page_bucket_count);
290 	printf("Maximum bucket depth:    %d\n", maxdepth);
291 }
292 #endif /* MACH_PAGE_HASH_STATS */
293 
294 /*
295  *	The virtual page size is currently implemented as a runtime
296  *	variable, but is constant once initialized using vm_set_page_size.
297  *	This initialization must be done in the machine-dependent
298  *	bootstrap sequence, before calling other machine-independent
299  *	initializations.
300  *
301  *	All references to the virtual page size outside this
302  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
303  *	constants.
304  */
305 #if defined(__arm64__)
306 vm_size_t       page_size;
307 vm_size_t       page_mask;
308 int             page_shift;
309 #else
310 vm_size_t       page_size  = PAGE_SIZE;
311 vm_size_t       page_mask  = PAGE_MASK;
312 int             page_shift = PAGE_SHIFT;
313 #endif
314 
315 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
316 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
317 vm_page_t                          vm_page_array_ending_addr;
318 
319 unsigned int    vm_pages_count = 0;
320 
321 /*
322  *	Resident pages that represent real memory
323  *	are allocated from a set of free lists,
324  *	one per color.
325  */
326 unsigned int    vm_colors;
327 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
328 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
329 unsigned int    vm_free_magazine_refill_limit = 0;
330 
331 
332 struct vm_page_queue_free_head {
333 	vm_page_queue_head_t    qhead;
334 } VM_PAGE_PACKED_ALIGNED;
335 
336 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
337 
338 
339 unsigned int    vm_page_free_wanted;
340 unsigned int    vm_page_free_wanted_privileged;
341 #if CONFIG_SECLUDED_MEMORY
342 unsigned int    vm_page_free_wanted_secluded;
343 #endif /* CONFIG_SECLUDED_MEMORY */
344 unsigned int    vm_page_free_count;
345 
346 unsigned int    vm_page_realtime_count;
347 
348 /*
349  *	Occasionally, the virtual memory system uses
350  *	resident page structures that do not refer to
351  *	real pages, for example to leave a page with
352  *	important state information in the VP table.
353  *
354  *	These page structures are allocated the way
355  *	most other kernel structures are.
356  */
357 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
358 vm_locks_array_t vm_page_locks;
359 
360 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
361 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
362 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
363 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
364 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
365 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
366 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
367 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
368 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
369 
370 unsigned int    vm_page_local_q_soft_limit = 250;
371 unsigned int    vm_page_local_q_hard_limit = 500;
372 struct vpl     *__zpercpu vm_page_local_q;
373 
374 /* N.B. Guard and fictitious pages must not
375  * be assigned a zero phys_page value.
376  */
377 /*
378  *	Fictitious pages don't have a physical address,
379  *	but we must initialize phys_page to something.
380  *	For debugging, this should be a strange value
381  *	that the pmap module can recognize in assertions.
382  */
383 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
384 
385 /*
386  *	Guard pages are not accessible so they don't
387  *      need a physical address, but we need to enter
388  *	one in the pmap.
389  *	Let's make it recognizable and make sure that
390  *	we don't use a real physical page with that
391  *	physical address.
392  */
393 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
394 
395 /*
396  *	Resident page structures are also chained on
397  *	queues that are used by the page replacement
398  *	system (pageout daemon).  These queues are
399  *	defined here, but are shared by the pageout
400  *	module.  The inactive queue is broken into
401  *	file backed and anonymous for convenience as the
402  *	pageout daemon often assignes a higher
403  *	importance to anonymous pages (less likely to pick)
404  */
405 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
406 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
407 #if CONFIG_SECLUDED_MEMORY
408 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
409 #endif /* CONFIG_SECLUDED_MEMORY */
410 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
411 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
412 
413 queue_head_t    vm_objects_wired;
414 
415 vm_page_queue_head_t    vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
416 uint32_t        vm_page_donate_mode;
417 uint32_t        vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
418 uint32_t        vm_page_donate_count;
419 bool            vm_page_donate_queue_ripe;
420 
421 
422 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
423 uint32_t        vm_page_background_target;
424 uint32_t        vm_page_background_target_snapshot;
425 uint32_t        vm_page_background_count;
426 uint64_t        vm_page_background_promoted_count;
427 
428 uint32_t        vm_page_background_internal_count;
429 uint32_t        vm_page_background_external_count;
430 
431 uint32_t        vm_page_background_mode;
432 uint32_t        vm_page_background_exclude_external;
433 
434 unsigned int    vm_page_active_count;
435 unsigned int    vm_page_inactive_count;
436 unsigned int    vm_page_kernelcache_count;
437 #if CONFIG_SECLUDED_MEMORY
438 unsigned int    vm_page_secluded_count;
439 unsigned int    vm_page_secluded_count_free;
440 unsigned int    vm_page_secluded_count_inuse;
441 unsigned int    vm_page_secluded_count_over_target;
442 #endif /* CONFIG_SECLUDED_MEMORY */
443 unsigned int    vm_page_anonymous_count;
444 unsigned int    vm_page_throttled_count;
445 unsigned int    vm_page_speculative_count;
446 
447 unsigned int    vm_page_wire_count;
448 unsigned int    vm_page_wire_count_on_boot = 0;
449 unsigned int    vm_page_stolen_count = 0;
450 unsigned int    vm_page_wire_count_initial;
451 unsigned int    vm_page_gobble_count = 0;
452 unsigned int    vm_page_kern_lpage_count = 0;
453 
454 uint64_t        booter_size;  /* external so it can be found in core dumps */
455 
456 #define VM_PAGE_WIRE_COUNT_WARNING      0
457 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
458 
459 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
460 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
461 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
462 
463 unsigned int    vm_page_xpmapped_external_count = 0;
464 unsigned int    vm_page_external_count = 0;
465 unsigned int    vm_page_internal_count = 0;
466 unsigned int    vm_page_pageable_external_count = 0;
467 unsigned int    vm_page_pageable_internal_count = 0;
468 
469 #if DEVELOPMENT || DEBUG
470 unsigned int    vm_page_speculative_recreated = 0;
471 unsigned int    vm_page_speculative_created = 0;
472 unsigned int    vm_page_speculative_used = 0;
473 #endif
474 
475 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
476 
477 unsigned int    vm_page_cleaned_count = 0;
478 
479 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
480 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
481 
482 
483 /*
484  *	Several page replacement parameters are also
485  *	shared with this module, so that page allocation
486  *	(done here in vm_page_alloc) can trigger the
487  *	pageout daemon.
488  */
489 unsigned int    vm_page_free_target = 0;
490 unsigned int    vm_page_free_min = 0;
491 unsigned int    vm_page_throttle_limit = 0;
492 unsigned int    vm_page_inactive_target = 0;
493 #if CONFIG_SECLUDED_MEMORY
494 unsigned int    vm_page_secluded_target = 0;
495 #endif /* CONFIG_SECLUDED_MEMORY */
496 unsigned int    vm_page_anonymous_min = 0;
497 unsigned int    vm_page_free_reserved = 0;
498 
499 
500 /*
501  *	The VM system has a couple of heuristics for deciding
502  *	that pages are "uninteresting" and should be placed
503  *	on the inactive queue as likely candidates for replacement.
504  *	These variables let the heuristics be controlled at run-time
505  *	to make experimentation easier.
506  */
507 
508 boolean_t vm_page_deactivate_hint = TRUE;
509 
510 struct vm_page_stats_reusable vm_page_stats_reusable;
511 
512 /*
513  *	vm_set_page_size:
514  *
515  *	Sets the page size, perhaps based upon the memory
516  *	size.  Must be called before any use of page-size
517  *	dependent functions.
518  *
519  *	Sets page_shift and page_mask from page_size.
520  */
521 void
522 vm_set_page_size(void)
523 {
524 	page_size  = PAGE_SIZE;
525 	page_mask  = PAGE_MASK;
526 	page_shift = PAGE_SHIFT;
527 
528 	if ((page_mask & page_size) != 0) {
529 		panic("vm_set_page_size: page size not a power of two");
530 	}
531 
532 	for (page_shift = 0;; page_shift++) {
533 		if ((1U << page_shift) == page_size) {
534 			break;
535 		}
536 	}
537 }
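/*
 * Worked example (illustrative): with 16KB pages, PAGE_SIZE == 16384 and
 * PAGE_MASK == 0x3fff, so (page_mask & page_size) == 0 and the loop above
 * stops at page_shift == 14; with 4KB pages it stops at 12.
 */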
538 
539 #if defined (__x86_64__)
540 
541 #define MAX_CLUMP_SIZE      16
542 #define DEFAULT_CLUMP_SIZE  4
543 
544 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
545 
546 #if DEVELOPMENT || DEBUG
547 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
548 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
549 
550 static inline void
551 vm_clump_update_stats(unsigned int c)
552 {
553 	assert(c <= vm_clump_size);
554 	if (c > 0 && c <= vm_clump_size) {
555 		vm_clump_stats[c] += c;
556 	}
557 	vm_clump_allocs += c;
558 }
559 #endif  /*  if DEVELOPMENT || DEBUG */
560 
561 /* Called once to setup the VM clump knobs */
562 static void
563 vm_page_setup_clump( void )
564 {
565 	unsigned int override, n;
566 
567 	vm_clump_size = DEFAULT_CLUMP_SIZE;
568 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
569 		vm_clump_size = override;
570 	}
571 
572 	if (vm_clump_size > MAX_CLUMP_SIZE) {
573 		panic("vm_page_setup_clump:: clump_size is too large!");
574 	}
575 	if (vm_clump_size < 1) {
576 		panic("vm_page_setup_clump:: clump_size must be >= 1");
577 	}
578 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
579 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
580 	}
581 
582 	vm_clump_promote_threshold = vm_clump_size;
583 	vm_clump_mask = vm_clump_size - 1;
584 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
585 		;
586 	}
587 
588 #if DEVELOPMENT || DEBUG
589 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
590 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
591 #endif  /*  if DEVELOPMENT || DEBUG */
592 }
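/*
 * Worked example (illustrative): with the default clump_size of 4,
 * vm_clump_mask == 3, vm_clump_shift == 2 and
 * vm_clump_promote_threshold == 4. A clump_size=16 boot-arg gives
 * mask 15 and shift 4; values above MAX_CLUMP_SIZE, below 1, or not
 * a power of 2 panic in the checks above.
 */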
593 
594 #endif  /* #if defined (__x86_64__) */
595 
596 #define COLOR_GROUPS_TO_STEAL   4
597 
598 /* Called once during startup, once the cache geometry is known.
599  */
600 static void
601 vm_page_set_colors( void )
602 {
603 	unsigned int    n, override;
604 
605 #if defined (__x86_64__)
606 	/* adjust #colors because we need to color outside the clump boundary */
607 	vm_cache_geometry_colors >>= vm_clump_shift;
608 #endif
609 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
610 		n = override;
611 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
612 		n = vm_cache_geometry_colors;
613 	} else {
614 		n = DEFAULT_COLORS;                             /* use default if all else fails */
615 	}
616 	if (n == 0) {
617 		n = 1;
618 	}
619 	if (n > MAX_COLORS) {
620 		n = MAX_COLORS;
621 	}
622 
623 	/* the count must be a power of 2  */
624 	if ((n & (n - 1)) != 0) {
625 		n = DEFAULT_COLORS;                             /* use default if all else fails */
626 	}
627 	vm_colors = n;
628 	vm_color_mask = n - 1;
629 
630 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
631 
632 #if defined (__x86_64__)
633 	/* adjust for reduction in colors due to clumping and multiple cores */
634 	if (real_ncpus) {
635 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
636 	}
637 #endif
638 }
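/*
 * Worked example (illustrative): if the hardware reports
 * vm_cache_geometry_colors == 32 on x86_64 with vm_clump_shift == 2,
 * n becomes 32 >> 2 == 8, so vm_colors == 8 and vm_color_mask == 7.
 * The base refill limit is 8 * COLOR_GROUPS_TO_STEAL == 32 pages,
 * further scaled by (vm_clump_size * real_ncpus) on x86_64.
 */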
639 
640 /*
641  * During single-threaded early boot we don't initialize all pages.
642  * This avoids some delay during boot. They'll be initialized and
643  * added to the free list as needed, or after we are multithreaded,
644  * by what becomes the pageout thread.
645  */
646 static boolean_t fill = FALSE;
647 static unsigned int fillval;
648 uint_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
649 ppnum_t delay_above_pnum = PPNUM_MAX;
650 
651 /*
652  * For x86, the first 8 GB initializes quickly and gives us lots of lowmem + mem above to start off with.
653  * If ARM ever uses delayed page initialization, this value may need to be quite different.
654  */
655 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
656 
657 /*
658  * When we have to dip into more delayed pages due to low memory, free up
659  * a large chunk to get things back to normal. This avoids contention on the
660  * delayed code allocating page by page.
661  */
662 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
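/*
 * Illustrative sizing: VM_DELAY_PAGE_CHUNK is 262,144 pages with 4KB pages
 * and 65,536 pages with 16KB pages -- one gigabyte freed per dip either way.
 */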
663 
664 /*
665  * Get and initialize the next delayed page.
666  */
667 static vm_page_t
668 vm_get_delayed_page(int grab_options)
669 {
670 	vm_page_t p;
671 	ppnum_t   pnum;
672 
673 	/*
674 	 * Get a new page if we have one.
675 	 */
676 	vm_free_page_lock();
677 	if (vm_delayed_count == 0) {
678 		vm_free_page_unlock();
679 		return NULL;
680 	}
681 
682 	if (!pmap_next_page(&pnum)) {
683 		vm_delayed_count = 0;
684 		vm_free_page_unlock();
685 		return NULL;
686 	}
687 
688 
689 	assert(vm_delayed_count > 0);
690 	--vm_delayed_count;
691 
692 #if defined(__x86_64__)
693 	/* x86 cluster code requires increasing phys_page in vm_pages[] */
694 	if (vm_pages_count > 0) {
695 		assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
696 	}
697 #endif
698 	p = &vm_pages[vm_pages_count];
699 	assert(p < vm_page_array_ending_addr);
700 	vm_page_init(p, pnum, FALSE);
701 	++vm_pages_count;
702 	++vm_page_pages;
703 	vm_free_page_unlock();
704 
705 	/*
706 	 * These pages were initially counted as wired, undo that now.
707 	 */
708 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
709 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
710 	} else {
711 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
712 		vm_page_lockspin_queues();
713 	}
714 	--vm_page_wire_count;
715 	--vm_page_wire_count_initial;
716 	if (vm_page_wire_count_on_boot != 0) {
717 		--vm_page_wire_count_on_boot;
718 	}
719 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
720 		vm_page_unlock_queues();
721 	}
722 
723 
724 	if (fill) {
725 		fillPage(pnum, fillval);
726 	}
727 	return p;
728 }
729 
730 static void vm_page_module_init_delayed(void);
731 
732 /*
733  * Free all remaining delayed pages to the free lists.
734  */
735 void
736 vm_free_delayed_pages(void)
737 {
738 	vm_page_t   p;
739 	vm_page_t   list = NULL;
740 	uint_t      cnt = 0;
741 	vm_offset_t start_free_va;
742 	int64_t     free_size;
743 
744 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
745 		if (vm_himemory_mode) {
746 			vm_page_release(p, FALSE);
747 		} else {
748 			p->vmp_snext = list;
749 			list = p;
750 		}
751 		++cnt;
752 	}
753 
754 	/*
755 	 * Free the pages in reverse order if not in himemory mode,
756 	 * so the low memory pages end up first on the free lists (LIFO).
757 	 */
758 	while (list != NULL) {
759 		p = list;
760 		list = p->vmp_snext;
761 		p->vmp_snext = NULL;
762 		vm_page_release(p, FALSE);
763 	}
764 #if DEVELOPMENT || DEBUG
765 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
766 #endif
767 
768 	/*
769 	 * Free up any unused full pages at the end of the vm_pages[] array
770 	 */
771 	start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
772 
773 #if defined(__x86_64__)
774 	/*
775 	 * Since x86 might have used large pages for vm_pages[], we can't
776 	 * free starting in the middle of a partially used large page.
777 	 */
778 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
779 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
780 	}
781 #endif
782 	if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
783 		free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
784 		if (free_size > 0) {
785 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
786 			vm_page_array_ending_addr = (void *)start_free_va;
787 
788 			/*
789 			 * Note there's no locking here, as only this thread will ever change this value.
790 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
791 			 */
792 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
793 
794 #if DEVELOPMENT || DEBUG
795 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
796 			    (long)free_size, (long)start_free_va);
797 #endif
798 		}
799 	}
800 
801 
802 	/*
803 	 * now we can create the VM page array zone
804 	 */
805 	vm_page_module_init_delayed();
806 }
807 
808 /*
809  * Try and free up enough delayed pages to match a contig memory allocation.
810  */
811 static void
812 vm_free_delayed_pages_contig(
813 	uint_t    npages,
814 	ppnum_t   max_pnum,
815 	ppnum_t   pnum_mask)
816 {
817 	vm_page_t p;
818 	ppnum_t   pnum;
819 	uint_t    cnt = 0;
820 
821 	/*
822 	 * Treat 0 as the absolute max page number.
823 	 */
824 	if (max_pnum == 0) {
825 		max_pnum = PPNUM_MAX;
826 	}
827 
828 	/*
829 	 * Free till we get a properly aligned start page
830 	 */
831 	for (;;) {
832 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
833 		if (p == NULL) {
834 			return;
835 		}
836 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
837 		vm_page_release(p, FALSE);
838 		if (pnum >= max_pnum) {
839 			return;
840 		}
841 		if ((pnum & pnum_mask) == 0) {
842 			break;
843 		}
844 	}
845 
846 	/*
847 	 * Having a healthy pool of free pages will help performance. We don't
848 	 * want to fall back to the delayed code for every page allocation.
849 	 */
850 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
851 		npages += VM_DELAY_PAGE_CHUNK;
852 	}
853 
854 	/*
855 	 * Now free up the pages
856 	 */
857 	for (cnt = 1; cnt < npages; ++cnt) {
858 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
859 		if (p == NULL) {
860 			return;
861 		}
862 		vm_page_release(p, FALSE);
863 	}
864 }
865 
866 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
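/*
 * Rounds up to the next power of two (illustrative): ROUNDUP_NEXTP2(5) == 8,
 * ROUNDUP_NEXTP2(8) == 8, ROUNDUP_NEXTP2(9) == 16. X must be >= 2, since
 * __builtin_clz(0) is undefined.
 */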
867 
868 void
869 vm_page_init_local_q(unsigned int num_cpus)
870 {
871 	struct vpl *t_local_q;
872 
873 	/*
874 	 * no point in this for a uni-processor system
875 	 */
876 	if (num_cpus >= 2) {
877 		ml_cpu_info_t cpu_info;
878 
879 		/*
880 		 * Force the allocation alignment to a cacheline,
881 		 * because the `vpl` struct has a lock and will be taken
882 		 * cross CPU so we want to isolate the rest of the per-CPU
883 		 * data to avoid false sharing due to this lock being taken.
884 		 */
885 
886 		ml_cpu_get_info(&cpu_info);
887 
888 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
889 		    cpu_info.cache_line_size - 1);
890 
891 		zpercpu_foreach(lq, t_local_q) {
892 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
893 			vm_page_queue_init(&lq->vpl_queue);
894 		}
895 
896 		/* make the initialization visible to all cores */
897 		os_atomic_store(&vm_page_local_q, t_local_q, release);
898 	}
899 }
900 
901 /*
902  * vm_init_before_launchd
903  *
904  * This should be called right before launchd is loaded.
905  */
906 void
907 vm_init_before_launchd()
908 {
909 	vm_page_lockspin_queues();
910 	vm_page_wire_count_on_boot = vm_page_wire_count;
911 	vm_page_unlock_queues();
912 }
913 
914 
915 /*
916  *	vm_page_bootstrap:
917  *
918  *	Initializes the resident memory module.
919  *
920  *	Allocates memory for the page cells, and
921  *	for the object/offset-to-page hash table headers.
922  *	Each page cell is initialized and placed on the free list.
923  *	Returns the range of available kernel virtual memory.
924  */
925 __startup_func
926 void
927 vm_page_bootstrap(
928 	vm_offset_t             *startp,
929 	vm_offset_t             *endp)
930 {
931 	unsigned int            i;
932 	unsigned int            log1;
933 	unsigned int            log2;
934 	unsigned int            size;
935 
936 	/*
937 	 *	Initialize the page queues.
938 	 */
939 
940 	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
941 	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
942 	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
943 
944 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
945 		int group;
946 
947 		purgeable_queues[i].token_q_head = 0;
948 		purgeable_queues[i].token_q_tail = 0;
949 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
950 			queue_init(&purgeable_queues[i].objq[group]);
951 		}
952 
953 		purgeable_queues[i].type = i;
954 		purgeable_queues[i].new_pages = 0;
955 #if MACH_ASSERT
956 		purgeable_queues[i].debug_count_tokens = 0;
957 		purgeable_queues[i].debug_count_objects = 0;
958 #endif
959 	}
960 	;
961 	purgeable_nonvolatile_count = 0;
962 	queue_init(&purgeable_nonvolatile_queue);
963 
964 	for (i = 0; i < MAX_COLORS; i++) {
965 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
966 	}
967 
968 	vm_page_queue_init(&vm_lopage_queue_free);
969 	vm_page_queue_init(&vm_page_queue_active);
970 	vm_page_queue_init(&vm_page_queue_inactive);
971 #if CONFIG_SECLUDED_MEMORY
972 	vm_page_queue_init(&vm_page_queue_secluded);
973 #endif /* CONFIG_SECLUDED_MEMORY */
974 	vm_page_queue_init(&vm_page_queue_cleaned);
975 	vm_page_queue_init(&vm_page_queue_throttled);
976 	vm_page_queue_init(&vm_page_queue_anonymous);
977 	queue_init(&vm_objects_wired);
978 
979 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
980 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
981 
982 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
983 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
984 	}
985 
986 	vm_page_queue_init(&vm_page_queue_donate);
987 	vm_page_queue_init(&vm_page_queue_background);
988 
989 	vm_page_background_count = 0;
990 	vm_page_background_internal_count = 0;
991 	vm_page_background_external_count = 0;
992 	vm_page_background_promoted_count = 0;
993 
994 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
995 
996 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
997 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
998 	}
999 
1000 #if    defined(__LP64__)
1001 	vm_page_background_mode = VM_PAGE_BG_ENABLED;
1002 	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
1003 #else
1004 	vm_page_background_mode = VM_PAGE_BG_DISABLED;
1005 	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1006 #endif
1007 	vm_page_background_exclude_external = 0;
1008 
1009 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1010 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1011 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1012 
1013 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1014 		vm_page_background_mode = VM_PAGE_BG_DISABLED;
1015 	}
1016 
1017 	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1018 	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1019 		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1020 	}
1021 
1022 	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1023 	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1024 	vm_page_donate_target = vm_page_donate_target_high;
1025 	vm_page_donate_count = 0;
1026 
1027 	vm_page_free_wanted = 0;
1028 	vm_page_free_wanted_privileged = 0;
1029 #if CONFIG_SECLUDED_MEMORY
1030 	vm_page_free_wanted_secluded = 0;
1031 #endif /* CONFIG_SECLUDED_MEMORY */
1032 
1033 #if defined (__x86_64__)
1034 	/* this must be called before vm_page_set_colors() */
1035 	vm_page_setup_clump();
1036 #endif
1037 
1038 	vm_page_set_colors();
1039 
1040 	bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1041 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1042 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1043 	vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1044 
1045 	bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1046 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1047 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1048 	vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1049 	vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1050 	vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1051 	vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1052 #if CONFIG_SECLUDED_MEMORY
1053 	vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1054 #endif /* CONFIG_SECLUDED_MEMORY */
1055 
1056 	bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1057 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1058 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1059 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1060 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1061 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1062 #if CONFIG_SECLUDED_MEMORY
1063 	vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1064 #endif /* CONFIG_SECLUDED_MEMORY */
1065 
1066 	bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1067 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1068 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1069 	vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1070 	vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1071 #if CONFIG_SECLUDED_MEMORY
1072 	vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1073 #endif /* CONFIG_SECLUDED_MEMORY */
1074 
1075 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1076 		vm_allocation_sites_static[t].refcount = 2;
1077 		vm_allocation_sites_static[t].tag = t;
1078 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1079 	}
1080 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1081 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1082 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1083 
1084 	/*
1085 	 *	Steal memory for the map and zone subsystems.
1086 	 */
1087 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1088 
1089 	/*
1090 	 *	Allocate (and initialize) the virtual-to-physical
1091 	 *	table hash buckets.
1092 	 *
1093 	 *	The number of buckets should be a power of two to
1094 	 *	get a good hash function.  The following computation
1095 	 *	chooses the first power of two that is greater
1096 	 *	than or equal to the number of physical pages in the system.
1097 	 */
1098 
1099 	if (vm_page_bucket_count == 0) {
1100 		unsigned int npages = pmap_free_pages();
1101 
1102 		vm_page_bucket_count = 1;
1103 		while (vm_page_bucket_count < npages) {
1104 			vm_page_bucket_count <<= 1;
1105 		}
1106 	}
1107 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1108 
1109 	vm_page_hash_mask = vm_page_bucket_count - 1;
1110 
1111 	/*
1112 	 *	Calculate object shift value for hashing algorithm:
1113 	 *		O = log2(sizeof(struct vm_object))
1114 	 *		B = log2(vm_page_bucket_count)
1115 	 *	        hash shifts the object left by
1116 	 *		B/2 - O
1117 	 */
1118 	size = vm_page_bucket_count;
1119 	for (log1 = 0; size > 1; log1++) {
1120 		size /= 2;
1121 	}
1122 	size = sizeof(struct vm_object);
1123 	for (log2 = 0; size > 1; log2++) {
1124 		size /= 2;
1125 	}
1126 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1127 
1128 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1129 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1130 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure unique series */
1131 
1132 	if (vm_page_hash_mask & vm_page_bucket_count) {
1133 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1134 	}
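	/*
	 * Worked example (illustrative numbers only): with ~1,000,000 physical
	 * pages the loop above picks vm_page_bucket_count == 2^20, so
	 * vm_page_hash_mask == 0xfffff and log1 == 20. If, purely for
	 * illustration, sizeof(struct vm_object) were 256 (log2 == 8), then
	 * vm_page_hash_shift would be 20/2 - 8 + 1 == 3 and
	 * vm_page_bucket_hash would be (1 << 10) | (1 << 5) | 1 == 0x421.
	 */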
1135 
1136 #if VM_PAGE_BUCKETS_CHECK
1137 #if VM_PAGE_FAKE_BUCKETS
1138 	/*
1139 	 * Allocate a decoy set of page buckets, to detect
1140 	 * any stomping there.
1141 	 */
1142 	vm_page_fake_buckets = (vm_page_bucket_t *)
1143 	    pmap_steal_memory(vm_page_bucket_count *
1144 	    sizeof(vm_page_bucket_t), 0);
1145 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1146 	vm_page_fake_buckets_end =
1147 	    vm_map_round_page((vm_page_fake_buckets_start +
1148 	    (vm_page_bucket_count *
1149 	    sizeof(vm_page_bucket_t))),
1150 	    PAGE_MASK);
1151 	char *cp;
1152 	for (cp = (char *)vm_page_fake_buckets_start;
1153 	    cp < (char *)vm_page_fake_buckets_end;
1154 	    cp++) {
1155 		*cp = 0x5a;
1156 	}
1157 #endif /* VM_PAGE_FAKE_BUCKETS */
1158 #endif /* VM_PAGE_BUCKETS_CHECK */
1159 
1160 	kernel_debug_string_early("vm_page_buckets");
1161 	vm_page_buckets = (vm_page_bucket_t *)
1162 	    pmap_steal_memory(vm_page_bucket_count *
1163 	    sizeof(vm_page_bucket_t), 0);
1164 
1165 	kernel_debug_string_early("vm_page_bucket_locks");
1166 	vm_page_bucket_locks = (lck_spin_t *)
1167 	    pmap_steal_memory(vm_page_bucket_lock_count *
1168 	    sizeof(lck_spin_t), 0);
1169 
1170 	for (i = 0; i < vm_page_bucket_count; i++) {
1171 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1172 
1173 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1174 #if     MACH_PAGE_HASH_STATS
1175 		bucket->cur_count = 0;
1176 		bucket->hi_count = 0;
1177 #endif /* MACH_PAGE_HASH_STATS */
1178 	}
1179 
1180 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1181 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1182 	}
1183 
1184 	vm_tag_init();
1185 
1186 #if VM_PAGE_BUCKETS_CHECK
1187 	vm_page_buckets_check_ready = TRUE;
1188 #endif /* VM_PAGE_BUCKETS_CHECK */
1189 
1190 	/*
1191 	 *	Machine-dependent code allocates the resident page table.
1192 	 *	It uses vm_page_init to initialize the page frames.
1193 	 *	The code also returns to us the virtual space available
1194 	 *	to the kernel.  We don't trust the pmap module
1195 	 *	to get the alignment right.
1196 	 */
1197 
1198 	kernel_debug_string_early("pmap_startup");
1199 	pmap_startup(&virtual_space_start, &virtual_space_end);
1200 	virtual_space_start = round_page(virtual_space_start);
1201 	virtual_space_end = trunc_page(virtual_space_end);
1202 
1203 	*startp = virtual_space_start;
1204 	*endp = virtual_space_end;
1205 
1206 	/*
1207 	 *	Compute the initial "wire" count.
1208 	 *	Up until now, the pages which have been set aside are not under
1209 	 *	the VM system's control, so although they aren't explicitly
1210 	 *	wired, they nonetheless can't be moved. At this moment,
1211 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1212 	 */
1213 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1214 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1215 	    vm_page_free_count - vm_lopage_free_count;
1216 #if CONFIG_SECLUDED_MEMORY
1217 	vm_page_wire_count -= vm_page_secluded_count;
1218 #endif
1219 	vm_page_wire_count_initial = vm_page_wire_count;
1220 
1221 	/* capture this for later use */
1222 	booter_size = ml_get_booter_memory_size();
1223 
1224 	printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1225 	    vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1226 
1227 	kernel_debug_string_early("vm_page_bootstrap complete");
1228 }
1229 
1230 #ifndef MACHINE_PAGES
1231 /*
1232  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1233  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1234  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1235  */
1236 static void *
1237 pmap_steal_memory_internal(
1238 	vm_size_t size,
1239 	vm_size_t alignment,
1240 	boolean_t might_free,
1241 	unsigned int flags,
1242 	pmap_mapping_type_t mapping_type)
1243 {
1244 	kern_return_t kr;
1245 	vm_offset_t addr;
1246 	vm_offset_t map_addr;
1247 	ppnum_t phys_page;
1248 	unsigned int pmap_flags;
1249 
1250 	/*
1251 	 * Size needs to be aligned to word size.
1252 	 */
1253 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
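	/*
	 * Illustrative: on LP64, a 13-byte request becomes (13 + 7) & ~7 == 16
	 * bytes here.
	 */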
1254 
1255 	/*
1256 	 * Alignment defaults to word size if not specified.
1257 	 */
1258 	if (alignment == 0) {
1259 		alignment = sizeof(void*);
1260 	}
1261 
1262 	/*
1263 	 * Alignment must be no greater than a page and must be a power of two.
1264 	 */
1265 	assert(alignment <= PAGE_SIZE);
1266 	assert((alignment & (alignment - 1)) == 0);
1267 
1268 	/*
1269 	 * On the first call, get the initial values for virtual address space
1270 	 * and page align them.
1271 	 */
1272 	if (virtual_space_start == virtual_space_end) {
1273 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1274 		virtual_space_start = round_page(virtual_space_start);
1275 		virtual_space_end = trunc_page(virtual_space_end);
1276 
1277 #if defined(__x86_64__)
1278 		/*
1279 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1280 		 * that map it. This makes the VA available for large page mappings.
1281 		 */
1282 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1283 #endif
1284 	}
1285 
1286 	/*
1287 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1288 	 * address if the size is big enough to back with at least 1 large page.
1289 	 */
1290 #if defined(__x86_64__)
1291 	if (size >= I386_LPGBYTES) {
1292 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1293 	}
1294 #endif
1295 	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
1296 	addr = virtual_space_start;
1297 	virtual_space_start += size;
1298 
1299 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1300 
1301 	/*
1302 	 * Allocate and map physical pages to back the new virtual space.
1303 	 */
1304 	map_addr = round_page(addr);
1305 	while (map_addr < addr + size) {
1306 #if defined(__x86_64__)
1307 		/*
1308 		 * Back with a large page if properly aligned on x86
1309 		 */
1310 		if ((map_addr & I386_LPGMASK) == 0 &&
1311 		    map_addr + I386_LPGBYTES <= addr + size &&
1312 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1313 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1314 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1315 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1316 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);
1317 
1318 			if (kr != KERN_SUCCESS) {
1319 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1320 				    (unsigned long)map_addr, phys_page);
1321 			}
1322 			map_addr += I386_LPGBYTES;
1323 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1324 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1325 			vm_page_kern_lpage_count++;
1326 			continue;
1327 		}
1328 #endif
1329 
1330 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1331 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1332 		}
1333 
1334 #if defined(__x86_64__)
1335 		pmap_pre_expand(kernel_pmap, map_addr);
1336 #endif
1337 		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
1338 
1339 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1340 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1341 		    pmap_flags, FALSE, mapping_type);
1342 
1343 		if (kr != KERN_SUCCESS) {
1344 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1345 			    (unsigned long)map_addr, phys_page);
1346 		}
1347 		map_addr += PAGE_SIZE;
1348 
1349 		/*
1350 		 * Account for newly stolen memory
1351 		 */
1352 		vm_page_wire_count++;
1353 		vm_page_stolen_count++;
1354 	}
1355 
1356 #if defined(__x86_64__)
1357 	/*
1358 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1359 	 * Notify the pmap layer to record which high pages were allocated so far.
1360 	 */
1361 	if (might_free) {
1362 		pmap_hi_pages_done();
1363 	}
1364 #endif
1365 #if KASAN
1366 	kasan_notify_address(round_page(addr), size);
1367 #endif
1368 	return (void *) addr;
1369 }
1370 
1371 void *
1372 pmap_steal_memory(
1373 	vm_size_t size,
1374 	vm_size_t alignment)
1375 {
1376 	return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1377 }
1378 
1379 void *
1380 pmap_steal_freeable_memory(
1381 	vm_size_t size)
1382 {
1383 	return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1384 }
1385 
1386 
1387 
1388 
1389 #if CONFIG_SECLUDED_MEMORY
1390 /* boot-args to control secluded memory */
1391 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
1392 /* IOKit can use secluded memory */
1393 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
1394 /* apps can use secluded memory */
1395 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
1396 /* filecache can use secluded memory */
1397 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
1398 uint64_t secluded_shutoff_trigger = 0;
1399 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1400 #endif /* CONFIG_SECLUDED_MEMORY */
1401 
1402 
1403 #if defined(__arm64__)
1404 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1405 unsigned int vm_first_phys_ppnum = 0;
1406 #endif
1407 
1408 void vm_page_release_startup(vm_page_t mem);
1409 void
1410 pmap_startup(
1411 	vm_offset_t     *startp,
1412 	vm_offset_t     *endp)
1413 {
1414 	unsigned int    i, npages;
1415 	ppnum_t         phys_page;
1416 	uint64_t        mem_sz;
1417 	uint64_t        start_ns;
1418 	uint64_t        now_ns;
1419 	uint_t          low_page_count = 0;
1420 
1421 #if    defined(__LP64__)
1422 	/*
1423 	 * make sure we are aligned on a 64 byte boundary
1424 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1425 	 * 6 bits of the pointer)
1426 	 */
1427 	if (virtual_space_start != virtual_space_end) {
1428 		virtual_space_start = round_page(virtual_space_start);
1429 	}
1430 #endif
1431 
1432 	/*
1433 	 * We calculate how many page frames we will have
1434 	 * and then allocate the page structures in one chunk.
1435 	 *
1436 	 * Note that the calculation here doesn't take into account
1437 	 * the memory needed to map what's being allocated, i.e. the page
1438 	 * table entries. So the actual number of pages we get will be
1439 	 * less than this. To do someday: include that in the computation.
1440 	 *
1441  * Also for ARM, we don't use the count of free_pages, but rather the
1442  * range from the last page to the first page (ignoring holes due to retired pages).
1443 	 */
1444 #if defined(__arm64__)
1445 	mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1446 #else /* defined(__arm64__) */
1447 	mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1448 #endif /* defined(__arm64__) */
1449 	mem_sz += round_page(virtual_space_start) - virtual_space_start;        /* Account for any slop */
1450 	npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages)));    /* scaled to include the vm_page_ts */
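	/*
	 * Illustrative: assuming 8GB of free memory, 16KB pages and roughly
	 * 80 bytes per struct vm_page (the exact size varies by configuration),
	 * npages ~= 8GB / (16384 + 80), i.e. about 521,000 vm_page_t slots.
	 */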
1451 
1452 
1453 	vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1454 
1455 	/*
1456 	 * Check if we want to initialize pages to a known value
1457 	 */
1458 	if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1459 		fill = TRUE;
1460 	}
1461 #if     DEBUG
1462 	/* This slows down booting the DEBUG kernel, particularly on
1463 	 * large memory systems, but is worthwhile in deterministically
1464 	 * trapping uninitialized memory usage.
1465 	 */
1466 	if (!fill) {
1467 		fill = TRUE;
1468 		fillval = 0xDEB8F177;
1469 	}
1470 #endif
1471 	if (fill) {
1472 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1473 	}
1474 
1475 #if CONFIG_SECLUDED_MEMORY
1476 	/*
1477 	 * Figure out how much secluded memory to have before we start
1478 	 * releasing pages to the free lists.
1479 	 * The default, if specified nowhere else, is no secluded mem.
1480 	 */
1481 	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
1482 
1483 	/*
1484 	 * Allow a really large app to effectively use secluded memory until it exits.
1485 	 */
1486 	if (vm_page_secluded_target != 0) {
1487 		/*
1488 		 * Get an amount from boot-args, else use 1/2 of max_mem.
1489 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1490 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
1491 		 */
1492 		int secluded_shutoff_mb;
1493 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1494 		    sizeof(secluded_shutoff_mb))) {
1495 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1496 		} else {
1497 			secluded_shutoff_trigger = max_mem / 2;
1498 		}
1499 
1500 		/* ensure the headroom value is sensible and avoid underflows */
1501 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1502 	}
1503 
1504 #endif /* CONFIG_SECLUDED_MEMORY */
1505 
1506 #if defined(__x86_64__)
1507 
1508 	/*
1509 	 * Decide how much memory we delay freeing at boot time.
1510 	 */
1511 	uint32_t delay_above_gb;
1512 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1513 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1514 	}
1515 
1516 	if (delay_above_gb == 0) {
1517 		delay_above_pnum = PPNUM_MAX;
1518 	} else {
1519 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1520 	}
1521 
1522 	/* make sure we have sane breathing room: 1G above low memory */
1523 	if (delay_above_pnum <= max_valid_low_ppnum) {
1524 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1525 	}
1526 
1527 	if (delay_above_pnum < PPNUM_MAX) {
1528 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1529 	}
1530 
1531 #endif /* defined(__x86_64__) */
1532 
1533 	/*
1534 	 * Initialize and release the page frames.
1535 	 */
1536 	kernel_debug_string_early("page_frame_init");
1537 
1538 	vm_page_array_beginning_addr = &vm_pages[0];
1539 	vm_page_array_ending_addr = &vm_pages[npages];  /* used by ptr packing/unpacking code */
1540 #if VM_PAGE_PACKED_FROM_ARRAY
1541 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1542 		panic("pmap_startup(): too many pages to support vm_page packing");
1543 	}
1544 #endif
1545 
1546 	vm_delayed_count = 0;
1547 
1548 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1549 	vm_pages_count = 0;
1550 	for (i = 0; i < npages; i++) {
1551 		/* Did we run out of pages? */
1552 		if (!pmap_next_page(&phys_page)) {
1553 			break;
1554 		}
1555 
1556 		if (phys_page < max_valid_low_ppnum) {
1557 			++low_page_count;
1558 		}
1559 
1560 		/* Are we at high enough pages to delay the rest? */
1561 		if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1562 			vm_delayed_count = pmap_free_pages();
1563 			break;
1564 		}
1565 
1566 #if defined(__arm64__)
1567 		if (i == 0) {
1568 			vm_first_phys_ppnum = phys_page;
1569 			patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1570 			    (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1571 		}
1572 #endif /* defined(__arm64__) */
1573 
1574 #if defined(__x86_64__)
1575 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
1576 		if (i > 0) {
1577 			assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1578 		}
1579 #endif
1580 		++vm_pages_count;
1581 		vm_page_init(&vm_pages[i], phys_page, FALSE);
1582 		if (fill) {
1583 			fillPage(phys_page, fillval);
1584 		}
1585 		if (vm_himemory_mode) {
1586 			vm_page_release_startup(&vm_pages[i]);
1587 		}
1588 	}
1589 	vm_page_pages = vm_pages_count; /* used to report to user space */
1590 
1591 	if (!vm_himemory_mode) {
1592 		do {
1593 			if (!VMP_ERROR_GET(&vm_pages[--i])) {               /* skip retired pages */
1594 				vm_page_release_startup(&vm_pages[i]);
1595 			}
1596 		} while (i != 0);
1597 	}
1598 
1599 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1600 	printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1601 	printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1602 
1603 #if defined(__LP64__)
1604 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1605 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1606 	}
1607 
1608 	if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1609 		panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1610 	}
1611 #endif
1612 
1613 	VM_CHECK_MEMORYSTATUS;
1614 
1615 	/*
1616 	 * We have to re-align virtual_space_start,
1617 	 * because pmap_steal_memory has been using it.
1618 	 */
1619 	virtual_space_start = round_page(virtual_space_start);
1620 	*startp = virtual_space_start;
1621 	*endp = virtual_space_end;
1622 }
1623 #endif  /* MACHINE_PAGES */
1624 
1625 /*
1626  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
1627  * or frees to this zone. It's just here for reporting purposes via the zprint command.
1628  * This needs to be done after all initially delayed pages are put on the free lists.
1629  */
1630 static void
1631 vm_page_module_init_delayed(void)
1632 {
1633 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1634 	    ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1635 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1636 
1637 		zone_set_exhaustible(z, 0, true);
1638 		/*
1639 		 * Reflect size and usage information for vm_pages[].
1640 		 */
1641 
1642 		z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1643 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
1644 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1645 		vm_pages_count * sizeof(struct vm_page);
1646 		vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1647 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1648 		z->z_wired_cur += vm_page_zone_pages;
1649 		z->z_wired_hwm = z->z_wired_cur;
1650 		z->z_va_cur = z->z_wired_cur;
1651 		/* since zone accounts for these, take them out of stolen */
1652 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1653 	});
1654 }
1655 
1656 /*
1657  * Create the vm_pages zone. This is used for the vm_page structures for the pages
1658  * that are scavenged from other boot-time usages by ml_static_mfree(). As such,
1659  * this needs to happen in early VM bootstrap.
1660  */
1661 
1662 __startup_func
1663 static void
1664 vm_page_module_init(void)
1665 {
1666 	vm_size_t vm_page_with_ppnum_size;
1667 
1668 	/*
1669 	 * Since the pointers to elements in this zone will be packed, they
1670 	 * must have an appropriate size, which is not strictly what sizeof() reports.
1671 	 */
1672 	vm_page_with_ppnum_size =
1673 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1674 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
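	/*
	 * Illustrative sketch with hypothetical numbers: if sizeof(struct
	 * vm_page_with_ppnum) were 84 and VM_PAGE_PACKED_PTR_ALIGNMENT were
	 * 64, the round-up above computes (84 + 63) & ~63 = 128, so every
	 * element lands on a packable boundary.
	 */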
1675 
1676 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1677 	    ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NO_TBI_TAG,
1678 	    ZONE_ID_ANY, ^(zone_t z) {
1679 		/*
1680 		 * The number "10" is a small number that is larger than the number
1681 		 * of fictitious pages that any single caller will attempt to allocate
1682 		 * without blocking.
1683 		 *
1684 		 * The largest such request at the moment comes from kmem_alloc()
1685 		 * when it asks for 2 guard pages. 10 is simply a somewhat larger number,
1686 		 * taking into account the 50% hysteresis the zone allocator uses.
1687 		 *
1688 		 * Note: this works at all because the zone allocator
1689 		 *       doesn't ever allocate fictitious pages.
1690 		 */
1691 		zone_raise_reserve(z, 10);
1692 	});
1693 }
1694 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
1695 
1696 /*
1697  *	Routine:	vm_page_create
1698  *	Purpose:
1699  *		After the VM system is up, machine-dependent code
1700  *		may stumble across more physical memory.  For example,
1701  *		memory that it was reserving for a frame buffer.
1702  *		vm_page_create turns this memory into available pages.
1703  */
1704 
1705 void
1706 vm_page_create(
1707 	ppnum_t start,
1708 	ppnum_t end)
1709 {
1710 	ppnum_t         phys_page;
1711 	vm_page_t       m;
1712 
1713 	for (phys_page = start;
1714 	    phys_page < end;
1715 	    phys_page++) {
1716 		m = vm_page_grab_fictitious_common(phys_page, TRUE);
1717 		m->vmp_fictitious = FALSE;
1718 		pmap_clear_noencrypt(phys_page);
1719 
1720 
1721 		vm_free_page_lock();
1722 		vm_page_pages++;
1723 		vm_free_page_unlock();
1724 		vm_page_release(m, FALSE);
1725 	}
1726 }
1727 
1728 
1729 /*
1730  *	vm_page_hash:
1731  *
1732  *	Distributes the object/offset key pair among hash buckets.
1733  *
1734  *	NOTE:	The bucket count must be a power of 2
1735  */
1736 #define vm_page_hash(object, offset) (\
1737 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1738 	 & vm_page_hash_mask)
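/*
 * Illustrative note (assuming vm_page_hash_mask == bucket count - 1, as a
 * power-of-2 bucket count implies): the final "& vm_page_hash_mask" reduces
 * the mixed key modulo the bucket count only because the mask is all ones,
 * e.g. 1024 buckets give a mask of 0x3ff; with a non-power-of-2 count the
 * masking would leave some buckets unreachable.
 */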
1739 
1740 
1741 /*
1742  *	vm_page_insert:		[ internal use only ]
1743  *
1744  *	Inserts the given mem entry into the object/object-page
1745  *	table and object list.
1746  *
1747  *	The object must be locked.
1748  */
1749 void
1750 vm_page_insert(
1751 	vm_page_t               mem,
1752 	vm_object_t             object,
1753 	vm_object_offset_t      offset)
1754 {
1755 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1756 }
1757 
1758 void
1759 vm_page_insert_wired(
1760 	vm_page_t               mem,
1761 	vm_object_t             object,
1762 	vm_object_offset_t      offset,
1763 	vm_tag_t                tag)
1764 {
1765 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1766 }
1767 
1768 void
1769 vm_page_insert_internal(
1770 	vm_page_t               mem,
1771 	vm_object_t             object,
1772 	vm_object_offset_t      offset,
1773 	vm_tag_t                tag,
1774 	boolean_t               queues_lock_held,
1775 	boolean_t               insert_in_hash,
1776 	boolean_t               batch_pmap_op,
1777 	boolean_t               batch_accounting,
1778 	uint64_t                *delayed_ledger_update)
1779 {
1780 	vm_page_bucket_t        *bucket;
1781 	lck_spin_t              *bucket_lock;
1782 	int                     hash_id;
1783 	task_t                  owner;
1784 	int                     ledger_idx_volatile;
1785 	int                     ledger_idx_nonvolatile;
1786 	int                     ledger_idx_volatile_compressed;
1787 	int                     ledger_idx_nonvolatile_compressed;
1788 	int                     ledger_idx_composite;
1789 	int                     ledger_idx_external_wired;
1790 	boolean_t               do_footprint;
1791 
1792 #if 0
1793 	/*
1794 	 * we may not hold the page queue lock
1795 	 * so this check isn't safe to make
1796 	 */
1797 	VM_PAGE_CHECK(mem);
1798 #endif
1799 
1800 	assertf(page_aligned(offset), "0x%llx\n", offset);
1801 
1802 	assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1803 
1804 	vm_object_lock_assert_exclusive(object);
1805 	LCK_MTX_ASSERT(&vm_page_queue_lock,
1806 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
1807 	    : LCK_MTX_ASSERT_NOTOWNED);
1808 
1809 	if (queues_lock_held == FALSE) {
1810 		assert(!VM_PAGE_PAGEABLE(mem));
1811 	}
1812 
1813 	if (insert_in_hash == TRUE) {
1814 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1815 		if (mem->vmp_tabled || mem->vmp_object) {
1816 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1817 			    "already in (obj=%p,off=0x%llx)",
1818 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1819 		}
1820 #endif
1821 		if (object->internal && (offset >= object->vo_size)) {
1822 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1823 			    mem, object, offset, object->vo_size);
1824 		}
1825 
1826 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1827 
1828 		/*
1829 		 *	Record the object/offset pair in this page
1830 		 */
1831 
1832 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1833 		mem->vmp_offset = offset;
1834 
1835 #if CONFIG_SECLUDED_MEMORY
1836 		if (object->eligible_for_secluded) {
1837 			vm_page_secluded.eligible_for_secluded++;
1838 		}
1839 #endif /* CONFIG_SECLUDED_MEMORY */
1840 
1841 		/*
1842 		 *	Insert it into the object_object/offset hash table
1843 		 */
1844 		hash_id = vm_page_hash(object, offset);
1845 		bucket = &vm_page_buckets[hash_id];
1846 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1847 
1848 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1849 
1850 		mem->vmp_next_m = bucket->page_list;
1851 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
1852 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1853 
1854 #if     MACH_PAGE_HASH_STATS
1855 		if (++bucket->cur_count > bucket->hi_count) {
1856 			bucket->hi_count = bucket->cur_count;
1857 		}
1858 #endif /* MACH_PAGE_HASH_STATS */
1859 		mem->vmp_hashed = TRUE;
1860 		lck_spin_unlock(bucket_lock);
1861 	}
1862 
1863 	{
1864 		unsigned int    cache_attr;
1865 
1866 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
1867 
1868 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
1869 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1870 		}
1871 	}
1872 	/*
1873 	 *	Now link into the object's list of backed pages.
1874 	 */
1875 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
1876 	object->memq_hint = mem;
1877 	mem->vmp_tabled = TRUE;
1878 
1879 	/*
1880 	 *	Show that the object has one more resident page.
1881 	 */
1882 
1883 	object->resident_page_count++;
1884 	if (VM_PAGE_WIRED(mem)) {
1885 		assert(mem->vmp_wire_count > 0);
1886 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1887 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1888 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1889 	}
1890 	assert(object->resident_page_count >= object->wired_page_count);
1891 
1892 #if DEVELOPMENT || DEBUG
1893 	if (object->object_is_shared_cache &&
1894 	    object->pager != NULL &&
1895 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1896 		int new, old;
1897 		assert(!object->internal);
1898 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1899 		do {
1900 			old = shared_region_pagers_resident_peak;
1901 		} while (old < new &&
1902 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1903 	}
1904 #endif /* DEVELOPMENT || DEBUG */
1905 
1906 	if (batch_accounting == FALSE) {
1907 		if (object->internal) {
1908 			OSAddAtomic(1, &vm_page_internal_count);
1909 		} else {
1910 			OSAddAtomic(1, &vm_page_external_count);
1911 		}
1912 	}
1913 
1914 	/*
1915 	 * It wouldn't make sense to insert a "reusable" page in
1916 	 * an object (the page would have been marked "reusable" only
1917 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1918 	 * in the object at that time).
1919 	 * But a page could be inserted in an "all_reusable" object, if
1920 	 * something faults it in (a vm_read() from another task or a
1921 	 * "use-after-free" issue in user space, for example).  It can
1922 	 * also happen if we're relocating a page from that object to
1923 	 * a different physical page during a physically-contiguous
1924 	 * allocation.
1925 	 */
1926 	assert(!mem->vmp_reusable);
1927 	if (object->all_reusable) {
1928 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1929 	}
1930 
1931 	if (object->purgable == VM_PURGABLE_DENY &&
1932 	    !object->vo_ledger_tag) {
1933 		owner = TASK_NULL;
1934 	} else {
1935 		owner = VM_OBJECT_OWNER(object);
1936 		vm_object_ledger_tag_ledgers(object,
1937 		    &ledger_idx_volatile,
1938 		    &ledger_idx_nonvolatile,
1939 		    &ledger_idx_volatile_compressed,
1940 		    &ledger_idx_nonvolatile_compressed,
1941 		    &ledger_idx_composite,
1942 		    &ledger_idx_external_wired,
1943 		    &do_footprint);
1944 	}
1945 	if (owner &&
1946 	    object->internal &&
1947 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
1948 	    object->purgable == VM_PURGABLE_DENY ||
1949 	    VM_PAGE_WIRED(mem))) {
1950 		if (delayed_ledger_update) {
1951 			*delayed_ledger_update += PAGE_SIZE;
1952 		} else {
1953 			/* more non-volatile bytes */
1954 			ledger_credit(owner->ledger,
1955 			    ledger_idx_nonvolatile,
1956 			    PAGE_SIZE);
1957 			if (do_footprint) {
1958 				/* more footprint */
1959 				ledger_credit(owner->ledger,
1960 				    task_ledgers.phys_footprint,
1961 				    PAGE_SIZE);
1962 			} else if (ledger_idx_composite != -1) {
1963 				ledger_credit(owner->ledger,
1964 				    ledger_idx_composite,
1965 				    PAGE_SIZE);
1966 			}
1967 		}
1968 	} else if (owner &&
1969 	    object->internal &&
1970 	    (object->purgable == VM_PURGABLE_VOLATILE ||
1971 	    object->purgable == VM_PURGABLE_EMPTY)) {
1972 		assert(!VM_PAGE_WIRED(mem));
1973 		/* more volatile bytes */
1974 		ledger_credit(owner->ledger,
1975 		    ledger_idx_volatile,
1976 		    PAGE_SIZE);
1977 	}
1978 
1979 	if (object->purgable == VM_PURGABLE_VOLATILE) {
1980 		if (VM_PAGE_WIRED(mem)) {
1981 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1982 		} else {
1983 			OSAddAtomic(+1, &vm_page_purgeable_count);
1984 		}
1985 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
1986 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1987 		/*
1988 		 * This page belongs to a purged VM object but hasn't
1989 		 * been purged (because it was "busy").
1990 		 * It's in the "throttled" queue and hence not
1991 		 * visible to vm_pageout_scan().  Move it to a pageable
1992 		 * queue, so that it can eventually be reclaimed, instead
1993 		 * of lingering in the "empty" object.
1994 		 */
1995 		if (queues_lock_held == FALSE) {
1996 			vm_page_lockspin_queues();
1997 		}
1998 		vm_page_deactivate(mem);
1999 		if (queues_lock_held == FALSE) {
2000 			vm_page_unlock_queues();
2001 		}
2002 	}
2003 
2004 #if VM_OBJECT_TRACKING_OP_MODIFIED
2005 	if (vm_object_tracking_btlog &&
2006 	    object->internal &&
2007 	    object->resident_page_count == 0 &&
2008 	    object->pager == NULL &&
2009 	    object->shadow != NULL &&
2010 	    object->shadow->vo_copy == object) {
2011 		btlog_record(vm_object_tracking_btlog, object,
2012 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2013 		    btref_get(__builtin_frame_address(0), 0));
2014 	}
2015 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2016 }
2017 
2018 /*
2019  *	vm_page_replace:
2020  *
2021  *	Exactly like vm_page_insert, except that we first
2022  *	remove any existing page at the given offset in object.
2023  *
2024  *	The object must be locked.
2025  */
2026 void
2027 vm_page_replace(
2028 	vm_page_t               mem,
2029 	vm_object_t             object,
2030 	vm_object_offset_t      offset)
2031 {
2032 	vm_page_bucket_t *bucket;
2033 	vm_page_t        found_m = VM_PAGE_NULL;
2034 	lck_spin_t      *bucket_lock;
2035 	int             hash_id;
2036 
2037 #if 0
2038 	/*
2039 	 * we don't hold the page queue lock
2040 	 * so this check isn't safe to make
2041 	 */
2042 	VM_PAGE_CHECK(mem);
2043 #endif
2044 	vm_object_lock_assert_exclusive(object);
2045 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2046 	if (mem->vmp_tabled || mem->vmp_object) {
2047 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2048 		    "already in (obj=%p,off=0x%llx)",
2049 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2050 	}
2051 #endif
2052 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2053 
2054 	assert(!VM_PAGE_PAGEABLE(mem));
2055 
2056 	/*
2057 	 *	Record the object/offset pair in this page
2058 	 */
2059 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2060 	mem->vmp_offset = offset;
2061 
2062 	/*
2063 	 *	Insert it into the object_object/offset hash table,
2064 	 *	replacing any page that might have been there.
2065 	 */
2066 
2067 	hash_id = vm_page_hash(object, offset);
2068 	bucket = &vm_page_buckets[hash_id];
2069 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2070 
2071 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2072 
2073 	if (bucket->page_list) {
2074 		vm_page_packed_t *mp = &bucket->page_list;
2075 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2076 
2077 		do {
2078 			/*
2079 			 * compare packed object pointers
2080 			 */
2081 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2082 				/*
2083 				 * Remove old page from hash list
2084 				 */
2085 				*mp = m->vmp_next_m;
2086 				m->vmp_hashed = FALSE;
2087 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2088 
2089 				found_m = m;
2090 				break;
2091 			}
2092 			mp = &m->vmp_next_m;
2093 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2094 
2095 		mem->vmp_next_m = bucket->page_list;
2096 	} else {
2097 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2098 	}
2099 	/*
2100 	 * insert new page at head of hash list
2101 	 */
2102 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2103 	mem->vmp_hashed = TRUE;
2104 
2105 	lck_spin_unlock(bucket_lock);
2106 
2107 	if (found_m) {
2108 		/*
2109 		 * there was already a page at the specified
2110 		 * offset for this object... remove it from
2111 		 * the object and free it back to the free list
2112 		 */
2113 		vm_page_free_unlocked(found_m, FALSE);
2114 	}
2115 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2116 }
2117 
2118 /*
2119  *	vm_page_remove:		[ internal use only ]
2120  *
2121  *	Removes the given mem entry from the object/offset-page
2122  *	table and the object page list.
2123  *
2124  *	The object must be locked.
2125  */
2126 
2127 void
2128 vm_page_remove(
2129 	vm_page_t       mem,
2130 	boolean_t       remove_from_hash)
2131 {
2132 	vm_page_bucket_t *bucket;
2133 	vm_page_t       this;
2134 	lck_spin_t      *bucket_lock;
2135 	int             hash_id;
2136 	task_t          owner;
2137 	vm_object_t     m_object;
2138 	int             ledger_idx_volatile;
2139 	int             ledger_idx_nonvolatile;
2140 	int             ledger_idx_volatile_compressed;
2141 	int             ledger_idx_nonvolatile_compressed;
2142 	int             ledger_idx_composite;
2143 	int             ledger_idx_external_wired;
2144 	int             do_footprint;
2145 
2146 	m_object = VM_PAGE_OBJECT(mem);
2147 
2148 	vm_object_lock_assert_exclusive(m_object);
2149 	assert(mem->vmp_tabled);
2150 	assert(!mem->vmp_cleaning);
2151 	assert(!mem->vmp_laundry);
2152 
2153 	if (VM_PAGE_PAGEABLE(mem)) {
2154 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2155 	}
2156 #if 0
2157 	/*
2158 	 * we don't hold the page queue lock
2159 	 * so this check isn't safe to make
2160 	 */
2161 	VM_PAGE_CHECK(mem);
2162 #endif
2163 	if (remove_from_hash == TRUE) {
2164 		/*
2165 		 *	Remove from the object_object/offset hash table
2166 		 */
2167 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2168 		bucket = &vm_page_buckets[hash_id];
2169 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2170 
2171 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2172 
2173 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2174 			/* optimize for common case */
2175 
2176 			bucket->page_list = mem->vmp_next_m;
2177 		} else {
2178 			vm_page_packed_t        *prev;
2179 
2180 			for (prev = &this->vmp_next_m;
2181 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2182 			    prev = &this->vmp_next_m) {
2183 				continue;
2184 			}
2185 			*prev = this->vmp_next_m;
2186 		}
2187 #if     MACH_PAGE_HASH_STATS
2188 		bucket->cur_count--;
2189 #endif /* MACH_PAGE_HASH_STATS */
2190 		mem->vmp_hashed = FALSE;
2191 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2192 		lck_spin_unlock(bucket_lock);
2193 	}
2194 	/*
2195 	 *	Now remove from the object's list of backed pages.
2196 	 */
2197 
2198 	vm_page_remove_internal(mem);
2199 
2200 	/*
2201 	 *	And show that the object has one fewer resident
2202 	 *	page.
2203 	 */
2204 
2205 	assert(m_object->resident_page_count > 0);
2206 	m_object->resident_page_count--;
2207 
2208 #if DEVELOPMENT || DEBUG
2209 	if (m_object->object_is_shared_cache &&
2210 	    m_object->pager != NULL &&
2211 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2212 		assert(!m_object->internal);
2213 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
2214 	}
2215 #endif /* DEVELOPMENT || DEBUG */
2216 
2217 	if (m_object->internal) {
2218 #if DEBUG
2219 		assert(vm_page_internal_count);
2220 #endif /* DEBUG */
2221 
2222 		OSAddAtomic(-1, &vm_page_internal_count);
2223 	} else {
2224 		assert(vm_page_external_count);
2225 		OSAddAtomic(-1, &vm_page_external_count);
2226 
2227 		if (mem->vmp_xpmapped) {
2228 			assert(vm_page_xpmapped_external_count);
2229 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2230 		}
2231 	}
2232 	if (!m_object->internal &&
2233 	    m_object->cached_list.next &&
2234 	    m_object->cached_list.prev) {
2235 		if (m_object->resident_page_count == 0) {
2236 			vm_object_cache_remove(m_object);
2237 		}
2238 	}
2239 
2240 	if (VM_PAGE_WIRED(mem)) {
2241 		assert(mem->vmp_wire_count > 0);
2242 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2243 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2244 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2245 	}
2246 	assert(m_object->resident_page_count >=
2247 	    m_object->wired_page_count);
2248 	if (mem->vmp_reusable) {
2249 		assert(m_object->reusable_page_count > 0);
2250 		m_object->reusable_page_count--;
2251 		assert(m_object->reusable_page_count <=
2252 		    m_object->resident_page_count);
2253 		mem->vmp_reusable = FALSE;
2254 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2255 		vm_page_stats_reusable.reused_remove++;
2256 	} else if (m_object->all_reusable) {
2257 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2258 		vm_page_stats_reusable.reused_remove++;
2259 	}
2260 
2261 	if (m_object->purgable == VM_PURGABLE_DENY &&
2262 	    !m_object->vo_ledger_tag) {
2263 		owner = TASK_NULL;
2264 	} else {
2265 		owner = VM_OBJECT_OWNER(m_object);
2266 		vm_object_ledger_tag_ledgers(m_object,
2267 		    &ledger_idx_volatile,
2268 		    &ledger_idx_nonvolatile,
2269 		    &ledger_idx_volatile_compressed,
2270 		    &ledger_idx_nonvolatile_compressed,
2271 		    &ledger_idx_composite,
2272 		    &ledger_idx_external_wired,
2273 		    &do_footprint);
2274 	}
2275 	if (owner &&
2276 	    m_object->internal &&
2277 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2278 	    m_object->purgable == VM_PURGABLE_DENY ||
2279 	    VM_PAGE_WIRED(mem))) {
2280 		/* less non-volatile bytes */
2281 		ledger_debit(owner->ledger,
2282 		    ledger_idx_nonvolatile,
2283 		    PAGE_SIZE);
2284 		if (do_footprint) {
2285 			/* less footprint */
2286 			ledger_debit(owner->ledger,
2287 			    task_ledgers.phys_footprint,
2288 			    PAGE_SIZE);
2289 		} else if (ledger_idx_composite != -1) {
2290 			ledger_debit(owner->ledger,
2291 			    ledger_idx_composite,
2292 			    PAGE_SIZE);
2293 		}
2294 	} else if (owner &&
2295 	    m_object->internal &&
2296 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
2297 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
2298 		assert(!VM_PAGE_WIRED(mem));
2299 		/* less volatile bytes */
2300 		ledger_debit(owner->ledger,
2301 		    ledger_idx_volatile,
2302 		    PAGE_SIZE);
2303 	}
2304 
2305 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2306 		if (VM_PAGE_WIRED(mem)) {
2307 			assert(vm_page_purgeable_wired_count > 0);
2308 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2309 		} else {
2310 			assert(vm_page_purgeable_count > 0);
2311 			OSAddAtomic(-1, &vm_page_purgeable_count);
2312 		}
2313 	}
2314 
2315 	if (m_object->set_cache_attr == TRUE) {
2316 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2317 	}
2318 
2319 	mem->vmp_tabled = FALSE;
2320 	mem->vmp_object = 0;
2321 	mem->vmp_offset = (vm_object_offset_t) -1;
2322 }
2323 
2324 
2325 /*
2326  *	vm_page_lookup:
2327  *
2328  *	Returns the page associated with the object/offset
2329  *	pair specified; if none is found, VM_PAGE_NULL is returned.
2330  *
2331  *	The object must be locked.  No side effects.
2332  */
2333 
2334 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
2335 
2336 #if DEBUG_VM_PAGE_LOOKUP
2337 
2338 struct {
2339 	uint64_t        vpl_total;
2340 	uint64_t        vpl_empty_obj;
2341 	uint64_t        vpl_bucket_NULL;
2342 	uint64_t        vpl_hit_hint;
2343 	uint64_t        vpl_hit_hint_next;
2344 	uint64_t        vpl_hit_hint_prev;
2345 	uint64_t        vpl_fast;
2346 	uint64_t        vpl_slow;
2347 	uint64_t        vpl_hit;
2348 	uint64_t        vpl_miss;
2349 
2350 	uint64_t        vpl_fast_elapsed;
2351 	uint64_t        vpl_slow_elapsed;
2352 } vm_page_lookup_stats __attribute__((aligned(8)));
2353 
2354 #endif
2355 
2356 #define KDP_VM_PAGE_WALK_MAX    1000
2357 
2358 vm_page_t
2359 kdp_vm_page_lookup(
2360 	vm_object_t             object,
2361 	vm_object_offset_t      offset)
2362 {
2363 	vm_page_t cur_page;
2364 	int num_traversed = 0;
2365 
2366 	if (not_in_kdp) {
2367 		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2368 	}
2369 
2370 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2371 		if (cur_page->vmp_offset == offset) {
2372 			return cur_page;
2373 		}
2374 		num_traversed++;
2375 
2376 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2377 			return VM_PAGE_NULL;
2378 		}
2379 	}
2380 
2381 	return VM_PAGE_NULL;
2382 }
2383 
2384 vm_page_t
2385 vm_page_lookup(
2386 	vm_object_t             object,
2387 	vm_object_offset_t      offset)
2388 {
2389 	vm_page_t       mem;
2390 	vm_page_bucket_t *bucket;
2391 	vm_page_queue_entry_t   qe;
2392 	lck_spin_t      *bucket_lock = NULL;
2393 	int             hash_id;
2394 #if DEBUG_VM_PAGE_LOOKUP
2395 	uint64_t        start, elapsed;
2396 
2397 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2398 #endif
2399 
2400 	if (VM_KERNEL_ADDRESS(offset)) {
2401 		offset = VM_KERNEL_STRIP_UPTR(offset);
2402 	}
2403 
2404 	vm_object_lock_assert_held(object);
2405 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2406 
2407 	if (object->resident_page_count == 0) {
2408 #if DEBUG_VM_PAGE_LOOKUP
2409 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2410 #endif
2411 		return VM_PAGE_NULL;
2412 	}
2413 
2414 	mem = object->memq_hint;
2415 
2416 	if (mem != VM_PAGE_NULL) {
2417 		assert(VM_PAGE_OBJECT(mem) == object);
2418 
2419 		if (mem->vmp_offset == offset) {
2420 #if DEBUG_VM_PAGE_LOOKUP
2421 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2422 #endif
2423 			return mem;
2424 		}
2425 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2426 
2427 		if (!vm_page_queue_end(&object->memq, qe)) {
2428 			vm_page_t       next_page;
2429 
2430 			next_page = (vm_page_t)((uintptr_t)qe);
2431 			assert(VM_PAGE_OBJECT(next_page) == object);
2432 
2433 			if (next_page->vmp_offset == offset) {
2434 				object->memq_hint = next_page; /* new hint */
2435 #if DEBUG_VM_PAGE_LOOKUP
2436 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2437 #endif
2438 				return next_page;
2439 			}
2440 		}
2441 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2442 
2443 		if (!vm_page_queue_end(&object->memq, qe)) {
2444 			vm_page_t prev_page;
2445 
2446 			prev_page = (vm_page_t)((uintptr_t)qe);
2447 			assert(VM_PAGE_OBJECT(prev_page) == object);
2448 
2449 			if (prev_page->vmp_offset == offset) {
2450 				object->memq_hint = prev_page; /* new hint */
2451 #if DEBUG_VM_PAGE_LOOKUP
2452 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2453 #endif
2454 				return prev_page;
2455 			}
2456 		}
2457 	}
2458 	/*
2459 	 * Search the hash table for this object/offset pair
2460 	 */
2461 	hash_id = vm_page_hash(object, offset);
2462 	bucket = &vm_page_buckets[hash_id];
2463 
2464 	/*
2465 	 * since we hold the object lock, we are guaranteed that no
2466 	 * new pages can be inserted into this object... this in turn
2467 	 * guarantees that the page we're looking for can't exist
2468 	 * if the bucket it hashes to is currently NULL even when looked
2469 	 * at outside the scope of the hash bucket lock... this is a
2470 	 * really cheap optimization to avoid taking the lock
2471 	 */
2472 	if (!bucket->page_list) {
2473 #if DEBUG_VM_PAGE_LOOKUP
2474 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2475 #endif
2476 		return VM_PAGE_NULL;
2477 	}
2478 
2479 #if DEBUG_VM_PAGE_LOOKUP
2480 	start = mach_absolute_time();
2481 #endif
2482 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2483 		/*
2484 		 * on average, it's roughly 3 times faster to run a short memq list
2485 		 * than to take the spin lock and go through the hash list
2486 		 */
2487 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
2488 
2489 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2490 			if (mem->vmp_offset == offset) {
2491 				break;
2492 			}
2493 
2494 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2495 		}
2496 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2497 			mem = NULL;
2498 		}
2499 	} else {
2500 		vm_page_object_t        packed_object;
2501 
2502 		packed_object = VM_PAGE_PACK_OBJECT(object);
2503 
2504 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2505 
2506 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2507 
2508 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2509 		    mem != VM_PAGE_NULL;
2510 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2511 #if 0
2512 			/*
2513 			 * we don't hold the page queue lock
2514 			 * so this check isn't safe to make
2515 			 */
2516 			VM_PAGE_CHECK(mem);
2517 #endif
2518 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2519 				break;
2520 			}
2521 		}
2522 		lck_spin_unlock(bucket_lock);
2523 	}
2524 
2525 #if DEBUG_VM_PAGE_LOOKUP
2526 	elapsed = mach_absolute_time() - start;
2527 
2528 	if (bucket_lock) {
2529 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2530 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2531 	} else {
2532 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2533 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2534 	}
2535 	if (mem != VM_PAGE_NULL) {
2536 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2537 	} else {
2538 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2539 	}
2540 #endif
2541 	if (mem != VM_PAGE_NULL) {
2542 		assert(VM_PAGE_OBJECT(mem) == object);
2543 
2544 		object->memq_hint = mem;
2545 	}
2546 	return mem;
2547 }
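/*
 * Usage sketch (illustrative only, not part of this file's call graph): a
 * typical caller holds the object lock across the lookup and re-checks the
 * page state before touching it, e.g.
 *
 *	vm_object_lock(object);
 *	m = vm_page_lookup(object, offset);
 *	if (m != VM_PAGE_NULL && !m->vmp_busy) {
 *		... use m while the object stays locked ...
 *	}
 *	vm_object_unlock(object);
 */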
2548 
2549 
2550 /*
2551  *	vm_page_rename:
2552  *
2553  *	Move the given memory entry from its
2554  *	current object to the specified target object/offset.
2555  *
2556  *	The object must be locked.
2557  */
2558 void
2559 vm_page_rename(
2560 	vm_page_t               mem,
2561 	vm_object_t             new_object,
2562 	vm_object_offset_t      new_offset)
2563 {
2564 	boolean_t       internal_to_external, external_to_internal;
2565 	vm_tag_t        tag;
2566 	vm_object_t     m_object;
2567 
2568 	m_object = VM_PAGE_OBJECT(mem);
2569 
2570 	assert(m_object != new_object);
2571 	assert(m_object);
2572 
2573 	/*
2574 	 *	Changes to mem->vmp_object require the page lock because
2575 	 *	the pageout daemon uses that lock to get the object.
2576 	 */
2577 	vm_page_lockspin_queues();
2578 
2579 	internal_to_external = FALSE;
2580 	external_to_internal = FALSE;
2581 
2582 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2583 		/*
2584 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
2585 		 * if we first move the page to the active queue... it's going to end
2586 		 * up there anyway, and we don't do vm_page_rename's frequently enough
2587 		 * for this to matter.
2588 		 */
2589 		vm_page_queues_remove(mem, FALSE);
2590 		vm_page_activate(mem);
2591 	}
2592 	if (VM_PAGE_PAGEABLE(mem)) {
2593 		if (m_object->internal && !new_object->internal) {
2594 			internal_to_external = TRUE;
2595 		}
2596 		if (!m_object->internal && new_object->internal) {
2597 			external_to_internal = TRUE;
2598 		}
2599 	}
2600 
2601 	tag = m_object->wire_tag;
2602 	vm_page_remove(mem, TRUE);
2603 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2604 
2605 	if (internal_to_external) {
2606 		vm_page_pageable_internal_count--;
2607 		vm_page_pageable_external_count++;
2608 	} else if (external_to_internal) {
2609 		vm_page_pageable_external_count--;
2610 		vm_page_pageable_internal_count++;
2611 	}
2612 
2613 	vm_page_unlock_queues();
2614 }
2615 
2616 /*
2617  *	vm_page_init:
2618  *
2619  *	Initialize the fields in a new page.
2620  *	This takes a structure with random values and initializes it
2621  *	so that it can be given to vm_page_release or vm_page_insert.
2622  */
2623 void
2624 vm_page_init(
2625 	vm_page_t mem,
2626 	ppnum_t   phys_page,
2627 	boolean_t lopage)
2628 {
2629 	uint_t    i;
2630 	uintptr_t *p;
2631 
2632 	assert(phys_page);
2633 
2634 #if DEBUG
2635 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2636 		if (!(pmap_valid_page(phys_page))) {
2637 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2638 		}
2639 	}
2640 #endif /* DEBUG */
2641 
2642 	/*
2643 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2644 	 * try to use initial values which match 0. This minimizes the number of writes
2645 	 * needed for boot-time initialization.
2646 	 *
2647 	 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2648 	 */
2649 	assert(VM_PAGE_NOT_ON_Q == 0);
2650 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2651 	for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2652 		*p++ = 0;
2653 	}
2654 	mem->vmp_offset = (vm_object_offset_t)-1;
2655 	mem->vmp_busy = TRUE;
2656 	mem->vmp_lopage = lopage;
2657 
2658 	VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2659 #if 0
2660 	/*
2661 	 * we're leaving this turned off for now... currently pages
2662 	 * come off the free list and are either immediately dirtied/referenced
2663 	 * due to zero-fill or COW faults, or are used to read or write files...
2664 	 * in the file I/O case, the UPL mechanism takes care of clearing
2665 	 * the state of the HW ref/mod bits in a somewhat fragile way.
2666 	 * Since we may change the way this works in the future (to toughen it up),
2667 	 * I'm leaving this as a reminder of where these bits could get cleared
2668 	 */
2669 
2670 	/*
2671 	 * make sure both the h/w referenced and modified bits are
2672 	 * clear at this point... we are especially dependent on
2673 	 * not finding a 'stale' h/w modified in a number of spots
2674 	 * once this page goes back into use
2675 	 */
2676 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2677 #endif
2678 }
2679 
2680 /*
2681  *	vm_page_grab_fictitious:
2682  *
2683  *	Allocate a fictitious page from the vm_page_zone.
2684  *	Returns VM_PAGE_NULL if the allocation fails (non-blocking case).
2685  */
2686 
2687 static vm_page_t
2688 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2689 {
2690 	vm_page_t m;
2691 
2692 	m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2693 	if (m) {
2694 		vm_page_init(m, phys_addr, FALSE);
2695 		m->vmp_fictitious = TRUE;
2696 	}
2697 	return m;
2698 }
2699 
2700 vm_page_t
2701 vm_page_grab_fictitious(boolean_t canwait)
2702 {
2703 	return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2704 }
2705 
2706 int vm_guard_count;
2707 
2708 
2709 vm_page_t
2710 vm_page_grab_guard(boolean_t canwait)
2711 {
2712 	vm_page_t page;
2713 	page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2714 	if (page) {
2715 		OSAddAtomic(1, &vm_guard_count);
2716 	}
2717 	return page;
2718 }
2719 
2720 
2721 /*
2722  *	vm_page_release_fictitious:
2723  *
2724  *	Release a fictitious page to the zone pool
2725  */
2726 void
2727 vm_page_release_fictitious(
2728 	vm_page_t m)
2729 {
2730 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2731 	assert(m->vmp_fictitious);
2732 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2733 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2734 	assert(!m->vmp_realtime);
2735 
2736 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2737 		OSAddAtomic(-1, &vm_guard_count);
2738 	}
2739 
2740 	zfree(vm_page_zone, m);
2741 }
2742 
2743 /*
2744  *	vm_pool_low():
2745  *
2746  *	Return true if it is not likely that a non-vm_privileged thread
2747  *	can get memory without blocking.  Advisory only, since the
2748  *	situation may change under us.
2749  */
2750 bool
2751 vm_pool_low(void)
2752 {
2753 	/* No locking, at worst we will fib. */
2754 	return vm_page_free_count <= vm_page_free_reserved;
2755 }
2756 
2757 boolean_t vm_darkwake_mode = FALSE;
2758 
2759 /*
2760  * vm_update_darkwake_mode():
2761  *
2762  * Tells the VM that the system is in / out of darkwake.
2763  *
2764  * Today, the VM only lowers/raises the background queue target
2765  * so as to favor consuming more/less background pages when
2766  * darkwake is ON/OFF.
2767  *
2768  * We might need to do more things in the future.
2769  */
2770 
2771 void
2772 vm_update_darkwake_mode(boolean_t darkwake_mode)
2773 {
2774 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2775 #pragma unused(darkwake_mode)
2776 	assert(vm_darkwake_mode == FALSE);
2777 	/*
2778 	 * Darkwake mode isn't supported for AS macOS.
2779 	 */
2780 	return;
2781 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2782 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2783 
2784 	vm_page_lockspin_queues();
2785 
2786 	if (vm_darkwake_mode == darkwake_mode) {
2787 		/*
2788 		 * No change.
2789 		 */
2790 		vm_page_unlock_queues();
2791 		return;
2792 	}
2793 
2794 	vm_darkwake_mode = darkwake_mode;
2795 
2796 	if (vm_darkwake_mode == TRUE) {
2797 		/* save background target to restore later */
2798 		vm_page_background_target_snapshot = vm_page_background_target;
2799 
2800 		/* target is set to 0...no protection for background pages */
2801 		vm_page_background_target = 0;
2802 	} else if (vm_darkwake_mode == FALSE) {
2803 		if (vm_page_background_target_snapshot) {
2804 			vm_page_background_target = vm_page_background_target_snapshot;
2805 		}
2806 	}
2807 	vm_page_unlock_queues();
2808 #endif
2809 }
2810 
2811 void
2812 vm_page_update_special_state(vm_page_t mem)
2813 {
2814 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2815 		return;
2816 	}
2817 
2818 	int mode = mem->vmp_on_specialq;
2819 
2820 	switch (mode) {
2821 	case VM_PAGE_SPECIAL_Q_BG:
2822 	{
2823 		task_t  my_task = current_task_early();
2824 
2825 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2826 			return;
2827 		}
2828 
2829 		if (my_task) {
2830 			if (task_get_darkwake_mode(my_task)) {
2831 				return;
2832 			}
2833 		}
2834 
2835 		if (my_task) {
2836 			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2837 				return;
2838 			}
2839 		}
2840 		vm_page_lockspin_queues();
2841 
2842 		vm_page_background_promoted_count++;
2843 
2844 		vm_page_remove_from_specialq(mem);
2845 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2846 
2847 		vm_page_unlock_queues();
2848 		break;
2849 	}
2850 
2851 	case VM_PAGE_SPECIAL_Q_DONATE:
2852 	{
2853 		task_t  my_task = current_task_early();
2854 
2855 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2856 			return;
2857 		}
2858 
2859 		if (my_task->donates_own_pages == false) {
2860 			vm_page_lockspin_queues();
2861 
2862 			vm_page_remove_from_specialq(mem);
2863 			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2864 
2865 			vm_page_unlock_queues();
2866 		}
2867 		break;
2868 	}
2869 
2870 	default:
2871 	{
2872 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2873 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2874 		break;
2875 	}
2876 	}
2877 }
2878 
2879 
2880 void
2881 vm_page_assign_special_state(vm_page_t mem, int mode)
2882 {
2883 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2884 		return;
2885 	}
2886 
2887 	switch (mode) {
2888 	case VM_PAGE_SPECIAL_Q_BG:
2889 	{
2890 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2891 			return;
2892 		}
2893 
2894 		task_t  my_task = current_task_early();
2895 
2896 		if (my_task) {
2897 			if (task_get_darkwake_mode(my_task)) {
2898 				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2899 				return;
2900 			}
2901 		}
2902 
2903 		if (my_task) {
2904 			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2905 		}
2906 		break;
2907 	}
2908 
2909 	case VM_PAGE_SPECIAL_Q_DONATE:
2910 	{
2911 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2912 			return;
2913 		}
2914 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2915 		break;
2916 	}
2917 
2918 	default:
2919 		break;
2920 	}
2921 }
2922 
2923 
2924 void
2925 vm_page_remove_from_specialq(
2926 	vm_page_t       mem)
2927 {
2928 	vm_object_t     m_object;
2929 	unsigned short  mode;
2930 
2931 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2932 
2933 	mode = mem->vmp_on_specialq;
2934 
2935 	switch (mode) {
2936 	case VM_PAGE_SPECIAL_Q_BG:
2937 	{
2938 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2939 			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2940 
2941 			mem->vmp_specialq.next = 0;
2942 			mem->vmp_specialq.prev = 0;
2943 
2944 			vm_page_background_count--;
2945 
2946 			m_object = VM_PAGE_OBJECT(mem);
2947 
2948 			if (m_object->internal) {
2949 				vm_page_background_internal_count--;
2950 			} else {
2951 				vm_page_background_external_count--;
2952 			}
2953 		}
2954 		break;
2955 	}
2956 
2957 	case VM_PAGE_SPECIAL_Q_DONATE:
2958 	{
2959 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2960 			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2961 			mem->vmp_specialq.next = 0;
2962 			mem->vmp_specialq.prev = 0;
2963 			vm_page_donate_count--;
2964 			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2965 				assert(vm_page_donate_target == vm_page_donate_target_low);
2966 				vm_page_donate_target = vm_page_donate_target_high;
2967 				vm_page_donate_queue_ripe = false;
2968 			}
2969 		}
2970 
2971 		break;
2972 	}
2973 
2974 	default:
2975 	{
2976 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2977 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2978 		break;
2979 	}
2980 	}
2981 }
2982 
2983 
2984 void
2985 vm_page_add_to_specialq(
2986 	vm_page_t       mem,
2987 	boolean_t       first)
2988 {
2989 	vm_object_t     m_object;
2990 
2991 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2992 
2993 	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2994 		return;
2995 	}
2996 
2997 	int mode = mem->vmp_on_specialq;
2998 
2999 	switch (mode) {
3000 	case VM_PAGE_SPECIAL_Q_BG:
3001 	{
3002 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
3003 			return;
3004 		}
3005 
3006 		m_object = VM_PAGE_OBJECT(mem);
3007 
3008 		if (vm_page_background_exclude_external && !m_object->internal) {
3009 			return;
3010 		}
3011 
3012 		if (first == TRUE) {
3013 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
3014 		} else {
3015 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
3016 		}
3017 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3018 
3019 		vm_page_background_count++;
3020 
3021 		if (m_object->internal) {
3022 			vm_page_background_internal_count++;
3023 		} else {
3024 			vm_page_background_external_count++;
3025 		}
3026 		break;
3027 	}
3028 
3029 	case VM_PAGE_SPECIAL_Q_DONATE:
3030 	{
3031 		if (first == TRUE) {
3032 			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3033 		} else {
3034 			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3035 		}
3036 		vm_page_donate_count++;
3037 		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3038 			assert(vm_page_donate_target == vm_page_donate_target_high);
3039 			vm_page_donate_target = vm_page_donate_target_low;
3040 			vm_page_donate_queue_ripe = true;
3041 		}
3042 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3043 		break;
3044 	}
3045 
3046 	default:
3047 		break;
3048 	}
3049 }
3050 
3051 /*
3052  * This can be switched to FALSE to help debug drivers
3053  * that are having problems with memory > 4G.
3054  */
3055 boolean_t       vm_himemory_mode = TRUE;
3056 
3057 /*
3058  * this interface exists to support hardware controllers
3059  * incapable of generating DMAs with more than 32 bits
3060  * of address on platforms with physical memory > 4G...
3061  */
3062 unsigned int    vm_lopages_allocated_q = 0;
3063 unsigned int    vm_lopages_allocated_cpm_success = 0;
3064 unsigned int    vm_lopages_allocated_cpm_failed = 0;
3065 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3066 
3067 vm_page_t
3068 vm_page_grablo(void)
3069 {
3070 	vm_page_t       mem;
3071 
3072 	if (vm_lopage_needed == FALSE) {
3073 		return vm_page_grab();
3074 	}
3075 
3076 	vm_free_page_lock_spin();
3077 
3078 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3079 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3080 		assert(vm_lopage_free_count);
3081 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3082 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3083 
3084 		vm_lopage_free_count--;
3085 		vm_lopages_allocated_q++;
3086 
3087 		if (vm_lopage_free_count < vm_lopage_lowater) {
3088 			vm_lopage_refill = TRUE;
3089 		}
3090 
3091 		vm_free_page_unlock();
3092 
3093 		if (current_task()->donates_own_pages) {
3094 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3095 		} else {
3096 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3097 		}
3098 	} else {
3099 		vm_free_page_unlock();
3100 
3101 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3102 			vm_free_page_lock_spin();
3103 			vm_lopages_allocated_cpm_failed++;
3104 			vm_free_page_unlock();
3105 
3106 			return VM_PAGE_NULL;
3107 		}
3108 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3109 
3110 		mem->vmp_busy = TRUE;
3111 
3112 		vm_page_lockspin_queues();
3113 
3114 		mem->vmp_gobbled = FALSE;
3115 		vm_page_gobble_count--;
3116 		vm_page_wire_count--;
3117 
3118 		vm_lopages_allocated_cpm_success++;
3119 		vm_page_unlock_queues();
3120 	}
3121 	assert(mem->vmp_busy);
3122 	assert(!mem->vmp_pmapped);
3123 	assert(!mem->vmp_wpmapped);
3124 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3125 
3126 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3127 
3128 	counter_inc(&vm_page_grab_count);
3129 	VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3130 
3131 	return mem;
3132 }
3133 
3134 /*
3135  *	vm_page_grab:
3136  *
3137  *	first try to grab a page from the per-cpu free list...
3138  *	this must be done while pre-emption is disabled... if
3139  *      a page is available, we're done...
3140  *	if no page is available, grab the vm_page_queue_free_lock
3141  *	and see if current number of free pages would allow us
3142  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3143  *	if there are pages available, disable preemption and
3144  *      recheck the state of the per-cpu free list... we could
3145  *	have been preempted and moved to a different cpu, or
3146  *      some other thread could have re-filled it... if still
3147  *	empty, figure out how many pages we can steal from the
3148  *	global free queue and move to the per-cpu queue...
3149  *	return 1 of these pages when done... only wake up the
3150  *      pageout_scan thread if we moved pages from the global
3151  *	list... no need for the wakeup if we've satisfied the
3152  *	request from the per-cpu queue.
3153  */
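/*
 * Usage sketch (illustrative): callers that cannot tolerate failure
 * commonly retry around VM_PAGE_WAIT(), e.g.
 *
 *	while ((mem = vm_page_grab()) == VM_PAGE_NULL) {
 *		VM_PAGE_WAIT();
 *	}
 *
 * whereas callers that can fall back simply check for VM_PAGE_NULL.
 */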
3154 
3155 #if CONFIG_SECLUDED_MEMORY
3156 vm_page_t vm_page_grab_secluded(void);
3157 #endif /* CONFIG_SECLUDED_MEMORY */
3158 
3159 static inline void
3160 vm_page_grab_diags(void);
3161 
3162 /*
3163  *	vm_page_validate_no_references:
3164  *
3165  *	Make sure the physical page has no refcounts.
3166  *
3167  */
3168 static inline void
3169 vm_page_validate_no_references(
3170 	vm_page_t       mem)
3171 {
3172 	bool is_freed;
3173 
3174 	if (mem->vmp_fictitious) {
3175 		return;
3176 	}
3177 
3178 	pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
3179 
3180 #if CONFIG_SPTM
3181 	is_freed = pmap_is_page_free(paddr);
3182 #else
3183 	is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
3184 #endif /* CONFIG_SPTM */
3185 
3186 	if (!is_freed) {
3187 		/*
3188 		 * There is a redundancy here, but we are going to panic anyways,
3189 		 * and ASSERT_PMAP_FREE traces useful information. So, we keep this
3190 		 * behavior.
3191 		 */
3192 		ASSERT_PMAP_FREE(mem);
3193 		panic("%s: page 0x%llx is referenced", __func__, paddr);
3194 	}
3195 }
3196 
3197 vm_page_t
3198 vm_page_grab(void)
3199 {
3200 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3201 }
3202 
3203 #if HIBERNATION
3204 boolean_t       hibernate_rebuild_needed = FALSE;
3205 #endif /* HIBERNATION */
3206 
3207 static void
3208 vm_page_finalize_grabed_page(vm_page_t mem)
3209 {
3210 	task_t cur_task = current_task_early();
3211 	if (cur_task && cur_task != kernel_task) {
3212 		/* tag:DONATE this is where the donate state of the page is decided according to what task grabs it */
3213 		if (cur_task->donates_own_pages) {
3214 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3215 		} else {
3216 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3217 		}
3218 	}
3219 }
3220 
3221 vm_page_t
3222 vm_page_grab_options(
3223 	int grab_options)
3224 {
3225 	vm_page_t       mem;
3226 
3227 restart:
3228 	disable_preemption();
3229 
3230 	if ((mem = *PERCPU_GET(free_pages))) {
3231 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3232 
3233 #if HIBERNATION
3234 		if (hibernate_rebuild_needed) {
3235 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3236 		}
3237 #endif /* HIBERNATION */
3238 
3239 		vm_page_grab_diags();
3240 
3241 		vm_offset_t pcpu_base = current_percpu_base();
3242 		counter_inc_preemption_disabled(&vm_page_grab_count);
3243 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3244 		VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3245 
3246 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3247 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3248 		enable_preemption();
3249 
3250 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3251 		assert(mem->vmp_tabled == FALSE);
3252 		assert(mem->vmp_object == 0);
3253 		assert(!mem->vmp_laundry);
3254 		assert(mem->vmp_busy);
3255 		assert(!mem->vmp_pmapped);
3256 		assert(!mem->vmp_wpmapped);
3257 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3258 		assert(!mem->vmp_realtime);
3259 
3260 		vm_page_validate_no_references(mem);
3261 		vm_page_finalize_grabed_page(mem);
3262 		return mem;
3263 	}
3264 	enable_preemption();
3265 
3266 
3267 	/*
3268 	 *	Optionally produce warnings if the wire or gobble
3269 	 *	counts exceed some threshold.
3270 	 */
3271 #if VM_PAGE_WIRE_COUNT_WARNING
3272 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3273 		printf("mk: vm_page_grab(): high wired page count of %d\n",
3274 		    vm_page_wire_count);
3275 	}
3276 #endif
3277 #if VM_PAGE_GOBBLE_COUNT_WARNING
3278 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3279 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3280 		    vm_page_gobble_count);
3281 	}
3282 #endif
3283 
3284 	/*
3285 	 * If free count is low and we have delayed pages from early boot,
3286 	 * get one of those instead.
3287 	 */
3288 	if (__improbable(vm_delayed_count > 0 &&
3289 	    vm_page_free_count <= vm_page_free_target &&
3290 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3291 		assert(!mem->vmp_realtime);
3292 		// TODO: missing vm_page_finalize_grabed_page()?
3293 		return mem;
3294 	}
3295 
3296 	vm_free_page_lock_spin();
3297 
3298 	/*
3299 	 *	Only let privileged threads (involved in pageout)
3300 	 *	dip into the reserved pool.
3301 	 */
3302 	if ((vm_page_free_count < vm_page_free_reserved) &&
3303 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
3304 		/* no page for us in the free queue... */
3305 		vm_free_page_unlock();
3306 		mem = VM_PAGE_NULL;
3307 
3308 #if CONFIG_SECLUDED_MEMORY
3309 		/* ... but can we try and grab from the secluded queue? */
3310 		if (vm_page_secluded_count > 0 &&
3311 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3312 		    task_can_use_secluded_mem(current_task(), TRUE))) {
3313 			mem = vm_page_grab_secluded();
3314 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3315 				vm_page_secluded.grab_for_iokit++;
3316 				if (mem) {
3317 					vm_page_secluded.grab_for_iokit_success++;
3318 				}
3319 			}
3320 			if (mem) {
3321 				VM_CHECK_MEMORYSTATUS;
3322 
3323 				vm_page_grab_diags();
3324 				counter_inc(&vm_page_grab_count);
3325 				VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3326 
3327 				assert(!mem->vmp_realtime);
3328 				// TODO: missing vm_page_finalize_grabed_page()?
3329 				return mem;
3330 			}
3331 		}
3332 #else /* CONFIG_SECLUDED_MEMORY */
3333 		(void) grab_options;
3334 #endif /* CONFIG_SECLUDED_MEMORY */
3335 	} else {
3336 		vm_page_t        head;
3337 		vm_page_t        tail;
3338 		unsigned int     pages_to_steal;
3339 		unsigned int     color;
3340 		unsigned int clump_end, sub_count;
3341 
3342 		/*
3343 		 * Replenishing our per-CPU cache of free pages might take
3344 		 * too long to keep holding the "free_page" lock as a spinlock,
3345 		 * so convert to the full mutex to prevent other threads trying
3346 		 * to acquire the "free_page" lock from timing out spinning on
3347 		 * the mutex interlock.
3348 		 */
3349 		vm_free_page_lock_convert();
3350 
3351 		while (vm_page_free_count == 0) {
3352 			vm_free_page_unlock();
3353 			/*
3354 			 * must be a privileged thread to be
3355 			 * in this state since a non-privileged
3356 			 * thread would have bailed if we were
3357 			 * under the vm_page_free_reserved mark
3358 			 */
3359 			VM_PAGE_WAIT();
3360 			vm_free_page_lock();
3361 		}
3362 
3363 		/*
3364 		 * Need to repopulate the per-CPU free list from the global free list.
3365 		 * Note we don't do any processing of pending retirement pages here.
3366 		 * That'll happen in the code above when the page comes off the per-CPU list.
3367 		 */
3368 		disable_preemption();
3369 
3370 		/*
3371 		 * If we got preempted the cache might now have pages.
3372 		 */
3373 		if ((mem = *PERCPU_GET(free_pages))) {
3374 			vm_free_page_unlock();
3375 			enable_preemption();
3376 			goto restart;
3377 		}
3378 
3379 		if (vm_page_free_count <= vm_page_free_reserved) {
3380 			pages_to_steal = 1;
3381 		} else {
3382 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3383 				pages_to_steal = vm_free_magazine_refill_limit;
3384 			} else {
3385 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3386 			}
3387 		}
3388 		color = *PERCPU_GET(start_color);
3389 		head = tail = NULL;
3390 
3391 		vm_page_free_count -= pages_to_steal;
3392 		clump_end = sub_count = 0;
3393 
3394 		while (pages_to_steal--) {
3395 			while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3396 				color = (color + 1) & vm_color_mask;
3397 			}
3398 #if defined(__x86_64__)
3399 			vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3400 			    mem, clump_end);
3401 #else
3402 			vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3403 			    mem, vmp_pageq);
3404 #endif
3405 
3406 			assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3407 
3408 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3409 
3410 #if defined(__arm64__)
3411 			color = (color + 1) & vm_color_mask;
3412 #else
3413 
3414 #if DEVELOPMENT || DEBUG
3415 
3416 			sub_count++;
3417 			if (clump_end) {
3418 				vm_clump_update_stats(sub_count);
3419 				sub_count = 0;
3420 				color = (color + 1) & vm_color_mask;
3421 			}
3422 #else
3423 			if (clump_end) {
3424 				color = (color + 1) & vm_color_mask;
3425 			}
3426 
3427 #endif /* if DEVELOPMENT || DEBUG */
3428 
3429 #endif  /* if defined(__arm64__) */
3430 
3431 			if (head == NULL) {
3432 				head = mem;
3433 			} else {
3434 				tail->vmp_snext = mem;
3435 			}
3436 			tail = mem;
3437 
3438 			assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3439 			assert(mem->vmp_tabled == FALSE);
3440 			assert(mem->vmp_object == 0);
3441 			assert(!mem->vmp_laundry);
3442 
3443 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3444 
3445 			assert(mem->vmp_busy);
3446 			assert(!mem->vmp_pmapped);
3447 			assert(!mem->vmp_wpmapped);
3448 			assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3449 			assert(!mem->vmp_realtime);
3450 
3451 			vm_page_validate_no_references(mem);
3452 		}
3453 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3454 		vm_clump_update_stats(sub_count);
3455 #endif
3456 
3457 #if HIBERNATION
3458 		if (hibernate_rebuild_needed) {
3459 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3460 		}
3461 #endif /* HIBERNATION */
3462 		vm_offset_t pcpu_base = current_percpu_base();
3463 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3464 		*PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3465 
3466 		/*
3467 		 * We decremented vm_page_free_count above
3468 		 * so we must wake up vm_pageout_scan() if
3469 		 * we brought it down below vm_page_free_min.
3470 		 */
3471 		bool wakeup_pageout_scan = false;
3472 		if (vm_page_free_count < vm_page_free_min &&
3473 		    !vm_pageout_running) {
3474 			wakeup_pageout_scan = true;
3475 		}
3476 		vm_free_page_unlock();
3477 
3478 		enable_preemption();
3479 
3480 		if (wakeup_pageout_scan) {
3481 			thread_wakeup((event_t) &vm_page_free_wanted);
3482 		}
3483 		VM_CHECK_MEMORYSTATUS;
3484 
3485 		goto restart;
3486 	}
3487 
3488 	/*
3489 	 *	Decide if we should poke the pageout daemon.
3490 	 *	We do this if the free count is less than the low
3491 	 *	water mark. VM Pageout Scan will keep running till
3492 	 *	the free_count > free_target (& hence above free_min).
3493 	 *	This wakeup is to catch the possibility of the counts
3494 	 *	dropping between VM Pageout Scan parking and this check.
3495 	 *
3496 	 *	We don't have the counts locked ... if they change a little,
3497 	 *	it doesn't really matter.
3498 	 */
3499 	if (vm_page_free_count < vm_page_free_min) {
3500 		vm_free_page_lock();
3501 		if (vm_pageout_running == FALSE) {
3502 			vm_free_page_unlock();
3503 			thread_wakeup((event_t) &vm_page_free_wanted);
3504 		} else {
3505 			vm_free_page_unlock();
3506 		}
3507 	}
3508 
3509 	VM_CHECK_MEMORYSTATUS;
3510 
3511 	if (mem) {
3512 		assert(!mem->vmp_realtime);
3513 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
3514 
3515 		vm_page_finalize_grabed_page(mem);
3516 	}
3517 	return mem;
3518 }
3519 
3520 #if CONFIG_SECLUDED_MEMORY
3521 vm_page_t
3522 vm_page_grab_secluded(void)
3523 {
3524 	vm_page_t       mem;
3525 	vm_object_t     object;
3526 	int             refmod_state;
3527 
3528 	if (vm_page_secluded_count == 0) {
3529 		/* no secluded pages to grab... */
3530 		return VM_PAGE_NULL;
3531 	}
3532 
3533 	/* secluded queue is protected by the VM page queue lock */
3534 	vm_page_lock_queues();
3535 
3536 	if (vm_page_secluded_count == 0) {
3537 		/* no secluded pages to grab... */
3538 		vm_page_unlock_queues();
3539 		return VM_PAGE_NULL;
3540 	}
3541 
3542 #if 00
3543 	/* can we grab from the secluded queue? */
3544 	if (vm_page_secluded_count > vm_page_secluded_target ||
3545 	    (vm_page_secluded_count > 0 &&
3546 	    task_can_use_secluded_mem(current_task(), TRUE))) {
3547 		/* OK */
3548 	} else {
3549 		/* can't grab from secluded queue... */
3550 		vm_page_unlock_queues();
3551 		return VM_PAGE_NULL;
3552 	}
3553 #endif
3554 
3555 	/* we can grab a page from secluded queue! */
3556 	assert((vm_page_secluded_count_free +
3557 	    vm_page_secluded_count_inuse) ==
3558 	    vm_page_secluded_count);
3559 	if (current_task()->task_can_use_secluded_mem) {
3560 		assert(num_tasks_can_use_secluded_mem > 0);
3561 	}
3562 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3563 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3564 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3565 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3566 	vm_page_queues_remove(mem, TRUE);
3567 
3568 	object = VM_PAGE_OBJECT(mem);
3569 
3570 	assert(!mem->vmp_fictitious);
3571 	assert(!VM_PAGE_WIRED(mem));
3572 	if (object == VM_OBJECT_NULL) {
3573 		/* free for grab! */
3574 		vm_page_unlock_queues();
3575 		vm_page_secluded.grab_success_free++;
3576 
3577 		assert(mem->vmp_busy);
3578 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3579 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3580 		assert(mem->vmp_pageq.next == 0);
3581 		assert(mem->vmp_pageq.prev == 0);
3582 		assert(mem->vmp_listq.next == 0);
3583 		assert(mem->vmp_listq.prev == 0);
3584 		assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3585 		assert(mem->vmp_specialq.next == 0);
3586 		assert(mem->vmp_specialq.prev == 0);
3587 		return mem;
3588 	}
3589 
3590 	assert(!object->internal);
3591 //	vm_page_pageable_external_count--;
3592 
3593 	if (!vm_object_lock_try(object)) {
3594 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
3595 		vm_page_secluded.grab_failure_locked++;
3596 reactivate_secluded_page:
3597 		vm_page_activate(mem);
3598 		vm_page_unlock_queues();
3599 		return VM_PAGE_NULL;
3600 	}
3601 	if (mem->vmp_busy ||
3602 	    mem->vmp_cleaning ||
3603 	    mem->vmp_laundry) {
3604 		/* can't steal page in this state... */
3605 		vm_object_unlock(object);
3606 		vm_page_secluded.grab_failure_state++;
3607 		goto reactivate_secluded_page;
3608 	}
3609 	if (mem->vmp_realtime) {
3610 		/* don't steal pages used by realtime threads... */
3611 		vm_object_unlock(object);
3612 		vm_page_secluded.grab_failure_realtime++;
3613 		goto reactivate_secluded_page;
3614 	}
3615 
3616 	mem->vmp_busy = TRUE;
3617 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3618 	if (refmod_state & VM_MEM_REFERENCED) {
3619 		mem->vmp_reference = TRUE;
3620 	}
3621 	if (refmod_state & VM_MEM_MODIFIED) {
3622 		SET_PAGE_DIRTY(mem, FALSE);
3623 	}
3624 	if (mem->vmp_dirty || mem->vmp_precious) {
3625 		/* can't grab a dirty page; re-activate */
3626 //		printf("SECLUDED: dirty page %p\n", mem);
3627 		vm_page_wakeup_done(object, mem);
3628 		vm_page_secluded.grab_failure_dirty++;
3629 		vm_object_unlock(object);
3630 		goto reactivate_secluded_page;
3631 	}
3632 	if (mem->vmp_reference) {
3633 		/* it's been used but we do need to grab a page... */
3634 	}
3635 
3636 	vm_page_unlock_queues();
3637 
3638 
3639 	/* finish what vm_page_free() would have done... */
3640 	vm_page_free_prepare_object(mem, TRUE);
3641 	vm_object_unlock(object);
3642 	object = VM_OBJECT_NULL;
3643 
3644 	vm_page_validate_no_references(mem);
3645 
3646 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3647 	vm_page_secluded.grab_success_other++;
3648 
3649 	assert(mem->vmp_busy);
3650 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3651 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3652 	assert(mem->vmp_pageq.next == 0);
3653 	assert(mem->vmp_pageq.prev == 0);
3654 	assert(mem->vmp_listq.next == 0);
3655 	assert(mem->vmp_listq.prev == 0);
3656 	assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3657 	assert(mem->vmp_specialq.next == 0);
3658 	assert(mem->vmp_specialq.prev == 0);
3659 
3660 	return mem;
3661 }
3662 
3663 uint64_t
3664 vm_page_secluded_drain(void)
3665 {
3666 	vm_page_t local_freeq;
3667 	int local_freed;
3668 	uint64_t num_reclaimed;
3669 	unsigned int saved_secluded_count, saved_secluded_target;
3670 
3671 	num_reclaimed = 0;
3672 	local_freeq = NULL;
3673 	local_freed = 0;
3674 
3675 	vm_page_lock_queues();
3676 
3677 	saved_secluded_count = vm_page_secluded_count;
3678 	saved_secluded_target = vm_page_secluded_target;
3679 	vm_page_secluded_target = 0;
3680 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3681 	while (vm_page_secluded_count) {
3682 		vm_page_t secluded_page;
3683 
3684 		assert((vm_page_secluded_count_free +
3685 		    vm_page_secluded_count_inuse) ==
3686 		    vm_page_secluded_count);
3687 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3688 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3689 
3690 		vm_page_queues_remove(secluded_page, FALSE);
3691 		assert(!secluded_page->vmp_fictitious);
3692 		assert(!VM_PAGE_WIRED(secluded_page));
3693 
3694 		if (secluded_page->vmp_object == 0) {
3695 			/* transfer to free queue */
3696 			assert(secluded_page->vmp_busy);
3697 			secluded_page->vmp_snext = local_freeq;
3698 			local_freeq = secluded_page;
3699 			local_freed += 1;
3700 		} else {
3701 			/* transfer to head of active queue */
3702 			vm_page_enqueue_active(secluded_page, FALSE);
3703 			secluded_page = VM_PAGE_NULL;
3704 		}
3705 		num_reclaimed++;
3706 	}
3707 	vm_page_secluded_target = saved_secluded_target;
3708 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3709 
3710 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3711 
3712 	vm_page_unlock_queues();
3713 
3714 	if (local_freed) {
3715 		vm_page_free_list(local_freeq, TRUE);
3716 		local_freeq = NULL;
3717 		local_freed = 0;
3718 	}
3719 
3720 	return num_reclaimed;
3721 }
3722 #endif /* CONFIG_SECLUDED_MEMORY */
3723 
3724 static inline void
3725 vm_page_grab_diags()
3726 {
3727 #if DEVELOPMENT || DEBUG
3728 	task_t task = current_task_early();
3729 	if (task == NULL) {
3730 		return;
3731 	}
3732 
3733 	ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3734 #endif /* DEVELOPMENT || DEBUG */
3735 }
3736 
3737 /*
3738  *	vm_page_release:
3739  *
3740  *	Return a page to the free list.
3741  */
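3741 
/*
 * Illustrative note (not part of the original source): the page handed in
 * is expected to be busy, off all queues (vmp_q_state == VM_PAGE_NOT_ON_Q)
 * and already detached from its object, e.g. via
 * vm_page_free_prepare_object(); vm_page_free() below follows exactly that
 * sequence before calling here with the page queues locked.
 */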
3742 
3743 void
3744 vm_page_release(
3745 	vm_page_t       mem,
3746 	boolean_t       page_queues_locked)
3747 {
3748 	unsigned int    color;
3749 	int     need_wakeup = 0;
3750 	int     need_priv_wakeup = 0;
3751 #if CONFIG_SECLUDED_MEMORY
3752 	int     need_secluded_wakeup = 0;
3753 #endif /* CONFIG_SECLUDED_MEMORY */
3754 	event_t wakeup_event = NULL;
3755 
3756 	if (page_queues_locked) {
3757 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3758 	} else {
3759 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3760 	}
3761 
3762 	assert(!mem->vmp_private && !mem->vmp_fictitious);
3763 
3764 #if MACH_ASSERT
3765 	if (vm_check_refs_on_free) {
3766 		vm_page_validate_no_references(mem);
3767 	}
3768 #endif /* MACH_ASSERT */
3769 
3770 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5);	/* (TEST/DEBUG) */
3771 
3772 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3773 
3774 	if (__improbable(mem->vmp_realtime)) {
3775 		if (!page_queues_locked) {
3776 			vm_page_lock_queues();
3777 		}
3778 		if (mem->vmp_realtime) {
3779 			mem->vmp_realtime = false;
3780 			vm_page_realtime_count--;
3781 		}
3782 		if (!page_queues_locked) {
3783 			vm_page_unlock_queues();
3784 		}
3785 	}
3786 
3787 	vm_free_page_lock_spin();
3788 
3789 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3790 	assert(mem->vmp_busy);
3791 	assert(!mem->vmp_laundry);
3792 	assert(mem->vmp_object == 0);
3793 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3794 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3795 	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3796 
3797 	/* Clear any specialQ hints before releasing page to the free pool */
3798 	mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3799 
3800 	if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3801 	    vm_lopage_free_count < vm_lopage_free_limit &&
3802 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3803 		/*
3804 		 * this exists to support hardware controllers
3805 		 * incapable of generating DMAs with more than 32 bits
3806 		 * of address on platforms with physical memory > 4G...
3807 		 */
3808 		vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3809 		vm_lopage_free_count++;
3810 
3811 		if (vm_lopage_free_count >= vm_lopage_free_limit) {
3812 			vm_lopage_refill = FALSE;
3813 		}
3814 
3815 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3816 		mem->vmp_lopage = TRUE;
3817 #if CONFIG_SECLUDED_MEMORY
3818 	} else if (vm_page_free_count > vm_page_free_reserved &&
3819 	    vm_page_secluded_count < vm_page_secluded_target &&
3820 	    num_tasks_can_use_secluded_mem == 0) {
3821 		/*
3822 		 * XXX FBDP TODO: also avoid refilling secluded queue
3823 		 * when some IOKit objects are already grabbing from it...
3824 		 */
3825 		if (!page_queues_locked) {
3826 			if (!vm_page_trylock_queues()) {
3827 				/* take locks in right order */
3828 				vm_free_page_unlock();
3829 				vm_page_lock_queues();
3830 				vm_free_page_lock_spin();
3831 			}
3832 		}
3833 		mem->vmp_lopage = FALSE;
3834 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3835 		vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3836 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3837 		vm_page_secluded_count++;
3838 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3839 		vm_page_secluded_count_free++;
3840 		if (!page_queues_locked) {
3841 			vm_page_unlock_queues();
3842 		}
3843 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3844 		if (vm_page_free_wanted_secluded > 0) {
3845 			vm_page_free_wanted_secluded--;
3846 			need_secluded_wakeup = 1;
3847 		}
3848 #endif /* CONFIG_SECLUDED_MEMORY */
3849 	} else {
3850 		mem->vmp_lopage = FALSE;
3851 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3852 
3853 		color = VM_PAGE_GET_COLOR(mem);
3854 #if defined(__x86_64__)
3855 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3856 #else
3857 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3858 #endif
3859 		vm_page_free_count++;
3860 		/*
3861 		 *	Check if we should wake up someone waiting for page.
3862 		 *	But don't bother waking them unless they can allocate.
3863 		 *
3864 		 *	We wakeup only one thread, to prevent starvation.
3865 		 *	Because the scheduling system handles wait queues FIFO,
3866 		 *	if we wake up all waiting threads, one greedy thread
3867 		 *	can starve multiple nice-guy threads.  When the threads
3868 		 *	all wake up, the greedy thread runs first, grabs the page,
3869 		 *	and waits for another page.  It will be the first to run
3870 		 *	when the next page is freed.
3871 		 *
3872 		 *	However, there is a slight danger here.
3873 		 *	The thread we wake might not use the free page.
3874 		 *	Then the other threads could wait indefinitely
3875 		 *	while the page goes unused.  To forestall this,
3876 		 *	the pageout daemon will keep making free pages
3877 		 *	as long as vm_page_free_wanted is non-zero.
3878 		 */
3879 
3880 		assert(vm_page_free_count > 0);
3881 		if (vm_page_free_wanted_privileged > 0) {
3882 			vm_page_free_wanted_privileged--;
3883 			need_priv_wakeup = 1;
3884 #if CONFIG_SECLUDED_MEMORY
3885 		} else if (vm_page_free_wanted_secluded > 0 &&
3886 		    vm_page_free_count > vm_page_free_reserved) {
3887 			vm_page_free_wanted_secluded--;
3888 			need_secluded_wakeup = 1;
3889 #endif /* CONFIG_SECLUDED_MEMORY */
3890 		} else if (vm_page_free_wanted > 0 &&
3891 		    vm_page_free_count > vm_page_free_reserved) {
3892 			vm_page_free_wanted--;
3893 			need_wakeup = 1;
3894 		}
3895 	}
3896 	vm_pageout_vminfo.vm_page_pages_freed++;
3897 
3898 	vm_free_page_unlock();
3899 
3900 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3901 
3902 	if (need_priv_wakeup) {
3903 		wakeup_event = &vm_page_free_wanted_privileged;
3904 	}
3905 #if CONFIG_SECLUDED_MEMORY
3906 	else if (need_secluded_wakeup) {
3907 		wakeup_event = &vm_page_free_wanted_secluded;
3908 	}
3909 #endif /* CONFIG_SECLUDED_MEMORY */
3910 	else if (need_wakeup) {
3911 		wakeup_event = &vm_page_free_count;
3912 	}
3913 
3914 	if (wakeup_event) {
3915 		if (vps_dynamic_priority_enabled) {
3916 			wakeup_one_with_inheritor((event_t) wakeup_event,
3917 			    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
3918 			    NULL);
3919 		} else {
3920 			thread_wakeup_one((event_t) wakeup_event);
3921 		}
3922 	}
3923 
3924 	VM_CHECK_MEMORYSTATUS;
3925 }
3926 
3927 /*
3928  * This version of vm_page_release() is used only at startup
3929  * when we are single-threaded and pages are being released
3930  * for the first time. Hence, no locking or unnecessary checks are made.
3931  * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
3932  */
3933 void
3934 vm_page_release_startup(
3935 	vm_page_t       mem)
3936 {
3937 	vm_page_queue_t queue_free;
3938 
3939 	if (vm_lopage_free_count < vm_lopage_free_limit &&
3940 	    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3941 		mem->vmp_lopage = TRUE;
3942 		mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3943 		vm_lopage_free_count++;
3944 		queue_free = &vm_lopage_queue_free;
3945 #if CONFIG_SECLUDED_MEMORY
3946 	} else if (vm_page_secluded_count < vm_page_secluded_target) {
3947 		mem->vmp_lopage = FALSE;
3948 		mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3949 		vm_page_secluded_count++;
3950 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3951 		vm_page_secluded_count_free++;
3952 		queue_free = &vm_page_queue_secluded;
3953 #endif /* CONFIG_SECLUDED_MEMORY */
3954 	} else {
3955 		mem->vmp_lopage = FALSE;
3956 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3957 		vm_page_free_count++;
3958 		queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3959 	}
3960 	if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3961 #if defined(__x86_64__)
3962 		vm_page_queue_enter_clump(queue_free, mem);
3963 #else
3964 		vm_page_queue_enter(queue_free, mem, vmp_pageq);
3965 #endif
3966 	} else {
3967 		vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3968 	}
3969 }
3970 
3971 /*
3972  *	vm_page_wait:
3973  *
3974  *	Wait for a page to become available.
3975  *	If there are plenty of free pages, then we don't sleep.
3976  *
3977  *	Returns:
3978  *		TRUE:  There may be another page, try again
3979  *		FALSE: We were interrupted out of our wait, don't try again
3980  */
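3980 
/*
 * Illustrative sketch (not part of the original source): callers that
 * cannot tolerate allocation failure typically pair vm_page_grab() with
 * vm_page_wait() in a retry loop, assuming an uninterruptible wait:
 *
 *	vm_page_t m;
 *
 *	while ((m = vm_page_grab()) == VM_PAGE_NULL) {
 *		if (!vm_page_wait(THREAD_UNINT)) {
 *			break;		-- interrupted; caller handles failure
 *		}
 *	}
 */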
3981 
3982 boolean_t
3983 vm_page_wait(
3984 	int     interruptible )
3985 {
3986 	/*
3987 	 *	We can't use vm_page_free_reserved to make this
3988 	 *	determination.  Consider: some thread might
3989 	 *	need to allocate two pages.  The first allocation
3990 	 *	succeeds, the second fails.  After the first page is freed,
3991 	 *	a call to vm_page_wait must really block.
3992 	 */
3993 	kern_return_t   wait_result;
3994 	int             need_wakeup = 0;
3995 	int             is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3996 	event_t         wait_event = NULL;
3997 
3998 	vm_free_page_lock_spin();
3999 
4000 	if (is_privileged && vm_page_free_count) {
4001 		vm_free_page_unlock();
4002 		return TRUE;
4003 	}
4004 
4005 	if (vm_page_free_count >= vm_page_free_target) {
4006 		vm_free_page_unlock();
4007 		return TRUE;
4008 	}
4009 
4010 	if (is_privileged) {
4011 		if (vm_page_free_wanted_privileged++ == 0) {
4012 			need_wakeup = 1;
4013 		}
4014 		wait_event = (event_t)&vm_page_free_wanted_privileged;
4015 #if CONFIG_SECLUDED_MEMORY
4016 	} else if (secluded_for_apps &&
4017 	    task_can_use_secluded_mem(current_task(), FALSE)) {
4018 #if 00
4019 		/* XXX FBDP: need pageq lock for this... */
4020 		/* XXX FBDP: might wait even if pages available, */
4021 		/* XXX FBDP: hopefully not for too long... */
4022 		if (vm_page_secluded_count > 0) {
4023 			vm_free_page_unlock();
4024 			return TRUE;
4025 		}
4026 #endif
4027 		if (vm_page_free_wanted_secluded++ == 0) {
4028 			need_wakeup = 1;
4029 		}
4030 		wait_event = (event_t)&vm_page_free_wanted_secluded;
4031 #endif /* CONFIG_SECLUDED_MEMORY */
4032 	} else {
4033 		if (vm_page_free_wanted++ == 0) {
4034 			need_wakeup = 1;
4035 		}
4036 		wait_event = (event_t)&vm_page_free_count;
4037 	}
4038 
4039 	/*
4040 	 * We don't do a vm_pageout_scan wakeup if we already have
4041 	 * some waiters because vm_pageout_scan checks for waiters
4042 	 * before it returns and does so behind the vm_page_queue_free_lock,
4043 	 * which we own when we bump the waiter counts.
4044 	 */
4045 
4046 	if (vps_dynamic_priority_enabled) {
4047 		/*
4048 		 * We are waking up vm_pageout_scan here. If it needs
4049 		 * the vm_page_queue_free_lock before we unlock it
4050 		 * we'll end up just blocking and incur an extra
4051 		 * context switch. Could be a perf. issue.
4052 		 */
4053 
4054 		if (need_wakeup) {
4055 			thread_wakeup((event_t)&vm_page_free_wanted);
4056 		}
4057 
4058 		/*
4059 		 * LD: This event is going to get recorded every time because
4060 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
4061 		 * We just block in that routine.
4062 		 */
4063 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4064 		    vm_page_free_wanted_privileged,
4065 		    vm_page_free_wanted,
4066 #if CONFIG_SECLUDED_MEMORY
4067 		    vm_page_free_wanted_secluded,
4068 #else /* CONFIG_SECLUDED_MEMORY */
4069 		    0,
4070 #endif /* CONFIG_SECLUDED_MEMORY */
4071 		    0);
4072 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
4073 		    LCK_SLEEP_UNLOCK,
4074 		    wait_event,
4075 		    vm_pageout_scan_thread,
4076 		    interruptible,
4077 		    0);
4078 	} else {
4079 		wait_result = assert_wait(wait_event, interruptible);
4080 
4081 		vm_free_page_unlock();
4082 
4083 		if (need_wakeup) {
4084 			thread_wakeup((event_t)&vm_page_free_wanted);
4085 		}
4086 
4087 		if (wait_result == THREAD_WAITING) {
4088 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4089 			    vm_page_free_wanted_privileged,
4090 			    vm_page_free_wanted,
4091 #if CONFIG_SECLUDED_MEMORY
4092 			    vm_page_free_wanted_secluded,
4093 #else /* CONFIG_SECLUDED_MEMORY */
4094 			    0,
4095 #endif /* CONFIG_SECLUDED_MEMORY */
4096 			    0);
4097 			wait_result = thread_block(THREAD_CONTINUE_NULL);
4098 			VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4099 			    DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4100 		}
4101 	}
4102 
4103 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4104 }
4105 
4106 /*
4107  *	vm_page_alloc:
4108  *
4109  *	Allocate and return a memory cell associated
4110  *	with this VM object/offset pair.
4111  *
4112  *	Object must be locked.
4113  */
4114 
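4114 
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller holds the object lock exclusively and, on failure, drops it,
 * waits for free memory, and retries:
 *
 *	vm_object_lock(object);
 *	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		if (!vm_page_wait(THREAD_UNINT)) {
 *			return KERN_ABORTED;	-- hypothetical error path
 *		}
 *		vm_object_lock(object);
 *	}
 */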
4115 vm_page_t
4116 vm_page_alloc(
4117 	vm_object_t             object,
4118 	vm_object_offset_t      offset)
4119 {
4120 	vm_page_t       mem;
4121 	int             grab_options;
4122 
4123 	vm_object_lock_assert_exclusive(object);
4124 	grab_options = 0;
4125 #if CONFIG_SECLUDED_MEMORY
4126 	if (object->can_grab_secluded) {
4127 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4128 	}
4129 #endif /* CONFIG_SECLUDED_MEMORY */
4130 	mem = vm_page_grab_options(grab_options);
4131 	if (mem == VM_PAGE_NULL) {
4132 		return VM_PAGE_NULL;
4133 	}
4134 
4135 	vm_page_insert(mem, object, offset);
4136 
4137 	return mem;
4138 }
4139 
4140 /*
4141  *	vm_page_free_prepare:
4142  *
4143  *	Removes page from any queue it may be on
4144  *	and disassociates it from its VM object.
4145  *
4146  *	Object and page queues must be locked prior to entry.
4147  */
4148 static void
4149 vm_page_free_prepare(
4150 	vm_page_t       mem)
4151 {
4152 #if CONFIG_SPTM
4153 	/**
4154 	 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
4155 	 *            created and destroyed. In order to catch potential cases where this
4156 	 *            does not happen, add an appropriate assert here. This code should be
4157 	 *            executed on every frame that is about to be released to the VM.
4158 	 */
4159 	const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
4160 	__unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
4161 
4162 	assert(frame_type == XNU_DEFAULT);
4163 #endif /* CONFIG_SPTM */
4164 
4165 	vm_page_free_prepare_queues(mem);
4166 	vm_page_free_prepare_object(mem, TRUE);
4167 }
4168 
4169 
4170 void
4171 vm_page_free_prepare_queues(
4172 	vm_page_t       mem)
4173 {
4174 	vm_object_t     m_object;
4175 
4176 	VM_PAGE_CHECK(mem);
4177 
4178 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4179 	assert(!mem->vmp_cleaning);
4180 	m_object = VM_PAGE_OBJECT(mem);
4181 
4182 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4183 	if (m_object) {
4184 		vm_object_lock_assert_exclusive(m_object);
4185 	}
4186 	if (mem->vmp_laundry) {
4187 		/*
4188 		 * We may have to free a page while it's being laundered
4189 		 * if we lost its pager (due to a forced unmount, for example).
4190 		 * We need to call vm_pageout_steal_laundry() before removing
4191 		 * the page from its VM object, so that we can remove it
4192 		 * from its pageout queue and adjust the laundry accounting
4193 		 */
4194 		vm_pageout_steal_laundry(mem, TRUE);
4195 	}
4196 
4197 	vm_page_queues_remove(mem, TRUE);
4198 
4199 	if (__improbable(mem->vmp_realtime)) {
4200 		mem->vmp_realtime = false;
4201 		vm_page_realtime_count--;
4202 	}
4203 
4204 	if (VM_PAGE_WIRED(mem)) {
4205 		assert(mem->vmp_wire_count > 0);
4206 
4207 		if (m_object) {
4208 			task_t          owner;
4209 			int             ledger_idx_volatile;
4210 			int             ledger_idx_nonvolatile;
4211 			int             ledger_idx_volatile_compressed;
4212 			int             ledger_idx_nonvolatile_compressed;
4213 			int             ledger_idx_composite;
4214 			int             ledger_idx_external_wired;
4215 			boolean_t       do_footprint;
4216 
4217 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4218 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4219 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4220 
4221 			assert(m_object->resident_page_count >=
4222 			    m_object->wired_page_count);
4223 
4224 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4225 				OSAddAtomic(+1, &vm_page_purgeable_count);
4226 				assert(vm_page_purgeable_wired_count > 0);
4227 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4228 			}
4229 			if (m_object->internal &&
4230 			    m_object->vo_owner != TASK_NULL &&
4231 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4232 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
4233 				owner = VM_OBJECT_OWNER(m_object);
4234 				vm_object_ledger_tag_ledgers(
4235 					m_object,
4236 					&ledger_idx_volatile,
4237 					&ledger_idx_nonvolatile,
4238 					&ledger_idx_volatile_compressed,
4239 					&ledger_idx_nonvolatile_compressed,
4240 					&ledger_idx_composite,
4241 					&ledger_idx_external_wired,
4242 					&do_footprint);
4243 				/*
4244 				 * While wired, this page was accounted
4245 				 * as "non-volatile" but it should now
4246 				 * be accounted as "volatile".
4247 				 */
4248 				/* one less "non-volatile"... */
4249 				ledger_debit(owner->ledger,
4250 				    ledger_idx_nonvolatile,
4251 				    PAGE_SIZE);
4252 				if (do_footprint) {
4253 					/* ... and "phys_footprint" */
4254 					ledger_debit(owner->ledger,
4255 					    task_ledgers.phys_footprint,
4256 					    PAGE_SIZE);
4257 				} else if (ledger_idx_composite != -1) {
4258 					ledger_debit(owner->ledger,
4259 					    ledger_idx_composite,
4260 					    PAGE_SIZE);
4261 				}
4262 				/* one more "volatile" */
4263 				ledger_credit(owner->ledger,
4264 				    ledger_idx_volatile,
4265 				    PAGE_SIZE);
4266 			}
4267 		}
4268 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4269 			vm_page_wire_count--;
4270 		}
4271 
4272 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4273 		mem->vmp_wire_count = 0;
4274 		assert(!mem->vmp_gobbled);
4275 	} else if (mem->vmp_gobbled) {
4276 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4277 			vm_page_wire_count--;
4278 		}
4279 		vm_page_gobble_count--;
4280 	}
4281 }
4282 
4283 
4284 void
4285 vm_page_free_prepare_object(
4286 	vm_page_t       mem,
4287 	boolean_t       remove_from_hash)
4288 {
4289 	assert(!mem->vmp_realtime);
4290 	if (mem->vmp_tabled) {
4291 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4292 	}
4293 	vm_page_wakeup(VM_OBJECT_NULL, mem);               /* clears wanted */
4294 
4295 	if (mem->vmp_private) {
4296 		mem->vmp_private = FALSE;
4297 		mem->vmp_fictitious = TRUE;
4298 		VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4299 	}
4300 	if (!mem->vmp_fictitious) {
4301 		assert(mem->vmp_pageq.next == 0);
4302 		assert(mem->vmp_pageq.prev == 0);
4303 		assert(mem->vmp_listq.next == 0);
4304 		assert(mem->vmp_listq.prev == 0);
4305 		assert(mem->vmp_specialq.next == 0);
4306 		assert(mem->vmp_specialq.prev == 0);
4307 		assert(mem->vmp_next_m == 0);
4308 
4309 #if MACH_ASSERT
4310 		if (vm_check_refs_on_free) {
4311 			vm_page_validate_no_references(mem);
4312 		}
4313 #endif /* MACH_ASSERT */
4314 
4315 		{
4316 			vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4317 		}
4318 	}
4319 }
4320 
4321 /*
4322  *	vm_page_free:
4323  *
4324  *	Returns the given page to the free list,
4325  *	disassociating it from any VM object.
4326  *
4327  *	Object and page queues must be locked prior to entry.
4328  */
4329 void
4330 vm_page_free(
4331 	vm_page_t       mem)
4332 {
4333 	vm_page_free_prepare(mem);
4334 
4335 	if (mem->vmp_fictitious) {
4336 		vm_page_release_fictitious(mem);
4337 	} else {
4338 		vm_page_release(mem, TRUE);  /* page queues are locked */
4339 	}
4340 }
4341 
4342 
4343 void
4344 vm_page_free_unlocked(
4345 	vm_page_t       mem,
4346 	boolean_t       remove_from_hash)
4347 {
4348 	vm_page_lockspin_queues();
4349 	vm_page_free_prepare_queues(mem);
4350 	vm_page_unlock_queues();
4351 
4352 	vm_page_free_prepare_object(mem, remove_from_hash);
4353 
4354 	if (mem->vmp_fictitious) {
4355 		vm_page_release_fictitious(mem);
4356 	} else {
4357 		vm_page_release(mem, FALSE); /* page queues are not locked */
4358 	}
4359 }
4360 
4361 
4362 /*
4363  * Free a list of pages.  The list can be up to several hundred pages,
4364  * as blocked up by vm_pageout_scan().
4365  * The big win is not having to take the free list lock once
4366  * per page.
4367  *
4368  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4369  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4370  */
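4370 
/*
 * Illustrative sketch (not part of the original source): callers build a
 * local singly-linked batch through vmp_snext and release the whole chain
 * in one call, as vm_page_secluded_drain() does above:
 *
 *	vm_page_t local_freeq = VM_PAGE_NULL;
 *
 *	-- for each page to be freed --
 *		page->vmp_snext = local_freeq;
 *		local_freeq = page;
 *
 *	if (local_freeq) {
 *		vm_page_free_list(local_freeq, TRUE);
 *	}
 */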
4371 void
4372 vm_page_free_list(
4373 	vm_page_t       freeq,
4374 	boolean_t       prepare_object)
4375 {
4376 	vm_page_t       mem;
4377 	vm_page_t       nxt;
4378 	vm_page_t       local_freeq;
4379 	int             pg_count;
4380 
4381 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4382 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4383 
4384 	while (freeq) {
4385 		pg_count = 0;
4386 		local_freeq = VM_PAGE_NULL;
4387 		mem = freeq;
4388 
4389 		/*
4390 		 * break up the processing into smaller chunks so
4391 		 * that we can 'pipeline' the pages onto the
4392 		 * free list w/o introducing too much
4393 		 * contention on the global free queue lock
4394 		 */
4395 		while (mem && pg_count < 64) {
4396 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4397 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4398 			assert(mem->vmp_specialq.next == 0 &&
4399 			    mem->vmp_specialq.prev == 0);
4400 			/*
4401 			 * &&
4402 			 *   mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4403 			 */
4404 			nxt = mem->vmp_snext;
4405 			mem->vmp_snext = NULL;
4406 			assert(mem->vmp_pageq.prev == 0);
4407 
4408 #if MACH_ASSERT
4409 			if (vm_check_refs_on_free) {
4410 				if (!mem->vmp_fictitious && !mem->vmp_private) {
4411 					vm_page_validate_no_references(mem);
4412 				}
4413 			}
4414 #endif /* MACH_ASSERT */
4415 
4416 			if (__improbable(mem->vmp_realtime)) {
4417 				vm_page_lock_queues();
4418 				if (mem->vmp_realtime) {
4419 					mem->vmp_realtime = false;
4420 					vm_page_realtime_count--;
4421 				}
4422 				vm_page_unlock_queues();
4423 			}
4424 
4425 			if (prepare_object == TRUE) {
4426 				vm_page_free_prepare_object(mem, TRUE);
4427 			}
4428 
4429 			if (!mem->vmp_fictitious) {
4430 				assert(mem->vmp_busy);
4431 
4432 				if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4433 				    vm_lopage_free_count < vm_lopage_free_limit &&
4434 				    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4435 					vm_page_release(mem, FALSE); /* page queues are not locked */
4436 #if CONFIG_SECLUDED_MEMORY
4437 				} else if (vm_page_secluded_count < vm_page_secluded_target &&
4438 				    num_tasks_can_use_secluded_mem == 0) {
4439 					vm_page_release(mem,
4440 					    FALSE);             /* page queues are not locked */
4441 #endif /* CONFIG_SECLUDED_MEMORY */
4442 				} else {
4443 					/*
4444 					 * IMPORTANT: we can't set the page "free" here
4445 					 * because that would make the page eligible for
4446 					 * a physically-contiguous allocation (see
4447 					 * vm_page_find_contiguous()) right away (we don't
4448 					 * hold the vm_page_queue_free lock).  That would
4449 					 * cause trouble because the page is not actually
4450 					 * in the free queue yet...
4451 					 */
4452 					mem->vmp_snext = local_freeq;
4453 					local_freeq = mem;
4454 					pg_count++;
4455 
4456 					pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4457 				}
4458 			} else {
4459 				assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4460 				    VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4461 				vm_page_release_fictitious(mem);
4462 			}
4463 			mem = nxt;
4464 		}
4465 		freeq = mem;
4466 
4467 		if ((mem = local_freeq)) {
4468 			unsigned int    avail_free_count;
4469 			unsigned int    need_wakeup = 0;
4470 			unsigned int    need_priv_wakeup = 0;
4471 #if CONFIG_SECLUDED_MEMORY
4472 			unsigned int    need_wakeup_secluded = 0;
4473 #endif /* CONFIG_SECLUDED_MEMORY */
4474 			event_t         priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4475 			boolean_t       priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4476 
4477 			vm_free_page_lock_spin();
4478 
4479 			while (mem) {
4480 				int     color;
4481 
4482 				nxt = mem->vmp_snext;
4483 
4484 				assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4485 				assert(mem->vmp_busy);
4486 				assert(!mem->vmp_realtime);
4487 				mem->vmp_lopage = FALSE;
4488 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4489 
4490 				color = VM_PAGE_GET_COLOR(mem);
4491 #if defined(__x86_64__)
4492 				vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4493 #else
4494 				vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4495 				    mem, vmp_pageq);
4496 #endif
4497 				mem = nxt;
4498 			}
4499 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4500 			vm_page_free_count += pg_count;
4501 			avail_free_count = vm_page_free_count;
4502 
4503 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4504 
4505 			if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4506 				if (avail_free_count < vm_page_free_wanted_privileged) {
4507 					need_priv_wakeup = avail_free_count;
4508 					vm_page_free_wanted_privileged -= avail_free_count;
4509 					avail_free_count = 0;
4510 				} else {
4511 					need_priv_wakeup = vm_page_free_wanted_privileged;
4512 					avail_free_count -= vm_page_free_wanted_privileged;
4513 					vm_page_free_wanted_privileged = 0;
4514 				}
4515 			}
4516 #if CONFIG_SECLUDED_MEMORY
4517 			if (vm_page_free_wanted_secluded > 0 &&
4518 			    avail_free_count > vm_page_free_reserved) {
4519 				unsigned int available_pages;
4520 				available_pages = (avail_free_count -
4521 				    vm_page_free_reserved);
4522 				if (available_pages <
4523 				    vm_page_free_wanted_secluded) {
4524 					need_wakeup_secluded = available_pages;
4525 					vm_page_free_wanted_secluded -=
4526 					    available_pages;
4527 					avail_free_count -= available_pages;
4528 				} else {
4529 					need_wakeup_secluded =
4530 					    vm_page_free_wanted_secluded;
4531 					avail_free_count -=
4532 					    vm_page_free_wanted_secluded;
4533 					vm_page_free_wanted_secluded = 0;
4534 				}
4535 			}
4536 #endif /* CONFIG_SECLUDED_MEMORY */
4537 			if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4538 				unsigned int  available_pages;
4539 
4540 				available_pages = avail_free_count - vm_page_free_reserved;
4541 
4542 				if (available_pages >= vm_page_free_wanted) {
4543 					need_wakeup = vm_page_free_wanted;
4544 					vm_page_free_wanted = 0;
4545 				} else {
4546 					need_wakeup = available_pages;
4547 					vm_page_free_wanted -= available_pages;
4548 				}
4549 			}
4550 			vm_free_page_unlock();
4551 
4552 			priv_wakeup_event = NULL;
4553 			secluded_wakeup_event = NULL;
4554 			normal_wakeup_event = NULL;
4555 
4556 			priv_wakeup_all = FALSE;
4557 			secluded_wakeup_all = FALSE;
4558 			normal_wakeup_all = FALSE;
4559 
4560 
4561 			if (need_priv_wakeup != 0) {
4562 				/*
4563 				 * There shouldn't be that many VM-privileged threads,
4564 				 * so let's wake them all up, even if we don't quite
4565 				 * have enough pages to satisfy them all.
4566 				 */
4567 				priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4568 				priv_wakeup_all = TRUE;
4569 			}
4570 #if CONFIG_SECLUDED_MEMORY
4571 			if (need_wakeup_secluded != 0 &&
4572 			    vm_page_free_wanted_secluded == 0) {
4573 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4574 				secluded_wakeup_all = TRUE;
4575 				need_wakeup_secluded = 0;
4576 			} else {
4577 				secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4578 			}
4579 #endif /* CONFIG_SECLUDED_MEMORY */
4580 			if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4581 				/*
4582 				 * We don't expect to have any more waiters
4583 				 * after this, so let's wake them all up at
4584 				 * once.
4585 				 */
4586 				normal_wakeup_event = (event_t) &vm_page_free_count;
4587 				normal_wakeup_all = TRUE;
4588 				need_wakeup = 0;
4589 			} else {
4590 				normal_wakeup_event = (event_t) &vm_page_free_count;
4591 			}
4592 
4593 			if (priv_wakeup_event ||
4594 #if CONFIG_SECLUDED_MEMORY
4595 			    secluded_wakeup_event ||
4596 #endif /* CONFIG_SECLUDED_MEMORY */
4597 			    normal_wakeup_event) {
4598 				if (vps_dynamic_priority_enabled) {
4599 					if (priv_wakeup_all == TRUE) {
4600 						wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4601 					}
4602 
4603 #if CONFIG_SECLUDED_MEMORY
4604 					if (secluded_wakeup_all == TRUE) {
4605 						wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4606 					}
4607 
4608 					while (need_wakeup_secluded-- != 0) {
4609 						/*
4610 						 * Wake up one waiter per page we just released.
4611 						 */
4612 						wakeup_one_with_inheritor(secluded_wakeup_event,
4613 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
4614 					}
4615 #endif /* CONFIG_SECLUDED_MEMORY */
4616 
4617 					if (normal_wakeup_all == TRUE) {
4618 						wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4619 					}
4620 
4621 					while (need_wakeup-- != 0) {
4622 						/*
4623 						 * Wake up one waiter per page we just released.
4624 						 */
4625 						wakeup_one_with_inheritor(normal_wakeup_event,
4626 						    THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH,
4627 						    NULL);
4628 					}
4629 				} else {
4630 					/*
4631 					 * Non-priority-aware wakeups.
4632 					 */
4633 
4634 					if (priv_wakeup_all == TRUE) {
4635 						thread_wakeup(priv_wakeup_event);
4636 					}
4637 
4638 #if CONFIG_SECLUDED_MEMORY
4639 					if (secluded_wakeup_all == TRUE) {
4640 						thread_wakeup(secluded_wakeup_event);
4641 					}
4642 
4643 					while (need_wakeup_secluded-- != 0) {
4644 						/*
4645 						 * Wake up one waiter per page we just released.
4646 						 */
4647 						thread_wakeup_one(secluded_wakeup_event);
4648 					}
4649 
4650 #endif /* CONFIG_SECLUDED_MEMORY */
4651 					if (normal_wakeup_all == TRUE) {
4652 						thread_wakeup(normal_wakeup_event);
4653 					}
4654 
4655 					while (need_wakeup-- != 0) {
4656 						/*
4657 						 * Wake up one waiter per page we just released.
4658 						 */
4659 						thread_wakeup_one(normal_wakeup_event);
4660 					}
4661 				}
4662 			}
4663 
4664 			VM_CHECK_MEMORYSTATUS;
4665 		}
4666 	}
4667 }
4668 
4669 
4670 /*
4671  *	vm_page_wire:
4672  *
4673  *	Mark this page as wired down by yet
4674  *	another map, removing it from paging queues
4675  *	as necessary.
4676  *
4677  *	The page's object and the page queues must be locked.
4678  */
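4678 
/*
 * Illustrative sketch (not part of the original source): a hypothetical
 * caller takes both locks named above before wiring, here with a generic
 * VM_KERN_MEMORY_NONE tag:
 *
 *	vm_object_lock(object);
 *	vm_page_lock_queues();
 *	vm_page_wire(m, VM_KERN_MEMORY_NONE, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */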
4679 
4680 
4681 void
4682 vm_page_wire(
4683 	vm_page_t mem,
4684 	vm_tag_t           tag,
4685 	boolean_t          check_memorystatus)
4686 {
4687 	vm_object_t     m_object;
4688 
4689 	m_object = VM_PAGE_OBJECT(mem);
4690 
4691 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
4692 
4693 	VM_PAGE_CHECK(mem);
4694 	if (m_object) {
4695 		vm_object_lock_assert_exclusive(m_object);
4696 	} else {
4697 		/*
4698 		 * In theory, the page should be in an object before it
4699 		 * gets wired, since we need to hold the object lock
4700 		 * to update some fields in the page structure.
4701 		 * However, some code (i386 pmap, for example) might want
4702 		 * to wire a page before it gets inserted into an object.
4703 		 * That's somewhat OK, as long as nobody else can get to
4704 		 * that page and update it at the same time.
4705 		 */
4706 	}
4707 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4708 	if (!VM_PAGE_WIRED(mem)) {
4709 		if (mem->vmp_laundry) {
4710 			vm_pageout_steal_laundry(mem, TRUE);
4711 		}
4712 
4713 		vm_page_queues_remove(mem, TRUE);
4714 
4715 		assert(mem->vmp_wire_count == 0);
4716 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
4717 
4718 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4719 		if (mem->vmp_unmodified_ro == true) {
4720 			/* Object and PageQ locks are held*/
4721 			mem->vmp_unmodified_ro = false;
4722 			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4723 			vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
4724 		}
4725 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4726 
4727 		if (m_object) {
4728 			task_t          owner;
4729 			int             ledger_idx_volatile;
4730 			int             ledger_idx_nonvolatile;
4731 			int             ledger_idx_volatile_compressed;
4732 			int             ledger_idx_nonvolatile_compressed;
4733 			int             ledger_idx_composite;
4734 			int             ledger_idx_external_wired;
4735 			boolean_t       do_footprint;
4736 
4737 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4738 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4739 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4740 
4741 			assert(m_object->resident_page_count >=
4742 			    m_object->wired_page_count);
4743 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4744 				assert(vm_page_purgeable_count > 0);
4745 				OSAddAtomic(-1, &vm_page_purgeable_count);
4746 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
4747 			}
4748 			if (m_object->internal &&
4749 			    m_object->vo_owner != TASK_NULL &&
4750 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4751 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
4752 				owner = VM_OBJECT_OWNER(m_object);
4753 				vm_object_ledger_tag_ledgers(
4754 					m_object,
4755 					&ledger_idx_volatile,
4756 					&ledger_idx_nonvolatile,
4757 					&ledger_idx_volatile_compressed,
4758 					&ledger_idx_nonvolatile_compressed,
4759 					&ledger_idx_composite,
4760 					&ledger_idx_external_wired,
4761 					&do_footprint);
4762 				/* less volatile bytes */
4763 				ledger_debit(owner->ledger,
4764 				    ledger_idx_volatile,
4765 				    PAGE_SIZE);
4766 				/* more not-quite-volatile bytes */
4767 				ledger_credit(owner->ledger,
4768 				    ledger_idx_nonvolatile,
4769 				    PAGE_SIZE);
4770 				if (do_footprint) {
4771 					/* more footprint */
4772 					ledger_credit(owner->ledger,
4773 					    task_ledgers.phys_footprint,
4774 					    PAGE_SIZE);
4775 				} else if (ledger_idx_composite != -1) {
4776 					ledger_credit(owner->ledger,
4777 					    ledger_idx_composite,
4778 					    PAGE_SIZE);
4779 				}
4780 			}
4781 
4782 			if (m_object->all_reusable) {
4783 				/*
4784 				 * Wired pages are not counted as "re-usable"
4785 				 * in "all_reusable" VM objects, so nothing
4786 				 * to do here.
4787 				 */
4788 			} else if (mem->vmp_reusable) {
4789 				/*
4790 				 * This page is not "re-usable" when it's
4791 				 * wired, so adjust its state and the
4792 				 * accounting.
4793 				 */
4794 				vm_page_lockconvert_queues();
4795 				vm_object_reuse_pages(m_object,
4796 				    mem->vmp_offset,
4797 				    mem->vmp_offset + PAGE_SIZE_64,
4798 				    FALSE);
4799 			}
4800 		}
4801 		assert(!mem->vmp_reusable);
4802 
4803 		if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4804 			vm_page_wire_count++;
4805 		}
4806 		if (mem->vmp_gobbled) {
4807 			vm_page_gobble_count--;
4808 		}
4809 		mem->vmp_gobbled = FALSE;
4810 
4811 		if (check_memorystatus == TRUE) {
4812 			VM_CHECK_MEMORYSTATUS;
4813 		}
4814 	}
4815 	assert(!mem->vmp_gobbled);
4816 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4817 	mem->vmp_wire_count++;
4818 	if (__improbable(mem->vmp_wire_count == 0)) {
4819 		panic("vm_page_wire(%p): wire_count overflow", mem);
4820 	}
4821 	VM_PAGE_CHECK(mem);
4822 }
4823 
4824 /*
4825  *	vm_page_unwire:
4826  *
4827  *	Release one wiring of this page, potentially
4828  *	enabling it to be paged again.
4829  *
4830  *	The page's object and the page queues must be locked.
4831  */
4832 void
4833 vm_page_unwire(
4834 	vm_page_t       mem,
4835 	boolean_t       queueit)
4836 {
4837 	vm_object_t     m_object;
4838 
4839 	m_object = VM_PAGE_OBJECT(mem);
4840 
4841 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
4842 
4843 	VM_PAGE_CHECK(mem);
4844 	assert(VM_PAGE_WIRED(mem));
4845 	assert(mem->vmp_wire_count > 0);
4846 	assert(!mem->vmp_gobbled);
4847 	assert(m_object != VM_OBJECT_NULL);
4848 	vm_object_lock_assert_exclusive(m_object);
4849 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4850 	if (--mem->vmp_wire_count == 0) {
4851 		task_t          owner;
4852 		int             ledger_idx_volatile;
4853 		int             ledger_idx_nonvolatile;
4854 		int             ledger_idx_volatile_compressed;
4855 		int             ledger_idx_nonvolatile_compressed;
4856 		int             ledger_idx_composite;
4857 		int             ledger_idx_external_wired;
4858 		boolean_t       do_footprint;
4859 
4860 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4861 
4862 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4863 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4864 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4865 		if (!mem->vmp_private && !mem->vmp_fictitious) {
4866 			vm_page_wire_count--;
4867 		}
4868 
4869 		assert(m_object->resident_page_count >=
4870 		    m_object->wired_page_count);
4871 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4872 			OSAddAtomic(+1, &vm_page_purgeable_count);
4873 			assert(vm_page_purgeable_wired_count > 0);
4874 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4875 		}
4876 		if (m_object->internal &&
4877 		    m_object->vo_owner != TASK_NULL &&
4878 		    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4879 		    m_object->purgable == VM_PURGABLE_EMPTY)) {
4880 			owner = VM_OBJECT_OWNER(m_object);
4881 			vm_object_ledger_tag_ledgers(
4882 				m_object,
4883 				&ledger_idx_volatile,
4884 				&ledger_idx_nonvolatile,
4885 				&ledger_idx_volatile_compressed,
4886 				&ledger_idx_nonvolatile_compressed,
4887 				&ledger_idx_composite,
4888 				&ledger_idx_external_wired,
4889 				&do_footprint);
4890 			/* more volatile bytes */
4891 			ledger_credit(owner->ledger,
4892 			    ledger_idx_volatile,
4893 			    PAGE_SIZE);
4894 			/* less not-quite-volatile bytes */
4895 			ledger_debit(owner->ledger,
4896 			    ledger_idx_nonvolatile,
4897 			    PAGE_SIZE);
4898 			if (do_footprint) {
4899 				/* less footprint */
4900 				ledger_debit(owner->ledger,
4901 				    task_ledgers.phys_footprint,
4902 				    PAGE_SIZE);
4903 			} else if (ledger_idx_composite != -1) {
4904 				ledger_debit(owner->ledger,
4905 				    ledger_idx_composite,
4906 				    PAGE_SIZE);
4907 			}
4908 		}
4909 		assert(!is_kernel_object(m_object));
4910 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4911 
4912 		if (queueit == TRUE) {
4913 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
4914 				vm_page_deactivate(mem);
4915 			} else {
4916 				vm_page_activate(mem);
4917 			}
4918 		}
4919 
4920 		VM_CHECK_MEMORYSTATUS;
4921 	}
4922 	VM_PAGE_CHECK(mem);
4923 }
4924 
4925 /*
4926  *	vm_page_deactivate:
4927  *
4928  *	Returns the given page to the inactive list,
4929  *	indicating that no physical maps have access
4930  *	to this page.  [Used by the physical mapping system.]
4931  *
4932  *	The page queues must be locked.
4933  */
4934 void
4935 vm_page_deactivate(
4936 	vm_page_t       m)
4937 {
4938 	vm_page_deactivate_internal(m, TRUE);
4939 }
4940 
4941 
4942 void
4943 vm_page_deactivate_internal(
4944 	vm_page_t       m,
4945 	boolean_t       clear_hw_reference)
4946 {
4947 	vm_object_t     m_object;
4948 
4949 	m_object = VM_PAGE_OBJECT(m);
4950 
4951 	VM_PAGE_CHECK(m);
4952 	assert(!is_kernel_object(m_object));
4953 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4954 
4955 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
4956 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4957 	/*
4958 	 *	This page is no longer very interesting.  If it was
4959 	 *	interesting (active or inactive/referenced), then we
4960 	 *	clear the reference bit and (re)enter it in the
4961 	 *	inactive queue.  Note wired pages should not have
4962 	 *	their reference bit cleared.
4963 	 */
4964 	assert( !(m->vmp_absent && !m->vmp_unusual));
4965 
4966 	if (m->vmp_gobbled) {           /* can this happen? */
4967 		assert( !VM_PAGE_WIRED(m));
4968 
4969 		if (!m->vmp_private && !m->vmp_fictitious) {
4970 			vm_page_wire_count--;
4971 		}
4972 		vm_page_gobble_count--;
4973 		m->vmp_gobbled = FALSE;
4974 	}
4975 	/*
4976 	 * if this page is currently on the pageout queue, we can't do the
4977 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4978 	 * and we can't remove it manually since we would need the object lock
4979 	 * (which is not required here) to decrement the activity_in_progress
4980 	 * reference which is held on the object while the page is in the pageout queue...
4981 	 * just let the normal laundry processing proceed
4982 	 */
4983 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4984 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4985 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4986 	    VM_PAGE_WIRED(m)) {
4987 		return;
4988 	}
4989 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
4990 		vm_page_lockconvert_queues();
4991 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4992 	}
4993 
4994 	m->vmp_reference = FALSE;
4995 	m->vmp_no_cache = FALSE;
4996 
4997 	if (!VM_PAGE_INACTIVE(m)) {
4998 		vm_page_queues_remove(m, FALSE);
4999 
5000 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5001 		    m->vmp_dirty && m_object->internal &&
5002 		    (m_object->purgable == VM_PURGABLE_DENY ||
5003 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5004 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5005 			vm_page_check_pageable_safe(m);
5006 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5007 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5008 			vm_page_throttled_count++;
5009 		} else {
5010 			if (m_object->named &&
5011 			    os_ref_get_count_raw(&m_object->ref_count) == 1) {
5012 				vm_page_speculate(m, FALSE);
5013 #if DEVELOPMENT || DEBUG
5014 				vm_page_speculative_recreated++;
5015 #endif
5016 			} else {
5017 				vm_page_enqueue_inactive(m, FALSE);
5018 			}
5019 		}
5020 	}
5021 }
5022 
5023 /*
5024  * vm_page_enqueue_cleaned
5025  *
5026  * Put the page on the cleaned queue, mark it cleaned, etc.
5027  * Being on the cleaned queue (and having m->clean_queue set)
5028  * does ** NOT ** guarantee that the page is clean!
5029  *
5030  * Call with the queues lock held.
5031  */
5032 
5033 void
5034 vm_page_enqueue_cleaned(vm_page_t m)
5035 {
5036 	vm_object_t     m_object;
5037 
5038 	m_object = VM_PAGE_OBJECT(m);
5039 
5040 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5041 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5042 	assert( !(m->vmp_absent && !m->vmp_unusual));
5043 
5044 	if (VM_PAGE_WIRED(m)) {
5045 		return;
5046 	}
5047 
5048 	if (m->vmp_gobbled) {
5049 		if (!m->vmp_private && !m->vmp_fictitious) {
5050 			vm_page_wire_count--;
5051 		}
5052 		vm_page_gobble_count--;
5053 		m->vmp_gobbled = FALSE;
5054 	}
5055 	/*
5056 	 * if this page is currently on the pageout queue, we can't do the
5057 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5058 	 * and we can't remove it manually since we would need the object lock
5059 	 * (which is not required here) to decrement the activity_in_progress
5060 	 * reference which is held on the object while the page is in the pageout queue...
5061 	 * just let the normal laundry processing proceed
5062 	 */
5063 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5064 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
5065 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5066 		return;
5067 	}
5068 	vm_page_queues_remove(m, FALSE);
5069 
5070 	vm_page_check_pageable_safe(m);
5071 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
5072 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
5073 	vm_page_cleaned_count++;
5074 
5075 	vm_page_inactive_count++;
5076 	if (m_object->internal) {
5077 		vm_page_pageable_internal_count++;
5078 	} else {
5079 		vm_page_pageable_external_count++;
5080 	}
5081 	vm_page_add_to_specialq(m, TRUE);
5082 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
5083 }
5084 
5085 /*
5086  *	vm_page_activate:
5087  *
5088  *	Put the specified page on the active list (if appropriate).
5089  *
5090  *	The page queues must be locked.
5091  */
5092 
5093 void
5094 vm_page_activate(
5095 	vm_page_t       m)
5096 {
5097 	vm_object_t     m_object;
5098 
5099 	m_object = VM_PAGE_OBJECT(m);
5100 
5101 	VM_PAGE_CHECK(m);
5102 #ifdef  FIXME_4778297
5103 	assert(!is_kernel_object(m_object));
5104 #endif
5105 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5106 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5107 	assert( !(m->vmp_absent && !m->vmp_unusual));
5108 
5109 	if (m->vmp_gobbled) {
5110 		assert( !VM_PAGE_WIRED(m));
5111 		if (!m->vmp_private && !m->vmp_fictitious) {
5112 			vm_page_wire_count--;
5113 		}
5114 		vm_page_gobble_count--;
5115 		m->vmp_gobbled = FALSE;
5116 	}
5117 	/*
5118 	 * if this page is currently on the pageout queue, we can't do the
5119 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5120 	 * and we can't remove it manually since we would need the object lock
5121 	 * (which is not required here) to decrement the activity_in_progress
5122 	 * reference which is held on the object while the page is in the pageout queue...
5123 	 * just let the normal laundry processing proceed
5124 	 */
5125 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5126 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5127 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5128 		return;
5129 	}
5130 
5131 #if DEBUG
5132 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
5133 		panic("vm_page_activate: already active");
5134 	}
5135 #endif
5136 
5137 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5138 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5139 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5140 	}
5141 
5142 	/*
5143 	 * A freshly activated page should be promoted in the donation queue.
5144 	 * So we remove it here while preserving its hint and we will enqueue
5145 	 * it again in vm_page_enqueue_active.
5146 	 */
5147 	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5148 
5149 	if (!VM_PAGE_WIRED(m)) {
5150 		vm_page_check_pageable_safe(m);
5151 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5152 		    m->vmp_dirty && m_object->internal &&
5153 		    (m_object->purgable == VM_PURGABLE_DENY ||
5154 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5155 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5156 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5157 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5158 			vm_page_throttled_count++;
5159 		} else {
5160 #if CONFIG_SECLUDED_MEMORY
5161 			if (secluded_for_filecache &&
5162 			    vm_page_secluded_target != 0 &&
5163 			    num_tasks_can_use_secluded_mem == 0 &&
5164 			    m_object->eligible_for_secluded &&
5165 			    !m->vmp_realtime) {
5166 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5167 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5168 				vm_page_secluded_count++;
5169 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5170 				vm_page_secluded_count_inuse++;
5171 				assert(!m_object->internal);
5172 //				vm_page_pageable_external_count++;
5173 			} else
5174 #endif /* CONFIG_SECLUDED_MEMORY */
5175 			vm_page_enqueue_active(m, FALSE);
5176 		}
5177 		m->vmp_reference = TRUE;
5178 		m->vmp_no_cache = FALSE;
5179 	}
5180 	VM_PAGE_CHECK(m);
5181 }
5182 
5183 
5184 /*
5185  *      vm_page_speculate:
5186  *
5187  *      Put the specified page on the speculative list (if appropriate).
5188  *
5189  *      The page queues must be locked.
5190  */
5191 void
5192 vm_page_speculate(
5193 	vm_page_t       m,
5194 	boolean_t       new)
5195 {
5196 	struct vm_speculative_age_q     *aq;
5197 	vm_object_t     m_object;
5198 
5199 	m_object = VM_PAGE_OBJECT(m);
5200 
5201 	VM_PAGE_CHECK(m);
5202 	vm_page_check_pageable_safe(m);
5203 
5204 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5205 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5206 	assert( !(m->vmp_absent && !m->vmp_unusual));
5207 	assert(m_object->internal == FALSE);
5208 
5209 	/*
5210 	 * if this page is currently on the pageout queue, we can't do the
5211 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5212 	 * and we can't remove it manually since we would need the object lock
5213 	 * (which is not required here) to decrement the activity_in_progress
5214 	 * reference which is held on the object while the page is in the pageout queue...
5215 	 * just let the normal laundry processing proceed
5216 	 */
5217 	if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5218 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5219 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5220 		return;
5221 	}
5222 
5223 	vm_page_queues_remove(m, FALSE);
5224 
5225 	if (!VM_PAGE_WIRED(m)) {
5226 		mach_timespec_t         ts;
5227 		clock_sec_t sec;
5228 		clock_nsec_t nsec;
5229 
5230 		clock_get_system_nanotime(&sec, &nsec);
5231 		ts.tv_sec = (unsigned int) sec;
5232 		ts.tv_nsec = nsec;
5233 
5234 		if (vm_page_speculative_count == 0) {
5235 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5236 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5237 
5238 			aq = &vm_page_queue_speculative[speculative_age_index];
5239 
5240 			/*
5241 			 * set the timer to begin a new group
5242 			 */
5243 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5244 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5245 
5246 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5247 		} else {
5248 			aq = &vm_page_queue_speculative[speculative_age_index];
5249 
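			/*
			 * the current age group's deadline has passed: advance
			 * to the next bucket (wrapping around), keep the steal
			 * index out of its way, age out anything already in the
			 * new bucket and restart that bucket's timer
			 */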
5250 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5251 				speculative_age_index++;
5252 
5253 				if (speculative_age_index > vm_page_max_speculative_age_q) {
5254 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5255 				}
5256 				if (speculative_age_index == speculative_steal_index) {
5257 					speculative_steal_index = speculative_age_index + 1;
5258 
5259 					if (speculative_steal_index > vm_page_max_speculative_age_q) {
5260 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5261 					}
5262 				}
5263 				aq = &vm_page_queue_speculative[speculative_age_index];
5264 
5265 				if (!vm_page_queue_empty(&aq->age_q)) {
5266 					vm_page_speculate_ageit(aq);
5267 				}
5268 
5269 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5270 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5271 
5272 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5273 			}
5274 		}
5275 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5276 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5277 		vm_page_speculative_count++;
5278 		vm_page_pageable_external_count++;
5279 
5280 		if (new == TRUE) {
5281 			vm_object_lock_assert_exclusive(m_object);
5282 
5283 			m_object->pages_created++;
5284 #if DEVELOPMENT || DEBUG
5285 			vm_page_speculative_created++;
5286 #endif
5287 		}
5288 	}
5289 	VM_PAGE_CHECK(m);
5290 }
5291 
5292 
5293 /*
5294  * move pages from the specified aging bin to
5295  * the speculative bin that pageout_scan claims from
5296  *
5297  *      The page queues must be locked.
5298  */
5299 void
5300 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5301 {
5302 	struct vm_speculative_age_q     *sq;
5303 	vm_page_t       t;
5304 
5305 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5306 
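	/*
	 * splice the entire contents of 'aq' onto the tail of the AGED
	 * queue ('sq'), patching the packed prev/next pointers at both
	 * seams, then reinitialize 'aq' as an empty queue
	 */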
5307 	if (vm_page_queue_empty(&sq->age_q)) {
5308 		sq->age_q.next = aq->age_q.next;
5309 		sq->age_q.prev = aq->age_q.prev;
5310 
5311 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5312 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5313 
5314 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5315 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5316 	} else {
5317 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5318 		t->vmp_pageq.next = aq->age_q.next;
5319 
5320 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5321 		t->vmp_pageq.prev = sq->age_q.prev;
5322 
5323 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5324 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5325 
5326 		sq->age_q.prev = aq->age_q.prev;
5327 	}
5328 	vm_page_queue_init(&aq->age_q);
5329 }
5330 
5331 
5332 void
5333 vm_page_lru(
5334 	vm_page_t       m)
5335 {
5336 	VM_PAGE_CHECK(m);
5337 	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5338 	assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5339 
5340 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5341 
5342 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5343 		/*
5344 		 * we don't need to do all the other work that
5345 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5346 		 * bring along for the ride
5347 		 */
5348 		assert(!m->vmp_laundry);
5349 		assert(!m->vmp_private);
5350 
5351 		m->vmp_no_cache = FALSE;
5352 
5353 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5354 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5355 
5356 		return;
5357 	}
5358 	/*
5359 	 * if this page is currently on the pageout queue, we can't do the
5360 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5361 	 * and we can't remove it manually since we would need the object lock
5362 	 * (which is not required here) to decrement the activity_in_progress
5363 	 * reference which is held on the object while the page is in the pageout queue...
5364 	 * just let the normal laundry processing proceed
5365 	 */
5366 	if (m->vmp_laundry || m->vmp_private ||
5367 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5368 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5369 	    VM_PAGE_WIRED(m)) {
5370 		return;
5371 	}
5372 
5373 	m->vmp_no_cache = FALSE;
5374 
5375 	vm_page_queues_remove(m, FALSE);
5376 
5377 	vm_page_enqueue_inactive(m, FALSE);
5378 }
5379 
5380 
5381 void
5382 vm_page_reactivate_all_throttled(void)
5383 {
5384 	vm_page_t       first_throttled, last_throttled;
5385 	vm_page_t       first_active;
5386 	vm_page_t       m;
5387 	int             extra_active_count;
5388 	int             extra_internal_count, extra_external_count;
5389 	vm_object_t     m_object;
5390 
5391 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5392 		return;
5393 	}
5394 
5395 	extra_active_count = 0;
5396 	extra_internal_count = 0;
5397 	extra_external_count = 0;
5398 	vm_page_lock_queues();
5399 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5400 		/*
5401 		 * Switch "throttled" pages to "active".
5402 		 */
5403 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5404 			VM_PAGE_CHECK(m);
5405 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5406 
5407 			m_object = VM_PAGE_OBJECT(m);
5408 
5409 			extra_active_count++;
5410 			if (m_object->internal) {
5411 				extra_internal_count++;
5412 			} else {
5413 				extra_external_count++;
5414 			}
5415 
5416 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5417 			VM_PAGE_CHECK(m);
5418 			vm_page_add_to_specialq(m, FALSE);
5419 		}
5420 
5421 		/*
5422 		 * Transfer the entire throttled queue to the regular LRU page queues.
5423 		 * We insert it at the head of the active queue, so that these pages
5424 		 * get re-evaluated by the LRU algorithm first, since they've been
5425 		 * completely out of it until now.
5426 		 */
5427 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5428 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5429 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5430 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5431 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5432 		} else {
5433 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5434 		}
5435 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5436 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5437 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5438 
5439 #if DEBUG
5440 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5441 #endif
5442 		vm_page_queue_init(&vm_page_queue_throttled);
5443 		/*
5444 		 * Adjust the global page counts.
5445 		 */
5446 		vm_page_active_count += extra_active_count;
5447 		vm_page_pageable_internal_count += extra_internal_count;
5448 		vm_page_pageable_external_count += extra_external_count;
5449 		vm_page_throttled_count = 0;
5450 	}
5451 	assert(vm_page_throttled_count == 0);
5452 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5453 	vm_page_unlock_queues();
5454 }
5455 
5456 
5457 /*
5458  * move pages from the indicated local queue to the global active queue
5459  * it's ok to fail if we're below the hard limit and force == FALSE
5460  * the nolocks == TRUE case is to allow this function to be run on
5461  * the hibernate path
5462  */
5463 
5464 void
5465 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5466 {
5467 	struct vpl      *lq;
5468 	vm_page_t       first_local, last_local;
5469 	vm_page_t       first_active;
5470 	vm_page_t       m;
5471 	uint32_t        count = 0;
5472 
5473 	if (vm_page_local_q == NULL) {
5474 		return;
5475 	}
5476 
5477 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5478 
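	/*
	 * if we're below the hard limit and not forced, only try-lock the
	 * page queues so we never stall the caller; otherwise spin for them
	 * (the nolocks == TRUE hibernate case skips the locking entirely)
	 */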
5479 	if (nolocks == FALSE) {
5480 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5481 			if (!vm_page_trylockspin_queues()) {
5482 				return;
5483 			}
5484 		} else {
5485 			vm_page_lockspin_queues();
5486 		}
5487 
5488 		VPL_LOCK(&lq->vpl_lock);
5489 	}
5490 	if (lq->vpl_count) {
5491 		/*
5492 		 * Switch "local" pages to "active".
5493 		 */
5494 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5495 
5496 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5497 			VM_PAGE_CHECK(m);
5498 			vm_page_check_pageable_safe(m);
5499 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5500 			assert(!m->vmp_fictitious);
5501 
5502 			if (m->vmp_local_id != lid) {
5503 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5504 			}
5505 
5506 			m->vmp_local_id = 0;
5507 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5508 			VM_PAGE_CHECK(m);
5509 			vm_page_add_to_specialq(m, FALSE);
5510 			count++;
5511 		}
5512 		if (count != lq->vpl_count) {
5513 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5514 		}
5515 
5516 		/*
5517 		 * Transfer the entire local queue to the regular LRU page queues.
5518 		 */
5519 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5520 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5521 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5522 
5523 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5524 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5525 		} else {
5526 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5527 		}
5528 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5529 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5530 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5531 
5532 		vm_page_queue_init(&lq->vpl_queue);
5533 		/*
5534 		 * Adjust the global page counts.
5535 		 */
5536 		vm_page_active_count += lq->vpl_count;
5537 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5538 		vm_page_pageable_external_count += lq->vpl_external_count;
5539 		lq->vpl_count = 0;
5540 		lq->vpl_internal_count = 0;
5541 		lq->vpl_external_count = 0;
5542 	}
5543 	assert(vm_page_queue_empty(&lq->vpl_queue));
5544 
5545 	if (nolocks == FALSE) {
5546 		VPL_UNLOCK(&lq->vpl_lock);
5547 
5548 		vm_page_balance_inactive(count / 4);
5549 		vm_page_unlock_queues();
5550 	}
5551 }
5552 
5553 /*
5554  *	vm_page_part_zero_fill:
5555  *
5556  *	Zero-fill a part of the page.
5557  */
5558 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5559 void
5560 vm_page_part_zero_fill(
5561 	vm_page_t       m,
5562 	vm_offset_t     m_pa,
5563 	vm_size_t       len)
5564 {
5565 #if 0
5566 	/*
5567 	 * we don't hold the page queue lock
5568 	 * so this check isn't safe to make
5569 	 */
5570 	VM_PAGE_CHECK(m);
5571 #endif
5572 
5573 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5574 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5575 #else
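	/*
	 * fallback when pmap can't zero a sub-page range: build the result
	 * in a scratch page -- zero it, copy the untouched leading/trailing
	 * ranges of 'm' into it, then copy the whole scratch page back over 'm'
	 */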
5576 	vm_page_t       tmp;
5577 	while (1) {
5578 		tmp = vm_page_grab();
5579 		if (tmp == VM_PAGE_NULL) {
5580 			vm_page_wait(THREAD_UNINT);
5581 			continue;
5582 		}
5583 		break;
5584 	}
5585 	vm_page_zero_fill(tmp);
5586 	if (m_pa != 0) {
5587 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
5588 	}
5589 	if ((m_pa + len) < PAGE_SIZE) {
5590 		vm_page_part_copy(m, m_pa + len, tmp,
5591 		    m_pa + len, PAGE_SIZE - (m_pa + len));
5592 	}
5593 	vm_page_copy(tmp, m);
5594 	VM_PAGE_FREE(tmp);
5595 #endif
5596 }
5597 
5598 /*
5599  *	vm_page_zero_fill:
5600  *
5601  *	Zero-fill the specified page.
5602  */
5603 void
5604 vm_page_zero_fill(
5605 	vm_page_t       m)
5606 {
5607 #if 0
5608 	/*
5609 	 * we don't hold the page queue lock
5610 	 * so this check isn't safe to make
5611 	 */
5612 	VM_PAGE_CHECK(m);
5613 #endif
5614 
5615 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
5616 	pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5617 }
5618 
5619 /*
5620  *	vm_page_part_copy:
5621  *
5622  *	copy part of one page to another
5623  */
5624 
5625 void
5626 vm_page_part_copy(
5627 	vm_page_t       src_m,
5628 	vm_offset_t     src_pa,
5629 	vm_page_t       dst_m,
5630 	vm_offset_t     dst_pa,
5631 	vm_size_t       len)
5632 {
5633 #if 0
5634 	/*
5635 	 * we don't hold the page queue lock
5636 	 * so this check isn't safe to make
5637 	 */
5638 	VM_PAGE_CHECK(src_m);
5639 	VM_PAGE_CHECK(dst_m);
5640 #endif
5641 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5642 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5643 }
5644 
5645 /*
5646  *	vm_page_copy:
5647  *
5648  *	Copy one page to another
5649  */
5650 
5651 int vm_page_copy_cs_validations = 0;
5652 int vm_page_copy_cs_tainted = 0;
5653 
5654 void
5655 vm_page_copy(
5656 	vm_page_t       src_m,
5657 	vm_page_t       dest_m)
5658 {
5659 	vm_object_t     src_m_object;
5660 
5661 	src_m_object = VM_PAGE_OBJECT(src_m);
5662 
5663 #if 0
5664 	/*
5665 	 * we don't hold the page queue lock
5666 	 * so this check isn't safe to make
5667 	 */
5668 	VM_PAGE_CHECK(src_m);
5669 	VM_PAGE_CHECK(dest_m);
5670 #endif
5671 	vm_object_lock_assert_held(src_m_object);
5672 
5673 	if (src_m_object != VM_OBJECT_NULL &&
5674 	    src_m_object->code_signed) {
5675 		/*
5676 		 * We're copying a page from a code-signed object.
5677 		 * Whoever ends up mapping the copy page might care about
5678 		 * the original page's integrity, so let's validate the
5679 		 * source page now.
5680 		 */
5681 		vm_page_copy_cs_validations++;
5682 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5683 #if DEVELOPMENT || DEBUG
5684 		DTRACE_VM4(codesigned_copy,
5685 		    vm_object_t, src_m_object,
5686 		    vm_object_offset_t, src_m->vmp_offset,
5687 		    int, src_m->vmp_cs_validated,
5688 		    int, src_m->vmp_cs_tainted);
5689 #endif /* DEVELOPMENT || DEBUG */
5690 	}
5691 
5692 	/*
5693 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
5694 	 * the cs_validated bit.
5695 	 */
5696 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5697 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5698 	if (dest_m->vmp_cs_tainted) {
5699 		vm_page_copy_cs_tainted++;
5700 	}
5701 	dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5702 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5703 }
5704 
5705 #if MACH_ASSERT
5706 static void
5707 _vm_page_print(
5708 	vm_page_t       p)
5709 {
5710 	printf("vm_page %p: \n", p);
5711 	printf("  pageq: next=%p prev=%p\n",
5712 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5713 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5714 	printf("  listq: next=%p prev=%p\n",
5715 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5716 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5717 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5718 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5719 	printf("  wire_count=%u\n", p->vmp_wire_count);
5720 	printf("  q_state=%u\n", p->vmp_q_state);
5721 
5722 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
5723 	    (p->vmp_laundry ? "" : "!"),
5724 	    (p->vmp_reference ? "" : "!"),
5725 	    (p->vmp_gobbled ? "" : "!"),
5726 	    (p->vmp_private ? "" : "!"));
5727 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5728 	    (p->vmp_busy ? "" : "!"),
5729 	    (p->vmp_wanted ? "" : "!"),
5730 	    (p->vmp_tabled ? "" : "!"),
5731 	    (p->vmp_fictitious ? "" : "!"),
5732 	    (p->vmp_pmapped ? "" : "!"),
5733 	    (p->vmp_wpmapped ? "" : "!"));
5734 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5735 	    (p->vmp_free_when_done ? "" : "!"),
5736 	    (p->vmp_absent ? "" : "!"),
5737 	    (VMP_ERROR_GET(p) ? "" : "!"),
5738 	    (p->vmp_dirty ? "" : "!"),
5739 	    (p->vmp_cleaning ? "" : "!"),
5740 	    (p->vmp_precious ? "" : "!"),
5741 	    (p->vmp_clustered ? "" : "!"));
5742 	printf("  %soverwriting, %srestart, %sunusual\n",
5743 	    (p->vmp_overwriting ? "" : "!"),
5744 	    (p->vmp_restart ? "" : "!"),
5745 	    (p->vmp_unusual ? "" : "!"));
5746 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5747 	    p->vmp_cs_validated,
5748 	    p->vmp_cs_tainted,
5749 	    p->vmp_cs_nx,
5750 	    (p->vmp_no_cache ? "" : "!"));
5751 
5752 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5753 }
5754 
5755 /*
5756  *	Check that the list of pages is ordered by
5757  *	ascending physical address and has no holes.
5758  */
5759 static int
5760 vm_page_verify_contiguous(
5761 	vm_page_t       pages,
5762 	unsigned int    npages)
5763 {
5764 	vm_page_t               m;
5765 	unsigned int            page_count;
5766 	vm_offset_t             prev_addr;
5767 
5768 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5769 	page_count = 1;
5770 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5771 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5772 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5773 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5774 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5775 			panic("vm_page_verify_contiguous:  not contiguous!");
5776 		}
5777 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5778 		++page_count;
5779 	}
5780 	if (page_count != npages) {
5781 		printf("pages %p actual count 0x%x but requested 0x%x\n",
5782 		    pages, page_count, npages);
5783 		panic("vm_page_verify_contiguous:  count error");
5784 	}
5785 	return 1;
5786 }
5787 
5788 
5789 /*
5790  *	Check the free lists for proper length etc.
5791  */
5792 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5793 static unsigned int
5794 vm_page_verify_free_list(
5795 	vm_page_queue_head_t    *vm_page_queue,
5796 	unsigned int    color,
5797 	vm_page_t       look_for_page,
5798 	boolean_t       expect_page)
5799 {
5800 	unsigned int    npages;
5801 	vm_page_t       m;
5802 	vm_page_t       prev_m;
5803 	boolean_t       found_page;
5804 
5805 	if (!vm_page_verify_this_free_list_enabled) {
5806 		return 0;
5807 	}
5808 
5809 	found_page = FALSE;
5810 	npages = 0;
5811 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5812 
5813 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5814 		if (m == look_for_page) {
5815 			found_page = TRUE;
5816 		}
5817 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5818 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5819 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5820 		}
5821 		if (!m->vmp_busy) {
5822 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5823 			    color, npages, m);
5824 		}
5825 		if (color != (unsigned int) -1) {
5826 			if (VM_PAGE_GET_COLOR(m) != color) {
5827 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5828 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
5829 			}
5830 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5831 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5832 				    color, npages, m, m->vmp_q_state);
5833 			}
5834 		} else {
5835 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5836 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5837 				    npages, m, m->vmp_q_state);
5838 			}
5839 		}
5840 		++npages;
5841 		prev_m = m;
5842 	}
5843 	if (look_for_page != VM_PAGE_NULL) {
5844 		unsigned int other_color;
5845 
5846 		if (expect_page && !found_page) {
5847 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5848 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5849 			_vm_page_print(look_for_page);
5850 			for (other_color = 0;
5851 			    other_color < vm_colors;
5852 			    other_color++) {
5853 				if (other_color == color) {
5854 					continue;
5855 				}
5856 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5857 				    other_color, look_for_page, FALSE);
5858 			}
5859 			if (color == (unsigned int) -1) {
5860 				vm_page_verify_free_list(&vm_lopage_queue_free,
5861 				    (unsigned int) -1, look_for_page, FALSE);
5862 			}
5863 			panic("vm_page_verify_free_list(color=%u)", color);
5864 		}
5865 		if (!expect_page && found_page) {
5866 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5867 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5868 		}
5869 	}
5870 	return npages;
5871 }
5872 
5873 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5874 static void
5875 vm_page_verify_free_lists( void )
5876 {
5877 	unsigned int    color, npages, nlopages;
5878 	boolean_t       toggle = TRUE;
5879 
5880 	if (!vm_page_verify_all_free_lists_enabled) {
5881 		return;
5882 	}
5883 
5884 	npages = 0;
5885 
5886 	vm_free_page_lock();
5887 
5888 	if (vm_page_verify_this_free_list_enabled == TRUE) {
5889 		/*
5890 		 * This variable has been set globally for extra checking of
5891 		 * each free list Q. Since we didn't set it, we don't own it
5892 		 * and we shouldn't toggle it.
5893 		 */
5894 		toggle = FALSE;
5895 	}
5896 
5897 	if (toggle == TRUE) {
5898 		vm_page_verify_this_free_list_enabled = TRUE;
5899 	}
5900 
5901 	for (color = 0; color < vm_colors; color++) {
5902 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5903 		    color, VM_PAGE_NULL, FALSE);
5904 	}
5905 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5906 	    (unsigned int) -1,
5907 	    VM_PAGE_NULL, FALSE);
5908 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5909 		panic("vm_page_verify_free_lists:  "
5910 		    "npages %u free_count %d nlopages %u lo_free_count %u",
5911 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5912 	}
5913 
5914 	if (toggle == TRUE) {
5915 		vm_page_verify_this_free_list_enabled = FALSE;
5916 	}
5917 
5918 	vm_free_page_unlock();
5919 }
5920 
5921 #endif  /* MACH_ASSERT */
5922 
5923 /*
5924  * wrapper for pmap_enter()
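 *
 * fails fast (KERN_MEMORY_FAILURE) on pages flagged with an error and
 * derives the pmap options (internal / reusable) from the page and its
 * object before calling pmap_enter_options()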
5925  */
5926 kern_return_t
5927 pmap_enter_check(
5928 	pmap_t           pmap,
5929 	vm_map_address_t virtual_address,
5930 	vm_page_t        page,
5931 	vm_prot_t        protection,
5932 	vm_prot_t        fault_type,
5933 	unsigned int     flags,
5934 	boolean_t        wired)
5935 {
5936 	int             options = 0;
5937 	vm_object_t     obj;
5938 
5939 	if (VMP_ERROR_GET(page)) {
5940 		return KERN_MEMORY_FAILURE;
5941 	}
5942 	obj = VM_PAGE_OBJECT(page);
5943 	if (obj->internal) {
5944 		options |= PMAP_OPTIONS_INTERNAL;
5945 	}
5946 	if (page->vmp_reusable || obj->all_reusable) {
5947 		options |= PMAP_OPTIONS_REUSABLE;
5948 	}
5949 	return pmap_enter_options(pmap,
5950 	           virtual_address,
5951 	           VM_PAGE_GET_PHYS_PAGE(page),
5952 	           protection,
5953 	           fault_type,
5954 	           flags,
5955 	           wired,
5956 	           options,
5957 	           NULL,
5958 	           PMAP_MAPPING_TYPE_INFER);
5959 }
5960 
5961 
5962 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5963 
5964 /*
5965  *	CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
5966  */
5967 
5968 /*
5969  * Helper function used to determine if a page can be relocated.
5970  * A page is relocatable if it is in a stable, non-transient state.
5971  */
5972 static inline boolean_t
5973 vm_page_is_relocatable(vm_page_t m)
5974 {
5975 
5976 	if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
5977 	    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5978 		/*
5979 		 * Page is in a transient state
5980 		 * or a state we don't want to deal with.
5981 		 */
5982 		return FALSE;
5983 	} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5984 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5985 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5986 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5987 		/*
5988 		 * Page needs to be on one of our queues (other than the pageout or special
5989 		 * free queues) or it needs to belong to the compressor pool (which is now
5990 		 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
5991 		 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
5992 		 * locks we hold at this point...
5993 		 */
5994 		return FALSE;
5995 	} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
5996 	    (!m->vmp_tabled || m->vmp_busy)) {
5997 		/*
5998 		 * pages on the free list are always 'busy'
5999 		 * so we couldn't test for 'busy' in the check
6000 		 * for the transient states... pages that are
6001 		 * 'free' are never 'tabled', so we also couldn't
6002 		 * test for 'tabled'.  So we check here to make
6003 		 * sure that a non-free page is not busy and is
6004 		 * tabled on an object...
6005 		 */
6006 		return FALSE;
6007 	}
6008 	return TRUE;
6009 }
6010 
6011 /*
6012  * Free up the given page by possibly relocating its contents to a new page.
6013  * If the page is on an object, the object lock must be held.
6014  */
6015 static kern_return_t
6016 vm_page_relocate(vm_page_t m1, int *compressed_pages)
6017 {
6018 	int refmod = 0;
6019 	vm_object_t object = VM_PAGE_OBJECT(m1);
6020 	kern_return_t kr;
6021 
6022 	if (object == VM_OBJECT_NULL) {
6023 		return KERN_FAILURE;
6024 	}
6025 
6026 	vm_object_lock_assert_held(object);
6027 
6028 	if (VM_PAGE_WIRED(m1) ||
6029 	    m1->vmp_gobbled ||
6030 	    m1->vmp_laundry ||
6031 	    m1->vmp_wanted ||
6032 	    m1->vmp_cleaning ||
6033 	    m1->vmp_overwriting ||
6034 	    m1->vmp_free_when_done ||
6035 	    m1->vmp_busy ||
6036 	    m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6037 		return KERN_FAILURE;
6038 	}
6039 
6040 	boolean_t disconnected = FALSE;
6041 	boolean_t reusable = FALSE;
6042 
6043 	/*
6044 	 * Pages from reusable objects can be reclaimed directly.
6045 	 */
6046 	if ((m1->vmp_reusable || object->all_reusable) &&
6047 	    m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6048 	    !m1->vmp_reference) {
6049 		/*
6050 		 * reusable page...
6051 		 */
6052 
6053 		refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6054 		disconnected = TRUE;
6055 		if (refmod == 0) {
6056 			/*
6057 			 * ... not reused: can steal without relocating contents.
6058 			 */
6059 			reusable = TRUE;
6060 		}
6061 	}
6062 
6063 	if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6064 		vm_object_offset_t offset;
6065 
6066 		/* page is not reusable, we need to allocate a new page
6067 		 * and move its contents there.
6068 		 */
6069 		vm_page_t m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6070 
6071 		if (m2 == VM_PAGE_NULL) {
6072 			return KERN_RESOURCE_SHORTAGE;
6073 		}
6074 
6075 		if (!disconnected) {
6076 			if (m1->vmp_pmapped) {
6077 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6078 			} else {
6079 				refmod = 0;
6080 			}
6081 		}
6082 
6083 		/* copy the page's contents */
6084 		pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6085 
6086 		/* copy the page's state */
6087 		assert(!VM_PAGE_WIRED(m1));
6088 		assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6089 		assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6090 		assert(!m1->vmp_laundry);
6091 		m2->vmp_reference = m1->vmp_reference;
6092 		assert(!m1->vmp_gobbled);
6093 		assert(!m1->vmp_private);
6094 		m2->vmp_no_cache = m1->vmp_no_cache;
6095 		m2->vmp_xpmapped = 0;
6096 		assert(!m1->vmp_busy);
6097 		assert(!m1->vmp_wanted);
6098 		assert(!m1->vmp_fictitious);
6099 		m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6100 		m2->vmp_wpmapped = m1->vmp_wpmapped;
6101 		assert(!m1->vmp_free_when_done);
6102 		m2->vmp_absent = m1->vmp_absent;
6103 		m2->vmp_error = VMP_ERROR_GET(m1);
6104 		m2->vmp_dirty = m1->vmp_dirty;
6105 		assert(!m1->vmp_cleaning);
6106 		m2->vmp_precious = m1->vmp_precious;
6107 		m2->vmp_clustered = m1->vmp_clustered;
6108 		assert(!m1->vmp_overwriting);
6109 		m2->vmp_restart = m1->vmp_restart;
6110 		m2->vmp_unusual = m1->vmp_unusual;
6111 		m2->vmp_cs_validated = m1->vmp_cs_validated;
6112 		m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6113 		m2->vmp_cs_nx = m1->vmp_cs_nx;
6114 
6115 		m2->vmp_realtime = m1->vmp_realtime;
6116 		m1->vmp_realtime = false;
6117 
6118 		/*
6119 		 * If m1 had really been reusable,
6120 		 * we would have just stolen it, so
6121 		 * let's not propagate its "reusable"
6122 		 * bit and assert that m2 is not
6123 		 * marked as "reusable".
6124 		 */
6125 		// m2->vmp_reusable	= m1->vmp_reusable;
6126 		assert(!m2->vmp_reusable);
6127 
6128 		// assert(!m1->vmp_lopage);
6129 
6130 		if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6131 			m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6132 			/*
6133 			 * We just grabbed m2 up above and so it isn't
6134 			 * going to be on any special Q as yet and so
6135 			 * we don't need to 'remove' it from the special
6136 			 * queues. Just resetting the state should be enough.
6137 			 */
6138 			m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6139 		}
6140 
6141 		/*
6142 		 * page may need to be flushed if
6143 		 * it is marshalled into a UPL
6144 		 * that is going to be used by a device
6145 		 * that doesn't support coherency
6146 		 */
6147 		m2->vmp_written_by_kernel = TRUE;
6148 
6149 		/*
6150 		 * make sure we clear the ref/mod state
6151 		 * from the pmap layer... else we risk
6152 		 * inheriting state from the last time
6153 		 * this page was used...
6154 		 */
6155 		pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6156 		    VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6157 
6158 		if (refmod & VM_MEM_REFERENCED) {
6159 			m2->vmp_reference = TRUE;
6160 		}
6161 		if (refmod & VM_MEM_MODIFIED) {
6162 			SET_PAGE_DIRTY(m2, TRUE);
6163 		}
6164 		offset = m1->vmp_offset;
6165 
6166 		/*
6167 		 * completely cleans up the state
6168 		 * of the page so that it is ready
6169 		 * to be put onto the free list, or
6170 		 * for this purpose it looks like it
6171 		 * just came off of the free list
6172 		 */
6173 		vm_page_free_prepare(m1);
6174 
6175 		/*
6176 		 * now put the substitute page on the object
6177 		 */
6178 		vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6179 		    TRUE, FALSE, FALSE, NULL);
6180 
6181 		if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6182 			m2->vmp_pmapped = TRUE;
6183 			m2->vmp_wpmapped = TRUE;
6184 
6185 			kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6186 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE);
6187 
6188 			assert(kr == KERN_SUCCESS);
6189 
6190 			if (compressed_pages) {
6191 				++*compressed_pages;
6192 			}
6193 		} else {
6194 			/* relocated page was not used by the compressor
6195 			 * put it on either the active or inactive lists */
6196 			if (m2->vmp_reference) {
6197 				vm_page_activate(m2);
6198 			} else {
6199 				vm_page_deactivate(m2);
6200 			}
6201 		}
6202 
6203 		/* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6204 		vm_page_wakeup_done(object, m2);
6205 
6206 		return KERN_SUCCESS;
6207 	} else {
6208 		assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6209 
6210 		/*
6211 		 * completely cleans up the state
6212 		 * of the page so that it is ready
6213 		 * to be put onto the free list, or
6214 		 * for this purpose it looks like it
6215 		 * just came off of the free list
6216 		 */
6217 		vm_page_free_prepare(m1);
6218 
6219 		/* we're done here */
6220 		return KERN_SUCCESS;
6221 	}
6222 
6223 	return KERN_FAILURE;
6224 }
6225 
6226 /*
6227  *	CONTIGUOUS PAGE ALLOCATION
6228  *
6229  *	Find a region large enough to contain at least n pages
6230  *	of contiguous physical memory.
6231  *
6232  *	This is done by traversing the vm_page_t array in a linear fashion
6233  *	we assume that the vm_page_t array has the available physical pages in an
6234  *	ordered, ascending list... this is currently true of all our implementations
6235  *      and must remain so... there can be 'holes' in the array...  we also can
6236  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6237  *      which used to happen via 'vm_page_convert'... that function was no longer
6238  *      being called and was removed...
6239  *
6240  *	The basic flow consists of stabilizing some of the interesting state of
6241  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6242  *	sweep at the beginning of the array looking for pages that meet our criteria
6243  *	for a 'stealable' page... currently we are pretty conservative... if the page
6244  *	meets this criteria and is physically contiguous to the previous page in the 'run'
6245  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
6246  *	and start to develop a new run... if at this point we've already considered
6247  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
6248  *	and mutex_pause (which will yield the processor), to keep the latency low w/r
6249  *	to other threads trying to acquire free pages (or move pages from q to q),
6250  *	and then continue from the spot we left off... we only make 1 pass through the
6251  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
6252  *      which steals the pages from the queues they're currently on... pages on the free
6253  *	queue can be stolen directly... pages that are on any of the other queues
6254  *	must be removed from the object they are tabled on... this requires taking the
6255  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
6256  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
6257  *	dump the pages we've currently stolen back to the free list, and pick up our
6258  *	scan from the point where we aborted the 'current' run.
6259  *
6260  *
6261  *	Requirements:
6262  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
6263  *
6264  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
6265  *
6266  * Algorithm:
6267  */
6268 
6269 #define MAX_CONSIDERED_BEFORE_YIELD     1000
6270 
6271 
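/*
 * reset the per-run scan state so that the next candidate page starts a
 * fresh run (used whenever the current run has to be abandoned)
 */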
6272 #define RESET_STATE_OF_RUN()    \
6273 	MACRO_BEGIN             \
6274 	prevcontaddr = -2;      \
6275 	start_pnum = -1;        \
6276 	free_considered = 0;    \
6277 	substitute_needed = 0;  \
6278 	npages = 0;             \
6279 	MACRO_END
6280 
6281 /*
6282  * Can we steal in-use (i.e. not free) pages when searching for
6283  * physically-contiguous pages ?
6284  */
6285 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
6286 
6287 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
6288 #if DEBUG
6289 int vm_page_find_contig_debug = 0;
6290 #endif
6291 
6292 static vm_page_t
6293 vm_page_find_contiguous(
6294 	unsigned int    contig_pages,
6295 	ppnum_t         max_pnum,
6296 	ppnum_t         pnum_mask,
6297 	boolean_t       wire,
6298 	int             flags)
6299 {
6300 	vm_page_t       m = NULL;
6301 	ppnum_t         prevcontaddr = 0;
6302 	ppnum_t         start_pnum = 0;
6303 	unsigned int    npages = 0, considered = 0, scanned = 0;
6304 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
6305 	unsigned int    idx_last_contig_page_found = 0;
6306 	int             free_considered = 0, free_available = 0;
6307 	int             substitute_needed = 0;
6308 	int             zone_gc_called = 0;
6309 	boolean_t       wrapped;
6310 	kern_return_t   kr;
6311 #if DEBUG
6312 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
6313 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
6314 #endif
6315 
6316 	int             yielded = 0;
6317 	int             dumped_run = 0;
6318 	int             stolen_pages = 0;
6319 	int             compressed_pages = 0;
6320 
6321 
6322 	if (contig_pages == 0) {
6323 		return VM_PAGE_NULL;
6324 	}
6325 
6326 full_scan_again:
6327 
6328 #if MACH_ASSERT
6329 	vm_page_verify_free_lists();
6330 #endif
6331 #if DEBUG
6332 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
6333 #endif
6334 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6335 
6336 	/*
6337 	 * If there are still delayed pages, try to free up some that match.
6338 	 */
6339 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
6340 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
6341 	}
6342 
6343 	vm_page_lock_queues();
6344 	vm_free_page_lock();
6345 
6346 	RESET_STATE_OF_RUN();
6347 
6348 	scanned = 0;
6349 	considered = 0;
6350 	free_available = vm_page_free_count - vm_page_free_reserved;
6351 
6352 	wrapped = FALSE;
6353 
6354 	if (flags & KMA_LOMEM) {
6355 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
6356 	} else {
6357 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
6358 	}
6359 
6360 	orig_last_idx = idx_last_contig_page_found;
6361 	last_idx = orig_last_idx;
6362 
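	/*
	 * resume the linear sweep where the last successful search left off;
	 * if we reach the end of vm_pages[] without completing a run we wrap
	 * to index 0 and give up once we're back at 'orig_last_idx'
	 */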
6363 	for (page_idx = last_idx, start_idx = last_idx;
6364 	    npages < contig_pages && page_idx < vm_pages_count;
6365 	    page_idx++) {
6366 retry:
6367 		if (wrapped &&
6368 		    npages == 0 &&
6369 		    page_idx >= orig_last_idx) {
6370 			/*
6371 			 * We're back where we started and we haven't
6372 			 * found any suitable contiguous range.  Let's
6373 			 * give up.
6374 			 */
6375 			break;
6376 		}
6377 		scanned++;
6378 		m = &vm_pages[page_idx];
6379 
6380 		assert(!m->vmp_fictitious);
6381 		assert(!m->vmp_private);
6382 
6383 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
6384 			/* no more low pages... */
6385 			break;
6386 		}
6387 		if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
6388 			/*
6389 			 * not aligned
6390 			 */
6391 			RESET_STATE_OF_RUN();
6392 		} else if (!vm_page_is_relocatable(m)) {
6393 			/*
6394 			 * page is not relocatable */
6395 			RESET_STATE_OF_RUN();
6396 		} else {
6397 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6398 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6399 					RESET_STATE_OF_RUN();
6400 					goto did_consider;
6401 				} else {
6402 					npages = 1;
6403 					start_idx = page_idx;
6404 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6405 				}
6406 			} else {
6407 				npages++;
6408 			}
6409 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6410 
6411 			VM_PAGE_CHECK(m);
6412 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6413 				free_considered++;
6414 			} else {
6415 				/*
6416 				 * This page is not free.
6417 				 * If we can't steal used pages,
6418 				 * we have to give up this run
6419 				 * and keep looking.
6420 				 * Otherwise, we might need to
6421 				 * move the contents of this page
6422 				 * into a substitute page.
6423 				 */
6424 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6425 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6426 					substitute_needed++;
6427 				}
6428 #else
6429 				RESET_STATE_OF_RUN();
6430 #endif
6431 			}
6432 
6433 			if ((free_considered + substitute_needed) > free_available) {
6434 				/*
6435 				 * if we let this run continue
6436 				 * we will end up dropping the vm_page_free_count
6437 				 * below the reserve limit... we need to abort
6438 				 * this run, but we can at least re-consider this
6439 				 * page... thus the jump back to 'retry'
6440 				 */
6441 				RESET_STATE_OF_RUN();
6442 
6443 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6444 					considered++;
6445 					goto retry;
6446 				}
6447 				/*
6448 				 * free_available == 0
6449 				 * so can't consider any free pages... if
6450 				 * we went to retry in this case, we'd
6451 				 * get stuck looking at the same page
6452 				 * w/o making any forward progress
6453 				 * we also want to take this path if we've already
6454 				 * reached our limit that controls the lock latency
6455 				 */
6456 			}
6457 		}
6458 did_consider:
6459 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6460 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6461 
6462 			vm_free_page_unlock();
6463 			vm_page_unlock_queues();
6464 
6465 			mutex_pause(0);
6466 
6467 			PAGE_REPLACEMENT_ALLOWED(TRUE);
6468 
6469 			vm_page_lock_queues();
6470 			vm_free_page_lock();
6471 
6472 			RESET_STATE_OF_RUN();
6473 			/*
6474 			 * reset our free page limit since we
6475 			 * dropped the lock protecting the vm_page_free_queue
6476 			 */
6477 			free_available = vm_page_free_count - vm_page_free_reserved;
6478 			considered = 0;
6479 
6480 			yielded++;
6481 
6482 			goto retry;
6483 		}
6484 		considered++;
6485 	} /* main for-loop end */
6486 
6487 	m = VM_PAGE_NULL;
6488 
6489 	if (npages != contig_pages) {
6490 		if (!wrapped) {
6491 			/*
6492 			 * We didn't find a contiguous range but we didn't
6493 			 * start from the very first page.
6494 			 * Start again from the very first page.
6495 			 */
6496 			RESET_STATE_OF_RUN();
6497 			if (flags & KMA_LOMEM) {
6498 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6499 			} else {
6500 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6501 			}
6502 			last_idx = 0;
6503 			page_idx = last_idx;
6504 			wrapped = TRUE;
6505 			goto retry;
6506 		}
6507 		vm_free_page_unlock();
6508 	} else {
6509 		vm_page_t m1;
6510 		unsigned int cur_idx;
6511 		unsigned int tmp_start_idx;
6512 		vm_object_t locked_object = VM_OBJECT_NULL;
6513 		boolean_t abort_run = FALSE;
6514 
6515 		assert(page_idx - start_idx == contig_pages);
6516 
6517 		tmp_start_idx = start_idx;
6518 
6519 		/*
6520 		 * first pass through to pull the free pages
6521 		 * off of the free queue so that in case we
6522 		 * need substitute pages, we won't grab any
6523 		 * of the free pages in the run... we'll clear
6524 		 * the 'free' bit in the 2nd pass, and even in
6525 		 * an abort_run case, we'll collect all of the
6526 		 * free pages in this run and return them to the free list
6527 		 */
6528 		while (start_idx < page_idx) {
6529 			m1 = &vm_pages[start_idx++];
6530 
6531 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6532 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6533 #endif
6534 
6535 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6536 				unsigned int color;
6537 
6538 				color = VM_PAGE_GET_COLOR(m1);
6539 #if MACH_ASSERT
6540 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6541 #endif
6542 				vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6543 
6544 				VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6545 #if MACH_ASSERT
6546 				vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6547 #endif
6548 				/*
6549 				 * Clear the "free" bit so that this page
6550 				 * does not get considered for another
6551 				 * concurrent physically-contiguous allocation.
6552 				 */
6553 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6554 				assert(m1->vmp_busy);
6555 
6556 				vm_page_free_count--;
6557 			}
6558 		}
6559 		if (flags & KMA_LOMEM) {
6560 			vm_page_lomem_find_contiguous_last_idx = page_idx;
6561 		} else {
6562 			vm_page_find_contiguous_last_idx = page_idx;
6563 		}
6564 
6565 		/*
6566 		 * we can drop the free queue lock at this point since
6567 		 * we've pulled any 'free' candidates off of the list
6568 		 * we need it dropped so that we can do a vm_page_grab
6569 		 * when substituting for pmapped/dirty pages
6570 		 */
6571 		vm_free_page_unlock();
6572 
6573 		start_idx = tmp_start_idx;
6574 		cur_idx = page_idx - 1;
6575 
6576 		while (start_idx++ < page_idx) {
6577 			/*
6578 			 * must go through the list from back to front
6579 			 * so that the page list is created in the
6580 			 * correct order - low -> high phys addresses
6581 			 */
6582 			m1 = &vm_pages[cur_idx--];
6583 
6584 			if (m1->vmp_object == 0) {
6585 				/*
6586 				 * page has already been removed from
6587 				 * the free list in the 1st pass
6588 				 */
6589 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6590 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
6591 				assert(m1->vmp_busy);
6592 				assert(!m1->vmp_wanted);
6593 				assert(!m1->vmp_laundry);
6594 			} else {
6595 				/*
6596 				 * try to relocate/steal the page
6597 				 */
6598 				if (abort_run == TRUE) {
6599 					continue;
6600 				}
6601 
6602 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6603 
6604 				vm_object_t object = VM_PAGE_OBJECT(m1);
6605 
6606 				if (object != locked_object) {
6607 					if (locked_object) {
6608 						vm_object_unlock(locked_object);
6609 						locked_object = VM_OBJECT_NULL;
6610 					}
6611 					if (vm_object_lock_try(object)) {
6612 						locked_object = object;
6613 					} else {
6614 						/* object must be locked to relocate its pages */
6615 						tmp_start_idx = cur_idx;
6616 						abort_run = TRUE;
6617 						continue;
6618 					}
6619 				}
6620 
6621 				kr = vm_page_relocate(m1, &compressed_pages);
6622 				if (kr != KERN_SUCCESS) {
6623 					if (locked_object) {
6624 						vm_object_unlock(locked_object);
6625 						locked_object = VM_OBJECT_NULL;
6626 					}
6627 					tmp_start_idx = cur_idx;
6628 					abort_run = TRUE;
6629 					continue;
6630 				}
6631 
6632 				stolen_pages++;
6633 			}
6634 
6635 			/* m1 is ours at this point ... */
6636 
6637 			if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6638 				/*
6639 				 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6640 				 * change it for pages marked as used-by-compressor.
6641 				 */
6642 				vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6643 			}
6644 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6645 			m1->vmp_snext = m;
6646 			m = m1;
6647 		}
6648 
6649 		if (locked_object) {
6650 			vm_object_unlock(locked_object);
6651 			locked_object = VM_OBJECT_NULL;
6652 		}
6653 
6654 		if (abort_run == TRUE) {
6655 			/*
6656 			 * want the index of the last
6657 			 * page in this run that was
6658 			 * successfully 'stolen', so back
6659 			 * it up 1 for the auto-decrement on use
6660 			 * and 1 more to bump back over this page
6661 			 */
6662 			page_idx = tmp_start_idx + 2;
6663 			if (page_idx >= vm_pages_count) {
6664 				if (wrapped) {
6665 					if (m != VM_PAGE_NULL) {
6666 						vm_page_unlock_queues();
6667 						vm_page_free_list(m, FALSE);
6668 						vm_page_lock_queues();
6669 						m = VM_PAGE_NULL;
6670 					}
6671 					dumped_run++;
6672 					goto done_scanning;
6673 				}
6674 				page_idx = last_idx = 0;
6675 				wrapped = TRUE;
6676 			}
6677 			abort_run = FALSE;
6678 
6679 			/*
6680 			 * We didn't find a contiguous range but we didn't
6681 			 * start from the very first page.
6682 			 * Start again from the very first page.
6683 			 */
6684 			RESET_STATE_OF_RUN();
6685 
6686 			if (flags & KMA_LOMEM) {
6687 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
6688 			} else {
6689 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6690 			}
6691 
6692 			last_idx = page_idx;
6693 
6694 			if (m != VM_PAGE_NULL) {
6695 				vm_page_unlock_queues();
6696 				vm_page_free_list(m, FALSE);
6697 				vm_page_lock_queues();
6698 				m = VM_PAGE_NULL;
6699 			}
6700 			dumped_run++;
6701 
6702 			vm_free_page_lock();
6703 			/*
6704 			 * reset our free page limit since we
6705 			 * dropped the lock protecting the vm_page_free_queue
6706 			 */
6707 			free_available = vm_page_free_count - vm_page_free_reserved;
6708 			goto retry;
6709 		}
6710 
6711 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6712 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6713 			assert(m1->vmp_wire_count == 0);
6714 
6715 			if (wire == TRUE) {
6716 				m1->vmp_wire_count++;
6717 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
6718 			} else {
6719 				m1->vmp_gobbled = TRUE;
6720 			}
6721 		}
6722 		if (wire == FALSE) {
6723 			vm_page_gobble_count += npages;
6724 		}
6725 
6726 		/*
6727 		 * gobbled pages are also counted as wired pages
6728 		 */
6729 		vm_page_wire_count += npages;
6730 
6731 		assert(vm_page_verify_contiguous(m, npages));
6732 	}
6733 done_scanning:
6734 	PAGE_REPLACEMENT_ALLOWED(FALSE);
6735 
6736 	vm_page_unlock_queues();
6737 
6738 #if DEBUG
6739 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6740 
6741 	tv_end_sec -= tv_start_sec;
6742 	if (tv_end_usec < tv_start_usec) {
6743 		tv_end_sec--;
6744 		tv_end_usec += 1000000;
6745 	}
6746 	tv_end_usec -= tv_start_usec;
6747 	if (tv_end_usec >= 1000000) {
6748 		tv_end_sec++;
6749 		tv_end_usec -= 1000000;
6750 	}
6751 	if (vm_page_find_contig_debug) {
6752 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
6753 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6754 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
6755 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6756 	}
6757 
6758 #endif
6759 #if MACH_ASSERT
6760 	vm_page_verify_free_lists();
6761 #endif
6762 	if (m == NULL && zone_gc_called < 2) {
6763 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6764 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6765 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6766 
6767 		if (consider_buffer_cache_collect != NULL) {
6768 			(void)(*consider_buffer_cache_collect)(1);
6769 		}
6770 
6771 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6772 
6773 		zone_gc_called++;
6774 
6775 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6776 		goto full_scan_again;
6777 	}
6778 
6779 	return m;
6780 }
6781 
6782 /*
6783  *	Allocate a list of contiguous, wired pages.
6784  */
6785 kern_return_t
6786 cpm_allocate(
6787 	vm_size_t       size,
6788 	vm_page_t       *list,
6789 	ppnum_t         max_pnum,
6790 	ppnum_t         pnum_mask,
6791 	boolean_t       wire,
6792 	int             flags)
6793 {
6794 	vm_page_t               pages;
6795 	unsigned int            npages;
6796 
6797 	if (size % PAGE_SIZE != 0) {
6798 		return KERN_INVALID_ARGUMENT;
6799 	}
6800 
6801 	npages = (unsigned int) (size / PAGE_SIZE);
6802 	if (npages != size / PAGE_SIZE) {
6803 		/* 32-bit overflow */
6804 		return KERN_INVALID_ARGUMENT;
6805 	}
6806 
6807 	/*
6808 	 *	Obtain a pointer to a subset of the free
6809 	 *	list large enough to satisfy the request;
6810 	 *	the region will be physically contiguous.
6811 	 */
6812 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6813 
6814 	if (pages == VM_PAGE_NULL) {
6815 		return KERN_NO_SPACE;
6816 	}
6817 	/*
6818 	 * determine need for wakeups
6819 	 */
6820 	if (vm_page_free_count < vm_page_free_min) {
6821 		vm_free_page_lock();
6822 		if (vm_pageout_running == FALSE) {
6823 			vm_free_page_unlock();
6824 			thread_wakeup((event_t) &vm_page_free_wanted);
6825 		} else {
6826 			vm_free_page_unlock();
6827 		}
6828 	}
6829 
6830 	VM_CHECK_MEMORYSTATUS;
6831 
6832 	/*
6833 	 *	The CPM pages should now be available and
6834 	 *	ordered by ascending physical address.
6835 	 */
6836 	assert(vm_page_verify_contiguous(pages, npages));
6837 
6838 	if (flags & KMA_ZERO) {
6839 		for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
6840 			vm_page_zero_fill(m);
6841 		}
6842 	}
6843 
6844 	*list = pages;
6845 	return KERN_SUCCESS;
6846 }
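/*
 * Editor's note: a minimal usage sketch of cpm_allocate(), added for
 * illustration only.  The caller name, the page count, and the 0 values for
 * max_pnum/pnum_mask are assumptions, not taken from the original source;
 * the size must be a multiple of PAGE_SIZE and the pages come back ordered
 * by ascending physical address.
 */
#if 0 /* illustrative sketch -- not part of the original file */
static kern_return_t
cpm_allocate_example(void)
{
	vm_page_t       pages;
	kern_return_t   kr;

	/* four physically contiguous, wired, zero-filled pages */
	kr = cpm_allocate(4 * PAGE_SIZE, &pages, 0 /* max_pnum: placeholder */,
	    0 /* pnum_mask: placeholder */, TRUE /* wire */, KMA_ZERO);
	if (kr != KERN_SUCCESS) {
		return kr;      /* typically KERN_NO_SPACE if no run was found */
	}
	/* walk the run the same way cpm_allocate() does internally */
	for (vm_page_t m = pages; m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
		(void)VM_PAGE_GET_PHYS_PAGE(m);
	}
	return KERN_SUCCESS;
}
#endif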
6847 
6848 
6849 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6850 
6851 /*
6852  * when working on a 'run' of pages, it is necessary to hold
6853  * the vm_page_queue_lock (a hot global lock) for certain operations
6854  * on the page... however, the majority of the work can be done
6855  * while merely holding the object lock... in fact there are certain
6856  * collections of pages that don't require any work brokered by the
6857  * vm_page_queue_lock... to mitigate the time spent behind the global
6858  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6859  * while doing all of the work that doesn't require the vm_page_queue_lock...
6860  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6861  * necessary work for each page... we will grab the busy bit on the page
6862  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6863  * if it can't immediately take the vm_page_queue_lock in order to compete
6864  * for the locks in the same order that vm_pageout_scan takes them.
6865  * the operation names are modeled after the names of the routines that
6866  * need to be called in order to make the changes very obvious in the
6867  * original loop
6868  */
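/*
 * Editor's note: the following is an illustrative sketch (not part of the
 * original source) of the 2 pass pattern described above.  The helper name
 * and the particular dw_mask bits chosen are assumptions; real callers build
 * their batches with masks appropriate to their own loops.
 */
#if 0 /* illustrative sketch -- not part of the original file */
static void
delayed_work_batch_example(vm_object_t object, vm_tag_t tag)
{
	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
	struct vm_page_delayed_work     *dwp = &dw_array[0];
	int                             dw_count = 0;
	vm_page_t                       m;

	vm_object_lock(object);

	vm_page_queue_iterate(&object->memq, m, vmp_listq) {
		/* pass 1: object-lock-only work; just record what each page needs */
		dwp->dw_m = m;
		dwp->dw_mask = DW_clear_busy | DW_PAGE_WAKEUP;
		dwp++;
		dw_count++;

		if (dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
			/* pass 2: one vm_page_queue_lock acquisition for the whole batch */
			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
			dwp = &dw_array[0];
			dw_count = 0;
		}
	}
	if (dw_count) {
		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
	}
	vm_object_unlock(object);
}
#endif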
6869 
6870 void
6871 vm_page_do_delayed_work(
6872 	vm_object_t     object,
6873 	vm_tag_t        tag,
6874 	struct vm_page_delayed_work *dwp,
6875 	int             dw_count)
6876 {
6877 	int             j;
6878 	vm_page_t       m;
6879 	vm_page_t       local_free_q = VM_PAGE_NULL;
6880 
6881 	/*
6882 	 * pageout_scan takes the vm_page_lock_queues first
6883 	 * then tries for the object lock... to avoid what
6884 	 * is effectively a lock inversion, we'll go to the
6885 	 * trouble of taking them in that same order... otherwise
6886 	 * if this object contains the majority of the pages resident
6887 	 * in the UBC (or a small set of large objects actively being
6888 	 * worked on contain the majority of the pages), we could
6889 	 * cause the pageout_scan thread to 'starve' in its attempt
6890 	 * to find pages to move to the free queue, since it has to
6891 	 * successfully acquire the object lock of any candidate page
6892 	 * before it can steal/clean it.
6893 	 */
6894 	if (!vm_page_trylock_queues()) {
6895 		vm_object_unlock(object);
6896 
6897 		/*
6898 		 * "Turnstile enabled vm_pageout_scan" can be runnable
6899 		 * for a very long time without getting on a core.
6900 		 * If this is a higher priority thread it could be
6901 		 * waiting here for a very long time respecting the fact
6902 		 * that pageout_scan would like its object after VPS does
6903 		 * a mutex_pause(0).
6904 		 * So we cap the number of yields in the vm_object_lock_avoid()
6905 		 * case to a single mutex_pause(0) which will give vm_pageout_scan
6906 		 * 10us to run and grab the object if needed.
6907 		 */
6908 		vm_page_lock_queues();
6909 
6910 		for (j = 0;; j++) {
6911 			if ((!vm_object_lock_avoid(object) ||
6912 			    (vps_dynamic_priority_enabled && (j > 0))) &&
6913 			    _vm_object_lock_try(object)) {
6914 				break;
6915 			}
6916 			vm_page_unlock_queues();
6917 			mutex_pause(j);
6918 			vm_page_lock_queues();
6919 		}
6920 	}
6921 	for (j = 0; j < dw_count; j++, dwp++) {
6922 		m = dwp->dw_m;
6923 
6924 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6925 			vm_pageout_throttle_up(m);
6926 		}
6927 #if CONFIG_PHANTOM_CACHE
6928 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6929 			vm_phantom_cache_update(m);
6930 		}
6931 #endif
6932 		if (dwp->dw_mask & DW_vm_page_wire) {
6933 			vm_page_wire(m, tag, FALSE);
6934 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
6935 			boolean_t       queueit;
6936 
6937 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6938 
6939 			vm_page_unwire(m, queueit);
6940 		}
6941 		if (dwp->dw_mask & DW_vm_page_free) {
6942 			vm_page_free_prepare_queues(m);
6943 
6944 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6945 			/*
6946 			 * Add this page to our list of reclaimed pages,
6947 			 * to be freed later.
6948 			 */
6949 			m->vmp_snext = local_free_q;
6950 			local_free_q = m;
6951 		} else {
6952 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6953 				vm_page_deactivate_internal(m, FALSE);
6954 			} else if (dwp->dw_mask & DW_vm_page_activate) {
6955 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6956 					vm_page_activate(m);
6957 				}
6958 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
6959 				vm_page_speculate(m, TRUE);
6960 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
6961 				/*
6962 				 * if we didn't hold the object lock and did this,
6963 				 * we might disconnect the page, then someone might
6964 				 * soft fault it back in, then we would put it on the
6965 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
6966 				 * page on that queue, which we don't want
6967 				 */
6968 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6969 
6970 				if ((refmod_state & VM_MEM_REFERENCED)) {
6971 					/*
6972 					 * this page has been touched since it got cleaned; let's activate it
6973 					 * if it hasn't already been
6974 					 */
6975 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6976 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6977 
6978 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6979 						vm_page_activate(m);
6980 					}
6981 				} else {
6982 					m->vmp_reference = FALSE;
6983 					vm_page_enqueue_cleaned(m);
6984 				}
6985 			} else if (dwp->dw_mask & DW_vm_page_lru) {
6986 				vm_page_lru(m);
6987 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6988 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6989 					vm_page_queues_remove(m, TRUE);
6990 				}
6991 			}
6992 			if (dwp->dw_mask & DW_set_reference) {
6993 				m->vmp_reference = TRUE;
6994 			} else if (dwp->dw_mask & DW_clear_reference) {
6995 				m->vmp_reference = FALSE;
6996 			}
6997 
6998 			if (dwp->dw_mask & DW_move_page) {
6999 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7000 					vm_page_queues_remove(m, FALSE);
7001 
7002 					assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
7003 
7004 					vm_page_enqueue_inactive(m, FALSE);
7005 				}
7006 			}
7007 			if (dwp->dw_mask & DW_clear_busy) {
7008 				m->vmp_busy = FALSE;
7009 			}
7010 
7011 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7012 				vm_page_wakeup(object, m);
7013 			}
7014 		}
7015 	}
7016 	vm_page_unlock_queues();
7017 
7018 	if (local_free_q) {
7019 		vm_page_free_list(local_free_q, TRUE);
7020 	}
7021 
7022 	VM_CHECK_MEMORYSTATUS;
7023 }
7024 
7025 __abortlike
7026 static void
7027 __vm_page_alloc_list_failed_panic(
7028 	vm_size_t       page_count,
7029 	kma_flags_t     flags,
7030 	kern_return_t   kr)
7031 {
7032 	panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7033 	    (size_t)page_count, flags, kr);
7034 }
7035 
7036 kern_return_t
7037 vm_page_alloc_list(
7038 	vm_size_t   page_count,
7039 	kma_flags_t flags,
7040 	vm_page_t  *list)
7041 {
7042 	vm_page_t       page_list = VM_PAGE_NULL;
7043 	vm_page_t       mem;
7044 	kern_return_t   kr = KERN_SUCCESS;
7045 	int             page_grab_count = 0;
7046 #if DEVELOPMENT || DEBUG
7047 	task_t          task;
7048 #endif /* DEVELOPMENT || DEBUG */
7049 
7050 	for (vm_size_t i = 0; i < page_count; i++) {
7051 		for (;;) {
7052 			if (flags & KMA_LOMEM) {
7053 				mem = vm_page_grablo();
7054 			} else {
7055 				uint_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7056 				mem = vm_page_grab_options(options);
7057 			}
7058 
7059 			if (mem != VM_PAGE_NULL) {
7060 				break;
7061 			}
7062 
7063 			if (flags & KMA_NOPAGEWAIT) {
7064 				kr = KERN_RESOURCE_SHORTAGE;
7065 				goto out;
7066 			}
7067 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
7068 				kr = KERN_RESOURCE_SHORTAGE;
7069 				goto out;
7070 			}
7071 
7072 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
7073 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
7074 
7075 			if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7076 				uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7077 				if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7078 					kr = KERN_RESOURCE_SHORTAGE;
7079 					goto out;
7080 				}
7081 			}
7082 			VM_PAGE_WAIT();
7083 		}
7084 
7085 		page_grab_count++;
7086 		mem->vmp_snext = page_list;
7087 		page_list = mem;
7088 	}
7089 
7090 	if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7091 		for (mem = page_list; mem; mem = mem->vmp_snext) {
7092 			vm_page_zero_fill(mem);
7093 		}
7094 	}
7095 
7096 out:
7097 #if DEBUG || DEVELOPMENT
7098 	task = current_task_early();
7099 	if (task != NULL) {
7100 		ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
7101 	}
7102 #endif
7103 
7104 	if (kr == KERN_SUCCESS) {
7105 		*list = page_list;
7106 	} else if (flags & KMA_NOFAIL) {
7107 		__vm_page_alloc_list_failed_panic(page_count, flags, kr);
7108 	} else {
7109 		vm_page_free_list(page_list, FALSE);
7110 	}
7111 
7112 	return kr;
7113 }
7114 
7115 void
7116 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7117 {
7118 	page->vmp_offset = offset;
7119 }
7120 
7121 vm_page_t
7122 vm_page_get_next(vm_page_t page)
7123 {
7124 	return page->vmp_snext;
7125 }
7126 
7127 vm_object_offset_t
7128 vm_page_get_offset(vm_page_t page)
7129 {
7130 	return page->vmp_offset;
7131 }
7132 
7133 ppnum_t
7134 vm_page_get_phys_page(vm_page_t page)
7135 {
7136 	return VM_PAGE_GET_PHYS_PAGE(page);
7137 }
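/*
 * Editor's note: an illustrative sketch (not in the original source) tying
 * vm_page_alloc_list() to the accessors above.  The function name and the
 * page count are assumptions; on failure without KMA_NOFAIL the partial list
 * is freed by vm_page_alloc_list() itself.
 */
#if 0 /* illustrative sketch -- not part of the original file */
static kern_return_t
page_list_walk_example(void)
{
	vm_page_t       list, p;
	kern_return_t   kr;

	/* grab 8 zero-filled pages; may block unless KMA_NOPAGEWAIT is set */
	kr = vm_page_alloc_list(8, KMA_ZERO, &list);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	for (p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
		(void)vm_page_get_phys_page(p);
	}
	/* unwired pages grabbed this way can be returned in bulk */
	vm_page_free_list(list, FALSE);
	return KERN_SUCCESS;
}
#endif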
7138 
7139 
7140 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7141 
7142 #if HIBERNATION
7143 
7144 static vm_page_t hibernate_gobble_queue;
7145 
7146 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
7147 static int  hibernate_flush_dirty_pages(int);
7148 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
7149 
7150 void hibernate_flush_wait(void);
7151 void hibernate_mark_in_progress(void);
7152 void hibernate_clear_in_progress(void);
7153 
7154 void            hibernate_free_range(int, int);
7155 void            hibernate_hash_insert_page(vm_page_t);
7156 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
7157 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7158 ppnum_t         hibernate_lookup_paddr(unsigned int);
7159 
7160 struct hibernate_statistics {
7161 	int hibernate_considered;
7162 	int hibernate_reentered_on_q;
7163 	int hibernate_found_dirty;
7164 	int hibernate_skipped_cleaning;
7165 	int hibernate_skipped_transient;
7166 	int hibernate_skipped_precious;
7167 	int hibernate_skipped_external;
7168 	int hibernate_queue_nolock;
7169 	int hibernate_queue_paused;
7170 	int hibernate_throttled;
7171 	int hibernate_throttle_timeout;
7172 	int hibernate_drained;
7173 	int hibernate_drain_timeout;
7174 	int cd_lock_failed;
7175 	int cd_found_precious;
7176 	int cd_found_wired;
7177 	int cd_found_busy;
7178 	int cd_found_unusual;
7179 	int cd_found_cleaning;
7180 	int cd_found_laundry;
7181 	int cd_found_dirty;
7182 	int cd_found_xpmapped;
7183 	int cd_skipped_xpmapped;
7184 	int cd_local_free;
7185 	int cd_total_free;
7186 	int cd_vm_page_wire_count;
7187 	int cd_vm_struct_pages_unneeded;
7188 	int cd_pages;
7189 	int cd_discarded;
7190 	int cd_count_wire;
7191 } hibernate_stats;
7192 
7193 #if CONFIG_SPTM
7194 /**
7195  * On SPTM-based systems don't save any executable pages into the hibernation
7196  * image. The SPTM has stronger guarantees around not allowing write access to
7197  * the executable pages than on older systems, which prevents XNU from being
7198  * able to restore any pages mapped as executable.
7199  */
7200 #define HIBERNATE_XPMAPPED_LIMIT        0ULL
7201 #else /* CONFIG_SPTM */
7202 /*
7203  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7204  * so that we don't overrun the estimated image size, which would
7205  * result in a hibernation failure.
7206  *
7207  * We use a size value instead of pages because we don't want to take up more space
7208  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7209  * to have that additional space available.
7210  *
7211  * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7212  * xpmapped size.
7213  */
7214 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
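/*
 * Editor's note: with 4K pages the limit above works out to
 * (160 * 1024 * 1024) / 4096 = 40960 pages -- in line with the historical
 * 40000-page x86 value -- and with 16K pages to 10240 pages, so the on-disk
 * budget stays roughly constant regardless of page size.
 */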
7215 #endif /* CONFIG_SPTM */
7216 
7217 static int
7218 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7219 {
7220 	wait_result_t   wait_result;
7221 
7222 	vm_page_lock_queues();
7223 
7224 	while (!vm_page_queue_empty(&q->pgo_pending)) {
7225 		q->pgo_draining = TRUE;
7226 
7227 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7228 
7229 		vm_page_unlock_queues();
7230 
7231 		wait_result = thread_block(THREAD_CONTINUE_NULL);
7232 
7233 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7234 			hibernate_stats.hibernate_drain_timeout++;
7235 
7236 			if (q == &vm_pageout_queue_external) {
7237 				return 0;
7238 			}
7239 
7240 			return 1;
7241 		}
7242 		vm_page_lock_queues();
7243 
7244 		hibernate_stats.hibernate_drained++;
7245 	}
7246 	vm_page_unlock_queues();
7247 
7248 	return 0;
7249 }
7250 
7251 
7252 boolean_t hibernate_skip_external = FALSE;
7253 
7254 static int
7255 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7256 {
7257 	vm_page_t       m;
7258 	vm_object_t     l_object = NULL;
7259 	vm_object_t     m_object = NULL;
7260 	int             refmod_state = 0;
7261 	int             try_failed_count = 0;
7262 	int             retval = 0;
7263 	int             current_run = 0;
7264 	struct  vm_pageout_queue *iq;
7265 	struct  vm_pageout_queue *eq;
7266 	struct  vm_pageout_queue *tq;
7267 
7268 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7269 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7270 
7271 	iq = &vm_pageout_queue_internal;
7272 	eq = &vm_pageout_queue_external;
7273 
7274 	vm_page_lock_queues();
7275 
7276 	while (qcount && !vm_page_queue_empty(q)) {
7277 		if (current_run++ == 1000) {
7278 			if (hibernate_should_abort()) {
7279 				retval = 1;
7280 				break;
7281 			}
7282 			current_run = 0;
7283 		}
7284 
7285 		m = (vm_page_t) vm_page_queue_first(q);
7286 		m_object = VM_PAGE_OBJECT(m);
7287 
7288 		/*
7289 		 * check to see if we currently are working
7290 		 * with the same object... if so, we've
7291 		 * already got the lock
7292 		 */
7293 		if (m_object != l_object) {
7294 			/*
7295 			 * the object associated with candidate page is
7296 			 * different from the one we were just working
7297 			 * with... dump the lock if we still own it
7298 			 */
7299 			if (l_object != NULL) {
7300 				vm_object_unlock(l_object);
7301 				l_object = NULL;
7302 			}
7303 			/*
7304 			 * Try to lock object; since we've already got the
7305 			 * page queues lock, we can only 'try' for this one.
7306 			 * if the 'try' fails, we need to do a mutex_pause
7307 			 * to allow the owner of the object lock a chance to
7308 			 * run...
7309 			 */
7310 			if (!vm_object_lock_try_scan(m_object)) {
7311 				if (try_failed_count > 20) {
7312 					hibernate_stats.hibernate_queue_nolock++;
7313 
7314 					goto reenter_pg_on_q;
7315 				}
7316 
7317 				vm_page_unlock_queues();
7318 				mutex_pause(try_failed_count++);
7319 				vm_page_lock_queues();
7320 
7321 				hibernate_stats.hibernate_queue_paused++;
7322 				continue;
7323 			} else {
7324 				l_object = m_object;
7325 			}
7326 		}
7327 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7328 			/*
7329 			 * page is not to be cleaned
7330 			 * put it back on the head of its queue
7331 			 */
7332 			if (m->vmp_cleaning) {
7333 				hibernate_stats.hibernate_skipped_cleaning++;
7334 			} else {
7335 				hibernate_stats.hibernate_skipped_transient++;
7336 			}
7337 
7338 			goto reenter_pg_on_q;
7339 		}
7340 		if (m_object->vo_copy == VM_OBJECT_NULL) {
7341 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7342 				/*
7343 				 * let the normal hibernate image path
7344 				 * deal with these
7345 				 */
7346 				goto reenter_pg_on_q;
7347 			}
7348 		}
7349 		if (!m->vmp_dirty && m->vmp_pmapped) {
7350 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7351 
7352 			if ((refmod_state & VM_MEM_MODIFIED)) {
7353 				SET_PAGE_DIRTY(m, FALSE);
7354 			}
7355 		} else {
7356 			refmod_state = 0;
7357 		}
7358 
7359 		if (!m->vmp_dirty) {
7360 			/*
7361 			 * page is not to be cleaned
7362 			 * put it back on the head of its queue
7363 			 */
7364 			if (m->vmp_precious) {
7365 				hibernate_stats.hibernate_skipped_precious++;
7366 			}
7367 
7368 			goto reenter_pg_on_q;
7369 		}
7370 
7371 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7372 			hibernate_stats.hibernate_skipped_external++;
7373 
7374 			goto reenter_pg_on_q;
7375 		}
7376 		tq = NULL;
7377 
7378 		if (m_object->internal) {
7379 			if (VM_PAGE_Q_THROTTLED(iq)) {
7380 				tq = iq;
7381 			}
7382 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7383 			tq = eq;
7384 		}
7385 
7386 		if (tq != NULL) {
7387 			wait_result_t   wait_result;
7388 			int             wait_count = 5;
7389 
7390 			if (l_object != NULL) {
7391 				vm_object_unlock(l_object);
7392 				l_object = NULL;
7393 			}
7394 
7395 			while (retval == 0) {
7396 				tq->pgo_throttled = TRUE;
7397 
7398 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7399 
7400 				vm_page_unlock_queues();
7401 
7402 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7403 
7404 				vm_page_lock_queues();
7405 
7406 				if (wait_result != THREAD_TIMED_OUT) {
7407 					break;
7408 				}
7409 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7410 					break;
7411 				}
7412 
7413 				if (hibernate_should_abort()) {
7414 					retval = 1;
7415 				}
7416 
7417 				if (--wait_count == 0) {
7418 					hibernate_stats.hibernate_throttle_timeout++;
7419 
7420 					if (tq == eq) {
7421 						hibernate_skip_external = TRUE;
7422 						break;
7423 					}
7424 					retval = 1;
7425 				}
7426 			}
7427 			if (retval) {
7428 				break;
7429 			}
7430 
7431 			hibernate_stats.hibernate_throttled++;
7432 
7433 			continue;
7434 		}
7435 		/*
7436 		 * we've already factored out pages in the laundry which
7437 		 * means this page can't be on the pageout queue so it's
7438 		 * safe to do the vm_page_queues_remove
7439 		 */
7440 		vm_page_queues_remove(m, TRUE);
7441 
7442 		if (m_object->internal == TRUE) {
7443 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7444 		}
7445 
7446 		vm_pageout_cluster(m);
7447 
7448 		hibernate_stats.hibernate_found_dirty++;
7449 
7450 		goto next_pg;
7451 
7452 reenter_pg_on_q:
7453 		vm_page_queue_remove(q, m, vmp_pageq);
7454 		vm_page_queue_enter(q, m, vmp_pageq);
7455 
7456 		hibernate_stats.hibernate_reentered_on_q++;
7457 next_pg:
7458 		hibernate_stats.hibernate_considered++;
7459 
7460 		qcount--;
7461 		try_failed_count = 0;
7462 	}
7463 	if (l_object != NULL) {
7464 		vm_object_unlock(l_object);
7465 		l_object = NULL;
7466 	}
7467 
7468 	vm_page_unlock_queues();
7469 
7470 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7471 
7472 	return retval;
7473 }
7474 
7475 
7476 static int
7477 hibernate_flush_dirty_pages(int pass)
7478 {
7479 	struct vm_speculative_age_q     *aq;
7480 	uint32_t        i;
7481 
7482 	if (vm_page_local_q) {
7483 		zpercpu_foreach_cpu(lid) {
7484 			vm_page_reactivate_local(lid, TRUE, FALSE);
7485 		}
7486 	}
7487 
7488 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
7489 		int             qcount;
7490 		vm_page_t       m;
7491 
7492 		aq = &vm_page_queue_speculative[i];
7493 
7494 		if (vm_page_queue_empty(&aq->age_q)) {
7495 			continue;
7496 		}
7497 		qcount = 0;
7498 
7499 		vm_page_lockspin_queues();
7500 
7501 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7502 			qcount++;
7503 		}
7504 		vm_page_unlock_queues();
7505 
7506 		if (qcount) {
7507 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7508 				return 1;
7509 			}
7510 		}
7511 	}
7512 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7513 		return 1;
7514 	}
7515 	/* XXX FBDP TODO: flush secluded queue */
7516 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7517 		return 1;
7518 	}
7519 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7520 		return 1;
7521 	}
7522 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7523 		return 1;
7524 	}
7525 
7526 	if (pass == 1) {
7527 		vm_compressor_record_warmup_start();
7528 	}
7529 
7530 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7531 		if (pass == 1) {
7532 			vm_compressor_record_warmup_end();
7533 		}
7534 		return 1;
7535 	}
7536 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7537 		if (pass == 1) {
7538 			vm_compressor_record_warmup_end();
7539 		}
7540 		return 1;
7541 	}
7542 	if (pass == 1) {
7543 		vm_compressor_record_warmup_end();
7544 	}
7545 
7546 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7547 		return 1;
7548 	}
7549 
7550 	return 0;
7551 }
7552 
7553 
7554 void
7555 hibernate_reset_stats()
7556 {
7557 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7558 }
7559 
7560 
7561 int
7562 hibernate_flush_memory()
7563 {
7564 	int     retval;
7565 
7566 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7567 
7568 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7569 
7570 	hibernate_cleaning_in_progress = TRUE;
7571 	hibernate_skip_external = FALSE;
7572 
7573 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7574 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7575 
7576 		vm_compressor_flush();
7577 
7578 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7579 
7580 		if (consider_buffer_cache_collect != NULL) {
7581 			unsigned int orig_wire_count;
7582 
7583 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7584 			orig_wire_count = vm_page_wire_count;
7585 
7586 			(void)(*consider_buffer_cache_collect)(1);
7587 			zone_gc(ZONE_GC_DRAIN);
7588 
7589 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7590 
7591 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7592 		}
7593 	}
7594 	hibernate_cleaning_in_progress = FALSE;
7595 
7596 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7597 
7598 	if (retval) {
7599 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7600 	}
7601 
7602 
7603 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7604 	    hibernate_stats.hibernate_considered,
7605 	    hibernate_stats.hibernate_reentered_on_q,
7606 	    hibernate_stats.hibernate_found_dirty);
7607 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7608 	    hibernate_stats.hibernate_skipped_cleaning,
7609 	    hibernate_stats.hibernate_skipped_transient,
7610 	    hibernate_stats.hibernate_skipped_precious,
7611 	    hibernate_stats.hibernate_skipped_external,
7612 	    hibernate_stats.hibernate_queue_nolock);
7613 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7614 	    hibernate_stats.hibernate_queue_paused,
7615 	    hibernate_stats.hibernate_throttled,
7616 	    hibernate_stats.hibernate_throttle_timeout,
7617 	    hibernate_stats.hibernate_drained,
7618 	    hibernate_stats.hibernate_drain_timeout);
7619 
7620 	return retval;
7621 }
7622 
7623 
7624 static void
7625 hibernate_page_list_zero(hibernate_page_list_t *list)
7626 {
7627 	uint32_t             bank;
7628 	hibernate_bitmap_t * bitmap;
7629 
7630 	bitmap = &list->bank_bitmap[0];
7631 	for (bank = 0; bank < list->bank_count; bank++) {
7632 		uint32_t last_bit;
7633 
7634 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7635 		// set out-of-bound bits at end of bitmap.
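		// editor's note: e.g. a bank spanning 70 pages gives last_bit = 70 & 31 = 6,
		// so the last word becomes 0xFFFFFFFF >> 6 = 0x03FFFFFF, pre-marking the 26
		// bit positions that lie past the bank's last page.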
7636 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7637 		if (last_bit) {
7638 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7639 		}
7640 
7641 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7642 	}
7643 }
7644 
7645 void
7646 hibernate_free_gobble_pages(void)
7647 {
7648 	vm_page_t m, next;
7649 	uint32_t  count = 0;
7650 
7651 	m = (vm_page_t) hibernate_gobble_queue;
7652 	while (m) {
7653 		next = m->vmp_snext;
7654 		vm_page_free(m);
7655 		count++;
7656 		m = next;
7657 	}
7658 	hibernate_gobble_queue = VM_PAGE_NULL;
7659 
7660 	if (count) {
7661 		HIBLOG("Freed %d pages\n", count);
7662 	}
7663 }
7664 
7665 static boolean_t
7666 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7667 {
7668 	vm_object_t object = NULL;
7669 	int                  refmod_state;
7670 	boolean_t            discard = FALSE;
7671 
7672 	do {
7673 		if (m->vmp_private) {
7674 			panic("hibernate_consider_discard: private");
7675 		}
7676 
7677 		object = VM_PAGE_OBJECT(m);
7678 
7679 		if (!vm_object_lock_try(object)) {
7680 			object = NULL;
7681 			if (!preflight) {
7682 				hibernate_stats.cd_lock_failed++;
7683 			}
7684 			break;
7685 		}
7686 		if (VM_PAGE_WIRED(m)) {
7687 			if (!preflight) {
7688 				hibernate_stats.cd_found_wired++;
7689 			}
7690 			break;
7691 		}
7692 		if (m->vmp_precious) {
7693 			if (!preflight) {
7694 				hibernate_stats.cd_found_precious++;
7695 			}
7696 			break;
7697 		}
7698 		if (m->vmp_busy || !object->alive) {
7699 			/*
7700 			 *	Somebody is playing with this page.
7701 			 */
7702 			if (!preflight) {
7703 				hibernate_stats.cd_found_busy++;
7704 			}
7705 			break;
7706 		}
7707 		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7708 			/*
7709 			 * If it's unusual in any way, ignore it
7710 			 */
7711 			if (!preflight) {
7712 				hibernate_stats.cd_found_unusual++;
7713 			}
7714 			break;
7715 		}
7716 		if (m->vmp_cleaning) {
7717 			if (!preflight) {
7718 				hibernate_stats.cd_found_cleaning++;
7719 			}
7720 			break;
7721 		}
7722 		if (m->vmp_laundry) {
7723 			if (!preflight) {
7724 				hibernate_stats.cd_found_laundry++;
7725 			}
7726 			break;
7727 		}
7728 		if (!m->vmp_dirty) {
7729 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7730 
7731 			if (refmod_state & VM_MEM_REFERENCED) {
7732 				m->vmp_reference = TRUE;
7733 			}
7734 			if (refmod_state & VM_MEM_MODIFIED) {
7735 				SET_PAGE_DIRTY(m, FALSE);
7736 			}
7737 		}
7738 
7739 		/*
7740 		 * If it's clean or purgeable we can discard the page on wakeup.
7741 		 */
7742 		discard = (!m->vmp_dirty)
7743 		    || (VM_PURGABLE_VOLATILE == object->purgable)
7744 		    || (VM_PURGABLE_EMPTY == object->purgable);
7745 
7746 
7747 		if (discard == FALSE) {
7748 			if (!preflight) {
7749 				hibernate_stats.cd_found_dirty++;
7750 			}
7751 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7752 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7753 				if (!preflight) {
7754 					hibernate_stats.cd_found_xpmapped++;
7755 				}
7756 				discard = FALSE;
7757 			} else {
7758 				if (!preflight) {
7759 					hibernate_stats.cd_skipped_xpmapped++;
7760 				}
7761 			}
7762 		}
7763 	} while (FALSE);
7764 
7765 	if (object) {
7766 		vm_object_unlock(object);
7767 	}
7768 
7769 	return discard;
7770 }
7771 
7772 
7773 static void
7774 hibernate_discard_page(vm_page_t m)
7775 {
7776 	vm_object_t m_object;
7777 
7778 	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7779 		/*
7780 		 * If it's unusual in any way, ignore it
7781 		 */
7782 		return;
7783 	}
7784 
7785 	m_object = VM_PAGE_OBJECT(m);
7786 
7787 #if MACH_ASSERT || DEBUG
7788 	if (!vm_object_lock_try(m_object)) {
7789 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7790 	}
7791 #else
7792 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
7793 	 *  makes sure these locks are uncontended before sleep */
7794 #endif /* MACH_ASSERT || DEBUG */
7795 
7796 	if (m->vmp_pmapped == TRUE) {
7797 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7798 	}
7799 
7800 	if (m->vmp_laundry) {
7801 		panic("hibernate_discard_page(%p) laundry", m);
7802 	}
7803 	if (m->vmp_private) {
7804 		panic("hibernate_discard_page(%p) private", m);
7805 	}
7806 	if (m->vmp_fictitious) {
7807 		panic("hibernate_discard_page(%p) fictitious", m);
7808 	}
7809 
7810 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7811 		/* object should be on a queue */
7812 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7813 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7814 		assert(old_queue);
7815 		if (m_object->purgeable_when_ripe) {
7816 			vm_purgeable_token_delete_first(old_queue);
7817 		}
7818 		vm_object_lock_assert_exclusive(m_object);
7819 		VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);
7820 
7821 		/*
7822 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
7823 		 * accounted in the "volatile" ledger, so no change here.
7824 		 * We have to update vm_page_purgeable_count, though, since we're
7825 		 * effectively purging this object.
7826 		 */
7827 		unsigned int delta;
7828 		assert(m_object->resident_page_count >= m_object->wired_page_count);
7829 		delta = (m_object->resident_page_count - m_object->wired_page_count);
7830 		assert(vm_page_purgeable_count >= delta);
7831 		assert(delta > 0);
7832 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7833 	}
7834 
7835 	vm_page_free(m);
7836 
7837 #if MACH_ASSERT || DEBUG
7838 	vm_object_unlock(m_object);
7839 #endif  /* MACH_ASSERT || DEBUG */
7840 }
7841 
7842 /*
7843  *  Grab locks for hibernate_page_list_setall()
7844  */
7845 void
7846 hibernate_vm_lock_queues(void)
7847 {
7848 	vm_object_lock(compressor_object);
7849 	vm_page_lock_queues();
7850 	vm_free_page_lock();
7851 	lck_mtx_lock(&vm_purgeable_queue_lock);
7852 
7853 	if (vm_page_local_q) {
7854 		zpercpu_foreach(lq, vm_page_local_q) {
7855 			VPL_LOCK(&lq->vpl_lock);
7856 		}
7857 	}
7858 }
7859 
7860 void
7861 hibernate_vm_unlock_queues(void)
7862 {
7863 	if (vm_page_local_q) {
7864 		zpercpu_foreach(lq, vm_page_local_q) {
7865 			VPL_UNLOCK(&lq->vpl_lock);
7866 		}
7867 	}
7868 	lck_mtx_unlock(&vm_purgeable_queue_lock);
7869 	vm_free_page_unlock();
7870 	vm_page_unlock_queues();
7871 	vm_object_unlock(compressor_object);
7872 }
7873 
7874 #if CONFIG_SPTM
7875 static bool
7876 hibernate_sptm_should_force_page_to_wired_pagelist(vm_page_t vmp)
7877 {
7878 	const sptm_paddr_t paddr = ptoa_64(VM_PAGE_GET_PHYS_PAGE(vmp));
7879 	const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
7880 	const vm_object_t vmp_objp = VM_PAGE_OBJECT(vmp);
7881 
7882 	return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG ||
7883 	       (frame_type == XNU_USER_EXEC && vmp_objp->internal == TRUE);
7884 }
7885 #endif
7886 
7887 /*
7888  *  Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7889  *  pages known to VM to not need saving are subtracted.
7890  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
7891  */
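/*
 * Editor's note: concretely, hibernate_page_bitset(list, TRUE, ppnum) in the
 * walks below marks a page as "does not need saving" in that list.  Free pages
 * are set in both bitmaps; a page that must be force-added to the wired image
 * (see the CONFIG_SPTM helper above) is set only in page_list, leaving it
 * clear -- and therefore saved -- in page_list_wired.
 */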
7892 
7893 void
7894 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7895     hibernate_page_list_t * page_list_wired,
7896     hibernate_page_list_t * page_list_pal,
7897     boolean_t preflight,
7898     boolean_t will_discard,
7899     uint32_t * pagesOut)
7900 {
7901 	uint64_t start, end, nsec;
7902 	vm_page_t m;
7903 	vm_page_t next;
7904 	uint32_t pages = page_list->page_count;
7905 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7906 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7907 	uint32_t count_wire = pages;
7908 	uint32_t count_discard_active    = 0;
7909 	uint32_t count_discard_inactive  = 0;
7910 	uint32_t count_retired = 0;
7911 	uint32_t count_discard_cleaned   = 0;
7912 	uint32_t count_discard_purgeable = 0;
7913 	uint32_t count_discard_speculative = 0;
7914 	uint32_t count_discard_vm_struct_pages = 0;
7915 	uint32_t i;
7916 	uint32_t             bank;
7917 	hibernate_bitmap_t * bitmap;
7918 	hibernate_bitmap_t * bitmap_wired;
7919 	boolean_t                    discard_all;
7920 	boolean_t            discard = FALSE;
7921 
7922 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7923 
7924 	if (preflight) {
7925 		page_list       = NULL;
7926 		page_list_wired = NULL;
7927 		page_list_pal   = NULL;
7928 		discard_all     = FALSE;
7929 	} else {
7930 		discard_all     = will_discard;
7931 	}
7932 
7933 #if MACH_ASSERT || DEBUG
7934 	if (!preflight) {
7935 		assert(hibernate_vm_locks_are_safe());
7936 		vm_page_lock_queues();
7937 		if (vm_page_local_q) {
7938 			zpercpu_foreach(lq, vm_page_local_q) {
7939 				VPL_LOCK(&lq->vpl_lock);
7940 			}
7941 		}
7942 	}
7943 #endif  /* MACH_ASSERT || DEBUG */
7944 
7945 
7946 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7947 
7948 	clock_get_uptime(&start);
7949 
7950 	if (!preflight) {
7951 		hibernate_page_list_zero(page_list);
7952 		hibernate_page_list_zero(page_list_wired);
7953 		hibernate_page_list_zero(page_list_pal);
7954 
7955 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7956 		hibernate_stats.cd_pages = pages;
7957 	}
7958 
7959 	if (vm_page_local_q) {
7960 		zpercpu_foreach_cpu(lid) {
7961 			vm_page_reactivate_local(lid, TRUE, !preflight);
7962 		}
7963 	}
7964 
7965 	if (preflight) {
7966 		vm_object_lock(compressor_object);
7967 		vm_page_lock_queues();
7968 		vm_free_page_lock();
7969 	}
7970 
7971 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7972 
7973 	hibernation_vmqueues_inspection = TRUE;
7974 
7975 	m = (vm_page_t) hibernate_gobble_queue;
7976 	while (m) {
7977 		pages--;
7978 		count_wire--;
7979 		if (!preflight) {
7980 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7981 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7982 		}
7983 		m = m->vmp_snext;
7984 	}
7985 
7986 	if (!preflight) {
7987 		percpu_foreach(free_pages_head, free_pages) {
7988 			for (m = *free_pages_head; m; m = m->vmp_snext) {
7989 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7990 
7991 				pages--;
7992 				count_wire--;
7993 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7994 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7995 
7996 				hibernate_stats.cd_local_free++;
7997 				hibernate_stats.cd_total_free++;
7998 			}
7999 		}
8000 	}
8001 
8002 	for (i = 0; i < vm_colors; i++) {
8003 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
8004 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
8005 
8006 			pages--;
8007 			count_wire--;
8008 			if (!preflight) {
8009 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8010 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8011 
8012 				hibernate_stats.cd_total_free++;
8013 			}
8014 		}
8015 	}
8016 
8017 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
8018 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
8019 
8020 		pages--;
8021 		count_wire--;
8022 		if (!preflight) {
8023 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8024 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8025 
8026 			hibernate_stats.cd_total_free++;
8027 		}
8028 	}
8029 
8030 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
8031 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
8032 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
8033 
8034 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8035 		discard = FALSE;
8036 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
8037 		    && hibernate_consider_discard(m, preflight)) {
8038 			if (!preflight) {
8039 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8040 			}
8041 			count_discard_inactive++;
8042 			discard = discard_all;
8043 		} else {
8044 			count_throttled++;
8045 		}
8046 		count_wire--;
8047 		if (!preflight) {
8048 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8049 		}
8050 
8051 		if (discard) {
8052 			hibernate_discard_page(m);
8053 		}
8054 		m = next;
8055 	}
8056 
8057 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
8058 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8059 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8060 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8061 #if CONFIG_SPTM
8062 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8063 #endif
8064 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8065 		discard = FALSE;
8066 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8067 		    hibernate_consider_discard(m, preflight)) {
8068 			if (!preflight) {
8069 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8070 			}
8071 			if (m->vmp_dirty) {
8072 				count_discard_purgeable++;
8073 			} else {
8074 				count_discard_inactive++;
8075 			}
8076 			discard = discard_all;
8077 		} else {
8078 			/*
8079 			 * If the page must be force-added to the wired page list, prevent it from appearing
8080 			 * in the unwired page list.
8081 			 */
8082 			if (force_to_wired_list) {
8083 				if (!preflight) {
8084 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8085 				}
8086 			} else {
8087 				count_anonymous++;
8088 			}
8089 		}
8090 		/*
8091 		 * If the page is NOT being forced into the wired page list, remove it from the
8092 		 * wired page list here.
8093 		 */
8094 		if (!force_to_wired_list) {
8095 			count_wire--;
8096 			if (!preflight) {
8097 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8098 			}
8099 		}
8100 		if (discard) {
8101 			hibernate_discard_page(m);
8102 		}
8103 		m = next;
8104 	}
8105 
8106 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8107 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8108 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8109 
8110 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8111 		discard = FALSE;
8112 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8113 		    hibernate_consider_discard(m, preflight)) {
8114 			if (!preflight) {
8115 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8116 			}
8117 			if (m->vmp_dirty) {
8118 				count_discard_purgeable++;
8119 			} else {
8120 				count_discard_cleaned++;
8121 			}
8122 			discard = discard_all;
8123 		} else {
8124 			count_cleaned++;
8125 		}
8126 		count_wire--;
8127 		if (!preflight) {
8128 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8129 		}
8130 		if (discard) {
8131 			hibernate_discard_page(m);
8132 		}
8133 		m = next;
8134 	}
8135 
8136 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8137 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8138 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8139 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8140 #if CONFIG_SPTM
8141 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8142 #endif
8143 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8144 		discard = FALSE;
8145 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
8146 		    hibernate_consider_discard(m, preflight)) {
8147 			if (!preflight) {
8148 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8149 			}
8150 			if (m->vmp_dirty) {
8151 				count_discard_purgeable++;
8152 			} else {
8153 				count_discard_active++;
8154 			}
8155 			discard = discard_all;
8156 		} else {
8157 			/*
8158 			 * If the page must be force-added to the wired page list, prevent it from appearing
8159 			 * in the unwired page list.
8160 			 */
8161 			if (force_to_wired_list) {
8162 				if (!preflight) {
8163 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8164 				}
8165 			} else {
8166 				count_active++;
8167 			}
8168 		}
8169 		/*
8170 		 * If the page is NOT being forced into the wired page list, remove it from the
8171 		 * wired page list here.
8172 		 */
8173 		if (!force_to_wired_list) {
8174 			count_wire--;
8175 			if (!preflight) {
8176 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8177 			}
8178 		}
8179 		if (discard) {
8180 			hibernate_discard_page(m);
8181 		}
8182 		m = next;
8183 	}
8184 
8185 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8186 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8187 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8188 		bool force_to_wired_list = false;        /* Default to NOT forcing page into the wired page list */
8189 #if CONFIG_SPTM
8190 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8191 #endif
8192 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8193 		discard = FALSE;
8194 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8195 		    hibernate_consider_discard(m, preflight)) {
8196 			if (!preflight) {
8197 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8198 			}
8199 			if (m->vmp_dirty) {
8200 				count_discard_purgeable++;
8201 			} else {
8202 				count_discard_inactive++;
8203 			}
8204 			discard = discard_all;
8205 		} else {
8206 			/*
8207 			 * If the page must be force-added to the wired page list, prevent it from appearing
8208 			 * in the unwired page list.
8209 			 */
8210 			if (force_to_wired_list) {
8211 				if (!preflight) {
8212 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8213 				}
8214 			} else {
8215 				count_inactive++;
8216 			}
8217 		}
8218 		/*
8219 		 * If the page is NOT being forced into the wired page list, remove it from the
8220 		 * wired page list here.
8221 		 */
8222 		if (!force_to_wired_list) {
8223 			count_wire--;
8224 			if (!preflight) {
8225 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8226 			}
8227 		}
8228 		if (discard) {
8229 			hibernate_discard_page(m);
8230 		}
8231 		m = next;
8232 	}
8233 	/* XXX FBDP TODO: secluded queue */
8234 
8235 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8236 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8237 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8238 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
8239 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
8240 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
8241 
8242 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8243 			discard = FALSE;
8244 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8245 			    hibernate_consider_discard(m, preflight)) {
8246 				if (!preflight) {
8247 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8248 				}
8249 				count_discard_speculative++;
8250 				discard = discard_all;
8251 			} else {
8252 				count_speculative++;
8253 			}
8254 			count_wire--;
8255 			if (!preflight) {
8256 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8257 			}
8258 			if (discard) {
8259 				hibernate_discard_page(m);
8260 			}
8261 			m = next;
8262 		}
8263 	}
8264 
8265 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
8266 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
8267 
8268 		count_compressor++;
8269 		count_wire--;
8270 		if (!preflight) {
8271 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8272 		}
8273 	}
8274 
8275 
8276 	if (preflight == FALSE && discard_all == TRUE) {
8277 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
8278 
8279 		HIBLOG("hibernate_teardown started\n");
8280 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8281 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8282 
8283 		pages -= count_discard_vm_struct_pages;
8284 		count_wire -= count_discard_vm_struct_pages;
8285 
8286 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8287 
8288 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8289 	}
8290 
8291 	if (!preflight) {
8292 		// pull wired from hibernate_bitmap
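		// editor's note: a set bit means "no need to save"; OR-ing in the complement
		// of the wired bitmap drops every page that will be preserved via the wired
		// image from the pageable image, so no page is written twice.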
8293 		bitmap = &page_list->bank_bitmap[0];
8294 		bitmap_wired = &page_list_wired->bank_bitmap[0];
8295 		for (bank = 0; bank < page_list->bank_count; bank++) {
8296 			for (i = 0; i < bitmap->bitmapwords; i++) {
8297 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8298 			}
8299 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8300 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8301 		}
8302 	}
8303 
8304 	// machine dependent adjustments
8305 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8306 
8307 	if (!preflight) {
8308 		hibernate_stats.cd_count_wire = count_wire;
8309 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8310 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8311 	}
8312 
8313 	clock_get_uptime(&end);
8314 	absolutetime_to_nanoseconds(end - start, &nsec);
8315 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8316 
8317 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8318 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8319 	    discard_all ? "did" : "could",
8320 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8321 
8322 	if (hibernate_stats.cd_skipped_xpmapped) {
8323 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8324 	}
8325 
8326 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8327 
8328 	if (preflight && will_discard) {
8329 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8330 		/*
8331 		 * We try to keep max HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8332 		 * even if these are clean and so we need to size the hibernation image accordingly.
8333 		 *
8334 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8335 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8336 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8337 		 * clean xpmapped pages.
8338 		 *
8339 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8340 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8341 		 */
8342 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
8343 	}
8344 
8345 	hibernation_vmqueues_inspection = FALSE;
8346 
8347 #if MACH_ASSERT || DEBUG
8348 	if (!preflight) {
8349 		if (vm_page_local_q) {
8350 			zpercpu_foreach(lq, vm_page_local_q) {
8351 				VPL_UNLOCK(&lq->vpl_lock);
8352 			}
8353 		}
8354 		vm_page_unlock_queues();
8355 	}
8356 #endif  /* MACH_ASSERT || DEBUG */
8357 
8358 	if (preflight) {
8359 		vm_free_page_unlock();
8360 		vm_page_unlock_queues();
8361 		vm_object_unlock(compressor_object);
8362 	}
8363 
8364 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8365 }
8366 
8367 void
8368 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8369 {
8370 	uint64_t  start, end, nsec;
8371 	vm_page_t m;
8372 	vm_page_t next;
8373 	uint32_t  i;
8374 	uint32_t  count_discard_active    = 0;
8375 	uint32_t  count_discard_inactive  = 0;
8376 	uint32_t  count_discard_purgeable = 0;
8377 	uint32_t  count_discard_cleaned   = 0;
8378 	uint32_t  count_discard_speculative = 0;
8379 
8380 
8381 #if MACH_ASSERT || DEBUG
8382 	vm_page_lock_queues();
8383 	if (vm_page_local_q) {
8384 		zpercpu_foreach(lq, vm_page_local_q) {
8385 			VPL_LOCK(&lq->vpl_lock);
8386 		}
8387 	}
8388 #endif  /* MACH_ASSERT || DEBUG */
8389 
8390 	clock_get_uptime(&start);
8391 
8392 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8393 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8394 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8395 
8396 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8397 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8398 			if (m->vmp_dirty) {
8399 				count_discard_purgeable++;
8400 			} else {
8401 				count_discard_inactive++;
8402 			}
8403 			hibernate_discard_page(m);
8404 		}
8405 		m = next;
8406 	}
8407 
8408 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8409 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8410 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8411 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8412 
8413 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8414 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8415 				count_discard_speculative++;
8416 				hibernate_discard_page(m);
8417 			}
8418 			m = next;
8419 		}
8420 	}
8421 
8422 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8423 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8424 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8425 
8426 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8427 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8428 			if (m->vmp_dirty) {
8429 				count_discard_purgeable++;
8430 			} else {
8431 				count_discard_inactive++;
8432 			}
8433 			hibernate_discard_page(m);
8434 		}
8435 		m = next;
8436 	}
8437 	/* XXX FBDP TODO: secluded queue */
8438 
8439 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8440 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8441 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8442 
8443 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8444 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8445 			if (m->vmp_dirty) {
8446 				count_discard_purgeable++;
8447 			} else {
8448 				count_discard_active++;
8449 			}
8450 			hibernate_discard_page(m);
8451 		}
8452 		m = next;
8453 	}
8454 
8455 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8456 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8457 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8458 
8459 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8460 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8461 			if (m->vmp_dirty) {
8462 				count_discard_purgeable++;
8463 			} else {
8464 				count_discard_cleaned++;
8465 			}
8466 			hibernate_discard_page(m);
8467 		}
8468 		m = next;
8469 	}
8470 
8471 #if MACH_ASSERT || DEBUG
8472 	if (vm_page_local_q) {
8473 		zpercpu_foreach(lq, vm_page_local_q) {
8474 			VPL_UNLOCK(&lq->vpl_lock);
8475 		}
8476 	}
8477 	vm_page_unlock_queues();
8478 #endif  /* MACH_ASSERT || DEBUG */
8479 
8480 	clock_get_uptime(&end);
8481 	absolutetime_to_nanoseconds(end - start, &nsec);
8482 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8483 	    nsec / 1000000ULL,
8484 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8485 }
8486 
8487 boolean_t       hibernate_paddr_map_inited = FALSE;
8488 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8489 vm_page_t       hibernate_rebuild_hash_list = NULL;
8490 
8491 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8492 unsigned int    hibernate_teardown_found_created_pages = 0;
8493 unsigned int    hibernate_teardown_found_free_pages = 0;
8494 unsigned int    hibernate_teardown_vm_page_free_count;
8495 
8496 
8497 struct ppnum_mapping {
8498 	struct ppnum_mapping    *ppnm_next;
8499 	ppnum_t                 ppnm_base_paddr;
8500 	unsigned int            ppnm_sindx;
8501 	unsigned int            ppnm_eindx;
8502 };
8503 
8504 struct ppnum_mapping    *ppnm_head;
8505 struct ppnum_mapping    *ppnm_last_found = NULL;
8506 
8507 
8508 void
8509 hibernate_create_paddr_map(void)
8510 {
8511 	unsigned int    i;
8512 	ppnum_t         next_ppnum_in_run = 0;
8513 	struct ppnum_mapping *ppnm = NULL;
8514 
8515 	if (hibernate_paddr_map_inited == FALSE) {
8516 		for (i = 0; i < vm_pages_count; i++) {
8517 			if (ppnm) {
8518 				ppnm->ppnm_eindx = i;
8519 			}
8520 
8521 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8522 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
8523 
8524 				ppnm->ppnm_next = ppnm_head;
8525 				ppnm_head = ppnm;
8526 
8527 				ppnm->ppnm_sindx = i;
8528 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8529 			}
8530 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8531 		}
8532 		ppnm->ppnm_eindx = vm_pages_count;
8533 
8534 		hibernate_paddr_map_inited = TRUE;
8535 	}
8536 }
8537 
8538 ppnum_t
8539 hibernate_lookup_paddr(unsigned int indx)
8540 {
8541 	struct ppnum_mapping *ppnm = NULL;
8542 
8543 	ppnm = ppnm_last_found;
8544 
8545 	if (ppnm) {
8546 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8547 			goto done;
8548 		}
8549 	}
8550 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8551 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8552 			ppnm_last_found = ppnm;
8553 			break;
8554 		}
8555 	}
8556 	if (ppnm == NULL) {
8557 		panic("hibernate_lookup_paddr of %d failed", indx);
8558 	}
8559 done:
8560 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8561 }
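/*
 * Illustrative sketch, not part of the build: how the ppnum_mapping runs built
 * by hibernate_create_paddr_map() translate a compacted vm_pages[] index back
 * into a physical page number.  The numbers below are hypothetical.
 *
 * Suppose vm_pages[10..14] describe the contiguous physical pages
 * 0x2000..0x2004.  hibernate_create_paddr_map() records a single run:
 *
 *	ppnm_sindx      = 10
 *	ppnm_eindx      = 15        (one past the last index in the run)
 *	ppnm_base_paddr = 0x2000
 *
 * hibernate_lookup_paddr(12) matches that run (10 <= 12 < 15) and returns
 *
 *	ppnm_base_paddr + (12 - ppnm_sindx) == 0x2000 + 2 == 0x2002
 *
 * ppnm_last_found caches the run that satisfied the previous lookup, since
 * consecutive lookups tend to land in the same run.
 */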
8562 
8563 
8564 uint32_t
8565 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8566 {
8567 	addr64_t        saddr_aligned;
8568 	addr64_t        eaddr_aligned;
8569 	addr64_t        addr;
8570 	ppnum_t         paddr;
8571 	unsigned int    mark_as_unneeded_pages = 0;
8572 
8573 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8574 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
8575 
8576 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8577 		paddr = pmap_find_phys(kernel_pmap, addr);
8578 
8579 		assert(paddr);
8580 
8581 		hibernate_page_bitset(page_list, TRUE, paddr);
8582 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
8583 
8584 		mark_as_unneeded_pages++;
8585 	}
8586 	return mark_as_unneeded_pages;
8587 }
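/*
 * Illustrative sketch, not part of the build: the alignment above keeps the
 * "unneeded" marking conservative by only covering pages that lie entirely
 * within [saddr, eaddr).  With 16K pages (PAGE_MASK_64 == 0x3fff) and the
 * hypothetical range saddr == 0x10001000, eaddr == 0x10009000:
 *
 *	saddr_aligned = (0x10001000 + 0x3fff) & ~0x3fff = 0x10004000   (round up)
 *	eaddr_aligned =  0x10009000           & ~0x3fff = 0x10008000   (round down)
 *
 * so only the single fully contained page at 0x10004000 gets its bit set in
 * both bitmaps, and the partial pages at either end remain part of the image.
 */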
8588 
8589 
8590 void
8591 hibernate_hash_insert_page(vm_page_t mem)
8592 {
8593 	vm_page_bucket_t *bucket;
8594 	int             hash_id;
8595 	vm_object_t     m_object;
8596 
8597 	m_object = VM_PAGE_OBJECT(mem);
8598 
8599 	assert(mem->vmp_hashed);
8600 	assert(m_object);
8601 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
8602 
8603 	/*
8604 	 *	Insert it into the object/offset hash table
8605 	 */
8606 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
8607 	bucket = &vm_page_buckets[hash_id];
8608 
8609 	mem->vmp_next_m = bucket->page_list;
8610 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
8611 }
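/*
 * Illustrative sketch, not part of the build: the insert above is a plain push
 * onto the bucket's singly linked list, expressed with packed pointers.  In
 * unpacked form it is equivalent to:
 *
 *	mem->next    = bucket->head;
 *	bucket->head = mem;
 *
 * No bucket lock is taken here because this path only runs from
 * hibernate_rebuild_vm_structs(), while the system is effectively
 * single-threaded coming out of hibernation.
 */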
8612 
8613 
8614 void
8615 hibernate_free_range(int sindx, int eindx)
8616 {
8617 	vm_page_t       mem;
8618 	unsigned int    color;
8619 
8620 	while (sindx < eindx) {
8621 		mem = &vm_pages[sindx];
8622 
8623 		vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8624 
8625 		mem->vmp_lopage = FALSE;
8626 		mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8627 
8628 		color = VM_PAGE_GET_COLOR(mem);
8629 #if defined(__x86_64__)
8630 		vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8631 #else
8632 		vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8633 #endif
8634 		vm_page_free_count++;
8635 
8636 		sindx++;
8637 	}
8638 }
8639 
8640 void
8641 hibernate_rebuild_vm_structs(void)
8642 {
8643 	int             i, cindx, sindx, eindx;
8644 	vm_page_t       mem, tmem, mem_next;
8645 	AbsoluteTime    startTime, endTime;
8646 	uint64_t        nsec;
8647 
8648 	if (hibernate_rebuild_needed == FALSE) {
8649 		return;
8650 	}
8651 
8652 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8653 	HIBLOG("hibernate_rebuild started\n");
8654 
8655 	clock_get_uptime(&startTime);
8656 
8657 	pal_hib_rebuild_pmap_structs();
8658 
8659 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8660 	eindx = vm_pages_count;
8661 
8662 	/*
8663 	 * Mark all the vm_pages[] that have not been initialized yet as being
8664 	 * transient. This is needed to ensure that the buddy page search is correct.
8665 	 * Without this, random data in these vm_pages[] can trip up the buddy search.
8666 	 */
8667 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8668 		vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8669 	}
8670 
8671 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8672 		mem = &vm_pages[cindx];
8673 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8674 		/*
8675 		 * hibernate_teardown_vm_structs records the location where this
8676 		 * vm_page_t must be restored, in its packed "vmp_next_m" field.
8677 		 */
8678 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8679 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8680 
8681 		sindx = (int)(tmem - &vm_pages[0]);
8682 
8683 		if (mem != tmem) {
8684 			/*
8685 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8686 			 * so move it back to its real location
8687 			 */
8688 			*tmem = *mem;
8689 			mem = tmem;
8690 		}
8691 		if (mem->vmp_hashed) {
8692 			hibernate_hash_insert_page(mem);
8693 		}
8694 		/*
8695 		 * the 'hole' between this vm_page_t and the previous
8696 		 * vm_page_t we moved needs to be initialized as
8697 		 * a range of free vm_page_t's
8698 		 */
8699 		hibernate_free_range(sindx + 1, eindx);
8700 
8701 		eindx = sindx;
8702 	}
8703 	if (sindx) {
8704 		hibernate_free_range(0, sindx);
8705 	}
8706 
8707 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8708 
8709 	/*
8710 	 * process the list of vm_page_t's that were entered in the hash,
8711 	 * but were not located in the vm_pages array... these are
8712 	 * vm_page_t's that were created on the fly (i.e. fictitious)
8713 	 */
8714 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8715 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8716 
8717 		mem->vmp_next_m = 0;
8718 		hibernate_hash_insert_page(mem);
8719 	}
8720 	hibernate_rebuild_hash_list = NULL;
8721 
8722 	clock_get_uptime(&endTime);
8723 	SUB_ABSOLUTETIME(&endTime, &startTime);
8724 	absolutetime_to_nanoseconds(endTime, &nsec);
8725 
8726 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8727 
8728 	hibernate_rebuild_needed = FALSE;
8729 
8730 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8731 }
8732 
8733 uint32_t
8734 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8735 {
8736 	unsigned int    i;
8737 	unsigned int    compact_target_indx;
8738 	vm_page_t       mem, mem_next;
8739 	vm_page_bucket_t *bucket;
8740 	unsigned int    mark_as_unneeded_pages = 0;
8741 	unsigned int    unneeded_vm_page_bucket_pages = 0;
8742 	unsigned int    unneeded_vm_pages_pages = 0;
8743 	unsigned int    unneeded_pmap_pages = 0;
8744 	addr64_t        start_of_unneeded = 0;
8745 	addr64_t        end_of_unneeded = 0;
8746 
8747 
8748 	if (hibernate_should_abort()) {
8749 		return 0;
8750 	}
8751 
8752 	hibernate_rebuild_needed = TRUE;
8753 
8754 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8755 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8756 	    vm_page_cleaned_count, compressor_object->resident_page_count);
8757 
8758 	for (i = 0; i < vm_page_bucket_count; i++) {
8759 		bucket = &vm_page_buckets[i];
8760 
8761 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8762 			assert(mem->vmp_hashed);
8763 
8764 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8765 
8766 			if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8767 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8768 				hibernate_rebuild_hash_list = mem;
8769 			}
8770 		}
8771 	}
8772 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8773 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8774 
8775 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
8776 
8777 	compact_target_indx = 0;
8778 
8779 	for (i = 0; i < vm_pages_count; i++) {
8780 		mem = &vm_pages[i];
8781 
8782 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8783 			unsigned int color;
8784 
8785 			assert(mem->vmp_busy);
8786 			assert(!mem->vmp_lopage);
8787 
8788 			color = VM_PAGE_GET_COLOR(mem);
8789 
8790 			vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8791 
8792 			VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8793 
8794 			vm_page_free_count--;
8795 
8796 			hibernate_teardown_found_free_pages++;
8797 
8798 			if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8799 				compact_target_indx = i;
8800 			}
8801 		} else {
8802 			/*
8803 			 * record this vm_page_t's original location;
8804 			 * we need this even if it doesn't get moved,
8805 			 * as an indicator to the rebuild function that
8806 			 * it doesn't have to be moved back
8807 			 */
8808 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8809 
8810 			if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8811 				/*
8812 				 * we've got a hole to fill, so
8813 				 * move this vm_page_t to its new home
8814 				 */
8815 				vm_pages[compact_target_indx] = *mem;
8816 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8817 
8818 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8819 				compact_target_indx++;
8820 			} else {
8821 				hibernate_teardown_last_valid_compact_indx = i;
8822 			}
8823 		}
8824 	}
8825 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8826 	    (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8827 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
8828 
8829 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8830 
8831 	if (start_of_unneeded) {
8832 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8833 		mark_as_unneeded_pages += unneeded_pmap_pages;
8834 	}
8835 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8836 
8837 	return mark_as_unneeded_pages;
8838 }
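/*
 * Illustrative sketch, not part of the build: the compaction performed above
 * and undone by hibernate_rebuild_vm_structs().  Hypothetical layout, with
 * U == in-use entry and F == free entry:
 *
 *	before teardown:  vm_pages[] = U F U F F U
 *	after  teardown:  vm_pages[] = U U U . . .    (last_valid_compact_indx == 2)
 *
 * Every in-use entry first records its original slot in vmp_next_m (a packed
 * pointer to itself), so after a copy into a hole the copy still points at
 * where it came from.  The rebuild pass copies each entry back to that slot,
 * re-hashes it if needed, and re-creates the free entries in the holes via
 * hibernate_free_range().  Everything past the last valid compacted index is
 * marked unneeded so it is not written into the hibernation image.
 */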
8839 
8840 
8841 #endif /* HIBERNATION */
8842 
8843 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8844 
8845 #include <mach_vm_debug.h>
8846 #if     MACH_VM_DEBUG
8847 
8848 #include <mach_debug/hash_info.h>
8849 #include <vm/vm_debug_internal.h>
8850 
8851 /*
8852  *	Routine:	vm_page_info
8853  *	Purpose:
8854  *		Return information about the global VP table.
8855  *		Fills the buffer with as much information as possible
8856  *		and returns the desired size of the buffer.
8857  *	Conditions:
8858  *		Nothing locked.  The caller should provide
8859  *		possibly-pageable memory.
8860  */
8861 
8862 unsigned int
8863 vm_page_info(
8864 	hash_info_bucket_t *info,
8865 	unsigned int count)
8866 {
8867 	unsigned int i;
8868 	lck_spin_t      *bucket_lock;
8869 
8870 	if (vm_page_bucket_count < count) {
8871 		count = vm_page_bucket_count;
8872 	}
8873 
8874 	for (i = 0; i < count; i++) {
8875 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
8876 		unsigned int bucket_count = 0;
8877 		vm_page_t m;
8878 
8879 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8880 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8881 
8882 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8883 		    m != VM_PAGE_NULL;
8884 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8885 			bucket_count++;
8886 		}
8887 
8888 		lck_spin_unlock(bucket_lock);
8889 
8890 		/* don't touch pageable memory while holding locks */
8891 		info[i].hib_count = bucket_count;
8892 	}
8893 
8894 	return vm_page_bucket_count;
8895 }
8896 #endif  /* MACH_VM_DEBUG */
8897 
8898 #if VM_PAGE_BUCKETS_CHECK
8899 void
8900 vm_page_buckets_check(void)
8901 {
8902 	unsigned int i;
8903 	vm_page_t p;
8904 	unsigned int p_hash;
8905 	vm_page_bucket_t *bucket;
8906 	lck_spin_t      *bucket_lock;
8907 
8908 	if (!vm_page_buckets_check_ready) {
8909 		return;
8910 	}
8911 
8912 #if HIBERNATION
8913 	if (hibernate_rebuild_needed ||
8914 	    hibernate_rebuild_hash_list) {
8915 		panic("BUCKET_CHECK: hibernation in progress: "
8916 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
8917 		    hibernate_rebuild_needed,
8918 		    hibernate_rebuild_hash_list);
8919 	}
8920 #endif /* HIBERNATION */
8921 
8922 #if VM_PAGE_FAKE_BUCKETS
8923 	char *cp;
8924 	for (cp = (char *) vm_page_fake_buckets_start;
8925 	    cp < (char *) vm_page_fake_buckets_end;
8926 	    cp++) {
8927 		if (*cp != 0x5a) {
8928 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
8929 			    "[0x%llx:0x%llx]\n",
8930 			    cp,
8931 			    (uint64_t) vm_page_fake_buckets_start,
8932 			    (uint64_t) vm_page_fake_buckets_end);
8933 		}
8934 	}
8935 #endif /* VM_PAGE_FAKE_BUCKETS */
8936 
8937 	for (i = 0; i < vm_page_bucket_count; i++) {
8938 		vm_object_t     p_object;
8939 
8940 		bucket = &vm_page_buckets[i];
8941 		if (!bucket->page_list) {
8942 			continue;
8943 		}
8944 
8945 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8946 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8947 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8948 
8949 		while (p != VM_PAGE_NULL) {
8950 			p_object = VM_PAGE_OBJECT(p);
8951 
8952 			if (!p->vmp_hashed) {
8953 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8954 				    "hash %d in bucket %d at %p "
8955 				    "is not hashed\n",
8956 				    p, p_object, p->vmp_offset,
8957 				    p_hash, i, bucket);
8958 			}
8959 			p_hash = vm_page_hash(p_object, p->vmp_offset);
8960 			if (p_hash != i) {
8961 				panic("BUCKET_CHECK: corruption in bucket %d "
8962 				    "at %p: page %p object %p offset 0x%llx "
8963 				    "hash %d\n",
8964 				    i, bucket, p, p_object, p->vmp_offset,
8965 				    p_hash);
8966 			}
8967 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8968 		}
8969 		lck_spin_unlock(bucket_lock);
8970 	}
8971 
8972 //	printf("BUCKET_CHECK: checked buckets\n");
8973 }
8974 #endif /* VM_PAGE_BUCKETS_CHECK */
8975 
8976 /*
8977  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8978  * local queues if they exist... it's the only spot in the system where we add pages
8979  * to those queues...  once on those queues, those pages can only move to one of the
8980  * global page queues or the free queues... they NEVER move from local q to local q.
8981  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8982  * the global vm_page_queue_lock at this point...  we still need to take the local lock
8983  * in case this operation is being run on a different CPU than the local queue's identity,
8984  * but we don't have to worry about the page moving to a global queue or becoming wired
8985  * while we're grabbing the local lock since those operations would require the global
8986  * vm_page_queue_lock to be held, and we already own it.
8987  *
8988  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8989  * 'wired' and local are ALWAYS mutually exclusive conditions.
8990  */
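/*
 * Minimal sketch of the lock nesting described above (illustrative only, not
 * part of the build): the global queue lock is always taken first and the
 * per-CPU local queue lock is nested inside it.
 *
 *	vm_page_lock_queues();                           // global vm_page_queue_lock
 *	lq = zpercpu_get_cpu(vm_page_local_q, local_id);
 *	VPL_LOCK(&lq->vpl_lock);                         // that CPU's local queue
 *	... remove the page from lq->vpl_queue ...
 *	VPL_UNLOCK(&lq->vpl_lock);
 *	vm_page_unlock_queues();
 */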
8991 
8992 void
8993 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8994 {
8995 	boolean_t       was_pageable = TRUE;
8996 	vm_object_t     m_object;
8997 
8998 	m_object = VM_PAGE_OBJECT(mem);
8999 
9000 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9001 
9002 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
9003 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9004 		if (remove_from_specialq == TRUE) {
9005 			vm_page_remove_from_specialq(mem);
9006 		}
9007 		/*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
9008 		 *       assert(mem->vmp_specialq.next != 0);
9009 		 *       assert(mem->vmp_specialq.prev != 0);
9010 		 *  } else {*/
9011 		if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
9012 			assert(mem->vmp_specialq.next == 0);
9013 			assert(mem->vmp_specialq.prev == 0);
9014 		}
9015 		return;
9016 	}
9017 
9018 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9019 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9020 		assert(mem->vmp_specialq.next == 0 &&
9021 		    mem->vmp_specialq.prev == 0 &&
9022 		    mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9023 		return;
9024 	}
9025 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9026 		/*
9027 		 * we might put these pages on a list for debugging purposes;
9028 		 * if we do, we'll need to remove this assert
9029 		 */
9030 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9031 		assert(mem->vmp_specialq.next == 0 &&
9032 		    mem->vmp_specialq.prev == 0);
9033 		/*
9034 		 * Recall that vmp_on_specialq also means a request to put
9035 		 * it on the special Q. So we don't want to reset that bit
9036 		 * just because a wiring request came in. We might want to
9037 		 * put it on the special queue post-unwiring.
9038 		 *
9039 		 * &&
9040 		 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9041 		 */
9042 		return;
9043 	}
9044 
9045 	assert(m_object != compressor_object);
9046 	assert(!is_kernel_object(m_object));
9047 	assert(!mem->vmp_fictitious);
9048 
9049 	switch (mem->vmp_q_state) {
9050 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
9051 	{
9052 		struct vpl      *lq;
9053 
9054 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
9055 		VPL_LOCK(&lq->vpl_lock);
9056 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
9057 		mem->vmp_local_id = 0;
9058 		lq->vpl_count--;
9059 		if (m_object->internal) {
9060 			lq->vpl_internal_count--;
9061 		} else {
9062 			lq->vpl_external_count--;
9063 		}
9064 		VPL_UNLOCK(&lq->vpl_lock);
9065 		was_pageable = FALSE;
9066 		break;
9067 	}
9068 	case VM_PAGE_ON_ACTIVE_Q:
9069 	{
9070 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
9071 		vm_page_active_count--;
9072 		break;
9073 	}
9074 
9075 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
9076 	{
9077 		assert(m_object->internal == TRUE);
9078 
9079 		vm_page_inactive_count--;
9080 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
9081 		vm_page_anonymous_count--;
9082 
9083 		vm_purgeable_q_advance_all();
9084 		vm_page_balance_inactive(3);
9085 		break;
9086 	}
9087 
9088 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
9089 	{
9090 		assert(m_object->internal == FALSE);
9091 
9092 		vm_page_inactive_count--;
9093 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
9094 		vm_purgeable_q_advance_all();
9095 		vm_page_balance_inactive(3);
9096 		break;
9097 	}
9098 
9099 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
9100 	{
9101 		assert(m_object->internal == FALSE);
9102 
9103 		vm_page_inactive_count--;
9104 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
9105 		vm_page_cleaned_count--;
9106 		vm_page_balance_inactive(3);
9107 		break;
9108 	}
9109 
9110 	case VM_PAGE_ON_THROTTLED_Q:
9111 	{
9112 		assert(m_object->internal == TRUE);
9113 
9114 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
9115 		vm_page_throttled_count--;
9116 		was_pageable = FALSE;
9117 		break;
9118 	}
9119 
9120 	case VM_PAGE_ON_SPECULATIVE_Q:
9121 	{
9122 		assert(m_object->internal == FALSE);
9123 
9124 		vm_page_remque(&mem->vmp_pageq);
9125 		vm_page_speculative_count--;
9126 		vm_page_balance_inactive(3);
9127 		break;
9128 	}
9129 
9130 #if CONFIG_SECLUDED_MEMORY
9131 	case VM_PAGE_ON_SECLUDED_Q:
9132 	{
9133 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
9134 		vm_page_secluded_count--;
9135 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9136 		if (m_object == VM_OBJECT_NULL) {
9137 			vm_page_secluded_count_free--;
9138 			was_pageable = FALSE;
9139 		} else {
9140 			assert(!m_object->internal);
9141 			vm_page_secluded_count_inuse--;
9142 			was_pageable = FALSE;
9143 //			was_pageable = TRUE;
9144 		}
9145 		break;
9146 	}
9147 #endif /* CONFIG_SECLUDED_MEMORY */
9148 
9149 	default:
9150 	{
9151 		/*
9152 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
9153 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
9154 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
9155 		 *              either first remove it (it needs both the page queues lock and the object lock to do
9156 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
9157 		 *
9158 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
9159 		 *	or any of the undefined states
9160 		 */
9161 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
9162 		break;
9163 	}
9164 	}
9165 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
9166 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
9167 
9168 	if (remove_from_specialq == TRUE) {
9169 		vm_page_remove_from_specialq(mem);
9170 	}
9171 	if (was_pageable) {
9172 		if (m_object->internal) {
9173 			vm_page_pageable_internal_count--;
9174 		} else {
9175 			vm_page_pageable_external_count--;
9176 		}
9177 	}
9178 }
9179 
9180 void
9181 vm_page_remove_internal(vm_page_t page)
9182 {
9183 	vm_object_t __object = VM_PAGE_OBJECT(page);
9184 	if (page == __object->memq_hint) {
9185 		vm_page_t       __new_hint;
9186 		vm_page_queue_entry_t   __qe;
9187 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
9188 		if (vm_page_queue_end(&__object->memq, __qe)) {
9189 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
9190 			if (vm_page_queue_end(&__object->memq, __qe)) {
9191 				__qe = NULL;
9192 			}
9193 		}
9194 		__new_hint = (vm_page_t)((uintptr_t) __qe);
9195 		__object->memq_hint = __new_hint;
9196 	}
9197 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
9198 #if CONFIG_SECLUDED_MEMORY
9199 	if (__object->eligible_for_secluded) {
9200 		vm_page_secluded.eligible_for_secluded--;
9201 	}
9202 #endif /* CONFIG_SECLUDED_MEMORY */
9203 }
9204 
9205 void
9206 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
9207 {
9208 	vm_object_t     m_object;
9209 
9210 	m_object = VM_PAGE_OBJECT(mem);
9211 
9212 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9213 	assert(!mem->vmp_fictitious);
9214 	assert(!mem->vmp_laundry);
9215 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9216 	vm_page_check_pageable_safe(mem);
9217 
9218 	if (m_object->internal) {
9219 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
9220 
9221 		if (first == TRUE) {
9222 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
9223 		} else {
9224 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
9225 		}
9226 
9227 		vm_page_anonymous_count++;
9228 		vm_page_pageable_internal_count++;
9229 	} else {
9230 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
9231 
9232 		if (first == TRUE) {
9233 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
9234 		} else {
9235 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
9236 		}
9237 
9238 		vm_page_pageable_external_count++;
9239 	}
9240 	vm_page_inactive_count++;
9241 	token_new_pagecount++;
9242 
9243 	vm_page_add_to_specialq(mem, FALSE);
9244 }
9245 
9246 void
9247 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
9248 {
9249 	vm_object_t     m_object;
9250 
9251 	m_object = VM_PAGE_OBJECT(mem);
9252 
9253 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9254 	assert(!mem->vmp_fictitious);
9255 	assert(!mem->vmp_laundry);
9256 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9257 	vm_page_check_pageable_safe(mem);
9258 
9259 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
9260 	if (first == TRUE) {
9261 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
9262 	} else {
9263 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
9264 	}
9265 	vm_page_active_count++;
9266 
9267 	if (m_object->internal) {
9268 		vm_page_pageable_internal_count++;
9269 	} else {
9270 		vm_page_pageable_external_count++;
9271 	}
9272 
9273 	vm_page_add_to_specialq(mem, FALSE);
9274 	vm_page_balance_inactive(3);
9275 }
9276 
9277 /*
9278  * Pages from special kernel objects shouldn't
9279  * be placed on pageable queues.
9280  */
9281 void
9282 vm_page_check_pageable_safe(vm_page_t page)
9283 {
9284 	vm_object_t     page_object;
9285 
9286 	page_object = VM_PAGE_OBJECT(page);
9287 
9288 	if (is_kernel_object(page_object)) {
9289 		panic("vm_page_check_pageable_safe: trying to add page "
9290 		    "from a kernel object to pageable queue");
9291 	}
9292 
9293 	if (page_object == compressor_object) {
9294 		panic("vm_page_check_pageable_safe: trying to add page "
9295 		    "from compressor object (%p) to pageable queue", compressor_object);
9296 	}
9297 }
9298 
9299 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9300 * wired page diagnose
9301 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9302 
9303 #include <libkern/OSKextLibPrivate.h>
9304 
9305 #define KA_SIZE(namelen, subtotalscount)        \
9306 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9307 
9308 #define KA_NAME(alloc)  \
9309 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9310 
9311 #define KA_NAME_LEN(alloc)      \
9312     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
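/*
 * Illustrative sketch, not part of the build: the variable-length layout that
 * the KA_* macros describe, for a hypothetical site with namelen == 5 and
 * subtotalscount == 2:
 *
 *	[ struct vm_allocation_site header fields ]
 *	[ subtotals[0] | subtotals[1]             ]   <- subtotalscount entries
 *	[ 'h' 'e' 'a' 'p' 'A' '\0'                ]   <- name, namelen + 1 bytes
 *
 * KA_SIZE(5, 2) is the total byte count, KA_NAME() points just past the
 * subtotals array, and KA_NAME_LEN() recovers the name length that
 * kern_allocation_name_allocate() stores in the upper bits of the flags field.
 */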
9313 
9314 vm_tag_t
9315 vm_tag_bt(void)
9316 {
9317 	uintptr_t* frameptr;
9318 	uintptr_t* frameptr_next;
9319 	uintptr_t retaddr;
9320 	uintptr_t kstackb, kstackt;
9321 	const vm_allocation_site_t * site;
9322 	thread_t cthread;
9323 	kern_allocation_name_t name;
9324 
9325 	cthread = current_thread();
9326 	if (__improbable(cthread == NULL)) {
9327 		return VM_KERN_MEMORY_OSFMK;
9328 	}
9329 
9330 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9331 		if (!name->tag) {
9332 			vm_tag_alloc(name);
9333 		}
9334 		return name->tag;
9335 	}
9336 
9337 	kstackb = cthread->kernel_stack;
9338 	kstackt = kstackb + kernel_stack_size;
9339 
9340 	/* Load stack frame pointer (EBP on x86) into frameptr */
9341 	frameptr = __builtin_frame_address(0);
9342 	site = NULL;
9343 	while (frameptr != NULL) {
9344 		/* Verify thread stack bounds */
9345 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9346 			break;
9347 		}
9348 
9349 		/* Next frame pointer is pointed to by the previous one */
9350 		frameptr_next = (uintptr_t*) *frameptr;
9351 #if defined(HAS_APPLE_PAC)
9352 		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
9353 #endif
9354 
9355 		/* Pull return address from one spot above the frame pointer */
9356 		retaddr = *(frameptr + 1);
9357 
9358 #if defined(HAS_APPLE_PAC)
9359 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9360 #endif
9361 
9362 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9363 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9364 			site = OSKextGetAllocationSiteForCaller(retaddr);
9365 			break;
9366 		}
9367 		frameptr = frameptr_next;
9368 	}
9369 
9370 	if (site) {
9371 		return site->tag;
9372 	}
9373 
9374 #if MACH_ASSERT
9375 	/*
9376 	 * Kernel tests appear here as unrecognized call sites and would get
9377 	 * no memory tag. Give them a default tag to prevent panics later.
9378 	 */
9379 	if (thread_get_test_option(test_option_vm_prevent_wire_tag_panic)) {
9380 		return VM_KERN_MEMORY_OSFMK;
9381 	}
9382 #endif
9383 
9384 	return VM_KERN_MEMORY_NONE;
9385 }
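/*
 * Illustrative sketch, not part of the build: the stack layout assumed by the
 * frame walk above.  Each frame keeps the caller's frame pointer and return
 * address next to each other:
 *
 *	frameptr[0]  ->  caller's frame pointer (the next frame to visit)
 *	frameptr[1]  ->  return address in the caller
 *
 * The walk stops at the first return address that falls inside built-in kmod
 * text or outside the kernel's own text and asks OSKext for the allocation
 * site registered for that caller; with pointer authentication the saved
 * pointers are stripped of their PAC signatures before being used.
 */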
9386 
9387 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9388 
9389 void
9390 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9391 {
9392 	vm_tag_t tag;
9393 	uint64_t avail;
9394 	uint32_t idx;
9395 	vm_allocation_site_t * prev;
9396 
9397 	if (site->tag) {
9398 		return;
9399 	}
9400 
9401 	idx = 0;
9402 	while (TRUE) {
9403 		avail = free_tag_bits[idx];
9404 		if (avail) {
9405 			tag = (vm_tag_t)__builtin_clzll(avail);
9406 			avail &= ~(1ULL << (63 - tag));
9407 			free_tag_bits[idx] = avail;
9408 			tag += (idx << 6);
9409 			break;
9410 		}
9411 		idx++;
9412 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
9413 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9414 				prev = vm_allocation_sites[idx];
9415 				if (!prev) {
9416 					continue;
9417 				}
9418 				if (!KA_NAME_LEN(prev)) {
9419 					continue;
9420 				}
9421 				if (!prev->tag) {
9422 					continue;
9423 				}
9424 				if (prev->total) {
9425 					continue;
9426 				}
9427 				if (1 != prev->refcount) {
9428 					continue;
9429 				}
9430 
9431 				assert(idx == prev->tag);
9432 				tag = (vm_tag_t)idx;
9433 				prev->tag = VM_KERN_MEMORY_NONE;
9434 				*releasesiteP = prev;
9435 				break;
9436 			}
9437 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9438 				tag = VM_KERN_MEMORY_ANY;
9439 			}
9440 			break;
9441 		}
9442 	}
9443 	site->tag = tag;
9444 
9445 	OSAddAtomic16(1, &site->refcount);
9446 
9447 	if (VM_KERN_MEMORY_ANY != tag) {
9448 		vm_allocation_sites[tag] = site;
9449 	}
9450 
9451 	if (tag > vm_allocation_tag_highest) {
9452 		vm_allocation_tag_highest = tag;
9453 	}
9454 }
9455 
9456 static void
9457 vm_tag_free_locked(vm_tag_t tag)
9458 {
9459 	uint64_t avail;
9460 	uint32_t idx;
9461 	uint64_t bit;
9462 
9463 	if (VM_KERN_MEMORY_ANY == tag) {
9464 		return;
9465 	}
9466 
9467 	idx = (tag >> 6);
9468 	avail = free_tag_bits[idx];
9469 	tag &= 63;
9470 	bit = (1ULL << (63 - tag));
9471 	assert(!(avail & bit));
9472 	free_tag_bits[idx] = (avail | bit);
9473 }
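/*
 * Illustrative sketch, not part of the build: the tag <-> bitmap mapping used
 * by vm_tag_alloc_locked() and vm_tag_free_locked().  Each 64-bit word of
 * free_tag_bits[] covers 64 tags, with bit (63 - n) of word (tag >> 6)
 * standing for tag ((idx << 6) + n).  For a hypothetical word
 *
 *	free_tag_bits[1] = 0x4000000000000000;   // only bit 62 set
 *
 * __builtin_clzll() returns 1, so the allocator hands out tag 1 + (1 << 6)
 * == 65 and clears bit 62; vm_tag_free_locked(65) computes idx == 1,
 * (tag & 63) == 1, and sets bit (63 - 1) == 62 again.
 */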
9474 
9475 static void
9476 vm_tag_init(void)
9477 {
9478 	vm_tag_t tag;
9479 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9480 		vm_tag_free_locked(tag);
9481 	}
9482 
9483 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9484 		vm_tag_free_locked(tag);
9485 	}
9486 }
9487 
9488 vm_tag_t
9489 vm_tag_alloc(vm_allocation_site_t * site)
9490 {
9491 	vm_allocation_site_t * releasesite;
9492 
9493 	if (!site->tag) {
9494 		releasesite = NULL;
9495 		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9496 		vm_tag_alloc_locked(site, &releasesite);
9497 		lck_ticket_unlock(&vm_allocation_sites_lock);
9498 		if (releasesite) {
9499 			kern_allocation_name_release(releasesite);
9500 		}
9501 	}
9502 
9503 	return site->tag;
9504 }
9505 
9506 #if VM_BTLOG_TAGS
9507 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
9508 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
9509 #define VM_TAG_BTLOG_SIZE (16u << 10)
9510 
9511 btlog_t vmtaglog_btlog;
9512 vm_tag_t vmtaglog_tag;
9513 
9514 static void
9515 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
9516 {
9517 	if (is_kernel_object(object)) {
9518 		/* kernel object backtraces are tracked in vm entries */
9519 		return;
9520 	}
9521 	if (delta > 0) {
9522 		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
9523 		btlog_record(vmtaglog_btlog, object, 0, ref);
9524 	} else if (object->wired_page_count == 0) {
9525 		btlog_erase(vmtaglog_btlog, object);
9526 	}
9527 }
9528 
9529 #ifndef ARRAY_SIZE
9530 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
9531 #endif /* ARRAY_SIZE */
9532 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
9533 const char *vm_kern_memory_strs[] = {
9534 	VM_KERN_MEMORY_ELEM(OSFMK),
9535 	VM_KERN_MEMORY_ELEM(BSD),
9536 	VM_KERN_MEMORY_ELEM(IOKIT),
9537 	VM_KERN_MEMORY_ELEM(LIBKERN),
9538 	VM_KERN_MEMORY_ELEM(OSKEXT),
9539 	VM_KERN_MEMORY_ELEM(KEXT),
9540 	VM_KERN_MEMORY_ELEM(IPC),
9541 	VM_KERN_MEMORY_ELEM(STACK),
9542 	VM_KERN_MEMORY_ELEM(CPU),
9543 	VM_KERN_MEMORY_ELEM(PMAP),
9544 	VM_KERN_MEMORY_ELEM(PTE),
9545 	VM_KERN_MEMORY_ELEM(ZONE),
9546 	VM_KERN_MEMORY_ELEM(KALLOC),
9547 	VM_KERN_MEMORY_ELEM(COMPRESSOR),
9548 	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
9549 	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
9550 	VM_KERN_MEMORY_ELEM(WAITQ),
9551 	VM_KERN_MEMORY_ELEM(DIAG),
9552 	VM_KERN_MEMORY_ELEM(LOG),
9553 	VM_KERN_MEMORY_ELEM(FILE),
9554 	VM_KERN_MEMORY_ELEM(MBUF),
9555 	VM_KERN_MEMORY_ELEM(UBC),
9556 	VM_KERN_MEMORY_ELEM(SECURITY),
9557 	VM_KERN_MEMORY_ELEM(MLOCK),
9558 	VM_KERN_MEMORY_ELEM(REASON),
9559 	VM_KERN_MEMORY_ELEM(SKYWALK),
9560 	VM_KERN_MEMORY_ELEM(LTABLE),
9561 	VM_KERN_MEMORY_ELEM(HV),
9562 	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
9563 	VM_KERN_MEMORY_ELEM(RETIRED),
9564 	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
9565 	VM_KERN_MEMORY_ELEM(TRIAGE),
9566 	VM_KERN_MEMORY_ELEM(RECOUNT),
9567 };
9568 
9569 static vm_tag_t
9570 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
9571 {
9572 	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
9573 		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
9574 			return i;
9575 		}
9576 	}
9577 
9578 	printf("Unable to find vm tag %s for btlog\n", tagstr);
9579 	return VM_KERN_MEMORY_NONE;
9580 }
9581 
9582 __startup_func
9583 static void
9584 vm_btlog_init(void)
9585 {
9586 	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
9587 
9588 	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
9589 		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
9590 	}
9591 }
9592 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
9593 #endif /* VM_BTLOG_TAGS */
9594 
9595 void
9596 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
9597 {
9598 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9599 
9600 	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
9601 }
9602 
9603 uint64_t
9604 vm_tag_get_size(vm_tag_t tag)
9605 {
9606 	vm_allocation_site_t *allocation;
9607 
9608 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
9609 
9610 	allocation = vm_allocation_sites[tag];
9611 	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
9612 }
9613 
9614 void
9615 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
9616 {
9617 	uint64_t value;
9618 
9619 	value = os_atomic_add(&allocation->total, delta, relaxed);
9620 	if (delta < 0) {
9621 		assertf(value + (uint64_t)-delta > value,
9622 		    "tag %d, site %p", allocation->tag, allocation);
9623 	}
9624 
9625 #if DEBUG || DEVELOPMENT
9626 	if (value > allocation->peak) {
9627 		os_atomic_max(&allocation->peak, value, relaxed);
9628 	}
9629 #endif /* DEBUG || DEVELOPMENT */
9630 
9631 	if (value == (uint64_t)delta && !allocation->tag) {
9632 		vm_tag_alloc(allocation);
9633 	}
9634 
9635 #if VM_BTLOG_TAGS
9636 	if (vmtaglog_tag && (allocation->tag == vmtaglog_tag) && object) {
9637 		vm_tag_log(object, delta, __builtin_frame_address(0));
9638 	}
9639 #endif /* VM_BTLOG_TAGS */
9640 }
9641 
9642 #if VM_TAG_SIZECLASSES
9643 
9644 void
9645 vm_allocation_zones_init(void)
9646 {
9647 	vm_offset_t   addr;
9648 	vm_size_t     size;
9649 
9650 	const vm_tag_t early_tags[] = {
9651 		VM_KERN_MEMORY_DIAG,
9652 		VM_KERN_MEMORY_KALLOC,
9653 		VM_KERN_MEMORY_KALLOC_DATA,
9654 		VM_KERN_MEMORY_KALLOC_TYPE,
9655 		VM_KERN_MEMORY_LIBKERN,
9656 		VM_KERN_MEMORY_OSFMK,
9657 		VM_KERN_MEMORY_RECOUNT,
9658 	};
9659 
9660 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9661 	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9662 
9663 	kmem_alloc(kernel_map, &addr, round_page(size),
9664 	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
9665 	    VM_KERN_MEMORY_DIAG);
9666 
9667 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9668 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9669 
9670 	// prepopulate early tag ranges so allocations
9671 	// in vm_tag_update_zone_size() and early boot won't recurse
9672 	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
9673 		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
9674 		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9675 	}
9676 }
9677 
9678 __attribute__((noinline))
9679 static vm_tag_t
9680 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9681 {
9682 	vm_allocation_zone_total_t *stats;
9683 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9684 
9685 	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
9686 	stats = kalloc_data(size, flags);
9687 	if (!stats) {
9688 		return VM_KERN_MEMORY_NONE;
9689 	}
9690 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9691 		kfree_data(stats, size);
9692 	}
9693 	return tag;
9694 }
9695 
9696 vm_tag_t
9697 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
9698 {
9699 	assert(VM_KERN_MEMORY_NONE != tag);
9700 	assert(tag < VM_MAX_TAG_VALUE);
9701 
9702 	if (__probable(vm_allocation_zone_totals[tag])) {
9703 		return tag;
9704 	}
9705 	return vm_tag_zone_stats_alloc(tag, zflags);
9706 }
9707 
9708 void
9709 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9710 {
9711 	vm_allocation_zone_total_t *stats;
9712 	vm_size_t value;
9713 
9714 	assert(VM_KERN_MEMORY_NONE != tag);
9715 	assert(tag < VM_MAX_TAG_VALUE);
9716 
9717 	if (zidx >= VM_TAG_SIZECLASSES) {
9718 		return;
9719 	}
9720 
9721 	stats = vm_allocation_zone_totals[tag];
9722 	assert(stats);
9723 	stats += zidx;
9724 
9725 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9726 	if (delta < 0) {
9727 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9728 		return;
9729 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9730 		os_atomic_max(&stats->vazt_peak, value, relaxed);
9731 	}
9732 }
9733 
9734 #endif /* VM_TAG_SIZECLASSES */
9735 
9736 void
9737 kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
9738 {
9739 	kern_allocation_name_t other;
9740 	struct vm_allocation_total * total;
9741 	uint32_t subidx;
9742 
9743 	assert(VM_KERN_MEMORY_NONE != subtag);
9744 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9745 	for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
9746 		total = &allocation->subtotals[subidx];
9747 		if (subtag == total->tag) {
9748 			break;
9749 		}
9750 	}
9751 	if (subidx >= allocation->subtotalscount) {
9752 		for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
9753 			total = &allocation->subtotals[subidx];
9754 			if ((VM_KERN_MEMORY_NONE == total->tag)
9755 			    || !total->total) {
9756 				total->tag = (vm_tag_t)subtag;
9757 				break;
9758 			}
9759 		}
9760 	}
9761 	assert(subidx < allocation->subtotalscount);
9762 	if (subidx >= allocation->subtotalscount) {
9763 		lck_ticket_unlock(&vm_allocation_sites_lock);
9764 		return;
9765 	}
9766 	if (delta < 0) {
9767 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9768 	}
9769 	OSAddAtomic64(delta, &total->total);
9770 	lck_ticket_unlock(&vm_allocation_sites_lock);
9771 
9772 	other = vm_allocation_sites[subtag];
9773 	assert(other);
9774 	if (delta < 0) {
9775 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9776 	}
9777 	OSAddAtomic64(delta, &other->mapped);
9778 }
9779 
9780 const char *
9781 kern_allocation_get_name(kern_allocation_name_t allocation)
9782 {
9783 	return KA_NAME(allocation);
9784 }
9785 
9786 kern_allocation_name_t
9787 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9788 {
9789 	kern_allocation_name_t allocation;
9790 	uint16_t namelen;
9791 
9792 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9793 
9794 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9795 	allocation->refcount       = 1;
9796 	allocation->subtotalscount = subtotalscount;
9797 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9798 	strlcpy(KA_NAME(allocation), name, namelen + 1);
9799 
9800 	vm_tag_alloc(allocation);
9801 	return allocation;
9802 }
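/*
 * Minimal usage sketch for the named-allocation interface above (illustrative
 * only, not part of the build; the name and sizes are hypothetical):
 *
 *	kern_allocation_name_t name;
 *
 *	name = kern_allocation_name_allocate("my.driver.buffers", 0);
 *	kern_allocation_update_size(name, (int64_t)ptoa_64(4), NULL);    // account 4 wired pages
 *	...
 *	kern_allocation_update_size(name, -(int64_t)ptoa_64(4), NULL);   // release them
 *	kern_allocation_name_release(name);
 */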
9803 
9804 void
9805 kern_allocation_name_release(kern_allocation_name_t allocation)
9806 {
9807 	assert(allocation->refcount > 0);
9808 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9809 		kfree_data(allocation,
9810 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9811 	}
9812 }
9813 
9814 #if !VM_TAG_ACTIVE_UPDATE
9815 static void
9816 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9817 {
9818 	if (!object->wired_page_count) {
9819 		return;
9820 	}
9821 	if (!is_kernel_object(object)) {
9822 		assert(object->wire_tag < num_info);
9823 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9824 	}
9825 }
9826 
9827 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9828     unsigned int num_info, vm_object_t object);
9829 
9830 static void
9831 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9832     vm_page_iterate_proc proc, purgeable_q_t queue,
9833     int group)
9834 {
9835 	vm_object_t object;
9836 
9837 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
9838 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
9839 	    object = (vm_object_t) queue_next(&object->objq)) {
9840 		proc(info, num_info, object);
9841 	}
9842 }
9843 
9844 static void
9845 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9846     vm_page_iterate_proc proc)
9847 {
9848 	vm_object_t     object;
9849 
9850 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9851 	queue_iterate(&vm_objects_wired,
9852 	    object,
9853 	    vm_object_t,
9854 	    wired_objq)
9855 	{
9856 		proc(info, num_info, object);
9857 	}
9858 	lck_spin_unlock(&vm_objects_wired_lock);
9859 }
9860 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9861 
9862 static uint64_t
9863 process_account(mach_memory_info_t * info, unsigned int num_info,
9864     uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
9865 {
9866 	size_t                 namelen;
9867 	unsigned int           idx, count, nextinfo;
9868 	vm_allocation_site_t * site;
9869 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9870 
9871 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9872 		site = vm_allocation_sites[idx];
9873 		if (!site) {
9874 			continue;
9875 		}
9876 		info[idx].mapped = site->mapped;
9877 		info[idx].tag    = site->tag;
9878 		if (!iterated) {
9879 			info[idx].size = site->total;
9880 #if DEBUG || DEVELOPMENT
9881 			info[idx].peak = site->peak;
9882 #endif /* DEBUG || DEVELOPMENT */
9883 		} else {
9884 			if (!site->subtotalscount && (site->total != info[idx].size)) {
9885 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9886 				info[idx].size = site->total;
9887 			}
9888 		}
9889 		info[idx].flags |= VM_KERN_SITE_WIRED;
9890 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9891 			info[idx].site   = idx;
9892 			info[idx].flags |= VM_KERN_SITE_TAG;
9893 			if (VM_KERN_MEMORY_ZONE == idx) {
9894 				info[idx].flags |= VM_KERN_SITE_HIDE;
9895 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
9896 				info[idx].collectable_bytes = zones_collectable_bytes;
9897 			}
9898 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9899 			info[idx].site   = 0;
9900 			info[idx].flags |= VM_KERN_SITE_NAMED;
9901 			if (namelen > sizeof(info[idx].name)) {
9902 				namelen = sizeof(info[idx].name);
9903 			}
9904 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9905 		} else if (VM_TAG_KMOD & site->flags) {
9906 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
9907 			info[idx].flags |= VM_KERN_SITE_KMOD;
9908 		} else {
9909 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
9910 			info[idx].flags |= VM_KERN_SITE_KERNEL;
9911 		}
9912 	}
9913 
9914 	nextinfo = (vm_allocation_tag_highest + 1);
9915 	count    = nextinfo;
9916 	if (count >= num_info) {
9917 		count = num_info;
9918 	}
9919 
9920 	for (idx = 0; idx < count; idx++) {
9921 		site = vm_allocation_sites[idx];
9922 		if (!site) {
9923 			continue;
9924 		}
9925 #if VM_TAG_SIZECLASSES
9926 		vm_allocation_zone_total_t * zone;
9927 		unsigned int                 zidx;
9928 
9929 		if (!redact_info
9930 		    && vm_allocation_zone_totals
9931 		    && (zone = vm_allocation_zone_totals[idx])
9932 		    && (nextinfo < num_info)) {
9933 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9934 				if (!zone[zidx].vazt_peak) {
9935 					continue;
9936 				}
9937 				info[nextinfo]        = info[idx];
9938 				info[nextinfo].zone   = zone_index_from_tag_index(zidx);
9939 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
9940 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
9941 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
9942 				info[nextinfo].size   = zone[zidx].vazt_total;
9943 				info[nextinfo].peak   = zone[zidx].vazt_peak;
9944 				info[nextinfo].mapped = 0;
9945 				nextinfo++;
9946 			}
9947 		}
9948 #endif /* VM_TAG_SIZECLASSES */
9949 		if (site->subtotalscount) {
9950 			uint64_t mapped, mapcost, take;
9951 			uint32_t sub;
9952 			vm_tag_t alloctag;
9953 
9954 			info[idx].size = site->total;
9955 			mapped = info[idx].size;
9956 			info[idx].mapped = mapped;
9957 			mapcost = 0;
9958 			for (sub = 0; sub < site->subtotalscount; sub++) {
9959 				alloctag = site->subtotals[sub].tag;
9960 				assert(alloctag < num_info);
9961 				if (info[alloctag].name[0]) {
9962 					continue;
9963 				}
9964 				take = site->subtotals[sub].total;
9965 				if (take > info[alloctag].size) {
9966 					take = info[alloctag].size;
9967 				}
9968 				if (take > mapped) {
9969 					take = mapped;
9970 				}
9971 				info[alloctag].mapped  -= take;
9972 				info[alloctag].size    -= take;
9973 				mapped                 -= take;
9974 				mapcost                += take;
9975 			}
9976 			info[idx].size = mapcost;
9977 		}
9978 	}
9979 	lck_ticket_unlock(&vm_allocation_sites_lock);
9980 
9981 	return 0;
9982 }
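/*
 * Illustrative sketch, not part of the build: how the subtotal pass above
 * re-attributes cost, using hypothetical numbers.  Suppose a named site
 * reports total == 100 pages with a single subtotal of 30 pages charged to
 * VM_KERN_MEMORY_IOKIT, and the IOKIT entry currently shows size == 80:
 *
 *	take = min(30, info[IOKIT].size, mapped) == 30
 *	info[IOKIT].size   -= 30     ->  50
 *	info[IOKIT].mapped -= 30
 *	mapcost            += 30
 *
 * The named site's reported size then becomes mapcost, i.e. the amount that
 * was shifted away from the per-tag entries, so the same pages are not
 * counted under both the named site and the tags it drew from.
 */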
9983 
9984 uint32_t
9985 vm_page_diagnose_estimate(void)
9986 {
9987 	vm_allocation_site_t * site;
9988 	uint32_t               count = zone_view_count;
9989 	uint32_t               idx;
9990 
9991 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9992 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9993 		site = vm_allocation_sites[idx];
9994 		if (!site) {
9995 			continue;
9996 		}
9997 		count++;
9998 #if VM_TAG_SIZECLASSES
9999 		if (vm_allocation_zone_totals) {
10000 			vm_allocation_zone_total_t * zone;
10001 			zone = vm_allocation_zone_totals[idx];
10002 			if (!zone) {
10003 				continue;
10004 			}
10005 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
10006 				count += (zone[zidx].vazt_peak != 0);
10007 			}
10008 		}
10009 #endif
10010 	}
10011 	lck_ticket_unlock(&vm_allocation_sites_lock);
10012 
10013 	/* some slop for new tags created */
10014 	count += 8;
10015 	count += VM_KERN_COUNTER_COUNT;
10016 
10017 	return count;
10018 }
10019 
10020 static void
10021 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
10022     bool percpu)
10023 {
10024 	zpercpu_foreach(zs, zstats) {
10025 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
10026 	}
10027 	if (percpu) {
10028 		info->size *= zpercpu_count();
10029 	}
10030 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
10031 }
10032 
10033 static void
10034 vm_page_add_info(
10035 	mach_memory_info_t     *info,
10036 	zone_stats_t            stats,
10037 	bool                    per_cpu,
10038 	const char             *parent_heap_name,
10039 	const char             *parent_zone_name,
10040 	const char             *view_name)
10041 {
10042 	vm_page_diagnose_zone_stats(info, stats, per_cpu);
10043 	snprintf(info->name, sizeof(info->name),
10044 	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
10045 }
10046 
10047 static void
10048 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
10049 {
10050 	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
10051 	    z->z_name, "raw");
10052 }
10053 
10054 static void
10055 vm_page_add_view(
10056 	mach_memory_info_t     *info,
10057 	zone_stats_t            stats,
10058 	const char             *parent_heap_name,
10059 	const char             *parent_zone_name,
10060 	const char             *view_name)
10061 {
10062 	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
10063 	    view_name);
10064 }
10065 
10066 static uint32_t
10067 vm_page_diagnose_heap_views(
10068 	mach_memory_info_t     *info,
10069 	kalloc_heap_t           kh,
10070 	const char             *parent_heap_name,
10071 	const char             *parent_zone_name)
10072 {
10073 	uint32_t i = 0;
10074 
10075 	while (kh) {
10076 		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
10077 		    parent_zone_name, kh->kh_name);
10078 		kh = kh->kh_views;
10079 		i++;
10080 	}
10081 	return i;
10082 }
10083 
10084 static uint32_t
10085 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
10086 {
10087 	uint32_t i = 0;
10088 
10089 	for (; i < KHEAP_NUM_ZONES; i++) {
10090 		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
10091 	}
10092 
10093 	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
10094 	    NULL);
10095 	return i;
10096 }
10097 
10098 static int
10099 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
10100 {
10101 	uint32_t idx = 0;
10102 	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
10103 	    "", "raw");
10104 	idx++;
10105 
10106 	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
10107 		struct kheap_info heap = kalloc_type_heap_array[i];
10108 		char heap_num_tmp[MAX_ZONE_NAME] = "";
10109 		const char *heap_num;
10110 
10111 		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
10112 		heap_num = &heap_num_tmp[0];
10113 
10114 		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
10115 		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
10116 			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
10117 				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
10118 				    heap_num, ktv->kt_name);
10119 				idx++;
10120 			}
10121 		}
10122 
10123 		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
10124 		    KHEAP_KT_VAR->kh_name, heap_num);
10125 	}
10126 
10127 	return idx;
10128 }
10129 
10130 kern_return_t
10131 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
10132 {
10133 	uint64_t                 wired_size;
10134 	uint64_t                 wired_managed_size;
10135 	uint64_t                 wired_reserved_size;
10136 	boolean_t                iterate;
10137 	mach_memory_info_t     * counts;
10138 	uint32_t                 i;
10139 
10140 	bzero(info, num_info * sizeof(mach_memory_info_t));
10141 
10142 	if (!vm_page_wire_count_initial) {
10143 		return KERN_ABORTED;
10144 	}
10145 
10146 #if !XNU_TARGET_OS_OSX
10147 	wired_size          = ptoa_64(vm_page_wire_count);
10148 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
10149 #else /* !XNU_TARGET_OS_OSX */
10150 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
10151 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
10152 #endif /* !XNU_TARGET_OS_OSX */
10153 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
10154 
10155 	wired_size += booter_size;
10156 
10157 	assert(num_info >= VM_KERN_COUNTER_COUNT);
10158 	num_info -= VM_KERN_COUNTER_COUNT;
10159 	counts = &info[num_info];
10160 
10161 #define SET_COUNT(xcount, xsize, xflags)                        \
10162     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
10163     counts[xcount].site  = (xcount);                            \
10164     counts[xcount].size  = (xsize);                                 \
10165     counts[xcount].mapped  = (xsize);                           \
10166     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
10167 
10168 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
10169 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
10170 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
10171 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
10172 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
10173 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
10174 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
10175 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
10176 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
10177 #if CONFIG_SPTM
10178 	SET_COUNT(VM_KERN_COUNT_EXCLAVES_CARVEOUT, SPTMArgs->sk_carveout_size, 0);
10179 #endif
10180 
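	/*
	 * SET_MAP records the total, free and largest-free-range sizes of a
	 * kernel submap in its counter slot; it is used below for kernel_map
	 * and for the zone map.
	 */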
10181 #define SET_MAP(xcount, xsize, xfree, xlargest) \
10182     counts[xcount].site    = (xcount);                  \
10183     counts[xcount].size    = (xsize);                   \
10184     counts[xcount].mapped  = (xsize);                   \
10185     counts[xcount].free    = (xfree);                   \
10186     counts[xcount].largest = (xlargest);                \
10187     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
10188 
10189 	vm_map_size_t map_size, map_free, map_largest;
10190 
10191 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
10192 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
10193 
10194 	zone_map_sizes(&map_size, &map_free, &map_largest);
10195 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
10196 
10197 	assert(num_info >= zone_view_count);
10198 	num_info -= zone_view_count;
10199 	counts = &info[num_info];
10200 	i = 0;
10201 
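	/*
	 * Per-heap and per-zone-view breakdowns are only reported when the
	 * caller did not request redacted info.
	 */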
10202 	if (!redact_info) {
10203 		if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
10204 			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
10205 		}
10206 		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
10207 			i += vm_page_diagnose_kt_heaps(counts + i);
10208 		}
10209 		assert(i <= zone_view_count);
10210 
10211 		zone_index_foreach(zidx) {
10212 			zone_t z = &zone_array[zidx];
10213 			zone_security_flags_t zsflags = zone_security_array[zidx];
10214 			zone_view_t zv = z->z_views;
10215 
10216 			if (zv == NULL) {
10217 				continue;
10218 			}
10219 
10220 			zone_stats_t zv_stats_head = z->z_stats;
10221 			bool has_raw_view = false;
10222 
10223 			for (; zv; zv = zv->zv_next) {
10224 				/*
10225 				 * kalloc_types that allocate from the same zone are linked
10226 				 * as views. Only print the ones that have their own stats.
10227 				 */
10228 				if (zv->zv_stats == zv_stats_head) {
10229 					continue;
10230 				}
10231 				has_raw_view = true;
10232 				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
10233 				    z->z_percpu);
10234 				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
10235 				    zone_heap_name(z), z->z_name, zv->zv_name);
10236 				i++;
10237 				assert(i <= zone_view_count);
10238 			}
10239 
10240 			/*
10241 			 * Print raw views for non-kalloc or kalloc_type zones
10242 			 */
10243 			bool kalloc_type = zsflags.z_kalloc_type;
10244 			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
10245 			    (kalloc_type && has_raw_view)) {
10246 				vm_page_diagnose_zone(counts + i, z);
10247 				i++;
10248 				assert(i <= zone_view_count);
10249 			}
10250 		}
10251 	}
10252 
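	/*
	 * When tags are not maintained at allocation time (VM_TAG_ACTIVE_UPDATE
	 * is 0), charge wired pages to their tags here by walking kernel_map,
	 * descending at most one level into submaps, and counting the wired
	 * pages of every entry backed by the kernel object.
	 */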
10253 	iterate = !VM_TAG_ACTIVE_UPDATE;
10254 	if (iterate) {
10255 		enum                       { kMaxKernelDepth = 1 };
10256 		vm_map_t                     maps[kMaxKernelDepth];
10257 		vm_map_entry_t               entries[kMaxKernelDepth];
10258 		vm_map_t                     map;
10259 		vm_map_entry_t               entry;
10260 		vm_object_offset_t           offset;
10261 		vm_page_t                    page;
10262 		int                          stackIdx, count;
10263 
10264 #if !VM_TAG_ACTIVE_UPDATE
10265 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
10266 #endif /* ! VM_TAG_ACTIVE_UPDATE */
10267 
10268 		map = kernel_map;
10269 		stackIdx = 0;
10270 		while (map) {
10271 			vm_map_lock(map);
10272 			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
10273 				if (entry->is_sub_map) {
10274 					assert(stackIdx < kMaxKernelDepth);
10275 					maps[stackIdx] = map;
10276 					entries[stackIdx] = entry;
10277 					stackIdx++;
10278 					map = VME_SUBMAP(entry);
10279 					entry = NULL;
10280 					break;
10281 				}
10282 				if (is_kernel_object(VME_OBJECT(entry))) {
10283 					count = 0;
10284 					vm_object_lock(VME_OBJECT(entry));
10285 					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
10286 						page = vm_page_lookup(VME_OBJECT(entry), offset);
10287 						if (page && VM_PAGE_WIRED(page)) {
10288 							count++;
10289 						}
10290 					}
10291 					vm_object_unlock(VME_OBJECT(entry));
10292 
10293 					if (count) {
10294 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
10295 						assert(VME_ALIAS(entry) < num_info);
10296 						info[VME_ALIAS(entry)].size += ptoa_64(count);
10297 					}
10298 				}
10299 				while (map && (entry == vm_map_last_entry(map))) {
10300 					vm_map_unlock(map);
10301 					if (!stackIdx) {
10302 						map = NULL;
10303 					} else {
10304 						--stackIdx;
10305 						map = maps[stackIdx];
10306 						entry = entries[stackIdx];
10307 					}
10308 				}
10309 			}
10310 		}
10311 	}
10312 
10313 	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);
10314 
10315 	return KERN_SUCCESS;
10316 }
10317 
10318 #if DEBUG || DEVELOPMENT
10319 
10320 kern_return_t
10321 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
10322 {
10323 	kern_return_t  ret;
10324 	vm_size_t      zsize;
10325 	vm_map_t       map;
10326 	vm_map_entry_t entry;
10327 
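	/*
	 * Try the zone allocator first: if addr is a zone element, report its
	 * element size and tag.  Otherwise fall back to looking up a kernel_map
	 * (or submap) entry that starts exactly at addr.
	 */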
10328 	zsize = zone_element_info((void *) addr, tag);
10329 	if (zsize) {
10330 		*zone_size = *size = zsize;
10331 		return KERN_SUCCESS;
10332 	}
10333 
10334 	*zone_size = 0;
10335 	ret = KERN_INVALID_ADDRESS;
10336 	for (map = kernel_map; map;) {
10337 		vm_map_lock(map);
10338 		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
10339 			break;
10340 		}
10341 		if (entry->is_sub_map) {
10342 			if (map != kernel_map) {
10343 				break;
10344 			}
10345 			map = VME_SUBMAP(entry);
10346 			continue;
10347 		}
10348 		if (entry->vme_start != addr) {
10349 			break;
10350 		}
10351 		*tag = (vm_tag_t)VME_ALIAS(entry);
10352 		*size = (entry->vme_end - addr);
10353 		ret = KERN_SUCCESS;
10354 		break;
10355 	}
10356 	if (map != kernel_map) {
10357 		vm_map_unlock(map);
10358 	}
10359 	vm_map_unlock(kernel_map);
10360 
10361 	return ret;
10362 }
10363 
10364 #endif /* DEBUG || DEVELOPMENT */
10365 
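/*
 * Map a VM tag back to the kext that registered its allocation site:
 * returns the kmod id (0 if the tag does not belong to a kext) and passes
 * the supplied name buffer to OSKextGetKmodIDForSite.
 */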
10366 uint32_t
10367 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
10368 {
10369 	vm_allocation_site_t * site;
10370 	uint32_t               kmodId;
10371 
10372 	kmodId = 0;
10373 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10374 	if ((site = vm_allocation_sites[tag])) {
10375 		if (VM_TAG_KMOD & site->flags) {
10376 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
10377 		}
10378 	}
10379 	lck_ticket_unlock(&vm_allocation_sites_lock);
10380 
10381 	return kmodId;
10382 }
10383 
10384 
10385 #if CONFIG_SECLUDED_MEMORY
10386 /*
10387  * Note that there's no locking around other accesses to vm_page_secluded_target.
10388  * That should be OK, since these are the only places where it can be changed after
10389  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
10390  * but will eventually get the correct value. This brief mismatch is OK as pageout
10391  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
10392  * over time.
10393  */
10394 unsigned int vm_page_secluded_suppress_cnt = 0;
10395 unsigned int vm_page_secluded_save_target;
10396 
10397 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
10398 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
10399 
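/*
 * Secluded-page suppression is reference counted across tasks: the first
 * suppressor saves vm_page_secluded_target and forces it to 0; the last one
 * to stop restores the saved target.
 */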
10400 void
10401 start_secluded_suppression(task_t task)
10402 {
10403 	if (task->task_suppressed_secluded) {
10404 		return;
10405 	}
10406 	lck_spin_lock(&secluded_suppress_slock);
10407 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
10408 		task->task_suppressed_secluded = TRUE;
10409 		vm_page_secluded_save_target = vm_page_secluded_target;
10410 		vm_page_secluded_target = 0;
10411 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10412 	}
10413 	lck_spin_unlock(&secluded_suppress_slock);
10414 }
10415 
10416 void
10417 stop_secluded_suppression(task_t task)
10418 {
10419 	lck_spin_lock(&secluded_suppress_slock);
10420 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
10421 		task->task_suppressed_secluded = FALSE;
10422 		vm_page_secluded_target = vm_page_secluded_save_target;
10423 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10424 	}
10425 	lck_spin_unlock(&secluded_suppress_slock);
10426 }
10427 
10428 #endif /* CONFIG_SECLUDED_MEMORY */
10429 
10430 /*
10431  * Move the list of retired pages from vm_page_queue_retired to
10432  * their final resting place on retired_pages_object.
10433  */
10434 void
10435 vm_retire_boot_pages(void)
10436 {
10437 }
10438 
10439 /*
10440  * This holds the reported physical address if an ECC error leads to a panic.
10441  * SMC will store it in PMU SRAM under the 'sECC' key.
10442  */
10443 uint64_t ecc_panic_physical_address = 0;
10444 
10445 
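/*
 * Returns TRUE when the page lies outside the initial vm_pages[] array
 * (vm_pages[0] .. vm_pages[vm_pages_count - 1]), i.e. it was presumably
 * created after that array was populated.
 */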
10446 boolean_t
10447 vm_page_created(vm_page_t page)
10448 {
10449 	return (page < &vm_pages[0]) || (page >= &vm_pages[vm_pages_count]);
10450 }
10451