1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_page.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Resident memory management module.
63  */
64 
65 #include <debug.h>
66 #include <libkern/OSDebug.h>
67 
68 #include <mach/clock_types.h>
69 #include <mach/vm_prot.h>
70 #include <mach/vm_statistics.h>
71 #include <mach/sdt.h>
72 #include <kern/counter.h>
73 #include <kern/host_statistics.h>
74 #include <kern/sched_prim.h>
75 #include <kern/policy_internal.h>
76 #include <kern/task.h>
77 #include <kern/thread.h>
78 #include <kern/kalloc.h>
79 #include <kern/zalloc_internal.h>
80 #include <kern/ledger.h>
81 #include <kern/ecc.h>
82 #include <vm/pmap.h>
83 #include <vm/vm_init_xnu.h>
84 #include <vm/vm_map_internal.h>
85 #include <vm/vm_page_internal.h>
86 #include <vm/vm_pageout_internal.h>
87 #include <vm/vm_kern_xnu.h>                 /* kmem_alloc() */
88 #include <vm/vm_compressor_pager_internal.h>
89 #include <kern/misc_protos.h>
90 #include <mach_debug/zone_info.h>
91 #include <vm/cpm_internal.h>
92 #include <pexpert/pexpert.h>
93 #include <pexpert/device_tree.h>
94 #include <san/kasan.h>
95 #include <os/log.h>
96 
97 #include <libkern/coreanalytics/coreanalytics.h>
98 #include <kern/backtrace.h>
99 #include <kern/telemetry.h>
100 
101 #include <vm/vm_protos_internal.h>
102 #include <vm/memory_object.h>
103 #include <vm/vm_purgeable_internal.h>
104 #include <vm/vm_compressor_internal.h>
105 #include <vm/vm_iokit.h>
106 #include <vm/vm_object_internal.h>
107 
108 
109 #if defined (__x86_64__)
110 #include <i386/misc_protos.h>
111 #endif
112 
113 #if CONFIG_SPTM
114 #include <arm64/sptm/sptm.h>
115 #endif
116 
117 #if CONFIG_PHANTOM_CACHE
118 #include <vm/vm_phantom_cache_internal.h>
119 #endif
120 
121 #if HIBERNATION
122 #include <IOKit/IOHibernatePrivate.h>
123 #include <machine/pal_hibernate.h>
124 #endif /* HIBERNATION */
125 
126 #include <sys/kdebug.h>
127 
128 #if defined(HAS_APPLE_PAC)
129 #include <ptrauth.h>
130 #endif
131 #if defined(__arm64__)
132 #include <arm/cpu_internal.h>
133 #endif /* defined(__arm64__) */
134 
135 /*
136  * During single-threaded early boot we don't initialize all pages.
137  * This avoids some delay during boot. They'll be initialized and
138  * added to the free list as needed or after we are multithreaded by
139  * what becomes the pageout thread.
140  *
141  * This slows down booting the DEBUG kernel, particularly on
142  * large memory systems, but is worthwhile in deterministically
143  * trapping uninitialized memory usage.
144  */
145 #if DEBUG
146 static TUNABLE(uint32_t, fillval, "fill", 0xDEB8F177);
147 #else
148 static TUNABLE(uint32_t, fillval, "fill", 0);
149 #endif
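
/*
 * The fill value is consumed when delayed pages are finally initialized:
 * vm_get_delayed_page() below calls fillPage(pnum, fillval) on each page it
 * adds when fillval is non-zero, so a DEBUG kernel (or an explicit "fill=..."
 * boot-arg) poisons freshly added pages with a recognizable pattern.
 */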
150 
151 #if MACH_ASSERT
152 
153 TUNABLE(bool, vm_check_refs_on_alloc, "vm_check_refs_on_alloc", false);
154 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
155 
156 #else /* MACH_ASSERT */
157 
158 #define ASSERT_PMAP_FREE(mem) /* nothing */
159 
160 #endif /* MACH_ASSERT */
161 
162 
163 extern boolean_t vm_pageout_running;
164 extern thread_t  vm_pageout_scan_thread;
165 extern bool vps_dynamic_priority_enabled;
166 
167 const bool vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE] = {
168 	[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = true,
169 	[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = true,
170 	[VM_PAGE_ON_INACTIVE_CLEANED_Q] = true,
171 };
172 
173 const bool vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE] = {
174 	[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = true,
175 	[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = true,
176 	[VM_PAGE_ON_INACTIVE_CLEANED_Q] = true,
177 	[VM_PAGE_ON_ACTIVE_Q] = true,
178 	[VM_PAGE_ON_SPECULATIVE_Q] = true,
179 	[VM_PAGE_ON_THROTTLED_Q] = true,
180 #if CONFIG_SECLUDED_MEMORY
181 	[VM_PAGE_ON_SECLUDED_Q] = true,
182 #endif /* CONFIG_SECLUDED_MEMORY */
183 };
184 
185 const bool vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE] = {
186 	[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = true,
187 	[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = true,
188 	[VM_PAGE_ON_INACTIVE_CLEANED_Q] = true,
189 	[VM_PAGE_ON_ACTIVE_Q] = true,
190 	[VM_PAGE_ON_THROTTLED_Q] = true,
191 #if CONFIG_SECLUDED_MEMORY
192 	[VM_PAGE_ON_SECLUDED_Q] = true,
193 #endif /* CONFIG_SECLUDED_MEMORY */
194 };
195 
196 const bool vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE] = {
197 	[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = true,
198 	[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = true,
199 	[VM_PAGE_ON_INACTIVE_CLEANED_Q] = true,
200 	[VM_PAGE_ON_ACTIVE_Q] = true,
201 #if CONFIG_SECLUDED_MEMORY
202 	[VM_PAGE_ON_SECLUDED_Q] = true,
203 #endif /* CONFIG_SECLUDED_MEMORY */
204 };
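
/*
 * These tables turn a queue-state predicate into a single array load,
 * indexed by the page's vmp_q_state field, e.g.
 * vm_page_inactive_states[mem->vmp_q_state].
 */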
205 
206 #if CONFIG_SECLUDED_MEMORY
207 struct vm_page_secluded_data vm_page_secluded;
208 #endif /* CONFIG_SECLUDED_MEMORY */
209 
210 #if DEVELOPMENT || DEBUG
211 extern struct memory_object_pager_ops shared_region_pager_ops;
212 unsigned int shared_region_pagers_resident_count = 0;
213 unsigned int shared_region_pagers_resident_peak = 0;
214 #endif /* DEVELOPMENT || DEBUG */
215 
216 
217 
218 int             PERCPU_DATA(start_color);
219 vm_page_t       PERCPU_DATA(free_pages);
220 boolean_t       hibernate_cleaning_in_progress = FALSE;
221 
222 atomic_counter_t vm_guard_count;
223 uint32_t        vm_lopage_free_count = 0;
224 uint32_t        vm_lopage_free_limit = 0;
225 uint32_t        vm_lopage_lowater    = 0;
226 boolean_t       vm_lopage_refill = FALSE;
227 boolean_t       vm_lopage_needed = FALSE;
228 
229 int             speculative_age_index = 0;
230 int             speculative_steal_index = 0;
231 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_RESERVED_SPECULATIVE_AGE_Q + 1];
232 
233 boolean_t       hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
234                                                           * Updated and checked behind the vm_page_queues_lock. */
235 
236 static void             vm_page_free_prepare(vm_page_t  page);
237 
238 
239 static void vm_tag_init(void);
240 
241 /* for debugging purposes */
242 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
243     VM_PAGE_PACKED_FROM_ARRAY;
244 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
245     VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
246 
247 /*
248  *	Associated with each page of user-allocatable memory is a
249  *	page structure.
250  */
251 
252 /*
253  *	These variables record the values returned by vm_page_bootstrap,
254  *	for debugging purposes.  The implementation of pmap_steal_memory
255  *	and pmap_startup here also uses them internally.
256  */
257 
258 vm_offset_t virtual_space_start;
259 vm_offset_t virtual_space_end;
260 uint32_t        vm_page_pages;
261 
262 /*
263  *	The vm_page_lookup() routine, which provides for fast
264  *	(virtual memory object, offset) to page lookup, employs
265  *	the following hash table.  The vm_page_{insert,remove}
266  *	routines install and remove associations in the table.
267  *	[This table is often called the virtual-to-physical,
268  *	or VP, table.]
269  */
270 typedef struct {
271 	vm_page_packed_t page_list;
272 #if     MACH_PAGE_HASH_STATS
273 	int             cur_count;              /* current count */
274 	int             hi_count;               /* high water mark */
275 #endif /* MACH_PAGE_HASH_STATS */
276 } vm_page_bucket_t;
277 
278 
279 #define BUCKETS_PER_LOCK        16
280 
281 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets;                /* Array of buckets */
282 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_count = 0;       /* How big is array? */
283 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_mask;              /* Mask for hash function */
284 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_hash_shift;             /* Shift for hash function */
285 SECURITY_READ_ONLY_LATE(uint32_t)           vm_page_bucket_hash;            /* Basic bucket hash */
286 SECURITY_READ_ONLY_LATE(unsigned int)       vm_page_bucket_lock_count = 0;  /* How big is array of locks? */
287 
288 #ifndef VM_TAG_ACTIVE_UPDATE
289 #error VM_TAG_ACTIVE_UPDATE
290 #endif
291 #ifndef VM_TAG_SIZECLASSES
292 #error VM_TAG_SIZECLASSES
293 #endif
294 
295 /* for debugging */
296 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
297 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
298 
299 vm_allocation_site_t            vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
300 vm_allocation_site_t *          vm_allocation_sites[VM_MAX_TAG_VALUE];
301 #if VM_TAG_SIZECLASSES
302 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
303 #endif /* VM_TAG_SIZECLASSES */
304 
305 vm_tag_t vm_allocation_tag_highest;
306 
307 #if VM_PAGE_BUCKETS_CHECK
308 boolean_t vm_page_buckets_check_ready = FALSE;
309 #if VM_PAGE_FAKE_BUCKETS
310 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
311 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
312 #endif /* VM_PAGE_FAKE_BUCKETS */
313 #endif /* VM_PAGE_BUCKETS_CHECK */
314 
315 #if     MACH_PAGE_HASH_STATS
316 /* This routine is only for debug.  It is intended to be called by
317  * hand by a developer using a kernel debugger.  This routine prints
318  * out vm_page_hash table statistics to the kernel debug console.
319  */
320 void
321 hash_debug(void)
322 {
323 	int     i;
324 	int     numbuckets = 0;
325 	int     highsum = 0;
326 	int     maxdepth = 0;
327 
328 	for (i = 0; i < vm_page_bucket_count; i++) {
329 		if (vm_page_buckets[i].hi_count) {
330 			numbuckets++;
331 			highsum += vm_page_buckets[i].hi_count;
332 			if (vm_page_buckets[i].hi_count > maxdepth) {
333 				maxdepth = vm_page_buckets[i].hi_count;
334 			}
335 		}
336 	}
337 	printf("Total number of buckets: %d\n", vm_page_bucket_count);
338 	printf("Number used buckets:     %d = %d%%\n",
339 	    numbuckets, 100 * numbuckets / vm_page_bucket_count);
340 	printf("Number unused buckets:   %d = %d%%\n",
341 	    vm_page_bucket_count - numbuckets,
342 	    100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
343 	printf("Sum of bucket max depth: %d\n", highsum);
344 	printf("Average bucket depth:    %d.%2d\n",
345 	    highsum / vm_page_bucket_count,
346 	    highsum % vm_page_bucket_count);
347 	printf("Maximum bucket depth:    %d\n", maxdepth);
348 }
349 #endif /* MACH_PAGE_HASH_STATS */
350 
351 /*
352  *	The virtual page size is currently implemented as a runtime
353  *	variable, but is constant once initialized using vm_set_page_size.
354  *	This initialization must be done in the machine-dependent
355  *	bootstrap sequence, before calling other machine-independent
356  *	initializations.
357  *
358  *	All references to the virtual page size outside this
359  *	module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
360  *	constants.
361  */
362 #if defined(__arm64__)
363 vm_size_t       page_size;
364 vm_size_t       page_mask;
365 int             page_shift;
366 #else
367 vm_size_t       page_size  = PAGE_SIZE;
368 vm_size_t       page_mask  = PAGE_MASK;
369 int             page_shift = PAGE_SHIFT;
370 #endif
371 
372 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages;
373 #if XNU_VM_HAS_DELAYED_PAGES
374 vm_page_t                          vm_pages_end;
375 uint32_t                           vm_pages_count;
376 #else
377 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages_end;
378 SECURITY_READ_ONLY_LATE(uint32_t)  vm_pages_count;
379 #endif /* XNU_VM_HAS_DELAYED_PAGES */
380 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
381 SECURITY_READ_ONLY_LATE(ppnum_t)   vm_pages_first_pnum;
382 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
383 
384 
385 /*
386  *	Resident pages that represent real memory
387  *	are allocated from a set of free lists,
388  *	one per color.
389  */
390 unsigned int    vm_colors;
391 unsigned int    vm_color_mask;                  /* mask is == (vm_colors-1) */
392 unsigned int    vm_cache_geometry_colors = 0;   /* set by hw dependent code during startup */
393 unsigned int    vm_free_magazine_refill_limit = 0;
394 
395 struct vm_page_queue_free_head  vm_page_queue_free[MAX_COLORS];
396 
397 unsigned int    vm_page_free_wanted;
398 unsigned int    vm_page_free_wanted_privileged;
399 #if CONFIG_SECLUDED_MEMORY
400 unsigned int    vm_page_free_wanted_secluded;
401 #endif /* CONFIG_SECLUDED_MEMORY */
402 unsigned int    vm_page_free_count;
403 
404 unsigned int    vm_page_realtime_count;
405 
406 /*
407  *	Occasionally, the virtual memory system uses
408  *	resident page structures that do not refer to
409  *	real pages, for example to leave a page with
410  *	important state information in the VP table.
411  *
412  *	These page structures are allocated the way
413  *	most other kernel structures are.
414  */
415 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
416 vm_locks_array_t vm_page_locks;
417 
418 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
419 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
420 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
421 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
422 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
423 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
424 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
425 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
426 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
427 
428 unsigned int    vm_page_local_q_soft_limit = 250;
429 unsigned int    vm_page_local_q_hard_limit = 500;
430 struct vpl     *__zpercpu vm_page_local_q;
431 
432 /* N.B. Guard and fictitious pages must not
433  * be assigned a zero phys_page value.
434  */
435 /*
436  *	Fictitious pages don't have a physical address,
437  *	but we must initialize phys_page to something.
438  *	For debugging, this should be a strange value
439  *	that the pmap module can recognize in assertions.
440  */
441 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
442 
443 /*
444  *	Guard pages are not accessible so they don't
445  *      need a physical address, but we need to enter
446  *	one in the pmap.
447  *	Let's make it recognizable and make sure that
448  *	we don't use a real physical page with that
449  *	physical address.
450  */
451 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
452 
453 /*
454  *	Resident page structures are also chained on
455  *	queues that are used by the page replacement
456  *	system (pageout daemon).  These queues are
457  *	defined here, but are shared by the pageout
458  *	module.  The inactive queue is broken into
459  *	file backed and anonymous for convenience as the
460  *	pageout daemon often assigns a higher
461  *	importance to anonymous pages (it is less likely to pick them).
462  */
463 vm_page_queue_head_t    vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
464 vm_page_queue_head_t    vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
465 #if CONFIG_SECLUDED_MEMORY
466 vm_page_queue_head_t    vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
467 #endif /* CONFIG_SECLUDED_MEMORY */
468 vm_page_queue_head_t    vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED;  /* inactive memory queue for anonymous pages */
469 vm_page_queue_head_t    vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
470 
471 queue_head_t    vm_objects_wired;
472 
473 vm_page_queue_head_t    vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
474 uint32_t        vm_page_donate_mode;
475 uint32_t        vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
476 uint32_t        vm_page_donate_count;
477 bool            vm_page_donate_queue_ripe;
478 
479 
480 vm_page_queue_head_t    vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
481 uint32_t        vm_page_background_target;
482 uint32_t        vm_page_background_target_snapshot;
483 uint32_t        vm_page_background_count;
484 uint64_t        vm_page_background_promoted_count;
485 
486 uint32_t        vm_page_background_internal_count;
487 uint32_t        vm_page_background_external_count;
488 
489 uint32_t        vm_page_background_mode;
490 uint32_t        vm_page_background_exclude_external;
491 
492 unsigned int    vm_page_active_count;
493 unsigned int    vm_page_inactive_count;
494 unsigned int    vm_page_kernelcache_count;
495 #if CONFIG_SECLUDED_MEMORY
496 unsigned int    vm_page_secluded_count;
497 unsigned int    vm_page_secluded_count_free;
498 unsigned int    vm_page_secluded_count_inuse;
499 unsigned int    vm_page_secluded_count_over_target;
500 #endif /* CONFIG_SECLUDED_MEMORY */
501 unsigned int    vm_page_anonymous_count;
502 unsigned int    vm_page_throttled_count;
503 unsigned int    vm_page_speculative_count;
504 
505 unsigned int    vm_page_wire_count;
506 unsigned int    vm_page_wire_count_on_boot = 0;
507 unsigned int    vm_page_stolen_count = 0;
508 unsigned int    vm_page_wire_count_initial;
509 unsigned int    vm_page_gobble_count = 0;
510 unsigned int    vm_page_kern_lpage_count = 0;
511 
512 uint64_t        booter_size;  /* external so it can be found in core dumps */
513 
514 #define VM_PAGE_WIRE_COUNT_WARNING      0
515 #define VM_PAGE_GOBBLE_COUNT_WARNING    0
516 
517 unsigned int    vm_page_purgeable_count = 0; /* # of pages purgeable now */
518 unsigned int    vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
519 uint64_t        vm_page_purged_count = 0;    /* total count of purged pages */
520 
521 unsigned int    vm_page_xpmapped_external_count = 0;
522 unsigned int    vm_page_external_count = 0;
523 unsigned int    vm_page_internal_count = 0;
524 unsigned int    vm_page_pageable_external_count = 0;
525 unsigned int    vm_page_pageable_internal_count = 0;
526 
527 #if DEVELOPMENT || DEBUG
528 unsigned int    vm_page_speculative_recreated = 0;
529 unsigned int    vm_page_speculative_created = 0;
530 unsigned int    vm_page_speculative_used = 0;
531 #endif
532 
533 vm_page_queue_head_t    vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
534 
535 unsigned int    vm_page_cleaned_count = 0;
536 
537 uint64_t        max_valid_dma_address = 0xffffffffffffffffULL;
538 ppnum_t         max_valid_low_ppnum = PPNUM_MAX;
539 
540 
541 /*
542  *	Several page replacement parameters are also
543  *	shared with this module, so that page allocation
544  *	(done here in vm_page_alloc) can trigger the
545  *	pageout daemon.
546  */
547 unsigned int    vm_page_free_target = 0;
548 unsigned int    vm_page_free_min = 0;
549 unsigned int    vm_page_throttle_limit = 0;
550 unsigned int    vm_page_inactive_target = 0;
551 #if CONFIG_SECLUDED_MEMORY
552 unsigned int    vm_page_secluded_target = 0;
553 #endif /* CONFIG_SECLUDED_MEMORY */
554 unsigned int    vm_page_anonymous_min = 0;
555 unsigned int    vm_page_free_reserved = 0;
556 
557 
558 /*
559  *	The VM system has a couple of heuristics for deciding
560  *	that pages are "uninteresting" and should be placed
561  *	on the inactive queue as likely candidates for replacement.
562  *	These variables let the heuristics be controlled at run-time
563  *	to make experimentation easier.
564  */
565 
566 boolean_t vm_page_deactivate_hint = TRUE;
567 
568 struct vm_page_stats_reusable vm_page_stats_reusable;
569 
570 /*
571  *	vm_set_page_size:
572  *
573  *	Sets the page size, perhaps based upon the memory
574  *	size.  Must be called before any use of page-size
575  *	dependent functions.
576  *
577  *	Sets page_shift and page_mask from page_size.
578  */
579 void
580 vm_set_page_size(void)
581 {
582 	page_size  = PAGE_SIZE;
583 	page_mask  = PAGE_MASK;
584 	page_shift = PAGE_SHIFT;
585 
586 	if ((page_mask & page_size) != 0) {
587 		panic("vm_set_page_size: page size not a power of two");
588 	}
589 
590 	for (page_shift = 0;; page_shift++) {
591 		if ((1U << page_shift) == page_size) {
592 			break;
593 		}
594 	}
595 }
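
/*
 * For example, with PAGE_SIZE = 16384 this leaves page_mask = 0x3fff and
 * page_shift = 14; with PAGE_SIZE = 4096, page_mask = 0xfff and
 * page_shift = 12.
 */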
596 
597 /*
598  * See the header for function documentation.
599  */
600 vm_memory_class_t
601 vm_page_get_memory_class(vm_page_t page __unused)
602 {
603 	assert(!vm_page_is_fictitious(page));
604 
605 	return VM_MEMORY_CLASS_REGULAR;
606 }
607 
608 /*
609  *	vm_page_validate_no_references:
610  *
611  *	Make sure the physical page has no refcounts.
612  *
613  */
614 static inline void
615 vm_page_validate_no_references(
616 	vm_page_t       mem)
617 {
618 	bool is_freed;
619 
620 	if (vm_page_is_fictitious(mem)) {
621 		return;
622 	}
623 
624 	pmap_paddr_t paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(mem));
625 
626 #if CONFIG_SPTM
627 	is_freed = pmap_is_page_free(paddr);
628 #else
629 	is_freed = pmap_verify_free(VM_PAGE_GET_PHYS_PAGE(mem));
630 #endif /* CONFIG_SPTM */
631 
632 	if (!is_freed) {
633 		/*
634 		 * There is a redundancy here, but we are going to panic anyway,
635 		 * and ASSERT_PMAP_FREE traces useful information. So, we keep this
636 		 * behavior.
637 		 */
638 		ASSERT_PMAP_FREE(mem);
639 		panic("%s: page 0x%llx is referenced", __func__, paddr);
640 	}
641 }
642 
643 /*
644  * vm_page_is_restricted:
645  *
646  * Checks if a given vm_page_t is a restricted page.
647  */
648 inline bool
649 vm_page_is_restricted(vm_page_t mem)
650 {
651 	ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(mem);
652 	return pmap_is_page_restricted(pn);
653 }
654 
655 #ifdef __x86_64__
656 
657 #define MAX_CLUMP_SIZE      16
658 #define DEFAULT_CLUMP_SIZE  4
659 
660 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
661 
662 #if DEVELOPMENT || DEBUG
663 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
664 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
665 
666 static inline void
667 vm_clump_update_stats(unsigned int c)
668 {
669 	assert(c <= vm_clump_size);
670 	if (c > 0 && c <= vm_clump_size) {
671 		vm_clump_stats[c] += c;
672 	}
673 	vm_clump_allocs += c;
674 }
675 #endif  /*  if DEVELOPMENT || DEBUG */
676 
677 /* Called once to setup the VM clump knobs */
678 static void
679 vm_page_setup_clump( void )
680 {
681 	unsigned int override, n;
682 
683 	vm_clump_size = DEFAULT_CLUMP_SIZE;
684 	if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
685 		vm_clump_size = override;
686 	}
687 
688 	if (vm_clump_size > MAX_CLUMP_SIZE) {
689 		panic("vm_page_setup_clump:: clump_size is too large!");
690 	}
691 	if (vm_clump_size < 1) {
692 		panic("vm_page_setup_clump:: clump_size must be >= 1");
693 	}
694 	if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
695 		panic("vm_page_setup_clump:: clump_size must be a power of 2");
696 	}
697 
698 	vm_clump_promote_threshold = vm_clump_size;
699 	vm_clump_mask = vm_clump_size - 1;
700 	for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
701 		;
702 	}
703 
704 #if DEVELOPMENT || DEBUG
705 	bzero(vm_clump_stats, sizeof(vm_clump_stats));
706 	vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
707 #endif  /*  if DEVELOPMENT || DEBUG */
708 }
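
/*
 * With the default clump_size of 4 this leaves vm_clump_mask = 3,
 * vm_clump_shift = 2 and vm_clump_promote_threshold = 4; the "clump_size"
 * boot-arg can override it with any power of 2 up to MAX_CLUMP_SIZE (16).
 */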
709 
710 #endif /* __x86_64__ */
711 
712 /*
713  * vm_page_queue_free_remove:
714  * Removes a specific page from the global free queues.  Based on the remove
715  * reason, this may update the page state... but it does not update the queue
716  * state.
717  */
718 static void
719 vm_page_queue_free_remove(vm_page_t page, vm_remove_reason_t remove_reason)
720 {
721 	unsigned int color = VM_PAGE_GET_COLOR(page);
722 
723 	vm_page_queue_remove(&vm_page_queue_free[color].qhead, page, vmp_pageq);
724 	vm_page_free_count--;
725 
726 	switch (remove_reason) {
727 	case VM_REMOVE_REASON_USE:
728 	{
729 		break;
730 	}
731 	case VM_REMOVE_REASON_REBALANCE:
732 	{
733 		break;
734 	}
735 	default:
736 	{
737 		panic("Unrecognized remove reason %u", remove_reason);
738 		__builtin_unreachable();
739 	}
740 	}
741 }
742 
743 /*
744  * vm_page_queue_free_remove_first:
745  * Given a number of pages, removes that many pages from the head of the global
746  * free queues and returns a page list of these pages, with the queue state set
747  * to a state specified by the caller.
748  *
749  * Must be called with the free page lock held, preemption disabled, and with
750  * enough pages in the global free queues to satisfy the request.
751  */
752 static vm_page_t
753 vm_page_queue_free_remove_first(unsigned int num_pages, unsigned int q_state)
754 {
755 	vm_page_t        mem                = VM_PAGE_NULL;
756 	vm_page_t        list               = VM_PAGE_NULL;
757 	vm_page_t        old_list           = VM_PAGE_NULL;
758 	vm_page_t        new_list           = VM_PAGE_NULL;
759 	int             *colorp;
760 	unsigned int     color;
761 	unsigned int     clump_end __unused = 0;
762 	unsigned int     sub_count __unused = 0;
763 
764 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
765 	assert(get_preemption_level() != 0);
766 	assert(q_state <= VM_PAGE_Q_STATE_LAST_VALID_VALUE);
767 	assert(vm_page_free_count >= num_pages);
768 
769 	colorp = PERCPU_GET(start_color);
770 	color = *colorp;
771 	vm_page_free_count -= num_pages;
772 
773 	/* Get the pages. */
774 	while (num_pages--) {
775 		while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
776 			/* This color queue is empty; skip to the next one. */
777 			color = (color + 1) & vm_color_mask;
778 		}
779 
780 #if defined(__x86_64__)
781 		/*
782 		 * x86_64 uses a bespoke free queue scheme, where the free path
783 		 * tries to cluster clumps of contiguous pages together on
784 		 * the free queue to optimize for the platform's memory
785 		 * controller.
786 		 */
787 		vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
788 		    mem, clump_end);
789 
790 		if (clump_end) {
791 			/* Only change colors at the end of a clump. */
792 			color = (color + 1) & vm_color_mask;
793 		}
794 
795 #if DEVELOPMENT || DEBUG
796 		sub_count++;
797 
798 		if (clump_end) {
799 			vm_clump_update_stats(sub_count);
800 			sub_count = 0;
801 		}
802 #endif /* DEVELOPMENT || DEBUG */
803 
804 #else /* !defined(__x86_64__) */
805 		/* Other targets default to rotating colors after each pop. */
806 		vm_page_queue_remove_first(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
807 		color = (color + 1) & vm_color_mask;
808 #endif /* !defined(__x86_64__) */
809 
810 		vm_page_list_push(&list, mem);
811 
812 		/* Assert that we got a sane page from the free queue. */
813 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
814 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
815 		assert(mem->vmp_tabled == FALSE);
816 		assert(mem->vmp_object == 0);
817 		assert(!mem->vmp_laundry);
818 		assert(mem->vmp_busy);
819 		assert(!mem->vmp_pmapped);
820 		assert(!mem->vmp_wpmapped);
821 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
822 		assert(!mem->vmp_realtime);
823 
824 		/* Set the page to the client's desired queue state. */
825 		mem->vmp_q_state = q_state;
826 
827 	}
828 
829 #if defined(__x86_64__) && (DEVELOPMENT || DEBUG)
830 	vm_clump_update_stats(sub_count);
831 #endif /* defined(__x86_64__) && (DEVELOPMENT || DEBUG) */
832 
833 	/* Record the next page color the CPU should try to get. */
834 	*colorp = color;
835 
836 	/*
837 	 * Some existing driver/IOKit code deals badly with getting physically
838 	 * contiguous memory... which this alloc code is rather likely to
839 	 * provide by accident immediately after boot.
840 	 *
841 	 * To avoid hitting issues related to this, we'll invert the order of
842 	 * the list we return.  This code should be removed once we've tracked
843 	 * down the various driver issues.
844 	 */
845 	old_list = list;
846 
847 	vm_page_list_foreach_consume(mem, &old_list) {
848 		vm_page_list_push(&new_list, mem);
849 	}
850 
851 	list = new_list;
852 
853 	return list;
854 }
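
/*
 * Illustrative caller sketch (not part of the build): a grab-style refill
 * path might batch pages off the free queues roughly like this; the magazine
 * sizing and the VM_PAGE_ON_FREE_LOCAL_Q queue state are assumptions for the
 * example, not a statement about the real callers.
 *
 *	vm_page_t head = VM_PAGE_NULL;
 *
 *	vm_free_page_lock_spin();
 *	if (vm_page_free_count >= vm_free_magazine_refill_limit) {
 *		head = vm_page_queue_free_remove_first(
 *		    vm_free_magazine_refill_limit, VM_PAGE_ON_FREE_LOCAL_Q);
 *	}
 *	vm_free_page_unlock();
 */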
855 
856 /*
857  * vm_page_queue_free_enter:
858  * Given a page, puts that page onto the global free page queues.
859  *
860  * Must be called with the VM page free lock held.
861  */
862 static void
863 vm_page_queue_free_enter(vm_page_t mem)
864 {
865 	int color = VM_PAGE_GET_COLOR(mem);
866 
867 
868 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
869 	assert(mem->vmp_busy);
870 	assert(!mem->vmp_realtime);
871 
872 	mem->vmp_lopage = FALSE;
873 	mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
874 
875 #if defined(__x86_64__)
876 	vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
877 #else
878 	vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
879 #endif
880 
881 
882 	vm_page_free_count++;
883 }
884 
885 /*
886  * See the header for documentation.
887  */
888 void
889 vm_page_steal_free_page(vm_page_t page, vm_remove_reason_t remove_reason)
890 {
891 	vm_memory_class_t memory_class = vm_page_get_memory_class(page);
892 
893 	assert(page->vmp_q_state == VM_PAGE_ON_FREE_Q);
894 
895 	switch (memory_class) {
896 	case VM_MEMORY_CLASS_REGULAR:
897 	{
898 		vm_page_queue_free_remove(page, remove_reason);
899 		break;
900 	}
901 	default:
902 	{
903 		panic("Unrecognized memory class %u\n", memory_class);
904 		break;
905 	}
906 	}
907 
908 #if MACH_ASSERT
909 	if (vm_check_refs_on_alloc) {
910 		/*
911 		 * Stolen free pages should be unreferenced, just like grabbed free
912 		 * pages.
913 		 */
914 		vm_page_validate_no_references(page);
915 	}
916 #endif /* MACH_ASSERT */
917 }
918 
919 /*
920  * See the header for documentation.
921  */
922 vmp_free_list_result_t
923 vm_page_put_list_on_free_queue(vm_page_t list, bool page_queues_locked)
924 {
925 	vmp_free_list_result_t result = { };
926 	vm_page_t mem;
927 
928 	LCK_MTX_ASSERT(&vm_page_queue_lock,
929 	    page_queues_locked ? LCK_MTX_ASSERT_OWNED : LCK_MTX_ASSERT_NOTOWNED);
930 #if !HIBERNATION
931 	if (startup_phase >= STARTUP_SUB_KMEM) {
932 		/* vm_page_release_startup() doesn't hold the lock */
933 		LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
934 	}
935 #endif /* !HIBERNATION */
936 
937 	vm_page_list_foreach_consume(mem, &list) {
938 		vm_memory_class_t memory_class = vm_page_get_memory_class(mem);
939 
940 
941 		/* Clear any specialQ hints before releasing page to the free pool */
942 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
943 
944 		if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
945 		    vm_lopage_free_count < vm_lopage_free_limit &&
946 		    VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
947 			/*
948 			 * this exists to support hardware controllers
949 			 * incapable of generating DMAs with more than 32 bits
950 			 * of address on platforms with physical memory > 4G...
951 			 */
952 			vm_page_queue_enter_first(&vm_lopage_queue_free, mem,
953 			    vmp_pageq);
954 			vm_lopage_free_count++;
955 
956 			if (vm_lopage_free_count >= vm_lopage_free_limit) {
957 				vm_lopage_refill = FALSE;
958 			}
959 
960 			mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
961 			mem->vmp_lopage = TRUE;
962 			result.vmpr_lopage++;
963 			continue;
964 		}
965 
966 #if CONFIG_SECLUDED_MEMORY
967 		if (memory_class == VM_MEMORY_CLASS_REGULAR &&
968 		    vm_page_free_count > vm_page_free_reserved &&
969 		    vm_page_secluded_count < vm_page_secluded_target &&
970 		    num_tasks_can_use_secluded_mem == 0) {
971 			/*
972 			 * XXX FBDP TODO: also avoid refilling secluded queue
973 			 * when some IOKit objects are already grabbing from it...
974 			 */
975 			if (!page_queues_locked && !vm_page_trylock_queues()) {
976 				/* take locks in right order */
977 				vm_free_page_unlock();
978 				vm_page_lock_queues();
979 				vm_free_page_lock_spin();
980 			}
981 
982 			mem->vmp_lopage = FALSE;
983 			vm_page_queue_enter_first(&vm_page_queue_secluded, mem,
984 			    vmp_pageq);
985 			mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
986 			vm_page_secluded_count++;
987 			vm_page_secluded_count_free++;
988 			VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
989 
990 			if (!page_queues_locked) {
991 				vm_page_unlock_queues();
992 			}
993 			result.vmpr_secluded++;
994 			continue;
995 		}
996 #else
997 		(void)page_queues_locked;
998 #endif /* CONFIG_SECLUDED_MEMORY */
999 
1000 		switch (memory_class) {
1001 		case VM_MEMORY_CLASS_REGULAR:
1002 			vm_page_queue_free_enter(mem);
1003 			result.vmpr_regular++;
1004 			break;
1005 		default:
1006 			panic("unrecognized memory class %u", memory_class);
1007 		}
1008 	}
1009 
1010 	return result;
1011 }
1012 
1013 static bool
1014 vm_page_free_has_any_waiters(void)
1015 {
1016 	uint32_t result = 0;
1017 
1018 	result |= vm_page_free_wanted_privileged;
1019 #if CONFIG_SECLUDED_MEMORY
1020 	result |= vm_page_free_wanted_secluded;
1021 #endif /* CONFIG_SECLUDED_MEMORY */
1022 	result |= vm_page_free_wanted;
1023 
1024 	return result != 0;
1025 }
1026 
1027 static void
1028 vm_page_free_wakeup(event_t event, uint32_t n)
1029 {
1030 	if (vps_dynamic_priority_enabled) {
1031 		if (n == UINT32_MAX) {
1032 			wakeup_all_with_inheritor(event, THREAD_AWAKENED);
1033 		} else {
1034 			while (n-- > 0) {
1035 				wakeup_one_with_inheritor(event, THREAD_AWAKENED,
1036 				    LCK_WAKE_DO_NOT_TRANSFER_PUSH, NULL);
1037 			}
1038 		}
1039 	} else {
1040 		thread_wakeup_nthreads(event, n);
1041 	}
1042 }
1043 
1044 __attribute__((noinline))
1045 static void
1046 vm_page_free_handle_wakeups_and_unlock(vmp_free_list_result_t  vmpr)
1047 {
1048 	unsigned int    need_wakeup = 0;
1049 	unsigned int    need_priv_wakeup = 0;
1050 #if CONFIG_SECLUDED_MEMORY
1051 	unsigned int    need_wakeup_secluded = 0;
1052 #endif /* CONFIG_SECLUDED_MEMORY */
1053 
1054 #define DONATE_TO_WAITERS(count, waiters_count)  ({ \
1055 	uint32_t __n = MIN(waiters_count, vmpr.count); \
1056 	waiters_count -= __n;                          \
1057 	vmpr.count    -= __n;                          \
1058 	__n;                                           \
1059 })
1060 
1061 	/*
1062 	 *	Step 1: privileged waiters get to be satisfied first
1063 	 */
1064 	if (vm_page_free_wanted_privileged) {
1065 		need_priv_wakeup += DONATE_TO_WAITERS(vmpr_regular,
1066 		    vm_page_free_wanted_privileged);
1067 	}
1068 
1069 
1070 	/*
1071 	 *	Step 2: the privileged reserve needs to be replenished
1072 	 *
1073 	 *	Let's make sure that we only wake up regular threads
1074 	 *	for free pages above the reserve threshold.
1075 	 */
1076 	if (vm_page_free_count <= vm_page_free_reserved) {
1077 		vmpr.vmpr_regular = 0;
1078 	} else if (vm_page_free_count - vmpr.vmpr_regular <
1079 	    vm_page_free_reserved) {
1080 		vmpr.vmpr_regular = (uint8_t)(vm_page_free_count -
1081 		    vm_page_free_reserved);
1082 	}
1083 
1084 	/*
1085 	 *	Step 3: satisfy secluded waiters, using the secluded pool first,
1086 	 *	regular pages second.
1087 	 */
1088 #if CONFIG_SECLUDED_MEMORY
1089 	if (vm_page_free_wanted_secluded) {
1090 		need_wakeup_secluded += DONATE_TO_WAITERS(vmpr_secluded,
1091 		    vm_page_free_wanted_secluded);
1092 		need_wakeup_secluded += DONATE_TO_WAITERS(vmpr_regular,
1093 		    vm_page_free_wanted_secluded);
1094 		if (vm_page_free_wanted_secluded == 0) {
1095 			need_wakeup_secluded = UINT32_MAX;
1096 		}
1097 	}
1098 #endif /* CONFIG_SECLUDED_MEMORY */
1099 
1100 	/*
1101 	 *	Step 4: satisfy regular demand last.
1102 	 */
1103 	if (vm_page_free_wanted) {
1104 		need_wakeup += DONATE_TO_WAITERS(vmpr_regular,
1105 		    vm_page_free_wanted);
1106 		if (vm_page_free_wanted == 0) {
1107 			need_wakeup = UINT32_MAX;
1108 		}
1109 	}
1110 
1111 	/*
1112 	 * We have updated the waiter counts, and if this page release happens
1113 	 * in the context of a very low priority thread, we might starve the
1114 	 * wakeup of privileged threads.
1115 	 *
1116 	 * While we hold the free page lock, such threads would wake us up via
1117 	 * the mutex priority inheritance mechanism, but as soon as we drop the
1118 	 * lock all bets are off.
1119 	 *
1120 	 * To avoid this priority inversion that could really hurt the VM,
1121 	 * disable preemption until we've woken up all privileged threads.
1122 	 */
1123 	disable_preemption();
1124 
1125 	vm_free_page_unlock();
1126 
1127 	/*
1128 	 * Dispatch privileged wakeups
1129 	 *
1130 	 * There shouldn't be that many VM-privileged threads,
1131 	 * so let's wake them all up, even if we don't quite
1132 	 * have enough pages to satisfy them all.
1133 	 */
1134 	if (need_priv_wakeup) {
1135 		vm_page_free_wakeup(&vm_page_free_wanted_privileged,
1136 		    UINT32_MAX);
1137 	}
1138 	enable_preemption();
1139 
1140 #if CONFIG_SECLUDED_MEMORY
1141 	if (need_wakeup_secluded) {
1142 		vm_page_free_wakeup(&vm_page_free_wanted_secluded,
1143 		    need_wakeup_secluded);
1144 	}
1145 #endif /* CONFIG_SECLUDED_MEMORY */
1146 	if (need_wakeup) {
1147 		vm_page_free_wakeup(&vm_page_free_count, need_wakeup);
1148 	}
1149 
1150 #undef DONATE_TO_WAITERS
1151 }
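
/*
 * Worked example, assuming the free count stays above the privileged reserve
 * so step 2 doesn't clamp anything: if a free operation returns
 * vmpr_regular = 8 while 2 privileged, 1 secluded (with CONFIG_SECLUDED_MEMORY)
 * and 20 regular waiters are queued, step 1 donates 2 pages to the privileged
 * waiters (all of which are then woken), step 3 donates 1 of the remaining 6
 * to the secluded waiter (emptying that count, so all secluded waiters wake),
 * and step 4 donates the last 5, waking 5 of the 20 regular waiters.
 */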
1152 
1153 
1154 #define COLOR_GROUPS_TO_STEAL   4
1155 
1156 /* Called once during startup, once the cache geometry is known.
1157  */
1158 static void
1159 vm_page_set_colors( void )
1160 {
1161 	unsigned int    n, override;
1162 
1163 #if defined (__x86_64__)
1164 	/* adjust #colors because we need to color outside the clump boundary */
1165 	vm_cache_geometry_colors >>= vm_clump_shift;
1166 #endif
1167 	if (PE_parse_boot_argn("colors", &override, sizeof(override))) {                /* colors specified as a boot-arg? */
1168 		n = override;
1169 	} else if (vm_cache_geometry_colors) {                  /* do we know what the cache geometry is? */
1170 		n = vm_cache_geometry_colors;
1171 	} else {
1172 		n = DEFAULT_COLORS;                             /* use default if all else fails */
1173 	}
1174 	if (n == 0) {
1175 		n = 1;
1176 	}
1177 	if (n > MAX_COLORS) {
1178 		n = MAX_COLORS;
1179 	}
1180 
1181 	/* the count must be a power of 2  */
1182 	if ((n & (n - 1)) != 0) {
1183 		n = DEFAULT_COLORS;                             /* use default if all else fails */
1184 	}
1185 	vm_colors = n;
1186 	vm_color_mask = n - 1;
1187 
1188 	vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
1189 
1190 #if defined (__x86_64__)
1191 	/* adjust for reduction in colors due to clumping and multiple cores */
1192 	if (real_ncpus) {
1193 		vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
1194 	}
1195 #endif
1196 }
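
/*
 * Example: if the chosen color count ends up as 32, this leaves
 * vm_colors = 32, vm_color_mask = 31 and, with COLOR_GROUPS_TO_STEAL = 4,
 * vm_free_magazine_refill_limit = 128 pages (further multiplied by
 * vm_clump_size * real_ncpus on x86_64).
 */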
1197 
1198 #if XNU_VM_HAS_DELAYED_PAGES
1199 
1200 static uint32_t vm_delayed_count = 0;    /* when non-zero, indicates we may have more pages to init */
1201 static ppnum_t delay_above_pnum = PPNUM_MAX;
1202 
1203 /*
1204  * For x86, the first 8 GiB initializes quickly and gives us plenty of low memory plus memory above it to start off with.
1205  * If ARM ever uses delayed page initialization, this value may need to be quite different.
1206  */
1207 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
1208 
1209 /*
1210  * When we have to dip into more delayed pages due to low memory, free up
1211  * a large chunk to get things back to normal. This avoids contention on the
1212  * delayed code allocating page by page.
1213  */
1214 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
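/* 1 GiB worth of pages: 262144 with 4K pages, 65536 with 16K pages. */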
1215 
1216 /*
1217  * Get and initialize the next delayed page.
1218  */
1219 static vm_page_t
1220 vm_get_delayed_page(int grab_options)
1221 {
1222 	vm_page_t p;
1223 	ppnum_t   pnum;
1224 
1225 	/*
1226 	 * Get a new page if we have one.
1227 	 */
1228 	vm_free_page_lock();
1229 	if (vm_delayed_count == 0) {
1230 		vm_free_page_unlock();
1231 		return NULL;
1232 	}
1233 
1234 	if (!pmap_next_page(&pnum)) {
1235 		vm_delayed_count = 0;
1236 		vm_free_page_unlock();
1237 		return NULL;
1238 	}
1239 
1240 
1241 	assert(vm_delayed_count > 0);
1242 	--vm_delayed_count;
1243 
1244 #if defined(__x86_64__)
1245 	/* x86 cluster code requires increasing phys_page in vm_pages[] */
1246 	if (vm_pages_count > 0) {
1247 		assert(pnum > vm_page_get(vm_pages_count - 1)->vmp_phys_page);
1248 	}
1249 #endif
1250 	p = vm_page_get(vm_pages_count);
1251 	assert(p < vm_pages_end);
1252 	vm_page_init(p, pnum);
1253 	++vm_pages_count;
1254 	++vm_page_pages;
1255 	vm_free_page_unlock();
1256 
1257 	/*
1258 	 * These pages were initially counted as wired, undo that now.
1259 	 */
1260 	if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
1261 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
1262 	} else {
1263 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
1264 		vm_page_lockspin_queues();
1265 	}
1266 	--vm_page_wire_count;
1267 	--vm_page_wire_count_initial;
1268 	if (vm_page_wire_count_on_boot != 0) {
1269 		--vm_page_wire_count_on_boot;
1270 	}
1271 	if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
1272 		vm_page_unlock_queues();
1273 	}
1274 
1275 
1276 	if (fillval) {
1277 		fillPage(pnum, fillval);
1278 	}
1279 	return p;
1280 }
1281 
1282 /*
1283  * Free all remaining delayed pages to the free lists.
1284  */
1285 void
1286 vm_free_delayed_pages(void)
1287 {
1288 	vm_page_t   p;
1289 	vm_page_t   list = NULL;
1290 	uint_t      cnt = 0;
1291 	vm_offset_t start_free_va;
1292 	int64_t     free_size;
1293 
1294 	while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
1295 		if (vm_himemory_mode) {
1296 			vm_page_release(p, FALSE);
1297 		} else {
1298 			p->vmp_snext = list;
1299 			list = p;
1300 		}
1301 		++cnt;
1302 	}
1303 
1304 	/*
1305 	 * Free the pages in reverse order when not in himemory mode, so that
1306 	 * the low memory pages end up first on the free lists (LIFO).
1307 	 */
1308 	while (list != NULL) {
1309 		p = list;
1310 		list = p->vmp_snext;
1311 		p->vmp_snext = NULL;
1312 		vm_page_release(p, FALSE);
1313 	}
1314 #if DEVELOPMENT || DEBUG
1315 	kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
1316 #endif
1317 
1318 	/*
1319 	 * Free up any unused full pages at the end of the vm_pages[] array
1320 	 */
1321 	start_free_va = round_page((vm_offset_t)vm_page_get(vm_pages_count));
1322 
1323 #if defined(__x86_64__)
1324 	/*
1325 	 * Since x86 might have used large pages for vm_pages[], we can't
1326 	 * free starting in the middle of a partially used large page.
1327 	 */
1328 	if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
1329 		start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
1330 	}
1331 #endif
1332 	if (start_free_va < (vm_offset_t)vm_pages_end) {
1333 		free_size = trunc_page((vm_offset_t)vm_pages_end - start_free_va);
1334 		if (free_size > 0) {
1335 			ml_static_mfree(start_free_va, (vm_offset_t)free_size);
1336 			vm_pages_end = (void *)start_free_va;
1337 
1338 			/*
1339 			 * Note there's no locking here, as only this thread will ever change this value.
1340 			 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
1341 			 */
1342 			vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
1343 
1344 #if DEVELOPMENT || DEBUG
1345 			kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
1346 			    (long)free_size, (long)start_free_va);
1347 #endif
1348 		}
1349 	}
1350 }
1351 
1352 /*
1353  * Try to free up enough delayed pages to satisfy a contiguous memory allocation.
1354  */
1355 static void
1356 vm_free_delayed_pages_contig(
1357 	uint_t    npages,
1358 	ppnum_t   max_pnum,
1359 	ppnum_t   pnum_mask)
1360 {
1361 	vm_page_t p;
1362 	ppnum_t   pnum;
1363 	uint_t    cnt = 0;
1364 
1365 	/*
1366 	 * Treat 0 as the absolute max page number.
1367 	 */
1368 	if (max_pnum == 0) {
1369 		max_pnum = PPNUM_MAX;
1370 	}
1371 
1372 	/*
1373 	 * Free till we get a properly aligned start page
1374 	 */
1375 	for (;;) {
1376 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
1377 		if (p == NULL) {
1378 			return;
1379 		}
1380 		pnum = VM_PAGE_GET_PHYS_PAGE(p);
1381 		vm_page_release(p, FALSE);
1382 		if (pnum >= max_pnum) {
1383 			return;
1384 		}
1385 		if ((pnum & pnum_mask) == 0) {
1386 			break;
1387 		}
1388 	}
1389 
1390 	/*
1391 	 * Having a healthy pool of free pages will help performance. We don't
1392 	 * want to fall back to the delayed code for every page allocation.
1393 	 */
1394 	if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
1395 		npages += VM_DELAY_PAGE_CHUNK;
1396 	}
1397 
1398 	/*
1399 	 * Now free up the pages
1400 	 */
1401 	for (cnt = 1; cnt < npages; ++cnt) {
1402 		p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
1403 		if (p == NULL) {
1404 			return;
1405 		}
1406 		vm_page_release(p, FALSE);
1407 	}
1408 }
1409 
1410 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1411 
1412 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
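/*
 * Rounds up to the next power of 2, e.g. ROUNDUP_NEXTP2(5) == 8 and
 * ROUNDUP_NEXTP2(8) == 8; X must be >= 2, since __builtin_clz(0) is undefined.
 */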
1413 
1414 void
1415 vm_page_init_local_q(unsigned int num_cpus)
1416 {
1417 	struct vpl *t_local_q;
1418 
1419 	/*
1420 	 * no point in this for a uni-processor system
1421 	 */
1422 	if (num_cpus >= 2) {
1423 		ml_cpu_info_t cpu_info;
1424 
1425 		/*
1426 		 * Force the allocation alignment to a cacheline,
1427 		 * because the `vpl` struct has a lock and will be taken
1428 		 * cross CPU so we want to isolate the rest of the per-CPU
1429 		 * data to avoid false sharing due to this lock being taken.
1430 		 */
1431 
1432 		ml_cpu_get_info(&cpu_info);
1433 
1434 		t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
1435 		    cpu_info.cache_line_size - 1);
1436 
1437 		zpercpu_foreach(lq, t_local_q) {
1438 			VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
1439 			vm_page_queue_init(&lq->vpl_queue);
1440 		}
1441 
1442 		/* make the initialization visible to all cores */
1443 		os_atomic_store(&vm_page_local_q, t_local_q, release);
1444 	}
1445 }
1446 
1447 /*
1448  * vm_init_before_launchd
1449  *
1450  * This should be called right before launchd is loaded.
1451  */
1452 void
1453 vm_init_before_launchd()
1454 {
1455 	vm_page_lockspin_queues();
1456 	vm_page_wire_count_on_boot = vm_page_wire_count;
1457 	vm_page_unlock_queues();
1458 }
1459 
1460 
1461 /*
1462  *	vm_page_bootstrap:
1463  *
1464  *	Initializes the resident memory module.
1465  *
1466  *	Allocates memory for the page cells, and
1467  *	for the object/offset-to-page hash table headers.
1468  *	Each page cell is initialized and placed on the free list.
1469  *	Returns the range of available kernel virtual memory.
1470  */
1471 __startup_func
1472 void
1473 vm_page_bootstrap(
1474 	vm_offset_t             *startp,
1475 	vm_offset_t             *endp)
1476 {
1477 	unsigned int            i;
1478 	unsigned int            log1;
1479 	unsigned int            log2;
1480 	unsigned int            size;
1481 
1482 	/*
1483 	 *	Initialize the page queues.
1484 	 */
1485 
1486 	lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
1487 	lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
1488 	lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
1489 
1490 	for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
1491 		int group;
1492 
1493 		purgeable_queues[i].token_q_head = 0;
1494 		purgeable_queues[i].token_q_tail = 0;
1495 		for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
1496 			queue_init(&purgeable_queues[i].objq[group]);
1497 		}
1498 
1499 		purgeable_queues[i].type = i;
1500 		purgeable_queues[i].new_pages = 0;
1501 #if MACH_ASSERT
1502 		purgeable_queues[i].debug_count_tokens = 0;
1503 		purgeable_queues[i].debug_count_objects = 0;
1504 #endif
1505 	}
1506 	;
1507 	purgeable_nonvolatile_count = 0;
1508 	queue_init(&purgeable_nonvolatile_queue);
1509 
1510 	for (i = 0; i < MAX_COLORS; i++) {
1511 		vm_page_queue_init(&vm_page_queue_free[i].qhead);
1512 	}
1513 
1514 	vm_page_queue_init(&vm_lopage_queue_free);
1515 	vm_page_queue_init(&vm_page_queue_active);
1516 	vm_page_queue_init(&vm_page_queue_inactive);
1517 #if CONFIG_SECLUDED_MEMORY
1518 	vm_page_queue_init(&vm_page_queue_secluded);
1519 #endif /* CONFIG_SECLUDED_MEMORY */
1520 	vm_page_queue_init(&vm_page_queue_cleaned);
1521 	vm_page_queue_init(&vm_page_queue_throttled);
1522 	vm_page_queue_init(&vm_page_queue_anonymous);
1523 	queue_init(&vm_objects_wired);
1524 
1525 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
1526 		vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
1527 
1528 		vm_page_queue_speculative[i].age_ts.tv_sec = 0;
1529 		vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
1530 	}
1531 
1532 	vm_page_queue_init(&vm_page_queue_donate);
1533 	vm_page_queue_init(&vm_page_queue_background);
1534 
1535 	vm_page_background_count = 0;
1536 	vm_page_background_internal_count = 0;
1537 	vm_page_background_external_count = 0;
1538 	vm_page_background_promoted_count = 0;
1539 
1540 	vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
1541 
1542 	if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
1543 		vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
1544 	}
1545 
1546 #if    defined(__LP64__)
1547 	vm_page_background_mode = VM_PAGE_BG_ENABLED;
1548 	vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
1549 #else
1550 	vm_page_background_mode = VM_PAGE_BG_DISABLED;
1551 	vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1552 #endif
1553 	vm_page_background_exclude_external = 0;
1554 
1555 	PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1556 	PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1557 	PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1558 
1559 	if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1560 		vm_page_background_mode = VM_PAGE_BG_DISABLED;
1561 	}
1562 
1563 	PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1564 	if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1565 		vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1566 	}
1567 
1568 	vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1569 	vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1570 	vm_page_donate_target = vm_page_donate_target_high;
1571 	vm_page_donate_count = 0;
1572 
1573 	vm_page_free_wanted = 0;
1574 	vm_page_free_wanted_privileged = 0;
1575 #if CONFIG_SECLUDED_MEMORY
1576 	vm_page_free_wanted_secluded = 0;
1577 #endif /* CONFIG_SECLUDED_MEMORY */
1578 
1579 #if defined (__x86_64__)
1580 	/* this must be called before vm_page_set_colors() */
1581 	vm_page_setup_clump();
1582 #endif
1583 
1584 	vm_page_set_colors();
1585 
1586 	for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1587 		vm_allocation_sites_static[t].refcount = 2;
1588 		vm_allocation_sites_static[t].tag = t;
1589 		vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1590 	}
1591 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1592 	vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1593 	vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1594 
1595 	/*
1596 	 *	Steal memory for the map and zone subsystems.
1597 	 *
1598 	 *	make sure initialize_ram_ranges() has run before we steal pages for the first time on arm
1599 	 */
1600 	(void)pmap_free_pages();
1601 
1602 	kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1603 
1604 	/*
1605 	 *	Allocate (and initialize) the virtual-to-physical
1606 	 *	table hash buckets.
1607 	 *
1608 	 *	The number of buckets should be a power of two to
1609 	 *	get a good hash function.  The following computation
1610 	 *	chooses the first power of two that is greater
1611 	 *	than the number of physical pages in the system.
1612 	 */
1613 
1614 	if (vm_page_bucket_count == 0) {
1615 		unsigned int npages = pmap_free_pages();
1616 
1617 		vm_page_bucket_count = 1;
1618 		while (vm_page_bucket_count < npages) {
1619 			vm_page_bucket_count <<= 1;
1620 		}
1621 	}
1622 	vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1623 
1624 	vm_page_hash_mask = vm_page_bucket_count - 1;
1625 
1626 	/*
1627 	 *	Calculate object shift value for hashing algorithm:
1628 	 *		O = log2(sizeof(struct vm_object))
1629 	 *		B = log2(vm_page_bucket_count)
1630 	 *	        hash shifts the object left by
1631 	 *		B/2 - O
1632 	 */
1633 	size = vm_page_bucket_count;
1634 	for (log1 = 0; size > 1; log1++) {
1635 		size /= 2;
1636 	}
1637 	size = sizeof(struct vm_object);
1638 	for (log2 = 0; size > 1; log2++) {
1639 		size /= 2;
1640 	}
1641 	vm_page_hash_shift = log1 / 2 - log2 + 1;
1642 
1643 	vm_page_bucket_hash = 1 << ((log1 + 1) >> 1);           /* Get (ceiling of sqrt of table size) */
1644 	vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2);          /* Get (ceiling of quadroot of table size) */
1645 	vm_page_bucket_hash |= 1;                                                       /* Set bit and add 1 - always must be 1 to ensure unique series */
1646 
1647 	if (vm_page_hash_mask & vm_page_bucket_count) {
1648 		printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1649 	}
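	/*
	 * Example: with roughly two million managed pages the loop above picks
	 * vm_page_bucket_count = 2^21 (the first power of two above the page
	 * count), so vm_page_hash_mask = 0x1fffff and log1 = 21.  The warning
	 * never fires for a power-of-two count, since its mask shares no bits
	 * with it.
	 */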
1650 
1651 #if VM_PAGE_BUCKETS_CHECK
1652 #if VM_PAGE_FAKE_BUCKETS
1653 	/*
1654 	 * Allocate a decoy set of page buckets, to detect
1655 	 * any stomping there.
1656 	 */
1657 	vm_page_fake_buckets = (vm_page_bucket_t *)
1658 	    pmap_steal_memory(vm_page_bucket_count *
1659 	    sizeof(vm_page_bucket_t), 0);
1660 	vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1661 	vm_page_fake_buckets_end =
1662 	    vm_map_round_page((vm_page_fake_buckets_start +
1663 	    (vm_page_bucket_count *
1664 	    sizeof(vm_page_bucket_t))),
1665 	    PAGE_MASK);
1666 	char *cp;
1667 	for (cp = (char *)vm_page_fake_buckets_start;
1668 	    cp < (char *)vm_page_fake_buckets_end;
1669 	    cp++) {
1670 		*cp = 0x5a;
1671 	}
1672 #endif /* VM_PAGE_FAKE_BUCKETS */
1673 #endif /* VM_PAGE_BUCKETS_CHECK */
1674 
1675 	kernel_debug_string_early("vm_page_buckets");
1676 	vm_page_buckets = (vm_page_bucket_t *)
1677 	    pmap_steal_memory(vm_page_bucket_count *
1678 	    sizeof(vm_page_bucket_t), 0);
1679 
1680 	kernel_debug_string_early("vm_page_bucket_locks");
1681 	vm_page_bucket_locks = (lck_spin_t *)
1682 	    pmap_steal_memory(vm_page_bucket_lock_count *
1683 	    sizeof(lck_spin_t), 0);
1684 
1685 	for (i = 0; i < vm_page_bucket_count; i++) {
1686 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
1687 
1688 		bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1689 #if     MACH_PAGE_HASH_STATS
1690 		bucket->cur_count = 0;
1691 		bucket->hi_count = 0;
1692 #endif /* MACH_PAGE_HASH_STATS */
1693 	}
1694 
1695 	for (i = 0; i < vm_page_bucket_lock_count; i++) {
1696 		lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1697 	}
1698 
1699 	vm_tag_init();
1700 
1701 #if VM_PAGE_BUCKETS_CHECK
1702 	vm_page_buckets_check_ready = TRUE;
1703 #endif /* VM_PAGE_BUCKETS_CHECK */
1704 
1705 	/*
1706 	 *	Machine-dependent code allocates the resident page table.
1707 	 *	It uses vm_page_init to initialize the page frames.
1708 	 *	The code also returns to us the virtual space available
1709 	 *	to the kernel.  We don't trust the pmap module
1710 	 *	to get the alignment right.
1711 	 */
1712 
1713 	kernel_debug_string_early("pmap_startup");
1714 	pmap_startup(&virtual_space_start, &virtual_space_end);
1715 	virtual_space_start = round_page(virtual_space_start);
1716 	virtual_space_end = trunc_page(virtual_space_end);
1717 
1718 	*startp = virtual_space_start;
1719 	*endp = virtual_space_end;
1720 
1721 	/*
1722 	 *	Compute the initial "wire" count.
1723 	 *	Up until now, the pages which have been set aside are not under
1724 	 *	the VM system's control, so although they aren't explicitly
1725 	 *	wired, they nonetheless can't be moved. At this moment,
1726 	 *	all VM managed pages are "free", courtesy of pmap_startup.
1727 	 */
1728 	assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1729 	vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1730 	    vm_page_free_count - vm_lopage_free_count;
1731 #if CONFIG_SECLUDED_MEMORY
1732 	vm_page_wire_count -= vm_page_secluded_count;
1733 #endif
1734 	vm_page_wire_count_initial = vm_page_wire_count;
1735 
1736 	/* capture this for later use */
1737 	booter_size = ml_get_booter_memory_size();
1738 
1739 	printf("vm_page_bootstrap: %d free pages, %d wired pages"
1740 #if XNU_VM_HAS_DELAYED_PAGES
1741 	    ", (up to %d of which are delayed free)"
1742 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1743 	    "%c",
1744 	    vm_page_free_count,
1745 	    vm_page_wire_count,
1746 #if XNU_VM_HAS_DELAYED_PAGES
1747 	    vm_delayed_count,
1748 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1749 	    '\n');
1750 
1751 	kernel_debug_string_early("vm_page_bootstrap complete");
1752 }
1753 
1754 #ifndef MACHINE_PAGES
1755 /*
1756  * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1757  * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1758  * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1759  */
1760 static void *
1761 pmap_steal_memory_internal(
1762 	vm_size_t size,
1763 	vm_size_t alignment,
1764 	boolean_t might_free,
1765 	unsigned int flags,
1766 	pmap_mapping_type_t mapping_type)
1767 {
1768 	kern_return_t kr;
1769 	vm_offset_t addr;
1770 	vm_offset_t map_addr;
1771 	ppnum_t phys_page;
1772 	unsigned int pmap_flags;
1773 
1774 	/*
1775 	 * Size needs to be aligned to word size.
1776 	 */
1777 	size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1778 
1779 	/*
1780 	 * Alignment defaults to word size if not specified.
1781 	 */
1782 	if (alignment == 0) {
1783 		alignment = sizeof(void*);
1784 	}
1785 
1786 	/*
1787 	 * Alignment must be no greater than a page and must be a power of two.
1788 	 */
1789 	assert(alignment <= PAGE_SIZE);
1790 	assert((alignment & (alignment - 1)) == 0);
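
	/*
	 * Illustrative values (editorial sketch): on an LP64 kernel a
	 * request of size 13 is rounded up to 16 above, and alignment == 0
	 * defaults to the 8-byte word size.  An alignment of 64 passes the
	 * asserts (power of two, <= PAGE_SIZE); 48 would not.
	 */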
1791 
1792 	/*
1793 	 * On the first call, get the initial values for virtual address space
1794 	 * and page align them.
1795 	 */
1796 	if (virtual_space_start == virtual_space_end) {
1797 		pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1798 		virtual_space_start = round_page(virtual_space_start);
1799 		virtual_space_end = trunc_page(virtual_space_end);
1800 
1801 #if defined(__x86_64__)
1802 		/*
1803 		 * Release remaining unused section of preallocated KVA and the 4K page tables
1804 		 * that map it. This makes the VA available for large page mappings.
1805 		 */
1806 		Idle_PTs_release(virtual_space_start, virtual_space_end);
1807 #endif
1808 	}
1809 
1810 	/*
1811 	 * Allocate the virtual space for this request. On x86, we'll align to a large page
1812 	 * address if the size is big enough to back with at least 1 large page.
1813 	 */
1814 #if defined(__x86_64__)
1815 	if (size >= I386_LPGBYTES) {
1816 		virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1817 	}
1818 #endif
1819 	virtual_space_start = (virtual_space_start + (alignment - 1)) & ~(alignment - 1);
1820 	addr = virtual_space_start;
1821 	virtual_space_start += size;
1822 
1823 	//kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size);	/* (TEST/DEBUG) */
1824 
1825 	/*
1826 	 * Allocate and map physical pages to back the new virtual space.
1827 	 */
1828 	map_addr = round_page(addr);
1829 	while (map_addr < addr + size) {
1830 #if defined(__x86_64__)
1831 		/*
1832 		 * Back with a large page if properly aligned on x86
1833 		 */
1834 		if ((map_addr & I386_LPGMASK) == 0 &&
1835 		    map_addr + I386_LPGBYTES <= addr + size &&
1836 		    pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1837 		    pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1838 			kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1839 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1840 			    VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE, mapping_type);
1841 
1842 			if (kr != KERN_SUCCESS) {
1843 				panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1844 				    (unsigned long)map_addr, phys_page);
1845 			}
1846 			map_addr += I386_LPGBYTES;
1847 			vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1848 			vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1849 			vm_page_kern_lpage_count++;
1850 			continue;
1851 		}
1852 #endif
1853 
1854 		if (!pmap_next_page_hi(&phys_page, might_free)) {
1855 			panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1856 		}
1857 
1858 #if defined(__x86_64__)
1859 		pmap_pre_expand(kernel_pmap, map_addr);
1860 #endif
1861 		pmap_flags = flags ? flags : VM_WIMG_USE_DEFAULT;
1862 
1863 
1864 		kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1865 		    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1866 		    pmap_flags, FALSE, mapping_type);
1867 
1868 		if (kr != KERN_SUCCESS) {
1869 			panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1870 			    (unsigned long)map_addr, phys_page);
1871 		}
1872 		map_addr += PAGE_SIZE;
1873 
1874 		/*
1875 		 * Account for newly stolen memory
1876 		 */
1877 		vm_page_wire_count++;
1878 		vm_page_stolen_count++;
1879 	}
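
	/*
	 * Editorial note (x86_64 only; figures illustrative): each large-page
	 * iteration above advances map_addr by I386_LPGBYTES (2 MiB) and adds
	 * I386_LPGBYTES >> PAGE_SHIFT = 512 pages (with 4 KiB pages) to the
	 * wired and stolen counts in one step, versus one page per iteration
	 * on the 4 KiB fallback path.
	 */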
1880 
1881 #if defined(__x86_64__)
1882 	/*
1883 	 * The call with might_free is currently the last use of pmap_steal_memory*().
1884 	 * Notify the pmap layer to record which high pages were allocated so far.
1885 	 */
1886 	if (might_free) {
1887 		pmap_hi_pages_done();
1888 	}
1889 #endif
1890 #if KASAN
1891 	kasan_notify_address(round_page(addr), size);
1892 #endif
1893 	return (void *) addr;
1894 }
1895 
1896 void *
1897 pmap_steal_memory(
1898 	vm_size_t size,
1899 	vm_size_t alignment)
1900 {
1901 	return pmap_steal_memory_internal(size, alignment, FALSE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1902 }
1903 
1904 void *
1905 pmap_steal_freeable_memory(
1906 	vm_size_t size)
1907 {
1908 	return pmap_steal_memory_internal(size, 0, TRUE, 0, PMAP_MAPPING_TYPE_RESTRICTED);
1909 }
1910 
1911 
1912 
1913 #if CONFIG_SECLUDED_MEMORY
1914 /* boot-args to control secluded memory */
1915 TUNABLE_DT(unsigned int, secluded_mem_mb, "/defaults", "kern.secluded_mem_mb", "secluded_mem_mb", 0, TUNABLE_DT_NONE);
1916 /* IOKit can use secluded memory */
1917 TUNABLE(bool, secluded_for_iokit, "secluded_for_iokit", true);
1918 /* apps can use secluded memory */
1919 TUNABLE(bool, secluded_for_apps, "secluded_for_apps", true);
1920 /* filecache can use secluded memory */
1921 TUNABLE(secluded_filecache_mode_t, secluded_for_filecache, "secluded_for_filecache", SECLUDED_FILECACHE_RDONLY);
1922 uint64_t secluded_shutoff_trigger = 0;
1923 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1924 #endif /* CONFIG_SECLUDED_MEMORY */
1925 
1926 
1927 #if defined(__arm64__)
1928 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1929 #endif
1930 
1931 void vm_page_release_startup(vm_page_t mem);
1932 void
1933 pmap_startup(
1934 	vm_offset_t     *startp,
1935 	vm_offset_t     *endp)
1936 {
1937 	unsigned int    npages;
1938 	ppnum_t         phys_page;
1939 	uint64_t        mem_sz;
1940 	uint64_t        start_ns;
1941 	uint64_t        now_ns;
1942 	uint32_t        divisor;
1943 #if XNU_VM_HAS_DELAYED_PAGES
1944 	uint_t          low_page_count = 0;
1945 #endif /* XNU_VM_HAS_DELAYED_PAGES */
1946 
1947 	/*
1948 	 * make sure we are aligned on a 64 byte boundary
1949 	 * for VM_PAGE_PACK_PTR (it clips off the low-order
1950 	 * 6 bits of the pointer)
1951 	 */
1952 	if (virtual_space_start != virtual_space_end) {
1953 		virtual_space_start = round_page(virtual_space_start);
1954 	}
1955 
1956 	/*
1957 	 * We calculate how many page frames we will have
1958 	 * and then allocate the page structures in one chunk.
1959 	 *
1960 	 * Note that the calculation here doesn't take into account
1961 	 * the memory needed to map what's being allocated, i.e. the page
1962 	 * table entries. So the actual number of pages we get will be
1963 	 * less than this. To do someday: include that in the computation.
1964 	 *
1965 	 * Also for ARM, we don't use the count of free_pages, but rather the
1966 	 * range from last page to first page (ignoring holes due to retired pages).
1967 	 */
1968 
1969 	/*
1970 	 * Initialize and release the page frames.
1971 	 */
1972 	kernel_debug_string_early("page_frame_init");
1973 	absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1974 	if (fillval) {
1975 		kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1976 	}
1977 
1978 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
1979 	mem_sz  = ptoa(pmap_free_pages_span());
1980 #else
1981 	mem_sz  = ptoa(pmap_free_pages());
1982 #endif
1983 	mem_sz += round_page(virtual_space_start) - virtual_space_start;   /* Account for any slop */
1984 	divisor = PAGE_SIZE + sizeof(struct vm_page);
1985 	npages  = (uint32_t)((mem_sz + divisor - 1) / divisor); /* scaled to include the vm_page_ts */
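
	/*
	 * Rough sizing example (editorial; figures purely illustrative): with
	 * 16 KiB pages and, say, an 80-byte struct vm_page, 8 GiB of free
	 * memory gives divisor = 16384 + 80 = 16464 and npages ~= 521.7k,
	 * i.e. slightly fewer than the naive 8 GiB / 16 KiB = 524288, leaving
	 * headroom for the vm_page structures themselves.
	 */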
1986 
1987 
1988 	vm_pages     = pmap_steal_freeable_memory(npages * sizeof(struct vm_page));
1989 	vm_pages_end = vm_page_get(npages);
1990 
1991 #if CONFIG_SECLUDED_MEMORY
1992 	/*
1993 	 * Figure out how much secluded memory to have before we start
1994 	 * releasing pages to the free lists.
1995 	 * The default, if specified nowhere else, is no secluded mem.
1996 	 */
1997 	vm_page_secluded_target = (unsigned int)atop_64(secluded_mem_mb * 1024ULL * 1024ULL);
1998 
1999 	/*
2000 	 * Allow a really large app to effectively use secluded memory until it exits.
2001 	 */
2002 	if (vm_page_secluded_target != 0) {
2003 		/*
2004 		 * Get an amount from boot-args, else use 1/2 of max_mem.
2005 		 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
2006 		 * used munch to induce jetsam thrashing of false idle daemons on N56.
2007 		 */
2008 		int secluded_shutoff_mb;
2009 		if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
2010 		    sizeof(secluded_shutoff_mb))) {
2011 			secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
2012 		} else {
2013 			secluded_shutoff_trigger = max_mem / 2;
2014 		}
2015 
2016 		/* ensure the headroom value is sensible and avoid underflows */
2017 		assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
2018 	}
2019 #endif /* CONFIG_SECLUDED_MEMORY */
2020 
2021 #if defined(__x86_64__)
2022 
2023 	/*
2024 	 * Decide how much memory we delay freeing at boot time.
2025 	 */
2026 	uint32_t delay_above_gb;
2027 	if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
2028 		delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
2029 	}
2030 
2031 	if (delay_above_gb == 0) {
2032 		delay_above_pnum = PPNUM_MAX;
2033 	} else {
2034 		delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
2035 	}
2036 
2037 	/* make sure we have sane breathing room: 1G above low memory */
2038 	if (delay_above_pnum <= max_valid_low_ppnum) {
2039 		delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
2040 	}
2041 
2042 	if (delay_above_pnum < PPNUM_MAX) {
2043 		printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
2044 	}
2045 
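
	/*
	 * Illustrative example (editorial): booting with delay_above_gb=4 on
	 * a 4 KiB-page system yields
	 * delay_above_pnum = 4 * (1 GiB / 4 KiB) = 0x100000, so init/free of
	 * page frames above the 4 GiB physical mark is deferred, subject to
	 * the 1 GiB breathing-room floor enforced just above.
	 */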
2046 #endif /* defined(__x86_64__) */
2047 
2048 
2049 	for (uint32_t i = 0; i < npages && pmap_next_page(&phys_page); i++) {
2050 #if XNU_VM_HAS_DELAYED_PAGES
2051 		if (phys_page < max_valid_low_ppnum) {
2052 			++low_page_count;
2053 		}
2054 
2055 		/* Have we reached a high enough page number to delay the rest? */
2056 		if (low_page_count > vm_lopage_free_limit &&
2057 		    phys_page > delay_above_pnum) {
2058 			vm_delayed_count = pmap_free_pages();
2059 			assert3u(vm_pages_count + vm_delayed_count, <=, npages);
2060 			break;
2061 		}
2062 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2063 
2064 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2065 		if (i == 0) {
2066 			vm_pages_first_pnum = phys_page;
2067 			patch_low_glo_vm_page_info(vm_pages, vm_pages_end,
2068 			    vm_pages_first_pnum);
2069 		}
2070 #else
2071 		/* The x86 clump freeing code requires increasing ppn's to work correctly */
2072 		if (i > 0) {
2073 			assert(phys_page > vm_page_get(i - 1)->vmp_phys_page);
2074 		}
2075 #endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2076 
2077 		++vm_pages_count;
2078 		vm_page_init(vm_page_get(i), phys_page);
2079 		if (fillval) {
2080 			fillPage(phys_page, fillval);
2081 		}
2082 		if (vm_himemory_mode) {
2083 			vm_page_release_startup(vm_page_get(i));
2084 		}
2085 	}
2086 
2087 	vm_page_pages = vm_pages_count; /* used to report to user space */
2088 
2089 	if (!vm_himemory_mode) {
2090 		for (uint32_t i = npages; i-- > 0;) {
2091 			/* skip retired pages */
2092 			if (!VMP_ERROR_GET(vm_page_get(i))) {
2093 				vm_page_release_startup(vm_page_get(i));
2094 			}
2095 		}
2096 	}
2097 
2098 	absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
2099 	printf("pmap_startup() init/release time: %lld microsec\n",
2100 	    (now_ns - start_ns) / NSEC_PER_USEC);
2101 #if XNU_VM_HAS_DELAYED_PAGES
2102 	printf("pmap_startup() delayed init/release of %d pages\n",
2103 	    vm_delayed_count);
2104 #endif /* XNU_VM_HAS_DELAYED_PAGES */
2105 
2106 	/*
2107 	 * Validate packing will work properly.  This needs to be done last
2108 	 * after vm_pages_count has been computed.
2109 	 */
2110 	if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
2111 		panic("pmap_startup(): too many pages to support vm_page packing");
2112 	}
2113 	if ((vm_page_t)VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(vm_pages)) != vm_pages) {
2114 		panic("VM_PAGE_PACK_PTR failed on vm_pages - %p", vm_pages);
2115 	}
2116 	if ((vm_page_t)VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(vm_page_get(vm_pages_count - 1))) !=
2117 	    vm_page_get(vm_pages_count - 1)) {
2118 		panic("VM_PAGE_PACK_PTR failed on vm_pages_end - %p",
2119 		    vm_page_get(vm_pages_count - 1));
2120 	}
2121 
2122 	VM_CHECK_MEMORYSTATUS;
2123 
2124 	/*
2125 	 * We have to re-align virtual_space_start,
2126 	 * because pmap_steal_memory has been using it.
2127 	 */
2128 	virtual_space_start = round_page(virtual_space_start);
2129 	*startp = virtual_space_start;
2130 	*endp = virtual_space_end;
2131 }
2132 #endif  /* MACHINE_PAGES */
2133 
2134 /*
2135  * Create the zone that represents the vm_pages[] array. Nothing ever allocates
2136  * or frees to this zone. It's just here for reporting purposes via the zprint command.
2137  * This needs to be done after all initially delayed pages are put on the free lists.
2138  */
2139 void
2140 vm_pages_array_finalize(void)
2141 {
2142 	(void)zone_create_ext("vm pages array", sizeof(struct vm_page),
2143 	    ZC_KASAN_NOREDZONE | ZC_KASAN_NOQUARANTINE, ZONE_ID_VM_PAGES, ^(zone_t z) {
2144 		uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
2145 
2146 		zone_set_exhaustible(z, 0, true);
2147 		/*
2148 		 * Reflect size and usage information for vm_pages[].
2149 		 */
2150 
2151 		z->z_elems_avail = (uint32_t)(vm_pages_end - vm_pages);
2152 		z->z_elems_free = z->z_elems_avail - vm_pages_count;
2153 		zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
2154 		vm_pages_count * sizeof(struct vm_page);
2155 		vm_page_array_zone_data_size = (uint64_t)vm_pages_end - (uint64_t)vm_pages;
2156 		vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
2157 		z->z_wired_cur += vm_page_zone_pages;
2158 		z->z_wired_hwm = z->z_wired_cur;
2159 		z->z_va_cur = z->z_wired_cur;
2160 		/* since zone accounts for these, take them out of stolen */
2161 		VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
2162 	});
2163 }
2164 
2165 /*
2166  * Create the vm_pages zone. This is used for the vm_page structures for the pages
2167  * that are scavenged from other boot-time usages by ml_static_mfree(). As such,
2168  * this needs to happen in early VM bootstrap.
2169  */
2170 
2171 __startup_func
2172 static void
2173 vm_page_module_init(void)
2174 {
2175 	vm_size_t vm_page_with_ppnum_size;
2176 
2177 	/*
2178 	 * Since the pointers to elements in this zone will be packed, they
2179 	 * must have appropriate size. Not strictly what sizeof() reports.
2180 	 * must have an appropriate size, which is not necessarily what sizeof() reports.
2181 	vm_page_with_ppnum_size =
2182 	    (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
2183 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
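
	/*
	 * Illustrative example (editorial; the struct size is hypothetical):
	 * with VM_PAGE_PACKED_PTR_ALIGNMENT == 64, a 72-byte
	 * struct vm_page_with_ppnum rounds up to 128 above, so every zone
	 * element stays 64-byte aligned and survives packed pointers that
	 * clip off the low-order 6 bits.
	 */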
2184 
2185 	vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
2186 	    ZC_ALIGNMENT_REQUIRED | ZC_VM,
2187 	    ZONE_ID_ANY, ^(zone_t z) {
2188 		/*
2189 		 * The number "10" is a small number that is larger than the number
2190 		 * of fictitious pages that any single caller will attempt to allocate
2191 		 * without blocking.
2192 		 *
2193 		 * The largest such number at the moment is kmem_alloc()
2194 		 * when 2 guard pages are asked for. 10 is simply a somewhat larger number,
2195 		 * taking into account the 50% hysteresis the zone allocator uses.
2196 		 *
2197 		 * Note: this works at all because the zone allocator
2198 		 *       doesn't ever allocate fictitious pages.
2199 		 */
2200 		zone_raise_reserve(z, 10);
2201 	});
2202 }
2203 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
2204 
2205 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2206 /*
2207  * Radix tree of pages within the [pmap_first_pnum, vm_pages_first_pnum) range,
2208  * in order to support page lookup by pnum (@see vm_page_find_canonical()),
2209  * which corresponds to pages returned to the VM via @c ml_static_mfree().
2210  *
2211  * Kernel vm pages are never freed, which means that this data structure
2212  * is insert only.
2213  *
2214  * Empirically we have about 4-5k such pages, typically in only a few rather dense
2215  * contiguous spans, inside a range of roughly 32k pnums.
2216  *
2217  * A radix tree works well with this distribution of keys, but also allows for
2218  * a straightforward lockless lookup path.
2219  */
2220 
2221 #define VM_PAGE_RADIX_FANOUT_SHIFT  8
2222 #define VM_PAGE_RADIX_FANOUT        (1u << VM_PAGE_RADIX_FANOUT_SHIFT)
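
/*
 * Editorial sketch of how an index splits across levels with an 8-bit
 * fanout (see vm_page_radix_key() below): for index 0x012345,
 *
 *	level 2 key = (0x012345 >> 16) & 0xff = 0x01
 *	level 1 key = (0x012345 >>  8) & 0xff = 0x23
 *	level 0 key = (0x012345 >>  0) & 0xff = 0x45
 *
 * so a lookup follows one slot per level from the root down to the leaf.
 */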
2223 
2224 typedef uint32_t vm_page_radix_ptr_t;
2225 
2226 typedef struct vm_page_radix_node {
2227 	vm_page_radix_ptr_t     vmpr_array[VM_PAGE_RADIX_FANOUT];
2228 } *vm_page_radix_node_t;
2229 
2230 static LCK_GRP_DECLARE(vm_pages_radix_lock_grp, "VM pages radix");
2231 static LCK_MTX_DECLARE(vm_pages_radix_lock, &vm_pages_radix_lock_grp);
2232 
2233 static SECURITY_READ_ONLY_LATE(uintptr_t) vm_pages_radix_root;
2234 static uint32_t vm_pages_radix_count;
2235 
2236 static vm_page_radix_node_t
2237 vm_page_radix_node_unpack(vm_page_radix_ptr_t ptr)
2238 {
2239 	return (vm_page_radix_node_t)VM_UNPACK_POINTER(ptr, VM_PAGE_PACKED_PTR);
2240 }
2241 
2242 static vm_page_radix_ptr_t
2243 vm_page_radix_node_pack(vm_page_radix_node_t node)
2244 {
2245 	vm_offset_t ptr = (vm_offset_t)node;
2246 
2247 	VM_ASSERT_POINTER_PACKABLE(ptr, VM_PAGE_PACKED_PTR);
2248 	return (vm_page_radix_ptr_t)VM_PACK_POINTER(ptr, VM_PAGE_PACKED_PTR);
2249 }
2250 
2251 static uint32_t
2252 vm_page_radix_key(uint32_t level, uint32_t index)
2253 {
2254 	uint32_t key = index >> (VM_PAGE_RADIX_FANOUT_SHIFT * level);
2255 
2256 	return key & (VM_PAGE_RADIX_FANOUT - 1);
2257 }
2258 
2259 static vm_page_radix_ptr_t *
2260 vm_page_radix_slot(vm_page_radix_node_t node, uint32_t level, uint32_t index)
2261 {
2262 	return node->vmpr_array + vm_page_radix_key(level, index);
2263 }
2264 
2265 __startup_func
2266 __attribute__((noinline))
2267 static vm_page_radix_node_t
2268 vm_pages_radix_init_root(uint32_t *levelp)
2269 {
2270 	uint32_t max_index = vm_pages_first_pnum - pmap_first_pnum - 1;
2271 	vm_page_radix_node_t root;
2272 	uint32_t level;
2273 	vm_size_t size;
2274 
2275 	/*
2276 	 * Init a top-level node right away, to cover any index within
2277 	 * [0, vm_pages_first_pnum - pmap_first_pnum)
2278 	 */
2279 	level = (fls(max_index | 1) - 1) / VM_PAGE_RADIX_FANOUT_SHIFT;
2280 	size  = (vm_page_radix_key(level, max_index) + 1) *
2281 	    sizeof(vm_page_radix_ptr_t);
2282 
2283 	root  = zalloc_permanent(size, ZALIGN_64);
2284 
2285 	/*
2286 	 * Pack the level into the root pointer low bits,
2287 	 * so that pointer and level can be read atomically.
2288 	 *
2289 	 * See vm_pages_radix_load_root().
2290 	 */
2291 	os_atomic_store(&vm_pages_radix_root, (uintptr_t)root | level, release);
2292 
2293 	*levelp = level;
2294 	return root;
2295 }
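
/*
 * Editorial example (range size illustrative): if the pnum span below
 * vm_pages_first_pnum covered roughly 32k page numbers, max_index would be
 * about 0x7fff, fls(0x7fff) = 15, so level = (15 - 1) / 8 = 1: a two-level
 * tree whose root needs vm_page_radix_key(1, 0x7fff) + 1 = 0x80 slots,
 * i.e. a 512-byte root node.
 */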
2296 
2297 static vm_page_radix_node_t
2298 vm_pages_radix_node_alloc(vm_page_radix_ptr_t *slot)
2299 {
2300 	vm_page_radix_node_t node;
2301 
2302 	node = zalloc_permanent(sizeof(struct vm_page_radix_node),
2303 	    VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
2304 	os_atomic_store(slot, vm_page_radix_node_pack(node), release);
2305 	return node;
2306 }
2307 
2308 static vm_page_radix_node_t
2309 vm_pages_radix_load_root(uint32_t *level)
2310 {
2311 	const uintptr_t VM_PAGE_RADIX_LEVEL_MASK = 0x7ul;
2312 
2313 	uintptr_t root = os_atomic_load(&vm_pages_radix_root, dependency);
2314 
2315 	*level = root & VM_PAGE_RADIX_LEVEL_MASK;
2316 	root &= ~VM_PAGE_RADIX_LEVEL_MASK;
2317 	return (vm_page_radix_node_t)root;
2318 }
2319 
2320 #if XNU_HANDLE_ECC || DEBUG || DEVELOPMENT
2321 
2322 static vm_page_t
2323 vm_pages_radix_next(uint32_t *cursor)
2324 {
2325 	const uint32_t max_index = vm_pages_first_pnum - pmap_first_pnum;
2326 	vm_page_radix_node_t node;
2327 	uint32_t level, index;
2328 
2329 	index  = *cursor;
2330 	node   = vm_pages_radix_load_root(&level);
2331 
2332 	while (index < max_index) {
2333 		vm_page_radix_ptr_t *slot = vm_page_radix_slot(node, level, index);
2334 		vm_page_radix_ptr_t  ptr  = os_atomic_load(slot, dependency);
2335 
2336 		if (ptr == 0) {
2337 			uint32_t stride = 1 << (VM_PAGE_RADIX_FANOUT_SHIFT * level);
2338 
2339 			index = (index + stride) & -stride;
2340 			if (vm_page_radix_key(level, index) == 0) {
2341 				/* restart lookup at the top */
2342 				node = vm_pages_radix_load_root(&level);
2343 			}
2344 		} else if (level > 0) {
2345 			node   = vm_page_radix_node_unpack(ptr);
2346 			level -= 1;
2347 		} else {
2348 			*cursor = index + 1;
2349 			return (vm_page_t)VM_PAGE_UNPACK_PTR(ptr);
2350 		}
2351 	}
2352 
2353 	return VM_PAGE_NULL;
2354 }
2355 
2356 #define vm_pages_radix_for_each(it) \
2357 	for (uint32_t __index = 0; ((it) = vm_pages_radix_next(&__index)); )
2358 
2359 #if DEBUG || DEVELOPMENT
2360 
2361 static int
2362 vm_page_radix_verify_test(int64_t in __unused, int64_t *out)
2363 {
2364 	uint32_t count = 0;
2365 	vm_page_t mem;
2366 
2367 	lck_mtx_lock(&vm_pages_radix_lock);
2368 
2369 	vm_pages_radix_for_each(mem) {
2370 		count++;
2371 		assert(mem == vm_page_find_canonical(VM_PAGE_GET_PHYS_PAGE(mem)));
2372 	}
2373 
2374 	assert(count == vm_pages_radix_count);
2375 
2376 	lck_mtx_unlock(&vm_pages_radix_lock);
2377 
2378 	*out = 1;
2379 	return 0;
2380 }
2381 SYSCTL_TEST_REGISTER(vm_page_radix_verify, vm_page_radix_verify_test);
2382 
2383 #endif /* DEBUG || DEVELOPMENT */
2384 #endif /* XNU_HANDLE_ECC || DEBUG || DEVELOPMENT */
2385 
2386 __attribute__((noinline))
2387 static void
2388 vm_pages_radix_insert(ppnum_t pnum, vm_page_t page)
2389 {
2390 	vm_page_radix_ptr_t *slot;
2391 	vm_page_radix_node_t node;
2392 	uint32_t level, index;
2393 
2394 	assert(!vm_page_in_array(page));
2395 	index = pnum - pmap_first_pnum;
2396 
2397 	lck_mtx_lock(&vm_pages_radix_lock);
2398 
2399 	node = vm_pages_radix_load_root(&level);
2400 	if (node == NULL) {
2401 		node = vm_pages_radix_init_root(&level);
2402 	}
2403 
2404 	for (; level > 0; level--) {
2405 		slot = vm_page_radix_slot(node, level, index);
2406 		if (*slot == 0) {
2407 			node = vm_pages_radix_node_alloc(slot);
2408 		} else {
2409 			node = vm_page_radix_node_unpack(*slot);
2410 		}
2411 	}
2412 
2413 	slot = vm_page_radix_slot(node, 0, index);
2414 	assert(*slot == 0);
2415 	os_atomic_store(slot, VM_PAGE_PACK_PTR(page), release);
2416 	vm_pages_radix_count++;
2417 
2418 	lck_mtx_unlock(&vm_pages_radix_lock);
2419 }
2420 
2421 __abortlike
2422 static void
2423 vm_page_for_ppnum_panic(ppnum_t pnum)
2424 {
2425 	if (pnum < pmap_first_pnum) {
2426 		panic("physical page is before the start of DRAM: %#x < %#x",
2427 		    pnum, pmap_first_pnum);
2428 	}
2429 	panic("physical page is beyond the end of managed DRAM: %#x >= %#x",
2430 	    pnum, vm_pages_first_pnum + vm_pages_count);
2431 }
2432 
2433 vm_page_t
2434 vm_page_find_canonical(ppnum_t pnum)
2435 {
2436 	vm_page_radix_ptr_t *slot;
2437 	vm_page_radix_node_t node;
2438 	vm_page_radix_ptr_t  ptr;
2439 	uint32_t level, index;
2440 
2441 	if (pnum < pmap_first_pnum) {
2442 		vm_page_for_ppnum_panic(pnum);
2443 	}
2444 
2445 	if (pnum >= vm_pages_first_pnum + vm_pages_count) {
2446 		/*
2447 		 * We could receive requests for pages which are beyond xnu's managed space (e.g. ECC errors).
2448 		 * These need to be handled gracefully, so we return VM_PAGE_NULL here.
2449 		 */
2450 		return VM_PAGE_NULL;
2451 	}
2452 
2453 	if (__probable(pnum >= vm_pages_first_pnum)) {
2454 		return vm_page_get(pnum - vm_pages_first_pnum);
2455 	}
2456 
2457 	index = pnum - pmap_first_pnum;
2458 	node  = vm_pages_radix_load_root(&level);
2459 
2460 	for (; node && level > 0; level--) {
2461 		slot = vm_page_radix_slot(node, level, index);
2462 		ptr  = os_atomic_load(slot, dependency);
2463 		node = vm_page_radix_node_unpack(ptr);
2464 	}
2465 
2466 	if (__probable(node)) {
2467 		slot = vm_page_radix_slot(node, 0, index);
2468 		ptr  = os_atomic_load(slot, dependency);
2469 		return (vm_page_t)VM_PAGE_UNPACK_PTR(ptr);
2470 	}
2471 
2472 	return VM_PAGE_NULL;
2473 }
2474 
2475 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2476 
2477 /*!
2478  * @function vm_page_create()
2479  *
2480  * @brief
2481  * Common helper for all vm_page_create* functions.
2482  */
2483 static vm_page_t
2484 vm_page_create(ppnum_t phys_page, bool canonical, zalloc_flags_t flags)
2485 {
2486 	vm_page_t m;
2487 
2488 	m = zalloc_flags(vm_page_zone, flags);
2489 	if (m) {
2490 		vm_page_init(m, phys_page);
2491 		if (phys_page == vm_page_guard_addr) {
2492 			counter_inc(&vm_guard_count);
2493 		}
2494 	}
2495 	if (canonical) {
2496 		assert((flags & (Z_NOWAIT | Z_NOPAGEWAIT)) == 0);
2497 		m->vmp_canonical = true;
2498 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
2499 		vm_pages_radix_insert(phys_page, m);
2500 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
2501 	}
2502 	return m;
2503 }
2504 
2505 /*
2506  *	Routine:	vm_page_create_canonical
2507  *	Purpose:
2508  *		After the VM system is up, machine-dependent code
2509  *		may stumble across more physical memory.  For example,
2510  *		memory that it was reserving for a frame buffer.
2511  *		vm_page_create_canonical turns this memory into available pages.
2512  */
2513 
2514 void
2515 vm_page_create_canonical(ppnum_t phys_page)
2516 {
2517 	vm_page_t       m;
2518 
2519 	m = vm_page_create(phys_page, true, Z_WAITOK);
2520 
2521 	vm_free_page_lock();
2522 	vm_page_pages++;
2523 	vm_free_page_unlock();
2524 	vm_page_release(m, FALSE);
2525 }
2526 
2527 
2528 /*
2529  *	vm_page_hash:
2530  *
2531  *	Distributes the object/offset key pair among hash buckets.
2532  *
2533  *	NOTE:	The bucket count must be a power of 2
2534  */
2535 #define vm_page_hash(object, offset) (\
2536 	( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
2537 	 & vm_page_hash_mask)
2538 
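/*
 * Editorial sketch of the lookup/insert pattern built on this hash; this is
 * the same sequence the routines below use:
 *
 *	int               hash_id      = vm_page_hash(object, offset);
 *	vm_page_bucket_t  *bucket      = &vm_page_buckets[hash_id];
 *	lck_spin_t        *bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
 *
 * Only the page number of the offset (atop_64(offset)) participates, so
 * pages of one object at different offsets scatter across buckets, and the
 * power-of-two bucket count keeps the final mask cheap.
 */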
2539 
2540 /*
2541  *	vm_page_insert:		[ internal use only ]
2542  *
2543  *	Inserts the given mem entry into the object/object-page
2544  *	table and object list.
2545  *
2546  *	The object must be locked.
2547  */
2548 void
2549 vm_page_insert(
2550 	vm_page_t               mem,
2551 	vm_object_t             object,
2552 	vm_object_offset_t      offset)
2553 {
2554 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
2555 }
2556 
2557 void
2558 vm_page_insert_wired(
2559 	vm_page_t               mem,
2560 	vm_object_t             object,
2561 	vm_object_offset_t      offset,
2562 	vm_tag_t                tag)
2563 {
2564 	vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
2565 }
2566 
2567 void
2568 vm_page_insert_internal(
2569 	vm_page_t               mem,
2570 	vm_object_t             object,
2571 	vm_object_offset_t      offset,
2572 	vm_tag_t                tag,
2573 	boolean_t               queues_lock_held,
2574 	boolean_t               insert_in_hash,
2575 	boolean_t               batch_pmap_op,
2576 	boolean_t               batch_accounting,
2577 	uint64_t                *delayed_ledger_update)
2578 {
2579 	vm_page_bucket_t        *bucket;
2580 	lck_spin_t              *bucket_lock;
2581 	int                     hash_id;
2582 	task_t                  owner;
2583 	int                     ledger_idx_volatile;
2584 	int                     ledger_idx_nonvolatile;
2585 	int                     ledger_idx_volatile_compressed;
2586 	int                     ledger_idx_nonvolatile_compressed;
2587 	int                     ledger_idx_composite;
2588 	int                     ledger_idx_external_wired;
2589 	boolean_t               do_footprint;
2590 
2591 #if 0
2592 	/*
2593 	 * we may not hold the page queue lock
2594 	 * so this check isn't safe to make
2595 	 */
2596 	VM_PAGE_CHECK(mem);
2597 #endif
2598 
2599 	assertf(page_aligned(offset), "0x%llx\n", offset);
2600 
2601 	assert(!VM_PAGE_WIRED(mem) || !vm_page_is_canonical(mem) ||
2602 	    (tag != VM_KERN_MEMORY_NONE));
2603 
2604 	vm_object_lock_assert_exclusive(object);
2605 	LCK_MTX_ASSERT(&vm_page_queue_lock,
2606 	    queues_lock_held ? LCK_MTX_ASSERT_OWNED
2607 	    : LCK_MTX_ASSERT_NOTOWNED);
2608 
2609 	if (queues_lock_held == FALSE) {
2610 		assert(!VM_PAGE_PAGEABLE(mem));
2611 	}
2612 
2613 	if (insert_in_hash == TRUE) {
2614 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2615 		if (mem->vmp_tabled || mem->vmp_object) {
2616 			panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
2617 			    "already in (obj=%p,off=0x%llx)",
2618 			    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2619 		}
2620 #endif
2621 		if (object->internal && (offset >= object->vo_size)) {
2622 			panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
2623 			    mem, object, offset, object->vo_size);
2624 		}
2625 
2626 		assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
2627 
2628 		/*
2629 		 *	Record the object/offset pair in this page
2630 		 */
2631 
2632 		mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2633 		mem->vmp_offset = offset;
2634 
2635 #if CONFIG_SECLUDED_MEMORY
2636 		if (object->eligible_for_secluded) {
2637 			vm_page_secluded.eligible_for_secluded++;
2638 		}
2639 #endif /* CONFIG_SECLUDED_MEMORY */
2640 
2641 		/*
2642 		 *	Insert it into the object/offset hash table
2643 		 */
2644 		hash_id = vm_page_hash(object, offset);
2645 		bucket = &vm_page_buckets[hash_id];
2646 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2647 
2648 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2649 
2650 		mem->vmp_next_m = bucket->page_list;
2651 		bucket->page_list = VM_PAGE_PACK_PTR(mem);
2652 		assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
2653 
2654 #if     MACH_PAGE_HASH_STATS
2655 		if (++bucket->cur_count > bucket->hi_count) {
2656 			bucket->hi_count = bucket->cur_count;
2657 		}
2658 #endif /* MACH_PAGE_HASH_STATS */
2659 		mem->vmp_hashed = TRUE;
2660 		lck_spin_unlock(bucket_lock);
2661 	}
2662 
2663 	{
2664 		unsigned int    cache_attr;
2665 
2666 		cache_attr = object->wimg_bits & VM_WIMG_MASK;
2667 
2668 
2669 		if (cache_attr != VM_WIMG_USE_DEFAULT) {
2670 			PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
2671 		}
2672 
2673 	}
2674 
2675 	/*
2676 	 *	Now link into the object's list of backed pages.
2677 	 */
2678 	vm_page_queue_enter(&object->memq, mem, vmp_listq);
2679 	object->memq_hint = mem;
2680 	mem->vmp_tabled = TRUE;
2681 
2682 	/*
2683 	 *	Show that the object has one more resident page.
2684 	 */
2685 
2686 	object->resident_page_count++;
2687 	if (VM_PAGE_WIRED(mem)) {
2688 		assert(mem->vmp_wire_count > 0);
2689 		VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
2690 		VM_OBJECT_WIRED_PAGE_ADD(object, mem);
2691 		VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
2692 	}
2693 	assert(object->resident_page_count >= object->wired_page_count);
2694 
2695 #if DEVELOPMENT || DEBUG
2696 	if (object->object_is_shared_cache &&
2697 	    object->pager != NULL &&
2698 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
2699 		int new, old;
2700 		assert(!object->internal);
2701 		new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
2702 		do {
2703 			old = shared_region_pagers_resident_peak;
2704 		} while (old < new &&
2705 		    !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
2706 	}
2707 #endif /* DEVELOPMENT || DEBUG */
2708 
2709 	if (batch_accounting == FALSE) {
2710 		if (object->internal) {
2711 			OSAddAtomic(1, &vm_page_internal_count);
2712 		} else {
2713 			OSAddAtomic(1, &vm_page_external_count);
2714 		}
2715 	}
2716 
2717 	/*
2718 	 * It wouldn't make sense to insert a "reusable" page in
2719 	 * an object (the page would have been marked "reusable" only
2720 	 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
2721 	 * in the object at that time).
2722 	 * But a page could be inserted in an "all_reusable" object, if
2723 	 * something faults it in (a vm_read() from another task or a
2724 	 * "use-after-free" issue in user space, for example).  It can
2725 	 * also happen if we're relocating a page from that object to
2726 	 * a different physical page during a physically-contiguous
2727 	 * allocation.
2728 	 */
2729 	assert(!mem->vmp_reusable);
2730 	if (object->all_reusable) {
2731 		OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
2732 	}
2733 
2734 	if (object->purgable == VM_PURGABLE_DENY &&
2735 	    !object->vo_ledger_tag) {
2736 		owner = TASK_NULL;
2737 	} else {
2738 		owner = VM_OBJECT_OWNER(object);
2739 		vm_object_ledger_tag_ledgers(object,
2740 		    &ledger_idx_volatile,
2741 		    &ledger_idx_nonvolatile,
2742 		    &ledger_idx_volatile_compressed,
2743 		    &ledger_idx_nonvolatile_compressed,
2744 		    &ledger_idx_composite,
2745 		    &ledger_idx_external_wired,
2746 		    &do_footprint);
2747 	}
2748 	if (owner &&
2749 	    object->internal &&
2750 	    (object->purgable == VM_PURGABLE_NONVOLATILE ||
2751 	    object->purgable == VM_PURGABLE_DENY ||
2752 	    VM_PAGE_WIRED(mem))) {
2753 		if (delayed_ledger_update) {
2754 			*delayed_ledger_update += PAGE_SIZE;
2755 		} else {
2756 			/* more non-volatile bytes */
2757 			ledger_credit(owner->ledger,
2758 			    ledger_idx_nonvolatile,
2759 			    PAGE_SIZE);
2760 			if (do_footprint) {
2761 				/* more footprint */
2762 				ledger_credit(owner->ledger,
2763 				    task_ledgers.phys_footprint,
2764 				    PAGE_SIZE);
2765 			} else if (ledger_idx_composite != -1) {
2766 				ledger_credit(owner->ledger,
2767 				    ledger_idx_composite,
2768 				    PAGE_SIZE);
2769 			}
2770 		}
2771 	} else if (owner &&
2772 	    object->internal &&
2773 	    (object->purgable == VM_PURGABLE_VOLATILE ||
2774 	    object->purgable == VM_PURGABLE_EMPTY)) {
2775 		assert(!VM_PAGE_WIRED(mem));
2776 		/* more volatile bytes */
2777 		ledger_credit(owner->ledger,
2778 		    ledger_idx_volatile,
2779 		    PAGE_SIZE);
2780 	}
2781 
2782 	if (object->purgable == VM_PURGABLE_VOLATILE) {
2783 		if (VM_PAGE_WIRED(mem)) {
2784 			OSAddAtomic(+1, &vm_page_purgeable_wired_count);
2785 		} else {
2786 			OSAddAtomic(+1, &vm_page_purgeable_count);
2787 		}
2788 	} else if (object->purgable == VM_PURGABLE_EMPTY &&
2789 	    mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2790 		/*
2791 		 * This page belongs to a purged VM object but hasn't
2792 		 * been purged (because it was "busy").
2793 		 * It's in the "throttled" queue and hence not
2794 		 * visible to vm_pageout_scan().  Move it to a pageable
2795 		 * queue, so that it can eventually be reclaimed, instead
2796 		 * of lingering in the "empty" object.
2797 		 */
2798 		if (queues_lock_held == FALSE) {
2799 			vm_page_lockspin_queues();
2800 		}
2801 		vm_page_deactivate(mem);
2802 		if (queues_lock_held == FALSE) {
2803 			vm_page_unlock_queues();
2804 		}
2805 	}
2806 
2807 #if VM_OBJECT_TRACKING_OP_MODIFIED
2808 	if (vm_object_tracking_btlog &&
2809 	    object->internal &&
2810 	    object->resident_page_count == 0 &&
2811 	    object->pager == NULL &&
2812 	    object->shadow != NULL &&
2813 	    object->shadow->vo_copy == object) {
2814 		btlog_record(vm_object_tracking_btlog, object,
2815 		    VM_OBJECT_TRACKING_OP_MODIFIED,
2816 		    btref_get(__builtin_frame_address(0), 0));
2817 	}
2818 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2819 }
2820 
2821 /*
2822  *	vm_page_replace:
2823  *
2824  *	Exactly like vm_page_insert, except that we first
2825  *	remove any existing page at the given offset in object.
2826  *
2827  *	The object must be locked.
2828  */
2829 void
2830 vm_page_replace(
2831 	vm_page_t               mem,
2832 	vm_object_t             object,
2833 	vm_object_offset_t      offset)
2834 {
2835 	vm_page_bucket_t *bucket;
2836 	vm_page_t        found_m = VM_PAGE_NULL;
2837 	lck_spin_t      *bucket_lock;
2838 	int             hash_id;
2839 
2840 #if 0
2841 	/*
2842 	 * we don't hold the page queue lock
2843 	 * so this check isn't safe to make
2844 	 */
2845 	VM_PAGE_CHECK(mem);
2846 #endif
2847 	vm_object_lock_assert_exclusive(object);
2848 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2849 	if (mem->vmp_tabled || mem->vmp_object) {
2850 		panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2851 		    "already in (obj=%p,off=0x%llx)",
2852 		    mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2853 	}
2854 #endif
2855 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2856 
2857 	assert(!VM_PAGE_PAGEABLE(mem));
2858 
2859 	/*
2860 	 *	Record the object/offset pair in this page
2861 	 */
2862 	mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2863 	mem->vmp_offset = offset;
2864 
2865 	/*
2866 	 *	Insert it into the object/offset hash table,
2867 	 *	replacing any page that might have been there.
2868 	 */
2869 
2870 	hash_id = vm_page_hash(object, offset);
2871 	bucket = &vm_page_buckets[hash_id];
2872 	bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2873 
2874 	lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2875 
2876 	if (bucket->page_list) {
2877 		vm_page_packed_t *mp = &bucket->page_list;
2878 		vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2879 
2880 		do {
2881 			/*
2882 			 * compare packed object pointers
2883 			 */
2884 			if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2885 				/*
2886 				 * Remove old page from hash list
2887 				 */
2888 				*mp = m->vmp_next_m;
2889 				m->vmp_hashed = FALSE;
2890 				m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2891 
2892 				found_m = m;
2893 				break;
2894 			}
2895 			mp = &m->vmp_next_m;
2896 		} while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2897 
2898 		mem->vmp_next_m = bucket->page_list;
2899 	} else {
2900 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2901 	}
2902 	/*
2903 	 * insert new page at head of hash list
2904 	 */
2905 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
2906 	mem->vmp_hashed = TRUE;
2907 
2908 	lck_spin_unlock(bucket_lock);
2909 
2910 	if (found_m) {
2911 		/*
2912 		 * there was already a page at the specified
2913 		 * offset for this object... remove it from
2914 		 * the object and free it back to the free list
2915 		 */
2916 		vm_page_free_unlocked(found_m, FALSE);
2917 	}
2918 	vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2919 }
2920 
2921 /*
2922  *	vm_page_remove:		[ internal use only ]
2923  *
2924  *	Removes the given mem entry from the object/offset-page
2925  *	table and the object page list.
2926  *
2927  *	The object must be locked.
2928  */
2929 
2930 void
2931 vm_page_remove(
2932 	vm_page_t       mem,
2933 	boolean_t       remove_from_hash)
2934 {
2935 	vm_page_bucket_t *bucket;
2936 	vm_page_t       this;
2937 	lck_spin_t      *bucket_lock;
2938 	int             hash_id;
2939 	task_t          owner;
2940 	vm_object_t     m_object;
2941 	int             ledger_idx_volatile;
2942 	int             ledger_idx_nonvolatile;
2943 	int             ledger_idx_volatile_compressed;
2944 	int             ledger_idx_nonvolatile_compressed;
2945 	int             ledger_idx_composite;
2946 	int             ledger_idx_external_wired;
2947 	int             do_footprint;
2948 
2949 	m_object = VM_PAGE_OBJECT(mem);
2950 
2951 	vm_object_lock_assert_exclusive(m_object);
2952 	assert(mem->vmp_tabled);
2953 	assert(!mem->vmp_cleaning);
2954 	assert(!mem->vmp_laundry);
2955 
2956 	if (VM_PAGE_PAGEABLE(mem)) {
2957 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2958 	}
2959 #if 0
2960 	/*
2961 	 * we don't hold the page queue lock
2962 	 * so this check isn't safe to make
2963 	 */
2964 	VM_PAGE_CHECK(mem);
2965 #endif
2966 	if (remove_from_hash == TRUE) {
2967 		/*
2968 		 *	Remove from the object/offset hash table
2969 		 */
2970 		hash_id = vm_page_hash(m_object, mem->vmp_offset);
2971 		bucket = &vm_page_buckets[hash_id];
2972 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2973 
2974 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2975 
2976 		if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2977 			/* optimize for common case */
2978 
2979 			bucket->page_list = mem->vmp_next_m;
2980 		} else {
2981 			vm_page_packed_t        *prev;
2982 
2983 			for (prev = &this->vmp_next_m;
2984 			    (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2985 			    prev = &this->vmp_next_m) {
2986 				continue;
2987 			}
2988 			*prev = this->vmp_next_m;
2989 		}
2990 #if     MACH_PAGE_HASH_STATS
2991 		bucket->cur_count--;
2992 #endif /* MACH_PAGE_HASH_STATS */
2993 		mem->vmp_hashed = FALSE;
2994 		this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2995 		lck_spin_unlock(bucket_lock);
2996 	}
2997 	/*
2998 	 *	Now remove from the object's list of backed pages.
2999 	 */
3000 
3001 	vm_page_remove_internal(mem);
3002 
3003 	/*
3004 	 *	And show that the object has one fewer resident
3005 	 *	page.
3006 	 */
3007 
3008 	assert(m_object->resident_page_count > 0);
3009 	m_object->resident_page_count--;
3010 
3011 #if DEVELOPMENT || DEBUG
3012 	if (m_object->object_is_shared_cache &&
3013 	    m_object->pager != NULL &&
3014 	    m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
3015 		assert(!m_object->internal);
3016 		OSAddAtomic(-1, &shared_region_pagers_resident_count);
3017 	}
3018 #endif /* DEVELOPMENT || DEBUG */
3019 
3020 	if (m_object->internal) {
3021 #if DEBUG
3022 		assert(vm_page_internal_count);
3023 #endif /* DEBUG */
3024 
3025 		OSAddAtomic(-1, &vm_page_internal_count);
3026 	} else {
3027 		assert(vm_page_external_count);
3028 		OSAddAtomic(-1, &vm_page_external_count);
3029 
3030 		if (mem->vmp_xpmapped) {
3031 			assert(vm_page_xpmapped_external_count);
3032 			OSAddAtomic(-1, &vm_page_xpmapped_external_count);
3033 		}
3034 	}
3035 	if (!m_object->internal &&
3036 	    m_object->cached_list.next &&
3037 	    m_object->cached_list.prev) {
3038 		if (m_object->resident_page_count == 0) {
3039 			vm_object_cache_remove(m_object);
3040 		}
3041 	}
3042 
3043 	if (VM_PAGE_WIRED(mem)) {
3044 		assert(mem->vmp_wire_count > 0);
3045 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
3046 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
3047 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
3048 	}
3049 	assert(m_object->resident_page_count >=
3050 	    m_object->wired_page_count);
3051 	if (mem->vmp_reusable) {
3052 		assert(m_object->reusable_page_count > 0);
3053 		m_object->reusable_page_count--;
3054 		assert(m_object->reusable_page_count <=
3055 		    m_object->resident_page_count);
3056 		mem->vmp_reusable = FALSE;
3057 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
3058 		vm_page_stats_reusable.reused_remove++;
3059 	} else if (m_object->all_reusable) {
3060 		OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
3061 		vm_page_stats_reusable.reused_remove++;
3062 	}
3063 
3064 	if (m_object->purgable == VM_PURGABLE_DENY &&
3065 	    !m_object->vo_ledger_tag) {
3066 		owner = TASK_NULL;
3067 	} else {
3068 		owner = VM_OBJECT_OWNER(m_object);
3069 		vm_object_ledger_tag_ledgers(m_object,
3070 		    &ledger_idx_volatile,
3071 		    &ledger_idx_nonvolatile,
3072 		    &ledger_idx_volatile_compressed,
3073 		    &ledger_idx_nonvolatile_compressed,
3074 		    &ledger_idx_composite,
3075 		    &ledger_idx_external_wired,
3076 		    &do_footprint);
3077 	}
3078 	if (owner &&
3079 	    m_object->internal &&
3080 	    (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
3081 	    m_object->purgable == VM_PURGABLE_DENY ||
3082 	    VM_PAGE_WIRED(mem))) {
3083 		/* less non-volatile bytes */
3084 		ledger_debit(owner->ledger,
3085 		    ledger_idx_nonvolatile,
3086 		    PAGE_SIZE);
3087 		if (do_footprint) {
3088 			/* less footprint */
3089 			ledger_debit(owner->ledger,
3090 			    task_ledgers.phys_footprint,
3091 			    PAGE_SIZE);
3092 		} else if (ledger_idx_composite != -1) {
3093 			ledger_debit(owner->ledger,
3094 			    ledger_idx_composite,
3095 			    PAGE_SIZE);
3096 		}
3097 	} else if (owner &&
3098 	    m_object->internal &&
3099 	    (m_object->purgable == VM_PURGABLE_VOLATILE ||
3100 	    m_object->purgable == VM_PURGABLE_EMPTY)) {
3101 		assert(!VM_PAGE_WIRED(mem));
3102 		/* less volatile bytes */
3103 		ledger_debit(owner->ledger,
3104 		    ledger_idx_volatile,
3105 		    PAGE_SIZE);
3106 	}
3107 
3108 	if (m_object->purgable == VM_PURGABLE_VOLATILE) {
3109 		if (VM_PAGE_WIRED(mem)) {
3110 			assert(vm_page_purgeable_wired_count > 0);
3111 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
3112 		} else {
3113 			assert(vm_page_purgeable_count > 0);
3114 			OSAddAtomic(-1, &vm_page_purgeable_count);
3115 		}
3116 	}
3117 
3118 	if (m_object->set_cache_attr == TRUE) {
3119 		pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
3120 	}
3121 
3122 	mem->vmp_tabled = FALSE;
3123 	mem->vmp_object = 0;
3124 	mem->vmp_offset = (vm_object_offset_t) -1;
3125 }
3126 
3127 
3128 /*
3129  *	vm_page_lookup:
3130  *
3131  *	Returns the page associated with the object/offset
3132  *	pair specified; if none is found, VM_PAGE_NULL is returned.
3133  *
3134  *	The object must be locked.  No side effects.
3135  */
3136 
3137 #define VM_PAGE_HASH_LOOKUP_THRESHOLD   10
3138 
3139 #if DEBUG_VM_PAGE_LOOKUP
3140 
3141 struct {
3142 	uint64_t        vpl_total;
3143 	uint64_t        vpl_empty_obj;
3144 	uint64_t        vpl_bucket_NULL;
3145 	uint64_t        vpl_hit_hint;
3146 	uint64_t        vpl_hit_hint_next;
3147 	uint64_t        vpl_hit_hint_prev;
3148 	uint64_t        vpl_fast;
3149 	uint64_t        vpl_slow;
3150 	uint64_t        vpl_hit;
3151 	uint64_t        vpl_miss;
3152 
3153 	uint64_t        vpl_fast_elapsed;
3154 	uint64_t        vpl_slow_elapsed;
3155 } vm_page_lookup_stats __attribute__((aligned(8)));
3156 
3157 #endif
3158 
3159 #define KDP_VM_PAGE_WALK_MAX    1000
3160 
3161 vm_page_t
3162 kdp_vm_page_lookup(
3163 	vm_object_t             object,
3164 	vm_object_offset_t      offset)
3165 {
3166 	vm_page_t cur_page;
3167 	int num_traversed = 0;
3168 
3169 	if (not_in_kdp) {
3170 		panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
3171 	}
3172 
3173 	vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
3174 		if (cur_page->vmp_offset == offset) {
3175 			return cur_page;
3176 		}
3177 		num_traversed++;
3178 
3179 		if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
3180 			return VM_PAGE_NULL;
3181 		}
3182 	}
3183 
3184 	return VM_PAGE_NULL;
3185 }
3186 
3187 vm_page_t
3188 vm_page_lookup(
3189 	vm_object_t             object,
3190 	vm_object_offset_t      offset)
3191 {
3192 	vm_page_t       mem;
3193 	vm_page_bucket_t *bucket;
3194 	vm_page_queue_entry_t   qe;
3195 	lck_spin_t      *bucket_lock = NULL;
3196 	int             hash_id;
3197 #if DEBUG_VM_PAGE_LOOKUP
3198 	uint64_t        start, elapsed;
3199 
3200 	OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
3201 #endif
3202 
3203 #if CONFIG_KERNEL_TAGGING
3204 	if (is_kernel_object(object)) {
3205 		offset = vm_memtag_canonicalize_kernel(offset);
3206 	}
3207 #endif /* CONFIG_KERNEL_TAGGING */
3208 
3209 	vm_object_lock_assert_held(object);
3210 	assertf(page_aligned(offset), "offset 0x%llx\n", offset);
3211 
3212 	if (object->resident_page_count == 0) {
3213 #if DEBUG_VM_PAGE_LOOKUP
3214 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
3215 #endif
3216 		return VM_PAGE_NULL;
3217 	}
3218 
3219 	mem = object->memq_hint;
3220 
3221 	if (mem != VM_PAGE_NULL) {
3222 		assert(VM_PAGE_OBJECT(mem) == object);
3223 
3224 		if (mem->vmp_offset == offset) {
3225 #if DEBUG_VM_PAGE_LOOKUP
3226 			OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
3227 #endif
3228 			return mem;
3229 		}
3230 		qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
3231 
3232 		if (!vm_page_queue_end(&object->memq, qe)) {
3233 			vm_page_t       next_page;
3234 
3235 			next_page = (vm_page_t)((uintptr_t)qe);
3236 			assert(VM_PAGE_OBJECT(next_page) == object);
3237 
3238 			if (next_page->vmp_offset == offset) {
3239 				object->memq_hint = next_page; /* new hint */
3240 #if DEBUG_VM_PAGE_LOOKUP
3241 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
3242 #endif
3243 				return next_page;
3244 			}
3245 		}
3246 		qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
3247 
3248 		if (!vm_page_queue_end(&object->memq, qe)) {
3249 			vm_page_t prev_page;
3250 
3251 			prev_page = (vm_page_t)((uintptr_t)qe);
3252 			assert(VM_PAGE_OBJECT(prev_page) == object);
3253 
3254 			if (prev_page->vmp_offset == offset) {
3255 				object->memq_hint = prev_page; /* new hint */
3256 #if DEBUG_VM_PAGE_LOOKUP
3257 				OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
3258 #endif
3259 				return prev_page;
3260 			}
3261 		}
3262 	}
3263 	/*
3264 	 * Search the hash table for this object/offset pair
3265 	 */
3266 	hash_id = vm_page_hash(object, offset);
3267 	bucket = &vm_page_buckets[hash_id];
3268 
3269 	/*
3270 	 * since we hold the object lock, we are guaranteed that no
3271 	 * new pages can be inserted into this object... this in turn
3272 	 * guarantees that the page we're looking for can't exist
3273 	 * if the bucket it hashes to is currently NULL even when looked
3274 	 * at outside the scope of the hash bucket lock... this is a
3275 	 * really cheap optimization to avoid taking the lock
3276 	 */
3277 	if (!bucket->page_list) {
3278 #if DEBUG_VM_PAGE_LOOKUP
3279 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
3280 #endif
3281 		return VM_PAGE_NULL;
3282 	}
3283 
3284 #if DEBUG_VM_PAGE_LOOKUP
3285 	start = mach_absolute_time();
3286 #endif
3287 	if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
3288 		/*
3289 		 * on average, it's roughly 3 times faster to run a short memq list
3290 		 * than to take the spin lock and go through the hash list
3291 		 */
3292 		mem = (vm_page_t)vm_page_queue_first(&object->memq);
3293 
3294 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
3295 			if (mem->vmp_offset == offset) {
3296 				break;
3297 			}
3298 
3299 			mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
3300 		}
3301 		if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
3302 			mem = NULL;
3303 		}
3304 	} else {
3305 		vm_page_object_t        packed_object;
3306 
3307 		packed_object = VM_PAGE_PACK_OBJECT(object);
3308 
3309 		bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
3310 
3311 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
3312 
3313 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
3314 		    mem != VM_PAGE_NULL;
3315 		    mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
3316 #if 0
3317 			/*
3318 			 * we don't hold the page queue lock
3319 			 * so this check isn't safe to make
3320 			 */
3321 			VM_PAGE_CHECK(mem);
3322 #endif
3323 			if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
3324 				break;
3325 			}
3326 		}
3327 		lck_spin_unlock(bucket_lock);
3328 	}
3329 
3330 #if DEBUG_VM_PAGE_LOOKUP
3331 	elapsed = mach_absolute_time() - start;
3332 
3333 	if (bucket_lock) {
3334 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
3335 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
3336 	} else {
3337 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
3338 		OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
3339 	}
3340 	if (mem != VM_PAGE_NULL) {
3341 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
3342 	} else {
3343 		OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
3344 	}
3345 #endif
3346 	if (mem != VM_PAGE_NULL) {
3347 		assert(VM_PAGE_OBJECT(mem) == object);
3348 
3349 		object->memq_hint = mem;
3350 	}
3351 	return mem;
3352 }
3353 
3354 
3355 /*
3356  *	vm_page_rename:
3357  *
3358  *	Move the given memory entry from its
3359  *	current object to the specified target object/offset.
3360  *
3361  *	The object must be locked.
3362  */
3363 void
3364 vm_page_rename(
3365 	vm_page_t               mem,
3366 	vm_object_t             new_object,
3367 	vm_object_offset_t      new_offset)
3368 {
3369 	boolean_t       internal_to_external, external_to_internal;
3370 	vm_tag_t        tag;
3371 	vm_object_t     m_object;
3372 
3373 	m_object = VM_PAGE_OBJECT(mem);
3374 
3375 	assert(m_object != new_object);
3376 	assert(m_object);
3377 
3378 	/*
3379 	 *	Changes to mem->vmp_object require the page lock because
3380 	 *	the pageout daemon uses that lock to get the object.
3381 	 */
3382 	vm_page_lockspin_queues();
3383 
3384 	internal_to_external = FALSE;
3385 	external_to_internal = FALSE;
3386 
3387 	if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
3388 		/*
3389 		 * it's much easier to get the vm_page_pageable_xxx accounting correct
3390 		 * if we first move the page to the active queue... it's going to end
3391 		 * up there anyway, and we don't do vm_page_rename's frequently enough
3392 		 * for this to matter.
3393 		 */
3394 		vm_page_queues_remove(mem, FALSE);
3395 		vm_page_activate(mem);
3396 	}
3397 	if (VM_PAGE_PAGEABLE(mem)) {
3398 		if (m_object->internal && !new_object->internal) {
3399 			internal_to_external = TRUE;
3400 		}
3401 		if (!m_object->internal && new_object->internal) {
3402 			external_to_internal = TRUE;
3403 		}
3404 	}
3405 
3406 	tag = m_object->wire_tag;
3407 	vm_page_remove(mem, TRUE);
3408 	vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
3409 
3410 	if (internal_to_external) {
3411 		vm_page_pageable_internal_count--;
3412 		vm_page_pageable_external_count++;
3413 	} else if (external_to_internal) {
3414 		vm_page_pageable_external_count--;
3415 		vm_page_pageable_internal_count++;
3416 	}
3417 
3418 	vm_page_unlock_queues();
3419 }
3420 
3421 /*
3422  *	vm_page_init:
3423  *
3424  *	Initialize the fields in a new page.
3425  *	This takes a structure with random values and initializes it
3426  *	so that it can be given to vm_page_release or vm_page_insert.
3427  */
3428 void
3429 vm_page_init(vm_page_t mem, ppnum_t phys_page)
3430 {
3431 	assert(phys_page);
3432 
3433 #if DEBUG
3434 	if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
3435 		if (!(pmap_valid_page(phys_page))) {
3436 			panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
3437 		}
3438 	}
3439 #endif /* DEBUG */
3440 
3441 	/*
3442 	 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
3443 	 * try to use initial values which match 0. This minimizes the number of writes
3444 	 * needed for boot-time initialization.
3445 	 */
3446 	assert(VM_PAGE_NOT_ON_Q == 0);
3447 	assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
3448 	*mem = (struct vm_page) {
3449 		.vmp_q_state = VM_PAGE_NOT_ON_Q,
3450 		.vmp_canonical = vm_page_in_array(mem),
3451 		.vmp_offset = (vm_object_offset_t)-1,
3452 		.vmp_busy = true,
3453 	};
3454 
3455 	VM_PAGE_INIT_PHYS_PAGE(mem, phys_page);
3456 
3457 #if 0
3458 	/*
3459 	 * we're leaving this turned off for now... currently pages
3460 	 * come off the free list and are either immediately dirtied/referenced
3461 	 * due to zero-fill or COW faults, or are used to read or write files...
3462 	 * in the file I/O case, the UPL mechanism takes care of clearing
3463 	 * the state of the HW ref/mod bits in a somewhat fragile way.
3464 	 * Since we may change the way this works in the future (to toughen it up),
3465 	 * I'm leaving this as a reminder of where these bits could get cleared
3466 	 */
3467 
3468 	/*
3469 	 * make sure both the h/w referenced and modified bits are
3470 	 * clear at this point... we are especially dependent on
3471 	 * not finding a 'stale' h/w modified in a number of spots
3472 	 * once this page goes back into use
3473 	 */
3474 	pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
3475 #endif
3476 }
3477 
3478 vm_page_t
3479 vm_page_create_fictitious(void)
3480 {
3481 	return vm_page_create(vm_page_fictitious_addr, false, Z_WAITOK);
3482 }
3483 
3484 vm_page_t
3485 vm_page_create_guard(bool canwait)
3486 {
3487 	return vm_page_create(vm_page_guard_addr, false, canwait ? Z_WAITOK : Z_NOWAIT);
3488 }
3489 
3490 vm_page_t
3491 vm_page_create_private(ppnum_t base_page)
3492 {
3493 	assert(base_page != vm_page_fictitious_addr &&
3494 	    base_page != vm_page_guard_addr);
3495 	return vm_page_create(base_page, false, Z_WAITOK);
3496 }
3497 
3498 bool
3499 vm_page_is_canonical(const struct vm_page *m)
3500 {
3501 	return m->vmp_canonical;
3502 }
3503 
3504 bool
3505 vm_page_is_fictitious(const struct vm_page *m)
3506 {
3507 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
3508 	if (vm_page_in_array(m)) {
3509 		return false;
3510 	}
3511 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
3512 	switch (VM_PAGE_GET_PHYS_PAGE(m)) {
3513 	case vm_page_guard_addr:
3514 	case vm_page_fictitious_addr:
3515 		return true;
3516 	default:
3517 		return false;
3518 	}
3519 }
3520 
3521 bool
3522 vm_page_is_guard(const struct vm_page *m)
3523 {
3524 #if XNU_VM_HAS_LINEAR_PAGES_ARRAY
3525 	if (vm_page_in_array(m)) {
3526 		return false;
3527 	}
3528 #endif /* XNU_VM_HAS_LINEAR_PAGES_ARRAY */
3529 	return VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr;
3530 }
3531 
3532 bool
3533 vm_page_is_private(const struct vm_page *m)
3534 {
3535 	return !vm_page_is_canonical(m) && !vm_page_is_fictitious(m);
3536 }
3537 
3538 void
3539 vm_page_make_private(vm_page_t m, ppnum_t base_page)
3540 {
3541 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3542 	assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr);
3543 
3544 	VM_PAGE_SET_PHYS_PAGE(m, base_page);
3545 }
3546 
3547 void
3548 vm_page_reset_private(vm_page_t m)
3549 {
3550 	assert(vm_page_is_private(m));
3551 
3552 	VM_PAGE_SET_PHYS_PAGE(m, vm_page_fictitious_addr);
3553 }
3554 
3555 /*
3556  *	vm_page_release_fictitious:
3557  *
3558  *	Release a fictitious page to the zone pool
3559  */
3560 static void
3561 vm_page_release_fictitious(vm_page_t m)
3562 {
3563 	assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3564 	    (m->vmp_q_state == VM_PAGE_IS_WIRED));
3565 	assert(vm_page_is_fictitious(m));
3566 	assert(!m->vmp_realtime);
3567 
3568 	if (vm_page_is_guard(m)) {
3569 		counter_dec(&vm_guard_count);
3570 	}
3571 	zfree(vm_page_zone, m);
3572 }
3573 
3574 /*
3575  *	vm_pool_low():
3576  *
3577  *	Return true if it is not likely that a non-vm_privileged thread
3578  *	can get memory without blocking.  Advisory only, since the
3579  *	situation may change under us.
3580  */
3581 bool
3582 vm_pool_low(void)
3583 {
3584 	/* No locking, at worst we will fib. */
3585 	return vm_page_free_count <= vm_page_free_reserved;
3586 }
3587 
3588 boolean_t vm_darkwake_mode = FALSE;
3589 
3590 /*
3591  * vm_update_darkwake_mode():
3592  *
3593  * Tells the VM that the system is in / out of darkwake.
3594  *
3595  * Today, the VM only lowers/raises the background queue target
3596  * so as to favor consuming more/less background pages when
3597  * darkwake is ON/OFF.
3598  *
3599  * We might need to do more things in the future.
3600  */
3601 
3602 void
3603 vm_update_darkwake_mode(boolean_t darkwake_mode)
3604 {
3605 #if XNU_TARGET_OS_OSX && defined(__arm64__)
3606 #pragma unused(darkwake_mode)
3607 	assert(vm_darkwake_mode == FALSE);
3608 	/*
3609 	 * Darkwake mode isn't supported on Apple silicon macOS.
3610 	 */
3611 	return;
3612 #else /* XNU_TARGET_OS_OSX && __arm64__ */
3613 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3614 
3615 	vm_page_lockspin_queues();
3616 
3617 	if (vm_darkwake_mode == darkwake_mode) {
3618 		/*
3619 		 * No change.
3620 		 */
3621 		vm_page_unlock_queues();
3622 		return;
3623 	}
3624 
3625 	vm_darkwake_mode = darkwake_mode;
3626 
3627 	if (vm_darkwake_mode == TRUE) {
3628 		/* save background target to restore later */
3629 		vm_page_background_target_snapshot = vm_page_background_target;
3630 
3631 		/* target is set to 0...no protection for background pages */
3632 		vm_page_background_target = 0;
3633 	} else if (vm_darkwake_mode == FALSE) {
3634 		if (vm_page_background_target_snapshot) {
3635 			vm_page_background_target = vm_page_background_target_snapshot;
3636 		}
3637 	}
3638 	vm_page_unlock_queues();
3639 #endif
3640 }
3641 
3642 void
3643 vm_page_update_special_state(vm_page_t mem)
3644 {
3645 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
3646 		return;
3647 	}
3648 
3649 	int mode = mem->vmp_on_specialq;
3650 
3651 	switch (mode) {
3652 	case VM_PAGE_SPECIAL_Q_BG:
3653 	{
3654 		task_t  my_task = current_task_early();
3655 
3656 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
3657 			return;
3658 		}
3659 
3660 		if (my_task) {
3661 			if (task_get_darkwake_mode(my_task)) {
3662 				return;
3663 			}
3664 		}
3665 
3666 		if (my_task) {
3667 			if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
3668 				return;
3669 			}
3670 		}
3671 		vm_page_lockspin_queues();
3672 
3673 		vm_page_background_promoted_count++;
3674 
3675 		vm_page_remove_from_specialq(mem);
3676 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3677 
3678 		vm_page_unlock_queues();
3679 		break;
3680 	}
3681 
3682 	case VM_PAGE_SPECIAL_Q_DONATE:
3683 	{
3684 		task_t  my_task = current_task_early();
3685 
3686 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
3687 			return;
3688 		}
3689 
3690 		if (my_task->donates_own_pages == false) {
3691 			vm_page_lockspin_queues();
3692 
3693 			vm_page_remove_from_specialq(mem);
3694 			mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3695 
3696 			vm_page_unlock_queues();
3697 		}
3698 		break;
3699 	}
3700 
3701 	default:
3702 	{
3703 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
3704 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
3705 		break;
3706 	}
3707 	}
3708 }
3709 
3710 
3711 void
3712 vm_page_assign_special_state(vm_page_t mem, int mode)
3713 {
3714 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3715 		return;
3716 	}
3717 
3718 	switch (mode) {
3719 	case VM_PAGE_SPECIAL_Q_BG:
3720 	{
3721 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
3722 			return;
3723 		}
3724 
3725 		task_t  my_task = current_task_early();
3726 
3727 		if (my_task) {
3728 			if (task_get_darkwake_mode(my_task)) {
3729 				mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3730 				return;
3731 			}
3732 		}
3733 
3734 		if (my_task) {
3735 			mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
3736 		}
3737 		break;
3738 	}
3739 
3740 	case VM_PAGE_SPECIAL_Q_DONATE:
3741 	{
3742 		if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
3743 			return;
3744 		}
3745 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3746 		break;
3747 	}
3748 
3749 	default:
3750 		break;
3751 	}
3752 }
3753 
3754 
3755 void
3756 vm_page_remove_from_specialq(
3757 	vm_page_t       mem)
3758 {
3759 	vm_object_t     m_object;
3760 	unsigned short  mode;
3761 
3762 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3763 
3764 	mode = mem->vmp_on_specialq;
3765 
3766 	switch (mode) {
3767 	case VM_PAGE_SPECIAL_Q_BG:
3768 	{
3769 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
3770 			vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
3771 
3772 			mem->vmp_specialq.next = 0;
3773 			mem->vmp_specialq.prev = 0;
3774 
3775 			vm_page_background_count--;
3776 
3777 			m_object = VM_PAGE_OBJECT(mem);
3778 
3779 			if (m_object->internal) {
3780 				vm_page_background_internal_count--;
3781 			} else {
3782 				vm_page_background_external_count--;
3783 			}
3784 		}
3785 		break;
3786 	}
3787 
3788 	case VM_PAGE_SPECIAL_Q_DONATE:
3789 	{
3790 		if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
3791 			vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3792 			mem->vmp_specialq.next = 0;
3793 			mem->vmp_specialq.prev = 0;
3794 			vm_page_donate_count--;
3795 			if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
3796 				assert(vm_page_donate_target == vm_page_donate_target_low);
3797 				vm_page_donate_target = vm_page_donate_target_high;
3798 				vm_page_donate_queue_ripe = false;
3799 			}
3800 		}
3801 
3802 		break;
3803 	}
3804 
3805 	default:
3806 	{
3807 		assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
3808 		    VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
3809 		break;
3810 	}
3811 	}
3812 }
3813 
3814 
3815 void
3816 vm_page_add_to_specialq(
3817 	vm_page_t       mem,
3818 	boolean_t       first)
3819 {
3820 	vm_object_t     m_object;
3821 
3822 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3823 
3824 	if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
3825 		return;
3826 	}
3827 
3828 	int mode = mem->vmp_on_specialq;
3829 
3830 	switch (mode) {
3831 	case VM_PAGE_SPECIAL_Q_BG:
3832 	{
3833 		if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
3834 			return;
3835 		}
3836 
3837 		m_object = VM_PAGE_OBJECT(mem);
3838 
3839 		if (vm_page_background_exclude_external && !m_object->internal) {
3840 			return;
3841 		}
3842 
3843 		if (first == TRUE) {
3844 			vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
3845 		} else {
3846 			vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
3847 		}
3848 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
3849 
3850 		vm_page_background_count++;
3851 
3852 		if (m_object->internal) {
3853 			vm_page_background_internal_count++;
3854 		} else {
3855 			vm_page_background_external_count++;
3856 		}
3857 		break;
3858 	}
3859 
3860 	case VM_PAGE_SPECIAL_Q_DONATE:
3861 	{
3862 		if (first == TRUE) {
3863 			vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3864 		} else {
3865 			vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3866 		}
3867 		vm_page_donate_count++;
3868 		if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3869 			assert(vm_page_donate_target == vm_page_donate_target_high);
3870 			vm_page_donate_target = vm_page_donate_target_low;
3871 			vm_page_donate_queue_ripe = true;
3872 		}
3873 		mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3874 		break;
3875 	}
3876 
3877 	default:
3878 		break;
3879 	}
3880 }
3881 
3882 #if __x86_64__
3883 /*
3884  * This can be switched to FALSE to help debug drivers
3885  * that are having problems with memory > 4G.
3886  */
3887 boolean_t       vm_himemory_mode = TRUE;
3888 #endif /* __x86_64__ */
3889 
3890 /*
3891  * this interface exists to support hardware controllers
3892  * incapable of generating DMAs with more than 32 bits
3893  * of address on platforms with physical memory > 4G...
3894  */
3895 unsigned int    vm_lopages_allocated_q = 0;
3896 unsigned int    vm_lopages_allocated_cpm_success = 0;
3897 unsigned int    vm_lopages_allocated_cpm_failed = 0;
3898 vm_page_queue_head_t    vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3899 
3900 vm_page_t
3901 vm_page_grablo(void)
3902 {
3903 	vm_page_t       mem;
3904 
3905 	if (vm_lopage_needed == FALSE) {
3906 		int grab_options = VM_PAGE_GRAB_OPTIONS_NONE;
3907 		return vm_page_grab_options(grab_options);
3908 	}
3909 
3910 	vm_free_page_lock_spin();
3911 
3912 	if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3913 		vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3914 		assert(vm_lopage_free_count);
3915 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3916 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3917 
3918 		vm_lopage_free_count--;
3919 		vm_lopages_allocated_q++;
3920 
3921 		if (vm_lopage_free_count < vm_lopage_lowater) {
3922 			vm_lopage_refill = TRUE;
3923 		}
3924 
3925 		vm_free_page_unlock();
3926 
3927 		if (current_task()->donates_own_pages) {
3928 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3929 		} else {
3930 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3931 		}
3932 	} else {
3933 		vm_free_page_unlock();
3934 
3935 		if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3936 			vm_free_page_lock_spin();
3937 			vm_lopages_allocated_cpm_failed++;
3938 			vm_free_page_unlock();
3939 
3940 			return VM_PAGE_NULL;
3941 		}
3942 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3943 
3944 		mem->vmp_busy = TRUE;
3945 
3946 		vm_page_lockspin_queues();
3947 
3948 		mem->vmp_gobbled = FALSE;
3949 		vm_page_gobble_count--;
3950 		vm_page_wire_count--;
3951 
3952 		vm_lopages_allocated_cpm_success++;
3953 		vm_page_unlock_queues();
3954 	}
3955 	assert(mem->vmp_busy);
3956 	assert(!mem->vmp_pmapped);
3957 	assert(!mem->vmp_wpmapped);
3958 	assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3959 
3960 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3961 
3962 	counter_inc(&vm_page_grab_count);
3963 	VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3964 
3965 	return mem;
3966 }
3967 
3968 /*
3969  *	vm_page_grab:
3970  *
3971  *	first try to grab a page from the per-cpu free list...
3972  *	this must be done while pre-emption is disabled... if
3973  *      a page is available, we're done...
3974  *	if no page is available, grab the vm_page_queue_free_lock
3975  *	and see if current number of free pages would allow us
3976  *      to grab at least 1... if not, return VM_PAGE_NULL as before...
3977  *	if there are pages available, disable preemption and
3978  *      recheck the state of the per-cpu free list... we could
3979  *	have been preempted and moved to a different cpu, or
3980  *      some other thread could have re-filled it... if still
3981  *	empty, figure out how many pages we can steal from the
3982  *	global free queue and move to the per-cpu queue...
3983  *	return 1 of these pages when done... only wake up the
3984  *      pageout_scan thread if we moved pages from the global
3985  *	list... no need for the wakeup if we've satisfied the
3986  *	request from the per-cpu queue.
3987  */
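/*
 * Illustrative sketch of the fast path described above (assumed shape only;
 * the authoritative logic is vm_page_grab_options_internal() below):
 *
 *	disable_preemption();
 *	if ((mem = *PERCPU_GET(free_pages))) {          // per-cpu magazine hit
 *		*PERCPU_GET(free_pages) = mem->vmp_snext;
 *		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
 *		enable_preemption();
 *		return mem;                             // no locks taken
 *	}
 *	enable_preemption();
 *	// miss: take the free-page lock, steal up to
 *	// vm_free_magazine_refill_limit pages onto the per-cpu list,
 *	// wake vm_pageout_scan() if we dropped below vm_page_free_min,
 *	// then retry the fast path.
 */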
3988 
3989 #if CONFIG_SECLUDED_MEMORY
3990 vm_page_t vm_page_grab_secluded(void);
3991 #endif /* CONFIG_SECLUDED_MEMORY */
3992 
3993 static inline void
3994 vm_page_grab_diags(void);
3995 
3996 vm_page_t
3997 vm_page_grab(void)
3998 {
3999 	return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
4000 }
4001 
4002 #if HIBERNATION
4003 boolean_t       hibernate_rebuild_needed = FALSE;
4004 #endif /* HIBERNATION */
4005 
4006 static void
4007 vm_page_finalize_grabed_page(vm_page_t mem)
4008 {
4009 	task_t cur_task = current_task_early();
4010 	if (cur_task && cur_task != kernel_task) {
4011 		/* tag:DONATE this is where the donate state of the page is decided according to what task grabs it */
4012 		if (cur_task->donates_own_pages) {
4013 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
4014 		} else {
4015 			vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
4016 		}
4017 	}
4018 }
4019 
4020 /*
4021  * vm_page_grab_options_internal:
4022  * Core logic for a page grab request.  This is separate from the actual
4023  * vm_page_grab_options interface so that this function can have multiple paths
4024  * to return a page, but the interface will unilaterally run any checks or
4025  * followup we want on a grabbed page.
4026  */
4027 static vm_page_t
4028 vm_page_grab_options_internal(
4029 	int grab_options)
4030 {
4031 	vm_page_t       mem;
4032 
4033 restart:
4034 	disable_preemption();
4035 
4036 	if ((mem = *PERCPU_GET(free_pages))) {
4037 		assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
4038 
4039 #if HIBERNATION
4040 		if (hibernate_rebuild_needed) {
4041 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
4042 		}
4043 #endif /* HIBERNATION */
4044 
4045 		vm_page_grab_diags();
4046 
4047 		vm_offset_t pcpu_base = current_percpu_base();
4048 		counter_inc_preemption_disabled(&vm_page_grab_count);
4049 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
4050 		VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
4051 
4052 		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
4053 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4054 		enable_preemption();
4055 
4056 		assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
4057 		assert(mem->vmp_tabled == FALSE);
4058 		assert(mem->vmp_object == 0);
4059 		assert(!mem->vmp_laundry);
4060 		assert(mem->vmp_busy);
4061 		assert(!mem->vmp_pmapped);
4062 		assert(!mem->vmp_wpmapped);
4063 		assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
4064 		assert(!mem->vmp_realtime);
4065 
4066 #if MACH_ASSERT
4067 		if (vm_check_refs_on_alloc) {
4068 			vm_page_validate_no_references(mem);
4069 		}
4070 #endif
4071 		vm_page_finalize_grabed_page(mem);
4072 		return mem;
4073 	}
4074 	enable_preemption();
4075 
4076 	/*
4077 	 *	Optionally produce warnings if the wire or gobble
4078 	 *	counts exceed some threshold.
4079 	 */
4080 #if VM_PAGE_WIRE_COUNT_WARNING
4081 	if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
4082 		printf("mk: vm_page_grab(): high wired page count of %d\n",
4083 		    vm_page_wire_count);
4084 	}
4085 #endif
4086 #if VM_PAGE_GOBBLE_COUNT_WARNING
4087 	if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
4088 		printf("mk: vm_page_grab(): high gobbled page count of %d\n",
4089 		    vm_page_gobble_count);
4090 	}
4091 #endif
4092 
4093 #if XNU_VM_HAS_DELAYED_PAGES
4094 	/*
4095 	 * If free count is low and we have delayed pages from early boot,
4096 	 * get one of those instead.
4097 	 */
4098 	if (__improbable(vm_delayed_count > 0 &&
4099 	    vm_page_free_count <= vm_page_free_target &&
4100 	    (mem = vm_get_delayed_page(grab_options)) != NULL)) {
4101 		assert(!mem->vmp_realtime);
4102 		// TODO: missing vm_page_finalize_grabed_page()?
4103 		return mem;
4104 	}
4105 #endif /* XNU_VM_HAS_DELAYED_PAGES */
4106 
4107 	vm_free_page_lock_spin();
4108 
4109 	/*
4110 	 *	Only let privileged threads (involved in pageout)
4111 	 *	dip into the reserved pool.
4112 	 */
4113 	if ((vm_page_free_count < vm_page_free_reserved) &&
4114 	    !(current_thread()->options & TH_OPT_VMPRIV)) {
4115 		/* no page for us in the free queue... */
4116 		vm_free_page_unlock();
4117 		mem = VM_PAGE_NULL;
4118 
4119 #if CONFIG_SECLUDED_MEMORY
4120 		/* ... but can we try and grab from the secluded queue? */
4121 		if (vm_page_secluded_count > 0 &&
4122 		    ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
4123 		    task_can_use_secluded_mem(current_task(), TRUE))) {
4124 			mem = vm_page_grab_secluded();
4125 			if (grab_options & VM_PAGE_GRAB_SECLUDED) {
4126 				vm_page_secluded.grab_for_iokit++;
4127 				if (mem) {
4128 					vm_page_secluded.grab_for_iokit_success++;
4129 				}
4130 			}
4131 			if (mem) {
4132 				VM_CHECK_MEMORYSTATUS;
4133 
4134 				vm_page_grab_diags();
4135 				counter_inc(&vm_page_grab_count);
4136 				VM_DEBUG_EVENT(vm_page_grab, DBG_VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
4137 
4138 				assert(!mem->vmp_realtime);
4139 				// TODO: missing vm_page_finalize_grabed_page()?
4140 				return mem;
4141 			}
4142 		}
4143 #endif /* CONFIG_SECLUDED_MEMORY */
4144 		(void) grab_options;
4145 	} else {
4146 		unsigned int     pages_to_steal;
4147 
4148 
4149 		/*
4150 		 * Replenishing our per-CPU cache of free pages might take
4151 		 * too long to keep holding the "free_page" lock as a spinlock,
4152 		 * so convert to the full mutex to prevent other threads trying
4153 		 * to acquire the "free_page" lock from timing out spinning on
4154 		 * the mutex interlock.
4155 		 */
4156 		vm_free_page_lock_convert();
4157 
4158 		while (vm_page_free_count == 0) {
4159 			vm_free_page_unlock();
4160 			/*
4161 			 * must be a privileged thread to be
4162 			 * in this state since a non-privileged
4163 			 * thread would have bailed if we were
4164 			 * under the vm_page_free_reserved mark
4165 			 */
4166 			VM_PAGE_WAIT();
4167 			vm_free_page_lock();
4168 		}
4169 
4170 		/*
4171 		 * Need to repopulate the per-CPU free list from the global free list.
4172 		 * Note we don't do any processing of pending retirement pages here.
4173 		 * That'll happen in the code above when the page comes off the per-CPU list.
4174 		 */
4175 		disable_preemption();
4176 
4177 		/*
4178 		 * If we got preempted the cache might now have pages.
4179 		 */
4180 		if ((mem = *PERCPU_GET(free_pages))) {
4181 			vm_free_page_unlock();
4182 			enable_preemption();
4183 			goto restart;
4184 		}
4185 
4186 		if (vm_page_free_count <= vm_page_free_reserved) {
4187 			pages_to_steal = 1;
4188 		} else {
4189 			if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
4190 				pages_to_steal = vm_free_magazine_refill_limit;
4191 			} else {
4192 				pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
4193 			}
4194 		}
4195 
4196 		/* Grab pages from the global free queues. */
4197 		mem = vm_page_queue_free_remove_first(pages_to_steal, VM_PAGE_ON_FREE_LOCAL_Q);
4198 
4199 #if HIBERNATION
4200 		if (hibernate_rebuild_needed) {
4201 			panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
4202 		}
4203 #endif /* HIBERNATION */
4204 
4205 		/* Make the grabbed list the per-CPU free list. */
4206 		vm_offset_t pcpu_base = current_percpu_base();
4207 		*PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem;
4208 
4209 		/*
4210 		 * We decremented vm_page_free_count above
4211 		 * so we must wake up vm_pageout_scan() if
4212 		 * we brought it down below vm_page_free_min.
4213 		 */
4214 		bool wakeup_pageout_scan = false;
4215 		if (vm_page_free_count < vm_page_free_min &&
4216 		    !vm_pageout_running) {
4217 			wakeup_pageout_scan = true;
4218 		}
4219 		vm_free_page_unlock();
4220 
4221 		enable_preemption();
4222 
4223 		if (wakeup_pageout_scan) {
4224 			thread_wakeup((event_t) &vm_page_free_wanted);
4225 		}
4226 		VM_CHECK_MEMORYSTATUS;
4227 
4228 		goto restart;
4229 	}
4230 
4231 	/*
4232 	 *	Decide if we should poke the pageout daemon.
4233 	 *	We do this if the free count is less than the low
4234 	 *	water mark. VM Pageout Scan will keep running till
4235 	 *	the free_count > free_target (& hence above free_min).
4236 	 *	This wakeup is to catch the possibility of the counts
4237 	 *	dropping between VM Pageout Scan parking and this check.
4238 	 *
4239 	 *	We don't have the counts locked ... if they change a little,
4240 	 *	it doesn't really matter.
4241 	 */
4242 	if (vm_page_free_count < vm_page_free_min) {
4243 		vm_free_page_lock();
4244 		if (vm_pageout_running == FALSE) {
4245 			vm_free_page_unlock();
4246 			thread_wakeup((event_t) &vm_page_free_wanted);
4247 		} else {
4248 			vm_free_page_unlock();
4249 		}
4250 	}
4251 
4252 	VM_CHECK_MEMORYSTATUS;
4253 
4254 	if (mem) {
4255 		assert(!mem->vmp_realtime);
4256 //		dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4);	/* (TEST/DEBUG) */
4257 
4258 		vm_page_finalize_grabed_page(mem);
4259 	}
4260 	return mem;
4261 }
4262 
4263 vm_page_t
4264 vm_page_grab_options(
4265 	int grab_options)
4266 {
4267 	vm_page_t mem = vm_page_grab_options_internal(grab_options);
4268 
4269 	/*
4270 	 * For all free pages, no matter their provenance... ensure they are
4271 	 * not referenced anywhere.
4272 	 */
4273 	if (mem != VM_PAGE_NULL) {
4274 
4275 #if MACH_ASSERT
4276 		if (vm_check_refs_on_alloc) {
4277 			vm_page_validate_no_references(mem);
4278 		}
4279 #endif /* MACH_ASSERT */
4280 	}
4281 
4282 	return mem;
4283 }
4284 
4285 #if CONFIG_SECLUDED_MEMORY
4286 vm_page_t
4287 vm_page_grab_secluded(void)
4288 {
4289 	vm_page_t       mem;
4290 	vm_object_t     object;
4291 	int             refmod_state;
4292 
4293 	if (vm_page_secluded_count == 0) {
4294 		/* no secluded pages to grab... */
4295 		return VM_PAGE_NULL;
4296 	}
4297 
4298 	/* secluded queue is protected by the VM page queue lock */
4299 	vm_page_lock_queues();
4300 
4301 	if (vm_page_secluded_count == 0) {
4302 		/* no secluded pages to grab... */
4303 		vm_page_unlock_queues();
4304 		return VM_PAGE_NULL;
4305 	}
4306 
4307 #if 00
4308 	/* can we grab from the secluded queue? */
4309 	if (vm_page_secluded_count > vm_page_secluded_target ||
4310 	    (vm_page_secluded_count > 0 &&
4311 	    task_can_use_secluded_mem(current_task(), TRUE))) {
4312 		/* OK */
4313 	} else {
4314 		/* can't grab from secluded queue... */
4315 		vm_page_unlock_queues();
4316 		return VM_PAGE_NULL;
4317 	}
4318 #endif
4319 
4320 	/* we can grab a page from secluded queue! */
4321 	assert((vm_page_secluded_count_free +
4322 	    vm_page_secluded_count_inuse) ==
4323 	    vm_page_secluded_count);
4324 	if (current_task()->task_can_use_secluded_mem) {
4325 		assert(num_tasks_can_use_secluded_mem > 0);
4326 	}
4327 	assert(!vm_page_queue_empty(&vm_page_queue_secluded));
4328 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4329 	mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
4330 	assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
4331 	vm_page_queues_remove(mem, TRUE);
4332 
4333 	object = VM_PAGE_OBJECT(mem);
4334 
4335 	assert(!vm_page_is_fictitious(mem));
4336 	assert(!VM_PAGE_WIRED(mem));
4337 	if (object == VM_OBJECT_NULL) {
4338 		/* free for grab! */
4339 		vm_page_unlock_queues();
4340 		vm_page_secluded.grab_success_free++;
4341 
4342 		assert(mem->vmp_busy);
4343 		assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4344 		assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
4345 		assert(mem->vmp_pageq.next == 0);
4346 		assert(mem->vmp_pageq.prev == 0);
4347 		assert(mem->vmp_listq.next == 0);
4348 		assert(mem->vmp_listq.prev == 0);
4349 		assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4350 		assert(mem->vmp_specialq.next == 0);
4351 		assert(mem->vmp_specialq.prev == 0);
4352 		return mem;
4353 	}
4354 
4355 	assert(!object->internal);
4356 //	vm_page_pageable_external_count--;
4357 
4358 	if (!vm_object_lock_try(object)) {
4359 //		printf("SECLUDED: page %p: object %p locked\n", mem, object);
4360 		vm_page_secluded.grab_failure_locked++;
4361 reactivate_secluded_page:
4362 		vm_page_activate(mem);
4363 		vm_page_unlock_queues();
4364 		return VM_PAGE_NULL;
4365 	}
4366 	if (mem->vmp_busy ||
4367 	    mem->vmp_cleaning ||
4368 	    mem->vmp_laundry) {
4369 		/* can't steal page in this state... */
4370 		vm_object_unlock(object);
4371 		vm_page_secluded.grab_failure_state++;
4372 		goto reactivate_secluded_page;
4373 	}
4374 	if (mem->vmp_realtime) {
4375 		/* don't steal pages used by realtime threads... */
4376 		vm_object_unlock(object);
4377 		vm_page_secluded.grab_failure_realtime++;
4378 		goto reactivate_secluded_page;
4379 	}
4380 
4381 	mem->vmp_busy = TRUE;
4382 	refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
4383 	if (refmod_state & VM_MEM_REFERENCED) {
4384 		mem->vmp_reference = TRUE;
4385 	}
4386 	if (refmod_state & VM_MEM_MODIFIED) {
4387 		SET_PAGE_DIRTY(mem, FALSE);
4388 	}
4389 	if (mem->vmp_dirty || mem->vmp_precious) {
4390 		/* can't grab a dirty page; re-activate */
4391 //		printf("SECLUDED: dirty page %p\n", mem);
4392 		vm_page_wakeup_done(object, mem);
4393 		vm_page_secluded.grab_failure_dirty++;
4394 		vm_object_unlock(object);
4395 		goto reactivate_secluded_page;
4396 	}
4397 	if (mem->vmp_reference) {
4398 		/* it's been used but we do need to grab a page... */
4399 	}
4400 
4401 	vm_page_unlock_queues();
4402 
4403 
4404 	/* finish what vm_page_free() would have done... */
4405 	vm_page_free_prepare_object(mem, TRUE);
4406 	vm_object_unlock(object);
4407 	object = VM_OBJECT_NULL;
4408 
4409 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4410 	vm_page_secluded.grab_success_other++;
4411 
4412 	assert(mem->vmp_busy);
4413 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4414 	assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
4415 	assert(mem->vmp_pageq.next == 0);
4416 	assert(mem->vmp_pageq.prev == 0);
4417 	assert(mem->vmp_listq.next == 0);
4418 	assert(mem->vmp_listq.prev == 0);
4419 	assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4420 	assert(mem->vmp_specialq.next == 0);
4421 	assert(mem->vmp_specialq.prev == 0);
4422 
4423 	return mem;
4424 }
4425 
4426 uint64_t
4427 vm_page_secluded_drain(void)
4428 {
4429 	vm_page_t local_freeq;
4430 	int local_freed;
4431 	uint64_t num_reclaimed;
4432 	unsigned int saved_secluded_count, saved_secluded_target;
4433 
4434 	num_reclaimed = 0;
4435 	local_freeq = NULL;
4436 	local_freed = 0;
4437 
4438 	vm_page_lock_queues();
4439 
4440 	saved_secluded_count = vm_page_secluded_count;
4441 	saved_secluded_target = vm_page_secluded_target;
4442 	vm_page_secluded_target = 0;
4443 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4444 	while (vm_page_secluded_count) {
4445 		vm_page_t secluded_page;
4446 
4447 		assert((vm_page_secluded_count_free +
4448 		    vm_page_secluded_count_inuse) ==
4449 		    vm_page_secluded_count);
4450 		secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
4451 		assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
4452 
4453 		vm_page_queues_remove(secluded_page, FALSE);
4454 		assert(!vm_page_is_fictitious(secluded_page));
4455 		assert(!VM_PAGE_WIRED(secluded_page));
4456 
4457 		if (secluded_page->vmp_object == 0) {
4458 			/* transfer to free queue */
4459 			assert(secluded_page->vmp_busy);
4460 			secluded_page->vmp_snext = local_freeq;
4461 			local_freeq = secluded_page;
4462 			local_freed += 1;
4463 		} else {
4464 			/* transfer to head of active queue */
4465 			vm_page_enqueue_active(secluded_page, FALSE);
4466 			secluded_page = VM_PAGE_NULL;
4467 		}
4468 		num_reclaimed++;
4469 	}
4470 	vm_page_secluded_target = saved_secluded_target;
4471 	VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
4472 
4473 //	printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
4474 
4475 	vm_page_unlock_queues();
4476 
4477 	if (local_freed) {
4478 		vm_page_free_list(local_freeq, TRUE);
4479 		local_freeq = NULL;
4480 		local_freed = 0;
4481 	}
4482 
4483 	return num_reclaimed;
4484 }
4485 #endif /* CONFIG_SECLUDED_MEMORY */
4486 
4487 static inline void
4488 vm_page_grab_diags()
4489 {
4490 	task_t task = current_task_early();
4491 	if (task == NULL) {
4492 		return;
4493 	}
4494 
4495 	counter_inc(&task->pages_grabbed);
4496 }
4497 
4498 /*
4499  *	vm_page_release:
4500  *
4501  *	Return a page to the free list.
4502  */
4503 
4504 void
4505 vm_page_release(vm_page_t mem, boolean_t page_queues_locked)
4506 {
4507 	vmp_free_list_result_t vmpr;
4508 
4509 	if (page_queues_locked) {
4510 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4511 	} else {
4512 		LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4513 	}
4514 
4515 	assert(vm_page_is_canonical(mem));
4516 
4517 	vm_page_validate_no_references(mem);
4518 
4519 	if (__improbable(mem->vmp_realtime)) {
4520 		if (!page_queues_locked) {
4521 			vm_page_lock_queues();
4522 		}
4523 		if (mem->vmp_realtime) {
4524 			mem->vmp_realtime = false;
4525 			vm_page_realtime_count--;
4526 		}
4527 		if (!page_queues_locked) {
4528 			vm_page_unlock_queues();
4529 		}
4530 	}
4531 
4532 	pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4533 
4534 	vm_free_page_lock_spin();
4535 
4536 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4537 	assert(mem->vmp_busy);
4538 	assert(!mem->vmp_laundry);
4539 	assert(mem->vmp_object == 0);
4540 	assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4541 	assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
4542 	assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
4543 
4544 	vmpr = vm_page_put_list_on_free_queue(mem, page_queues_locked);
4545 	vm_pageout_vminfo.vm_page_pages_freed += 1;
4546 	VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE,
4547 	    DBG_FUNC_NONE, 1, 0, 0, 0);
4548 
4549 	if (vm_page_free_has_any_waiters()) {
4550 		vm_page_free_handle_wakeups_and_unlock(vmpr);
4551 	} else {
4552 		vm_free_page_unlock();
4553 	}
4554 
4555 	VM_CHECK_MEMORYSTATUS;
4556 }
4557 
4558 /*
4559  * This version of vm_page_release() is used only at startup
4560  * when we are single-threaded and pages are being released
4561  * for the first time. Hence, no locking is needed and unnecessary checks are skipped.
4562  * Note: VM_CHECK_MEMORYSTATUS invoked by the caller.
4563  */
4564 void
4565 vm_page_release_startup(vm_page_t mem)
4566 {
4567 	vm_page_put_list_on_free_queue(mem, false);
4568 }
4569 
4570 /*
4571  *	vm_page_wait:
4572  *
4573  *	Wait for a page to become available.
4574  *	If there are plenty of free pages, then we don't sleep.
4575  *
4576  *	Returns:
4577  *		TRUE:  There may be another page, try again
4578  *		FALSE: We were interrupted out of our wait, don't try again
4579  */
4580 
4581 boolean_t
4582 vm_page_wait(
4583 	int     interruptible )
4584 {
4585 	/*
4586 	 *	We can't use vm_page_free_reserved to make this
4587 	 *	determination.  Consider: some thread might
4588 	 *	need to allocate two pages.  The first allocation
4589 	 *	succeeds, the second fails.  After the first page is freed,
4590 	 *	a call to vm_page_wait must really block.
4591 	 */
4592 	kern_return_t   wait_result;
4593 	int             need_wakeup = 0;
4594 	thread_t        cur_thread = current_thread();
4595 	int             is_privileged = cur_thread->options & TH_OPT_VMPRIV;
4596 	event_t         wait_event = NULL;
4597 	event_t         wake_event = (event_t)&vm_page_free_wanted;
4598 
4599 	vm_free_page_lock_spin();
4600 
4601 	{
4602 		if (is_privileged && vm_page_free_count) {
4603 			vm_free_page_unlock();
4604 			return TRUE;
4605 		}
4606 
4607 		if (vm_page_free_count >= vm_page_free_target) {
4608 			vm_free_page_unlock();
4609 			return TRUE;
4610 		}
4611 	}
4612 
4613 	if (is_privileged) {
4614 		if (vm_page_free_wanted_privileged++ == 0) {
4615 			need_wakeup = 1;
4616 		}
4617 		wait_event = (event_t)&vm_page_free_wanted_privileged;
4618 #if CONFIG_SECLUDED_MEMORY
4619 	} else if (secluded_for_apps &&
4620 	    task_can_use_secluded_mem(current_task(), FALSE)) {
4621 #if 00
4622 		/* XXX FBDP: need pageq lock for this... */
4623 		/* XXX FBDP: might wait even if pages available, */
4624 		/* XXX FBDP: hopefully not for too long... */
4625 		if (vm_page_secluded_count > 0) {
4626 			vm_free_page_unlock();
4627 			return TRUE;
4628 		}
4629 #endif
4630 		if (vm_page_free_wanted_secluded++ == 0) {
4631 			need_wakeup = 1;
4632 		}
4633 		wait_event = (event_t)&vm_page_free_wanted_secluded;
4634 #endif /* CONFIG_SECLUDED_MEMORY */
4635 	} else {
4636 		if (vm_page_free_wanted++ == 0) {
4637 			need_wakeup = 1;
4638 		}
4639 		wait_event = (event_t)&vm_page_free_count;
4640 	}
4641 
4642 	/*
4643 	 * We don't do a vm_pageout_scan wakeup if we already have
4644 	 * some waiters because vm_pageout_scan checks for waiters
4645 	 * before it returns and does so behind the vm_page_queue_free_lock,
4646 	 * which we own when we bump the waiter counts.
4647 	 */
4648 
4649 	if (vps_dynamic_priority_enabled) {
4650 		/*
4651 		 * We are waking up vm_pageout_scan here. If it needs
4652 		 * the vm_page_queue_free_lock before we unlock it
4653 		 * we'll end up just blocking and incur an extra
4654 		 * context switch. Could be a perf. issue.
4655 		 */
4656 
4657 
4658 		if (need_wakeup) {
4659 			thread_wakeup(wake_event);
4660 		}
4661 
4662 		/*
4663 		 * LD: This event is going to get recorded every time because
4664 		 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
4665 		 * We just block in that routine.
4666 		 */
4667 		VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4668 		    vm_page_free_wanted_privileged,
4669 		    vm_page_free_wanted,
4670 #if CONFIG_SECLUDED_MEMORY
4671 		    vm_page_free_wanted_secluded,
4672 #else /* CONFIG_SECLUDED_MEMORY */
4673 		    0,
4674 #endif /* CONFIG_SECLUDED_MEMORY */
4675 		    0);
4676 		wait_result =  lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
4677 		    LCK_SLEEP_UNLOCK,
4678 		    wait_event,
4679 		    vm_pageout_scan_thread,
4680 		    interruptible,
4681 		    0);
4682 	} else {
4683 		wait_result = assert_wait(wait_event, interruptible);
4684 
4685 		vm_free_page_unlock();
4686 
4687 		if (need_wakeup) {
4688 			thread_wakeup(wake_event);
4689 		}
4690 
4691 		if (wait_result == THREAD_WAITING) {
4692 			{
4693 				VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4694 				    DBG_VM_PAGE_WAIT_BLOCK,
4695 				    DBG_FUNC_START,
4696 				    vm_page_free_wanted_privileged,
4697 				    vm_page_free_wanted,
4698 #if CONFIG_SECLUDED_MEMORY
4699 				    vm_page_free_wanted_secluded,
4700 #else /* CONFIG_SECLUDED_MEMORY */
4701 				    0,
4702 #endif /* CONFIG_SECLUDED_MEMORY */
4703 				    0
4704 				    );
4705 			}
4706 
4707 			wait_result = thread_block(THREAD_CONTINUE_NULL);
4708 
4709 			{
4710 				VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4711 				    DBG_VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4712 			}
4713 		}
4714 	}
4715 
4716 
4717 	return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4718 }
4719 
4720 /*
4721  *	vm_page_alloc:
4722  *
4723  *	Allocate and return a memory cell associated
4724  *	with this VM object/offset pair.
4725  *
4726  *	Object must be locked.
4727  */
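/*
 * Illustrative use (assumes the caller holds "object" exclusively and can
 * afford to wait for memory):
 *
 *	vm_object_lock(object);
 *	while ((mem = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		VM_PAGE_WAIT();
 *		vm_object_lock(object);
 *	}
 */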
4728 
4729 vm_page_t
4730 vm_page_alloc(
4731 	vm_object_t             object,
4732 	vm_object_offset_t      offset)
4733 {
4734 	vm_page_t       mem;
4735 	int             grab_options;
4736 
4737 	vm_object_lock_assert_exclusive(object);
4738 	grab_options = 0;
4739 #if CONFIG_SECLUDED_MEMORY
4740 	if (object->can_grab_secluded) {
4741 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4742 	}
4743 #endif /* CONFIG_SECLUDED_MEMORY */
4744 
4745 
4746 	mem = vm_page_grab_options(grab_options);
4747 	if (mem == VM_PAGE_NULL) {
4748 		return VM_PAGE_NULL;
4749 	}
4750 
4751 	vm_page_insert(mem, object, offset);
4752 
4753 	return mem;
4754 }
4755 
4756 /*
4757  *	vm_page_free_prepare:
4758  *
4759  *	Removes page from any queue it may be on
4760  *	and disassociates it from its VM object.
4761  *
4762  *	Object and page queues must be locked prior to entry.
4763  */
4764 static void
4765 vm_page_free_prepare(
4766 	vm_page_t       mem)
4767 {
4768 #if CONFIG_SPTM
4769 	/**
4770 	 * SPTM TODO: The pmap should retype frames automatically as mappings to them are
4771 	 *            created and destroyed. In order to catch potential cases where this
4772 	 *            does not happen, add an appropriate assert here. This code should be
4773 	 *            executed on every frame that is about to be released to the VM.
4774 	 */
4775 	const sptm_paddr_t paddr = ((uint64_t)VM_PAGE_GET_PHYS_PAGE(mem)) << PAGE_SHIFT;
4776 	__unused const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
4777 
4778 	assert(frame_type == XNU_DEFAULT);
4779 #endif /* CONFIG_SPTM */
4780 
4781 	vm_page_free_prepare_queues(mem);
4782 	vm_page_free_prepare_object(mem, TRUE);
4783 }
4784 
4785 
4786 void
4787 vm_page_free_prepare_queues(
4788 	vm_page_t       mem)
4789 {
4790 	vm_object_t     m_object;
4791 
4792 	VM_PAGE_CHECK(mem);
4793 
4794 	assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4795 	assert(!mem->vmp_cleaning);
4796 	m_object = VM_PAGE_OBJECT(mem);
4797 
4798 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4799 	if (m_object) {
4800 		vm_object_lock_assert_exclusive(m_object);
4801 	}
4802 	if (mem->vmp_laundry) {
4803 		/*
4804 		 * We may have to free a page while it's being laundered
4805 		 * if we lost its pager (due to a forced unmount, for example).
4806 		 * We need to call vm_pageout_steal_laundry() before removing
4807 		 * the page from its VM object, so that we can remove it
4808 		 * from its pageout queue and adjust the laundry accounting
4809 		 */
4810 		vm_pageout_steal_laundry(mem, TRUE);
4811 	}
4812 
4813 	vm_page_queues_remove(mem, TRUE);
4814 
4815 	if (__improbable(mem->vmp_realtime)) {
4816 		mem->vmp_realtime = false;
4817 		vm_page_realtime_count--;
4818 	}
4819 
4820 	if (VM_PAGE_WIRED(mem)) {
4821 		assert(mem->vmp_wire_count > 0);
4822 
4823 		if (m_object) {
4824 			task_t          owner;
4825 			int             ledger_idx_volatile;
4826 			int             ledger_idx_nonvolatile;
4827 			int             ledger_idx_volatile_compressed;
4828 			int             ledger_idx_nonvolatile_compressed;
4829 			int             ledger_idx_composite;
4830 			int             ledger_idx_external_wired;
4831 			boolean_t       do_footprint;
4832 
4833 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4834 			VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4835 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4836 
4837 			assert(m_object->resident_page_count >=
4838 			    m_object->wired_page_count);
4839 
4840 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4841 				OSAddAtomic(+1, &vm_page_purgeable_count);
4842 				assert(vm_page_purgeable_wired_count > 0);
4843 				OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4844 			}
4845 			if (m_object->internal &&
4846 			    m_object->vo_owner != TASK_NULL &&
4847 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
4848 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
4849 				owner = VM_OBJECT_OWNER(m_object);
4850 				vm_object_ledger_tag_ledgers(
4851 					m_object,
4852 					&ledger_idx_volatile,
4853 					&ledger_idx_nonvolatile,
4854 					&ledger_idx_volatile_compressed,
4855 					&ledger_idx_nonvolatile_compressed,
4856 					&ledger_idx_composite,
4857 					&ledger_idx_external_wired,
4858 					&do_footprint);
4859 				/*
4860 				 * While wired, this page was accounted
4861 				 * as "non-volatile" but it should now
4862 				 * be accounted as "volatile".
4863 				 */
4864 				/* one less "non-volatile"... */
4865 				ledger_debit(owner->ledger,
4866 				    ledger_idx_nonvolatile,
4867 				    PAGE_SIZE);
4868 				if (do_footprint) {
4869 					/* ... and "phys_footprint" */
4870 					ledger_debit(owner->ledger,
4871 					    task_ledgers.phys_footprint,
4872 					    PAGE_SIZE);
4873 				} else if (ledger_idx_composite != -1) {
4874 					ledger_debit(owner->ledger,
4875 					    ledger_idx_composite,
4876 					    PAGE_SIZE);
4877 				}
4878 				/* one more "volatile" */
4879 				ledger_credit(owner->ledger,
4880 				    ledger_idx_volatile,
4881 				    PAGE_SIZE);
4882 			}
4883 		}
4884 		if (vm_page_is_canonical(mem)) {
4885 			vm_page_wire_count--;
4886 		}
4887 
4888 
4889 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4890 		mem->vmp_wire_count = 0;
4891 		assert(!mem->vmp_gobbled);
4892 	} else if (mem->vmp_gobbled) {
4893 		if (vm_page_is_canonical(mem)) {
4894 			vm_page_wire_count--;
4895 		}
4896 		vm_page_gobble_count--;
4897 	}
4898 }
4899 
4900 /*
4901  * like vm_page_init, but we have to preserve fields related to phys page
4902  */
4903 inline static void
4904 vm_page_reset(vm_page_t mem)
4905 {
4906 	*mem = (struct vm_page){
4907 		.vmp_q_state   = VM_PAGE_NOT_ON_Q,
4908 		.vmp_canonical = mem->vmp_canonical,
4909 		.vmp_lopage    = mem->vmp_lopage,
4910 		.vmp_offset    = (vm_object_offset_t)-1,
4911 		.vmp_busy      = true,
4912 #if !XNU_VM_HAS_LINEAR_PAGES_ARRAY
4913 		.vmp_phys_page = mem->vmp_phys_page,
4914 #endif /* !XNU_VM_HAS_LINEAR_PAGES_ARRAY */
4915 	};
4916 	/* ECC information is out of `struct vm_page` and preserved */
4917 }
4918 
4919 void
4920 vm_page_free_prepare_object(
4921 	vm_page_t       mem,
4922 	boolean_t       remove_from_hash)
4923 {
4924 	assert(!mem->vmp_realtime);
4925 	if (mem->vmp_tabled) {
4926 		vm_page_remove(mem, remove_from_hash);  /* clears tabled, object, offset */
4927 	}
4928 	vm_page_wakeup(VM_OBJECT_NULL, mem);               /* clears wanted */
4929 
4930 	if (vm_page_is_private(mem)) {
4931 		vm_page_reset_private(mem);
4932 	}
4933 	if (vm_page_is_canonical(mem)) {
4934 		assert(mem->vmp_pageq.next == 0);
4935 		assert(mem->vmp_pageq.prev == 0);
4936 		assert(mem->vmp_listq.next == 0);
4937 		assert(mem->vmp_listq.prev == 0);
4938 		assert(mem->vmp_specialq.next == 0);
4939 		assert(mem->vmp_specialq.prev == 0);
4940 		assert(mem->vmp_next_m == 0);
4941 
4942 		vm_page_validate_no_references(mem);
4943 
4944 		vm_page_reset(mem);
4945 	}
4946 }
4947 
4948 /*
4949  *	vm_page_free:
4950  *
4951  *	Returns the given page to the free list,
4952  *	disassociating it with any VM object.
4953  *
4954  *	Object and page queues must be locked prior to entry.
4955  */
4956 void
4957 vm_page_free(
4958 	vm_page_t       mem)
4959 {
4960 	vm_page_free_prepare(mem);
4961 
4962 	if (vm_page_is_canonical(mem)) {
4963 		vm_page_release(mem, TRUE);  /* page queues are locked */
4964 	} else {
4965 		vm_page_release_fictitious(mem);
4966 	}
4967 }
4968 
4969 
4970 void
4971 vm_page_free_unlocked(
4972 	vm_page_t       mem,
4973 	boolean_t       remove_from_hash)
4974 {
4975 	vm_page_lockspin_queues();
4976 	vm_page_free_prepare_queues(mem);
4977 	vm_page_unlock_queues();
4978 
4979 	vm_page_free_prepare_object(mem, remove_from_hash);
4980 
4981 	if (vm_page_is_canonical(mem)) {
4982 		vm_page_release(mem, FALSE); /* page queues are not locked */
4983 	} else {
4984 		vm_page_release_fictitious(mem);
4985 	}
4986 }
4987 
4988 
4989 /*
4990  * Free a list of pages.  The list can be up to several hundred pages,
4991  * as blocked up by vm_pageout_scan().
4992  * The big win is not having to take the free list lock once
4993  * per page.
4994  *
4995  * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4996  * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4997  */
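/*
 * Rough shape of the loop below (illustrative): the incoming singly-linked
 * list (vmp_snext) is peeled off in chunks of at most 64 pages; each chunk
 * is prepared and then handed to the global free queue under a single
 * acquisition of the free-page lock, which is what amortizes the locking
 * cost mentioned above.
 */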
4998 void
4999 vm_page_free_list(vm_page_t freeq, boolean_t prepare_object)
5000 {
5001 	vm_page_t       mem;
5002 	vm_page_t       nxt;
5003 	vm_page_t       local_freeq;
5004 	int             pg_count;
5005 
5006 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
5007 	LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
5008 
5009 	while (freeq) {
5010 		pg_count = 0;
5011 		local_freeq = VM_PAGE_NULL;
5012 		mem = freeq;
5013 
5014 		/*
5015 		 * break up the processing into smaller chunks so
5016 		 * that we can 'pipeline' the pages onto the
5017 		 * free list w/o introducing too much
5018 		 * contention on the global free queue lock
5019 		 */
5020 		while (mem && pg_count < 64) {
5021 			assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5022 			    (mem->vmp_q_state == VM_PAGE_IS_WIRED));
5023 			assert(mem->vmp_specialq.next == 0 &&
5024 			    mem->vmp_specialq.prev == 0);
5025 			/*
5026 			 * &&
5027 			 *   mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
5028 			 */
5029 			nxt = mem->vmp_snext;
5030 			mem->vmp_snext = NULL;
5031 			assert(mem->vmp_pageq.prev == 0);
5032 
5033 			if (vm_page_is_canonical(mem)) {
5034 				vm_page_validate_no_references(mem);
5035 			}
5036 
5037 			if (__improbable(mem->vmp_realtime)) {
5038 				vm_page_lock_queues();
5039 				if (mem->vmp_realtime) {
5040 					mem->vmp_realtime = false;
5041 					vm_page_realtime_count--;
5042 				}
5043 				vm_page_unlock_queues();
5044 			}
5045 
5046 			if (prepare_object == TRUE) {
5047 				vm_page_free_prepare_object(mem, TRUE);
5048 			}
5049 
5050 			if (vm_page_is_fictitious(mem)) {
5051 				vm_page_release_fictitious(mem);
5052 			} else {
5053 				/*
5054 				 * IMPORTANT: we can't set the page "free" here
5055 				 * because that would make the page eligible for
5056 				 * a physically-contiguous allocation (see
5057 				 * vm_page_find_contiguous()) right away (we don't
5058 				 * hold the vm_page_queue_free lock).  That would
5059 				 * cause trouble because the page is not actually
5060 				 * in the free queue yet...
5061 				 */
5062 				pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
5063 
5064 				mem->vmp_snext = local_freeq;
5065 				local_freeq = mem;
5066 				pg_count++;
5067 			}
5068 			mem = nxt;
5069 		}
5070 		freeq = mem;
5071 
5072 		if ((mem = local_freeq)) {
5073 			vmp_free_list_result_t vmpr;
5074 
5075 			vm_free_page_lock_spin();
5076 
5077 			vmpr = vm_page_put_list_on_free_queue(mem, false);
5078 			vm_pageout_vminfo.vm_page_pages_freed += pg_count;
5079 			VM_DEBUG_CONSTANT_EVENT(vm_page_release, DBG_VM_PAGE_RELEASE,
5080 			    DBG_FUNC_NONE, pg_count, 0, 0, 0);
5081 
5082 			if (vm_page_free_has_any_waiters()) {
5083 				vm_page_free_handle_wakeups_and_unlock(vmpr);
5084 			} else {
5085 				vm_free_page_unlock();
5086 			}
5087 
5088 			VM_CHECK_MEMORYSTATUS;
5089 		}
5090 	}
5091 }
5092 
5093 
5094 /*
5095  *	vm_page_wire:
5096  *
5097  *	Mark this page as wired down by yet
5098  *	another map, removing it from paging queues
5099  *	as necessary.
5100  *
5101  *	The page's object and the page queues must be locked.
5102  */
5103 
5104 
5105 void
5106 vm_page_wire(
5107 	vm_page_t mem,
5108 	vm_tag_t           tag,
5109 	boolean_t          check_memorystatus)
5110 {
5111 	vm_object_t     m_object;
5112 
5113 	m_object = VM_PAGE_OBJECT(mem);
5114 
5115 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 1);	/* (TEST/DEBUG) */
5116 
5117 	VM_PAGE_CHECK(mem);
5118 	if (m_object) {
5119 		vm_object_lock_assert_exclusive(m_object);
5120 	} else {
5121 		/*
5122 		 * In theory, the page should be in an object before it
5123 		 * gets wired, since we need to hold the object lock
5124 		 * to update some fields in the page structure.
5125 		 * However, some code (i386 pmap, for example) might want
5126 		 * to wire a page before it gets inserted into an object.
5127 		 * That's somewhat OK, as long as nobody else can get to
5128 		 * that page and update it at the same time.
5129 		 */
5130 	}
5131 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5132 	if (!VM_PAGE_WIRED(mem)) {
5133 		if (mem->vmp_laundry) {
5134 			vm_pageout_steal_laundry(mem, TRUE);
5135 		}
5136 
5137 		vm_page_queues_remove(mem, TRUE);
5138 
5139 		assert(mem->vmp_wire_count == 0);
5140 		mem->vmp_q_state = VM_PAGE_IS_WIRED;
5141 
5142 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5143 		if (mem->vmp_unmodified_ro == true) {
5144 			/* Object and PageQ locks are held*/
5145 			mem->vmp_unmodified_ro = false;
5146 			os_atomic_dec(&compressor_ro_uncompressed, relaxed);
5147 			vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(mem), mem->vmp_offset);
5148 		}
5149 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5150 
5151 		if (m_object) {
5152 			task_t          owner;
5153 			int             ledger_idx_volatile;
5154 			int             ledger_idx_nonvolatile;
5155 			int             ledger_idx_volatile_compressed;
5156 			int             ledger_idx_nonvolatile_compressed;
5157 			int             ledger_idx_composite;
5158 			int             ledger_idx_external_wired;
5159 			boolean_t       do_footprint;
5160 
5161 			VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
5162 			VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
5163 			VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
5164 
5165 			assert(m_object->resident_page_count >=
5166 			    m_object->wired_page_count);
5167 			if (m_object->purgable == VM_PURGABLE_VOLATILE) {
5168 				assert(vm_page_purgeable_count > 0);
5169 				OSAddAtomic(-1, &vm_page_purgeable_count);
5170 				OSAddAtomic(1, &vm_page_purgeable_wired_count);
5171 			}
5172 			if (m_object->internal &&
5173 			    m_object->vo_owner != TASK_NULL &&
5174 			    (m_object->purgable == VM_PURGABLE_VOLATILE ||
5175 			    m_object->purgable == VM_PURGABLE_EMPTY)) {
5176 				owner = VM_OBJECT_OWNER(m_object);
5177 				vm_object_ledger_tag_ledgers(
5178 					m_object,
5179 					&ledger_idx_volatile,
5180 					&ledger_idx_nonvolatile,
5181 					&ledger_idx_volatile_compressed,
5182 					&ledger_idx_nonvolatile_compressed,
5183 					&ledger_idx_composite,
5184 					&ledger_idx_external_wired,
5185 					&do_footprint);
5186 				/* less volatile bytes */
5187 				ledger_debit(owner->ledger,
5188 				    ledger_idx_volatile,
5189 				    PAGE_SIZE);
5190 				/* more not-quite-volatile bytes */
5191 				ledger_credit(owner->ledger,
5192 				    ledger_idx_nonvolatile,
5193 				    PAGE_SIZE);
5194 				if (do_footprint) {
5195 					/* more footprint */
5196 					ledger_credit(owner->ledger,
5197 					    task_ledgers.phys_footprint,
5198 					    PAGE_SIZE);
5199 				} else if (ledger_idx_composite != -1) {
5200 					ledger_credit(owner->ledger,
5201 					    ledger_idx_composite,
5202 					    PAGE_SIZE);
5203 				}
5204 			}
5205 
5206 			if (m_object->all_reusable) {
5207 				/*
5208 				 * Wired pages are not counted as "re-usable"
5209 				 * in "all_reusable" VM objects, so nothing
5210 				 * to do here.
5211 				 */
5212 			} else if (mem->vmp_reusable) {
5213 				/*
5214 				 * This page is not "re-usable" when it's
5215 				 * wired, so adjust its state and the
5216 				 * accounting.
5217 				 */
5218 				vm_page_lockconvert_queues();
5219 				vm_object_reuse_pages(m_object,
5220 				    mem->vmp_offset,
5221 				    mem->vmp_offset + PAGE_SIZE_64,
5222 				    FALSE);
5223 			}
5224 		}
5225 		assert(!mem->vmp_reusable);
5226 
5227 		if (vm_page_is_canonical(mem) && !mem->vmp_gobbled) {
5228 			vm_page_wire_count++;
5229 		}
5230 		if (mem->vmp_gobbled) {
5231 			vm_page_gobble_count--;
5232 		}
5233 		mem->vmp_gobbled = FALSE;
5234 
5235 		if (check_memorystatus == TRUE) {
5236 			VM_CHECK_MEMORYSTATUS;
5237 		}
5238 	}
5239 	assert(!mem->vmp_gobbled);
5240 	assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
5241 	mem->vmp_wire_count++;
5242 
5243 
5244 	if (__improbable(mem->vmp_wire_count == 0)) {
5245 		panic("vm_page_wire(%p): wire_count overflow", mem);
5246 	}
5247 	VM_PAGE_CHECK(mem);
5248 }
5249 
5250 /*
5251  *	vm_page_unwire:
5252  *
5253  *	Release one wiring of this page, potentially
5254  *	enabling it to be paged again.
5255  *
5256  *	The page's object and the page queues must be locked.
5257  */
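/*
 * Illustrative sketch of the matching unwire (hypothetical caller, same
 * locking requirements as vm_page_wire() above); passing TRUE for `queueit'
 * asks for the now-unwired page to be placed back on the paging queues:
 *
 *	vm_object_lock(object);
 *	vm_page_lockspin_queues();
 *	vm_page_unwire(mem, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */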
5258 void
5259 vm_page_unwire(
5260 	vm_page_t       mem,
5261 	boolean_t       queueit)
5262 {
5263 	vm_object_t     m_object;
5264 
5265 	m_object = VM_PAGE_OBJECT(mem);
5266 
5267 //	dbgLog(current_thread(), mem->vmp_offset, m_object, 0);	/* (TEST/DEBUG) */
5268 
5269 	VM_PAGE_CHECK(mem);
5270 	assert(VM_PAGE_WIRED(mem));
5271 	assert(mem->vmp_wire_count > 0);
5272 	assert(!mem->vmp_gobbled);
5273 	assert(m_object != VM_OBJECT_NULL);
5274 	vm_object_lock_assert_exclusive(m_object);
5275 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5276 	if (--mem->vmp_wire_count == 0) {
5277 		task_t          owner;
5278 		int             ledger_idx_volatile;
5279 		int             ledger_idx_nonvolatile;
5280 		int             ledger_idx_volatile_compressed;
5281 		int             ledger_idx_nonvolatile_compressed;
5282 		int             ledger_idx_composite;
5283 		int             ledger_idx_external_wired;
5284 		boolean_t       do_footprint;
5285 
5286 		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
5287 
5288 		VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
5289 		VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
5290 		VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
5291 		if (vm_page_is_canonical(mem)) {
5292 			vm_page_wire_count--;
5293 		}
5294 
5295 
5296 		assert(m_object->resident_page_count >=
5297 		    m_object->wired_page_count);
5298 		if (m_object->purgable == VM_PURGABLE_VOLATILE) {
5299 			OSAddAtomic(+1, &vm_page_purgeable_count);
5300 			assert(vm_page_purgeable_wired_count > 0);
5301 			OSAddAtomic(-1, &vm_page_purgeable_wired_count);
5302 		}
5303 		if (m_object->internal &&
5304 		    m_object->vo_owner != TASK_NULL &&
5305 		    (m_object->purgable == VM_PURGABLE_VOLATILE ||
5306 		    m_object->purgable == VM_PURGABLE_EMPTY)) {
5307 			owner = VM_OBJECT_OWNER(m_object);
5308 			vm_object_ledger_tag_ledgers(
5309 				m_object,
5310 				&ledger_idx_volatile,
5311 				&ledger_idx_nonvolatile,
5312 				&ledger_idx_volatile_compressed,
5313 				&ledger_idx_nonvolatile_compressed,
5314 				&ledger_idx_composite,
5315 				&ledger_idx_external_wired,
5316 				&do_footprint);
5317 			/* more volatile bytes */
5318 			ledger_credit(owner->ledger,
5319 			    ledger_idx_volatile,
5320 			    PAGE_SIZE);
5321 			/* less not-quite-volatile bytes */
5322 			ledger_debit(owner->ledger,
5323 			    ledger_idx_nonvolatile,
5324 			    PAGE_SIZE);
5325 			if (do_footprint) {
5326 				/* less footprint */
5327 				ledger_debit(owner->ledger,
5328 				    task_ledgers.phys_footprint,
5329 				    PAGE_SIZE);
5330 			} else if (ledger_idx_composite != -1) {
5331 				ledger_debit(owner->ledger,
5332 				    ledger_idx_composite,
5333 				    PAGE_SIZE);
5334 			}
5335 		}
5336 		assert(!is_kernel_object(m_object));
5337 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
5338 
5339 		if (queueit == TRUE) {
5340 			if (m_object->purgable == VM_PURGABLE_EMPTY) {
5341 				vm_page_deactivate(mem);
5342 			} else {
5343 				vm_page_activate(mem);
5344 			}
5345 		}
5346 
5347 		VM_CHECK_MEMORYSTATUS;
5348 	}
5349 	VM_PAGE_CHECK(mem);
5350 }
5351 
5352 /*
5353  *	vm_page_deactivate:
5354  *
5355  *	Returns the given page to the inactive list,
5356  *	indicating that no physical maps have access
5357  *	to this page.  [Used by the physical mapping system.]
5358  *
5359  *	The page queues must be locked.
5360  */
5361 void
5362 vm_page_deactivate(
5363 	vm_page_t       m)
5364 {
5365 	vm_page_deactivate_internal(m, TRUE);
5366 }
5367 
5368 
5369 void
5370 vm_page_deactivate_internal(
5371 	vm_page_t       m,
5372 	boolean_t       clear_hw_reference)
5373 {
5374 	vm_object_t     m_object;
5375 
5376 	m_object = VM_PAGE_OBJECT(m);
5377 
5378 	VM_PAGE_CHECK(m);
5379 	assert(!is_kernel_object(m_object));
5380 	assert(!vm_page_is_guard(m));
5381 
5382 //	dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6);	/* (TEST/DEBUG) */
5383 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5384 	/*
5385 	 *	This page is no longer very interesting.  If it was
5386 	 *	interesting (active or inactive/referenced), then we
5387 	 *	clear the reference bit and (re)enter it in the
5388 	 *	inactive queue.  Note wired pages should not have
5389 	 *	their reference bit cleared.
5390 	 */
5391 	assert( !(m->vmp_absent && !m->vmp_unusual));
5392 
5393 	if (m->vmp_gobbled) {           /* can this happen? */
5394 		assert( !VM_PAGE_WIRED(m));
5395 
5396 		if (vm_page_is_canonical(m)) {
5397 			vm_page_wire_count--;
5398 		}
5399 		vm_page_gobble_count--;
5400 		m->vmp_gobbled = FALSE;
5401 	}
5402 	/*
5403 	 * if this page is currently on the pageout queue, we can't do the
5404 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5405 	 * and we can't remove it manually since we would need the object lock
5406 	 * (which is not required here) to decrement the activity_in_progress
5407 	 * reference which is held on the object while the page is in the pageout queue...
5408 	 * just let the normal laundry processing proceed
5409 	 */
5410 	if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5411 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5412 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5413 	    VM_PAGE_WIRED(m)) {
5414 		return;
5415 	}
5416 	if (!m->vmp_absent && clear_hw_reference == TRUE) {
5417 		vm_page_lockconvert_queues();
5418 		pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
5419 	}
5420 
5421 	m->vmp_reference = FALSE;
5422 	m->vmp_no_cache = FALSE;
5423 
5424 	if (!VM_PAGE_INACTIVE(m)) {
5425 		vm_page_queues_remove(m, FALSE);
5426 
5427 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5428 		    m->vmp_dirty && m_object->internal &&
5429 		    (m_object->purgable == VM_PURGABLE_DENY ||
5430 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5431 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5432 			vm_page_check_pageable_safe(m);
5433 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5434 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5435 			vm_page_throttled_count++;
5436 		} else {
5437 			if (m_object->named &&
5438 			    os_ref_get_count_raw(&m_object->ref_count) == 1) {
5439 				vm_page_speculate(m, FALSE);
5440 #if DEVELOPMENT || DEBUG
5441 				vm_page_speculative_recreated++;
5442 #endif
5443 			} else {
5444 				vm_page_enqueue_inactive(m, FALSE);
5445 			}
5446 		}
5447 	}
5448 }
5449 
5450 /*
5451  * vm_page_enqueue_cleaned
5452  *
5453  * Put the page on the cleaned queue, mark it cleaned, etc.
5454  * Being on the cleaned queue (i.e. having vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q)
5455  * does ** NOT ** guarantee that the page is clean!
5456  *
5457  * Call with the queues lock held.
5458  */
5459 
5460 void
5461 vm_page_enqueue_cleaned(vm_page_t m)
5462 {
5463 	vm_object_t     m_object;
5464 
5465 	m_object = VM_PAGE_OBJECT(m);
5466 
5467 	assert(!vm_page_is_guard(m));
5468 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5469 	assert(!(m->vmp_absent && !m->vmp_unusual));
5470 
5471 	if (VM_PAGE_WIRED(m)) {
5472 		return;
5473 	}
5474 
5475 	if (m->vmp_gobbled) {
5476 		if (vm_page_is_canonical(m)) {
5477 			vm_page_wire_count--;
5478 		}
5479 		vm_page_gobble_count--;
5480 		m->vmp_gobbled = FALSE;
5481 	}
5482 	/*
5483 	 * if this page is currently on the pageout queue, we can't do the
5484 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5485 	 * and we can't remove it manually since we would need the object lock
5486 	 * (which is not required here) to decrement the activity_in_progress
5487 	 * reference which is held on the object while the page is in the pageout queue...
5488 	 * just let the normal laundry processing proceed
5489 	 */
5490 	if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5491 	    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
5492 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5493 		return;
5494 	}
5495 	vm_page_queues_remove(m, FALSE);
5496 
5497 	vm_page_check_pageable_safe(m);
5498 	vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
5499 	m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
5500 	vm_page_cleaned_count++;
5501 
5502 	vm_page_inactive_count++;
5503 	if (m_object->internal) {
5504 		vm_page_pageable_internal_count++;
5505 	} else {
5506 		vm_page_pageable_external_count++;
5507 	}
5508 	vm_page_add_to_specialq(m, TRUE);
5509 	VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
5510 }
5511 
5512 /*
5513  *	vm_page_activate:
5514  *
5515  *	Put the specified page on the active list (if appropriate).
5516  *
5517  *	The page queues must be locked.
5518  */
5519 
5520 void
5521 vm_page_activate(
5522 	vm_page_t       m)
5523 {
5524 	vm_object_t     m_object;
5525 
5526 	m_object = VM_PAGE_OBJECT(m);
5527 
5528 	VM_PAGE_CHECK(m);
5529 #ifdef  FIXME_4778297
5530 	assert(!is_kernel_object(m_object));
5531 #endif
5532 	assert(!vm_page_is_guard(m));
5533 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5534 	assert( !(m->vmp_absent && !m->vmp_unusual));
5535 
5536 	if (m->vmp_gobbled) {
5537 		assert( !VM_PAGE_WIRED(m));
5538 		if (vm_page_is_canonical(m)) {
5539 			vm_page_wire_count--;
5540 		}
5541 		vm_page_gobble_count--;
5542 		m->vmp_gobbled = FALSE;
5543 	}
5544 	/*
5545 	 * if this page is currently on the pageout queue, we can't do the
5546 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5547 	 * and we can't remove it manually since we would need the object lock
5548 	 * (which is not required here) to decrement the activity_in_progress
5549 	 * reference which is held on the object while the page is in the pageout queue...
5550 	 * just let the normal laundry processing proceed
5551 	 */
5552 	if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5553 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5554 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5555 		return;
5556 	}
5557 
5558 #if DEBUG
5559 	if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
5560 		panic("vm_page_activate: already active");
5561 	}
5562 #endif
5563 
5564 	if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5565 		DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5566 		DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5567 	}
5568 
5569 	/*
5570 	 * A freshly activated page should be promoted in the donation queue.
5571 	 * So we remove it here while preserving its hint and we will enqueue
5572 	 * it again in vm_page_enqueue_active.
5573 	 */
5574 	vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5575 
5576 	if (!VM_PAGE_WIRED(m)) {
5577 		vm_page_check_pageable_safe(m);
5578 		if (!VM_DYNAMIC_PAGING_ENABLED() &&
5579 		    m->vmp_dirty && m_object->internal &&
5580 		    (m_object->purgable == VM_PURGABLE_DENY ||
5581 		    m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5582 		    m_object->purgable == VM_PURGABLE_VOLATILE)) {
5583 			vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5584 			m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5585 			vm_page_throttled_count++;
5586 		} else {
5587 #if CONFIG_SECLUDED_MEMORY
5588 			if (secluded_for_filecache &&
5589 			    vm_page_secluded_target != 0 &&
5590 			    num_tasks_can_use_secluded_mem == 0 &&
5591 			    m_object->eligible_for_secluded &&
5592 			    !m->vmp_realtime) {
5593 				vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5594 				m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5595 				vm_page_secluded_count++;
5596 				VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5597 				vm_page_secluded_count_inuse++;
5598 				assert(!m_object->internal);
5599 //				vm_page_pageable_external_count++;
5600 			} else
5601 #endif /* CONFIG_SECLUDED_MEMORY */
5602 			vm_page_enqueue_active(m, FALSE);
5603 		}
5604 		m->vmp_reference = TRUE;
5605 		m->vmp_no_cache = FALSE;
5606 	}
5607 	VM_PAGE_CHECK(m);
5608 }
5609 
5610 
5611 /*
5612  *      vm_page_speculate:
5613  *
5614  *      Put the specified page on the speculative list (if appropriate).
5615  *
5616  *      The page queues must be locked.
5617  */
5618 void
5619 vm_page_speculate(
5620 	vm_page_t       m,
5621 	boolean_t       new)
5622 {
5623 	struct vm_speculative_age_q     *aq;
5624 	vm_object_t     m_object;
5625 
5626 	m_object = VM_PAGE_OBJECT(m);
5627 
5628 	VM_PAGE_CHECK(m);
5629 	vm_page_check_pageable_safe(m);
5630 
5631 	assert(!vm_page_is_guard(m));
5632 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5633 	assert(!(m->vmp_absent && !m->vmp_unusual));
5634 	assert(m_object->internal == FALSE);
5635 
5636 	/*
5637 	 * if this page is currently on the pageout queue, we can't do the
5638 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5639 	 * and we can't remove it manually since we would need the object lock
5640 	 * (which is not required here) to decrement the activity_in_progress
5641 	 * reference which is held on the object while the page is in the pageout queue...
5642 	 * just let the normal laundry processing proceed
5643 	 */
5644 	if (m->vmp_laundry || !vm_page_is_canonical(m) ||
5645 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5646 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5647 		return;
5648 	}
5649 
5650 	vm_page_queues_remove(m, FALSE);
5651 
5652 	if (!VM_PAGE_WIRED(m)) {
5653 		mach_timespec_t         ts;
5654 		clock_sec_t sec;
5655 		clock_nsec_t nsec;
5656 
5657 		clock_get_system_nanotime(&sec, &nsec);
5658 		ts.tv_sec = (unsigned int) sec;
5659 		ts.tv_nsec = nsec;
5660 
5661 		if (vm_page_speculative_count == 0) {
5662 			speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5663 			speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5664 
5665 			aq = &vm_page_queue_speculative[speculative_age_index];
5666 
5667 			/*
5668 			 * set the timer to begin a new group
5669 			 */
5670 			aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5671 			aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
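			/*
			 * Worked example of the conversion above (the number is
			 * illustrative, not the default): with
			 * vm_pageout_state.vm_page_speculative_q_age_ms at, say, 1500,
			 * tv_sec becomes 1 and tv_nsec becomes 500 * 1000 * NSEC_PER_USEC
			 * (i.e. 500 ms expressed in nanoseconds); ADD_MACH_TIMESPEC()
			 * below then offsets that interval by the current time in `ts'.
			 */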
5672 
5673 			ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5674 		} else {
5675 			aq = &vm_page_queue_speculative[speculative_age_index];
5676 
5677 			if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5678 				speculative_age_index++;
5679 
5680 				if (speculative_age_index > vm_page_max_speculative_age_q) {
5681 					speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5682 				}
5683 				if (speculative_age_index == speculative_steal_index) {
5684 					speculative_steal_index = speculative_age_index + 1;
5685 
5686 					if (speculative_steal_index > vm_page_max_speculative_age_q) {
5687 						speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5688 					}
5689 				}
5690 				aq = &vm_page_queue_speculative[speculative_age_index];
5691 
5692 				if (!vm_page_queue_empty(&aq->age_q)) {
5693 					vm_page_speculate_ageit(aq);
5694 				}
5695 
5696 				aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5697 				aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5698 
5699 				ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5700 			}
5701 		}
5702 		vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5703 		m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5704 		vm_page_speculative_count++;
5705 		vm_page_pageable_external_count++;
5706 
5707 		if (new == TRUE) {
5708 			vm_object_lock_assert_exclusive(m_object);
5709 
5710 			m_object->pages_created++;
5711 #if DEVELOPMENT || DEBUG
5712 			vm_page_speculative_created++;
5713 #endif
5714 		}
5715 	}
5716 	VM_PAGE_CHECK(m);
5717 }
5718 
5719 
5720 /*
5721  * move pages from the specified aging bin to
5722  * the speculative bin that pageout_scan claims from
5723  *
5724  *      The page queues must be locked.
5725  */
5726 void
5727 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5728 {
5729 	struct vm_speculative_age_q     *sq;
5730 	vm_page_t       t;
5731 
5732 	sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5733 
5734 	if (vm_page_queue_empty(&sq->age_q)) {
5735 		sq->age_q.next = aq->age_q.next;
5736 		sq->age_q.prev = aq->age_q.prev;
5737 
5738 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5739 		t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5740 
5741 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5742 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5743 	} else {
5744 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5745 		t->vmp_pageq.next = aq->age_q.next;
5746 
5747 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5748 		t->vmp_pageq.prev = sq->age_q.prev;
5749 
5750 		t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5751 		t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5752 
5753 		sq->age_q.prev = aq->age_q.prev;
5754 	}
5755 	vm_page_queue_init(&aq->age_q);
5756 }
5757 
5758 
5759 void
5760 vm_page_lru(
5761 	vm_page_t       m)
5762 {
5763 	VM_PAGE_CHECK(m);
5764 	assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
5765 	assert(!vm_page_is_guard(m));
5766 
5767 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5768 
5769 	if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5770 		/*
5771 		 * we don't need to do all the other work that
5772 		 * vm_page_queues_remove and vm_page_enqueue_inactive
5773 		 * bring along for the ride
5774 		 */
5775 		assert(!m->vmp_laundry);
5776 		assert(!vm_page_is_private(m));
5777 
5778 		m->vmp_no_cache = FALSE;
5779 
5780 		vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5781 		vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5782 
5783 		return;
5784 	}
5785 	/*
5786 	 * if this page is currently on the pageout queue, we can't do the
5787 	 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5788 	 * and we can't remove it manually since we would need the object lock
5789 	 * (which is not required here) to decrement the activity_in_progress
5790 	 * reference which is held on the object while the page is in the pageout queue...
5791 	 * just let the normal laundry processing proceed
5792 	 */
5793 	if (m->vmp_laundry || vm_page_is_private(m) ||
5794 	    (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5795 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5796 	    VM_PAGE_WIRED(m)) {
5797 		return;
5798 	}
5799 
5800 	m->vmp_no_cache = FALSE;
5801 
5802 	vm_page_queues_remove(m, FALSE);
5803 
5804 	vm_page_enqueue_inactive(m, FALSE);
5805 }
5806 
5807 
5808 void
5809 vm_page_reactivate_all_throttled(void)
5810 {
5811 	vm_page_t       first_throttled, last_throttled;
5812 	vm_page_t       first_active;
5813 	vm_page_t       m;
5814 	int             extra_active_count;
5815 	int             extra_internal_count, extra_external_count;
5816 	vm_object_t     m_object;
5817 
5818 	if (!VM_DYNAMIC_PAGING_ENABLED()) {
5819 		return;
5820 	}
5821 
5822 	extra_active_count = 0;
5823 	extra_internal_count = 0;
5824 	extra_external_count = 0;
5825 	vm_page_lock_queues();
5826 	if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5827 		/*
5828 		 * Switch "throttled" pages to "active".
5829 		 */
5830 		vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5831 			VM_PAGE_CHECK(m);
5832 			assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5833 
5834 			m_object = VM_PAGE_OBJECT(m);
5835 
5836 			extra_active_count++;
5837 			if (m_object->internal) {
5838 				extra_internal_count++;
5839 			} else {
5840 				extra_external_count++;
5841 			}
5842 
5843 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5844 			VM_PAGE_CHECK(m);
5845 			vm_page_add_to_specialq(m, FALSE);
5846 		}
5847 
5848 		/*
5849 		 * Transfer the entire throttled queue to the regular LRU page queues.
5850 		 * We insert it at the head of the active queue, so that these pages
5851 		 * get re-evaluated by the LRU algorithm first, since they've been
5852 		 * completely out of it until now.
5853 		 */
5854 		first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5855 		last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5856 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5857 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5858 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5859 		} else {
5860 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5861 		}
5862 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5863 		first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5864 		last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5865 
5866 #if DEBUG
5867 		printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5868 #endif
5869 		vm_page_queue_init(&vm_page_queue_throttled);
5870 		/*
5871 		 * Adjust the global page counts.
5872 		 */
5873 		vm_page_active_count += extra_active_count;
5874 		vm_page_pageable_internal_count += extra_internal_count;
5875 		vm_page_pageable_external_count += extra_external_count;
5876 		vm_page_throttled_count = 0;
5877 	}
5878 	assert(vm_page_throttled_count == 0);
5879 	assert(vm_page_queue_empty(&vm_page_queue_throttled));
5880 	vm_page_unlock_queues();
5881 }
5882 
5883 
5884 /*
5885  * move pages from the indicated local queue to the global active queue
5886  * it's ok to fail if we're below the hard limit and force == FALSE
5887  * the nolocks == TRUE case is to allow this function to be run on
5888  * the hibernate path
5889  */
5890 
5891 void
5892 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5893 {
5894 	struct vpl      *lq;
5895 	vm_page_t       first_local, last_local;
5896 	vm_page_t       first_active;
5897 	vm_page_t       m;
5898 	uint32_t        count = 0;
5899 
5900 	if (vm_page_local_q == NULL) {
5901 		return;
5902 	}
5903 
5904 	lq = zpercpu_get_cpu(vm_page_local_q, lid);
5905 
5906 	if (nolocks == FALSE) {
5907 		if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5908 			if (!vm_page_trylockspin_queues()) {
5909 				return;
5910 			}
5911 		} else {
5912 			vm_page_lockspin_queues();
5913 		}
5914 
5915 		VPL_LOCK(&lq->vpl_lock);
5916 	}
5917 	if (lq->vpl_count) {
5918 		/*
5919 		 * Switch "local" pages to "active".
5920 		 */
5921 		assert(!vm_page_queue_empty(&lq->vpl_queue));
5922 
5923 		vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5924 			VM_PAGE_CHECK(m);
5925 			vm_page_check_pageable_safe(m);
5926 			assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5927 			assert(!vm_page_is_fictitious(m));
5928 
5929 			if (m->vmp_local_id != lid) {
5930 				panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5931 			}
5932 
5933 			m->vmp_local_id = 0;
5934 			m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5935 			VM_PAGE_CHECK(m);
5936 			vm_page_add_to_specialq(m, FALSE);
5937 			count++;
5938 		}
5939 		if (count != lq->vpl_count) {
5940 			panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5941 		}
5942 
5943 		/*
5944 		 * Transfer the entire local queue to the regular LRU page queues.
5945 		 */
5946 		first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5947 		last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5948 		first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5949 
5950 		if (vm_page_queue_empty(&vm_page_queue_active)) {
5951 			vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5952 		} else {
5953 			first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5954 		}
5955 		vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5956 		first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5957 		last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5958 
5959 		vm_page_queue_init(&lq->vpl_queue);
5960 		/*
5961 		 * Adjust the global page counts.
5962 		 */
5963 		vm_page_active_count += lq->vpl_count;
5964 		vm_page_pageable_internal_count += lq->vpl_internal_count;
5965 		vm_page_pageable_external_count += lq->vpl_external_count;
5966 		lq->vpl_count = 0;
5967 		lq->vpl_internal_count = 0;
5968 		lq->vpl_external_count = 0;
5969 	}
5970 	assert(vm_page_queue_empty(&lq->vpl_queue));
5971 
5972 	if (nolocks == FALSE) {
5973 		VPL_UNLOCK(&lq->vpl_lock);
5974 
5975 		vm_page_balance_inactive(count / 4);
5976 		vm_page_unlock_queues();
5977 	}
5978 }
5979 
5980 /*
5981  *	vm_page_part_zero_fill:
5982  *
5983  *	Zero-fill a part of the page.
5984  */
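/*
 * Illustrative use (hypothetical caller): zeroing the second half of a page
 * would be vm_page_part_zero_fill(m, PAGE_SIZE / 2, PAGE_SIZE / 2), i.e.
 * `m_pa' is the byte offset within the page and `len' the number of bytes.
 */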
5985 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5986 void
5987 vm_page_part_zero_fill(
5988 	vm_page_t       m,
5989 	vm_offset_t     m_pa,
5990 	vm_size_t       len)
5991 {
5992 #if 0
5993 	/*
5994 	 * we don't hold the page queue lock
5995 	 * so this check isn't safe to make
5996 	 */
5997 	VM_PAGE_CHECK(m);
5998 #endif
5999 
6000 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
6001 	pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
6002 #else
6003 	vm_page_t       tmp;
6004 	while (1) {
6005 		tmp = vm_page_grab();
6006 		if (tmp == VM_PAGE_NULL) {
6007 			vm_page_wait(THREAD_UNINT);
6008 			continue;
6009 		}
6010 		break;
6011 	}
6012 	vm_page_zero_fill(
6013 		tmp
6014 		);
6015 	if (m_pa != 0) {
6016 		vm_page_part_copy(m, 0, tmp, 0, m_pa);
6017 	}
6018 	if ((m_pa + len) < PAGE_SIZE) {
6019 		vm_page_part_copy(m, m_pa + len, tmp,
6020 		    m_pa + len, PAGE_SIZE - (m_pa + len));
6021 	}
6022 	vm_page_copy(tmp, m);
6023 	VM_PAGE_FREE(tmp);
6024 #endif
6025 }
6026 
6027 /*!
6028  * @function vm_page_zero_fill
6029  *
6030  * @abstract
6031  * Zero-fill the specified page.
6032  *
6033  * @param m				the page to be zero-filled.
6034  */
6035 void
6036 vm_page_zero_fill(
6037 	vm_page_t       m
6038 	)
6039 {
6040 	int options = 0;
6041 #if 0
6042 	/*
6043 	 * we don't hold the page queue lock
6044 	 * so this check isn't safe to make
6045 	 */
6046 	VM_PAGE_CHECK(m);
6047 #endif
6048 
6049 //	dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0);		/* (BRINGUP) */
6050 	pmap_zero_page_with_options(VM_PAGE_GET_PHYS_PAGE(m), options);
6051 }
6052 
6053 /*
6054  *	vm_page_part_copy:
6055  *
6056  *	copy part of one page to another
6057  *
6058  *	This function is currently only consumed downstream of a
6059  *	vm_map_copy_overwrite(). The implementation has a simpler contract
6060  *	than vm_page_copy() as there's a restricted set of cases that
6061  *	are allowed to be overwriteable. If vm_map_entry_is_overwriteable()
6062  *	is expanded, this function may have to be adjusted.
6063  */
6064 void
6065 vm_page_part_copy(
6066 	vm_page_t       src_m,
6067 	vm_offset_t     src_pa,
6068 	vm_page_t       dst_m,
6069 	vm_offset_t     dst_pa,
6070 	vm_size_t       len)
6071 {
6072 #if 0
6073 	/*
6074 	 * we don't hold the page queue lock
6075 	 * so this check isn't safe to make
6076 	 */
6077 	VM_PAGE_CHECK(src_m);
6078 	VM_PAGE_CHECK(dst_m);
6079 #endif
6080 
6081 	/*
6082 	 * Copying from/into restricted pages is a security issue,
6083 	 * as it would allow the restricted pages' policies to be bypassed.
6084 	 */
6085 	if (vm_page_is_restricted(src_m)) {
6086 		panic("%s: cannot copy from a restricted page", __func__);
6087 	}
6088 
6089 	if (vm_page_is_restricted(dst_m)) {
6090 		panic("%s: cannot copy into a restricted page", __func__);
6091 	}
6092 
6093 
6094 	pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
6095 	    VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
6096 }
6097 
6098 /*
6099  *	vm_page_copy:
6100  *
6101  *	Copy one page to another
6102  */
6103 
6104 int vm_page_copy_cs_validations = 0;
6105 int vm_page_copy_cs_tainted = 0;
6106 
6107 void
6108 vm_page_copy(
6109 	vm_page_t       src_m,
6110 	vm_page_t       dest_m)
6111 {
6112 	vm_object_t     src_m_object;
6113 	int             options = 0;
6114 
6115 	src_m_object = VM_PAGE_OBJECT(src_m);
6116 
6117 #if 0
6118 	/*
6119 	 * we don't hold the page queue lock
6120 	 * so this check isn't safe to make
6121 	 */
6122 	VM_PAGE_CHECK(src_m);
6123 	VM_PAGE_CHECK(dest_m);
6124 #endif
6125 	vm_object_lock_assert_held(src_m_object);
6126 
6127 	/*
6128 	 * Copying from/into restricted pages is a security issue,
6129 	 * as it would allow the restricted pages' policies to be bypassed.
6130 	 */
6131 	if (vm_page_is_restricted(src_m)) {
6132 		panic("%s: cannot copy from a restricted page", __func__);
6133 	}
6134 
6135 	if (vm_page_is_restricted(dest_m)) {
6136 		panic("%s: cannot copy into a restricted page", __func__);
6137 	}
6138 
6139 	if (src_m_object != VM_OBJECT_NULL &&
6140 	    src_m_object->code_signed) {
6141 		/*
6142 		 * We're copying a page from a code-signed object.
6143 		 * Whoever ends up mapping the copy page might care about
6144 		 * the original page's integrity, so let's validate the
6145 		 * source page now.
6146 		 */
6147 		vm_page_copy_cs_validations++;
6148 		vm_page_validate_cs(src_m, PAGE_SIZE, 0);
6149 #if DEVELOPMENT || DEBUG
6150 		DTRACE_VM4(codesigned_copy,
6151 		    vm_object_t, src_m_object,
6152 		    vm_object_offset_t, src_m->vmp_offset,
6153 		    int, src_m->vmp_cs_validated,
6154 		    int, src_m->vmp_cs_tainted);
6155 #endif /* DEVELOPMENT || DEBUG */
6156 	}
6157 
6158 	/*
6159 	 * Propagate the cs_tainted bit to the copy page. Do not propagate
6160 	 * the cs_validated bit.
6161 	 */
6162 	dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
6163 	dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
6164 	if (dest_m->vmp_cs_tainted) {
6165 		vm_page_copy_cs_tainted++;
6166 	}
6167 
6168 
6169 	dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
6170 	pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m), options);
6171 }
6172 
6173 #if MACH_ASSERT
6174 static void
6175 _vm_page_print(
6176 	vm_page_t       p)
6177 {
6178 	printf("vm_page %p: \n", p);
6179 	printf("  pageq: next=%p prev=%p\n",
6180 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
6181 	    (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
6182 	printf("  listq: next=%p prev=%p\n",
6183 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
6184 	    (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
6185 	printf("  next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
6186 	printf("  object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
6187 	printf("  wire_count=%u\n", p->vmp_wire_count);
6188 	printf("  q_state=%u\n", p->vmp_q_state);
6189 
6190 	printf("  %slaundry, %sref, %sgobbled, %sprivate\n",
6191 	    (p->vmp_laundry ? "" : "!"),
6192 	    (p->vmp_reference ? "" : "!"),
6193 	    (p->vmp_gobbled ? "" : "!"),
6194 	    (vm_page_is_private(p) ? "" : "!"));
6195 	printf("  %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
6196 	    (p->vmp_busy ? "" : "!"),
6197 	    (p->vmp_wanted ? "" : "!"),
6198 	    (p->vmp_tabled ? "" : "!"),
6199 	    (vm_page_is_fictitious(p) ? "" : "!"),
6200 	    (p->vmp_pmapped ? "" : "!"),
6201 	    (p->vmp_wpmapped ? "" : "!"));
6202 	printf("  %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
6203 	    (p->vmp_free_when_done ? "" : "!"),
6204 	    (p->vmp_absent ? "" : "!"),
6205 	    (VMP_ERROR_GET(p) ? "" : "!"),
6206 	    (p->vmp_dirty ? "" : "!"),
6207 	    (p->vmp_cleaning ? "" : "!"),
6208 	    (p->vmp_precious ? "" : "!"),
6209 	    (p->vmp_clustered ? "" : "!"));
6210 	printf("  %soverwriting, %srestart, %sunusual\n",
6211 	    (p->vmp_overwriting ? "" : "!"),
6212 	    (p->vmp_restart ? "" : "!"),
6213 	    (p->vmp_unusual ? "" : "!"));
6214 	printf("  cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
6215 	    p->vmp_cs_validated,
6216 	    p->vmp_cs_tainted,
6217 	    p->vmp_cs_nx,
6218 	    (p->vmp_no_cache ? "" : "!"));
6219 
6220 	printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
6221 }
6222 
6223 /*
6224  *	Check that the list of pages is ordered by
6225  *	ascending physical address and has no holes.
6226  */
6227 static int
6228 vm_page_verify_contiguous(
6229 	vm_page_t       pages,
6230 	unsigned int    npages)
6231 {
6232 	vm_page_t               m;
6233 	unsigned int            page_count;
6234 	vm_offset_t             prev_addr;
6235 
6236 	prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
6237 	page_count = 1;
6238 	for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
6239 		if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
6240 			printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
6241 			    m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
6242 			printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
6243 			panic("vm_page_verify_contiguous:  not contiguous!");
6244 		}
6245 		prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
6246 		++page_count;
6247 	}
6248 	if (page_count != npages) {
6249 		printf("pages %p actual count 0x%x but requested 0x%x\n",
6250 		    pages, page_count, npages);
6251 		panic("vm_page_verify_contiguous:  count error");
6252 	}
6253 	return 1;
6254 }
6255 
6256 
6257 /*
6258  *	Check the free lists for proper length etc.
6259  */
6260 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
6261 static unsigned int
6262 vm_page_verify_free_list(
6263 	vm_page_queue_head_t    *vm_page_queue,
6264 	unsigned int    color,
6265 	vm_page_t       look_for_page,
6266 	boolean_t       expect_page)
6267 {
6268 	unsigned int    npages;
6269 	vm_page_t       m;
6270 	vm_page_t       prev_m;
6271 	boolean_t       found_page;
6272 
6273 	if (!vm_page_verify_this_free_list_enabled) {
6274 		return 0;
6275 	}
6276 
6277 	found_page = FALSE;
6278 	npages = 0;
6279 	prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
6280 
6281 	vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
6282 		if (m == look_for_page) {
6283 			found_page = TRUE;
6284 		}
6285 		if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
6286 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
6287 			    color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
6288 		}
6289 		if (!m->vmp_busy) {
6290 			panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
6291 			    color, npages, m);
6292 		}
6293 		if (color != (unsigned int) -1) {
6294 			if (VM_PAGE_GET_COLOR(m) != color) {
6295 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
6296 				    color, npages, m, VM_PAGE_GET_COLOR(m), color);
6297 			}
6298 			if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
6299 				panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
6300 				    color, npages, m, m->vmp_q_state);
6301 			}
6302 		} else {
6303 			if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
6304 				panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
6305 				    npages, m, m->vmp_q_state);
6306 			}
6307 		}
6308 		++npages;
6309 		prev_m = m;
6310 	}
6311 	if (look_for_page != VM_PAGE_NULL) {
6312 		unsigned int other_color;
6313 
6314 		if (expect_page && !found_page) {
6315 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
6316 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
6317 			_vm_page_print(look_for_page);
6318 			for (other_color = 0;
6319 			    other_color < vm_colors;
6320 			    other_color++) {
6321 				if (other_color == color) {
6322 					continue;
6323 				}
6324 				vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
6325 				    other_color, look_for_page, FALSE);
6326 			}
6327 			if (color == (unsigned int) -1) {
6328 				vm_page_verify_free_list(&vm_lopage_queue_free,
6329 				    (unsigned int) -1, look_for_page, FALSE);
6330 			}
6331 			panic("vm_page_verify_free_list(color=%u)", color);
6332 		}
6333 		if (!expect_page && found_page) {
6334 			printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
6335 			    color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
6336 		}
6337 	}
6338 	return npages;
6339 }
6340 
6341 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
6342 static void
6343 vm_page_verify_free_lists( void )
6344 {
6345 	unsigned int    color, npages, nlopages;
6346 	boolean_t       toggle = TRUE;
6347 
6348 	if (!vm_page_verify_all_free_lists_enabled) {
6349 		return;
6350 	}
6351 
6352 	npages = 0;
6353 
6354 	vm_free_page_lock();
6355 
6356 	if (vm_page_verify_this_free_list_enabled == TRUE) {
6357 		/*
6358 		 * This variable has been set globally for extra checking of
6359 		 * each free list Q. Since we didn't set it, we don't own it
6360 		 * and we shouldn't toggle it.
6361 		 */
6362 		toggle = FALSE;
6363 	}
6364 
6365 	if (toggle == TRUE) {
6366 		vm_page_verify_this_free_list_enabled = TRUE;
6367 	}
6368 
6369 	for (color = 0; color < vm_colors; color++) {
6370 		npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
6371 		    color, VM_PAGE_NULL, FALSE);
6372 	}
6373 	nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
6374 	    (unsigned int) -1,
6375 	    VM_PAGE_NULL, FALSE);
6376 	if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
6377 		panic("vm_page_verify_free_lists:  "
6378 		    "npages %u free_count %d nlopages %u lo_free_count %u",
6379 		    npages, vm_page_free_count, nlopages, vm_lopage_free_count);
6380 	}
6381 
6382 	if (toggle == TRUE) {
6383 		vm_page_verify_this_free_list_enabled = FALSE;
6384 	}
6385 
6386 	vm_free_page_unlock();
6387 }
6388 
6389 #endif  /* MACH_ASSERT */
6390 
6391 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
6392 
6393 /*
6394  *	CONTIGUOUS PAGE ALLOCATION AND HELPER FUNCTIONS
6395  */
6396 
6397 /*
6398  * Helper function used to determine if a page can be relocated.
6399  * A page is relocatable if it is in a stable, non-transient state
6400  * and if it is compatible with the reason for the relocation.
6401  * The page queue lock must be held, and the object lock too, if the page
6402  * is in an object.
6403  */
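/*
 * Sketch of the expected calling environment (illustrative only, with a
 * hypothetical object `object' and page `m'):
 *
 *	vm_page_lock_queues();
 *	vm_object_lock(object);		// only needed if the page is tabled in an object
 *	if (vm_page_is_relocatable(m, VM_RELOCATE_REASON_CONTIGUOUS)) {
 *		// safe to attempt to steal or relocate the page
 *	}
 */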
6404 boolean_t
6405 vm_page_is_relocatable(vm_page_t m, vm_relocate_reason_t reloc_reason)
6406 {
6407 
6408 	if (VM_PAGE_WIRED(m) || m->vmp_gobbled || m->vmp_laundry || m->vmp_wanted ||
6409 	    m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
6410 		/*
6411 		 * Page is in a transient state
6412 		 * or a state we don't want to deal with.
6413 		 */
6414 		return FALSE;
6415 	} else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
6416 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
6417 	    (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
6418 	    (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6419 		/*
6420 		 * Page needs to be on one of our queues (other then the pageout or special
6421 		 * free queues) or it needs to belong to the compressor pool (which is now
6422 		 * indicated by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out from
6423 		 * the check for VM_PAGE_NOT_ON_Q) in order for it to be stable behind the
6424 		 * locks we hold at this point...
6425 		 */
6426 		return FALSE;
6427 	} else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) &&
6428 	    (!m->vmp_tabled || m->vmp_busy)) {
6429 		/*
6430 		 * pages on the free list are always 'busy'
6431 		 * so we couldn't test for 'busy' in the check
6432 		 * for the transient states... pages that are
6433 		 * 'free' are never 'tabled', so we also couldn't
6434 		 * test for 'tabled'.  So we check here to make
6435 		 * sure that a non-free page is not busy and is
6436 		 * tabled on an object...
6437 		 */
6438 		return FALSE;
6439 	}
6440 
6441 	/*
6442 	 * Lastly, check the page against the relocation reason; the page may
6443 	 * be in a relocatable state, but not be a page we WANT to relocate for
6444 	 * the caller's use case.
6445 	 */
6446 	switch (reloc_reason) {
6447 	case VM_RELOCATE_REASON_CONTIGUOUS:
6448 	{
6449 		break;
6450 	}
6451 
6452 	default:
6453 	{
6454 		panic("Invalid relocation reason %u", reloc_reason);
6455 		__builtin_unreachable();
6456 	}
6457 	}
6458 
6459 	return TRUE;
6460 }
6461 
6462 /*
6463  * Free up the given page by possibly relocating its contents to a new page.
6464  * If the page is in an object, the object lock must be held.
6465  *
6466  * Whether or not the page is considered relocatable is contingent on the
6467  * reason it is being relocated.
6468  *
6469  * Return the new page back to the caller if requested, as done in
6470  * vm_object_iopl_wire_full().
6471  *
6472  * The VM page queues lock must also be held.
6473  */
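/*
 * Hypothetical call sketch (with the locks described above already held);
 * both the compressed-page counter and the out-parameter for the substitute
 * page are optional:
 *
 *	int           compressed = 0;
 *	vm_page_t     new_m = VM_PAGE_NULL;
 *	kern_return_t kr;
 *
 *	kr = vm_page_relocate(m1, &compressed,
 *	    VM_RELOCATE_REASON_CONTIGUOUS, &new_m);
 *	if (kr == KERN_SUCCESS && new_m != VM_PAGE_NULL) {
 *		// m1's contents now live in new_m; m1 is ready for the free list
 *	}
 */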
6474 kern_return_t
6475 vm_page_relocate(
6476 	vm_page_t            m1,
6477 	int                 *compressed_pages,
6478 	vm_relocate_reason_t reloc_reason,
6479 	vm_page_t*           new_page)
6480 {
6481 	int refmod = 0;
6482 	vm_object_t object = VM_PAGE_OBJECT(m1);
6483 	kern_return_t kr;
6484 
6485 	switch (reloc_reason) {
6486 	case VM_RELOCATE_REASON_CONTIGUOUS:
6487 	{
6488 		break;
6489 	}
6490 	default:
6491 	{
6492 		panic("Unrecognized relocation reason %u\n", reloc_reason);
6493 		break;
6494 	}
6495 	}
6496 
6497 	if (object == VM_OBJECT_NULL) {
6498 		return KERN_FAILURE;
6499 	}
6500 
6501 	vm_object_lock_assert_held(object);
6502 
6503 	if (VM_PAGE_WIRED(m1) ||
6504 	    m1->vmp_gobbled ||
6505 	    m1->vmp_laundry ||
6506 	    m1->vmp_wanted ||
6507 	    m1->vmp_cleaning ||
6508 	    m1->vmp_overwriting ||
6509 	    m1->vmp_free_when_done ||
6510 	    m1->vmp_busy ||
6511 	    m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
6512 		return KERN_FAILURE;
6513 	}
6514 
6515 	boolean_t disconnected = FALSE;
6516 	boolean_t reusable = FALSE;
6517 
6518 	/*
6519 	 * Pages from reusable objects can be reclaimed directly.
6520 	 */
6521 	if ((m1->vmp_reusable || object->all_reusable) &&
6522 	    m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q && !m1->vmp_dirty &&
6523 	    !m1->vmp_reference) {
6524 		/*
6525 		 * reusable page...
6526 		 */
6527 
6528 		refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6529 		disconnected = TRUE;
6530 		if (refmod == 0) {
6531 			/*
6532 			 * ... not reused: can steal without relocating contents.
6533 			 */
6534 			reusable = TRUE;
6535 		}
6536 	}
6537 
6538 	if ((m1->vmp_pmapped && !reusable) || m1->vmp_dirty || m1->vmp_precious) {
6539 		vm_object_offset_t offset;
6540 		int copy_page_options = 0;
6541 		int grab_options = VM_PAGE_GRAB_Q_LOCK_HELD;
6542 
6543 		/* page is not reusable, we need to allocate a new page
6544 		 * and move its contents there.
6545 		 */
6546 		vm_page_t m2 = vm_page_grab_options(grab_options);
6547 
6548 		if (m2 == VM_PAGE_NULL) {
6549 			return KERN_RESOURCE_SHORTAGE;
6550 		}
6551 
6552 		if (!disconnected) {
6553 			if (m1->vmp_pmapped) {
6554 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6555 			} else {
6556 				refmod = 0;
6557 			}
6558 		}
6559 
6560 		/* copy the page's contents */
6561 		pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2), copy_page_options);
6562 
6563 		/* copy the page's state */
6564 		assert(!VM_PAGE_WIRED(m1));
6565 		assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6566 		assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6567 		assert(!m1->vmp_laundry);
6568 		m2->vmp_reference = m1->vmp_reference;
6569 		assert(!m1->vmp_gobbled);
6570 		m2->vmp_no_cache = m1->vmp_no_cache;
6571 		m2->vmp_xpmapped = 0;
6572 		assert(!m1->vmp_busy);
6573 		assert(!m1->vmp_wanted);
6574 		assert(vm_page_is_canonical(m1));
6575 		m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6576 		m2->vmp_wpmapped = m1->vmp_wpmapped;
6577 		assert(!m1->vmp_free_when_done);
6578 		m2->vmp_absent = m1->vmp_absent;
6579 		m2->vmp_error = VMP_ERROR_GET(m1);
6580 		m2->vmp_dirty = m1->vmp_dirty;
6581 		assert(!m1->vmp_cleaning);
6582 		m2->vmp_precious = m1->vmp_precious;
6583 		m2->vmp_clustered = m1->vmp_clustered;
6584 		assert(!m1->vmp_overwriting);
6585 		m2->vmp_restart = m1->vmp_restart;
6586 		m2->vmp_unusual = m1->vmp_unusual;
6587 		m2->vmp_cs_validated = m1->vmp_cs_validated;
6588 		m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6589 		m2->vmp_cs_nx = m1->vmp_cs_nx;
6590 
6591 		m2->vmp_realtime = m1->vmp_realtime;
6592 		m1->vmp_realtime = false;
6593 
6594 		/*
6595 		 * If m1 had really been reusable,
6596 		 * we would have just stolen it, so
6597 		 * let's not propagate its "reusable"
6598 		 * bit and assert that m2 is not
6599 		 * marked as "reusable".
6600 		 */
6601 		// m2->vmp_reusable	= m1->vmp_reusable;
6602 		assert(!m2->vmp_reusable);
6603 
6604 		// assert(!m1->vmp_lopage);
6605 
6606 		if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6607 			m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6608 			/*
6609 			 * We just grabbed m2 up above and so it isn't
6610 			 * going to be on any special Q as yet and so
6611 			 * we don't need to 'remove' it from the special
6612 			 * queues. Just resetting the state should be enough.
6613 			 */
6614 			m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6615 		}
6616 
6617 		/*
6618 		 * page may need to be flushed if
6619 		 * it is marshalled into a UPL
6620 		 * that is going to be used by a device
6621 		 * that doesn't support coherency
6622 		 */
6623 		m2->vmp_written_by_kernel = TRUE;
6624 
6625 		/*
6626 		 * make sure we clear the ref/mod state
6627 		 * from the pmap layer... else we risk
6628 		 * inheriting state from the last time
6629 		 * this page was used...
6630 		 */
6631 		pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2),
6632 		    VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6633 
6634 		if (refmod & VM_MEM_REFERENCED) {
6635 			m2->vmp_reference = TRUE;
6636 		}
6637 		if (refmod & VM_MEM_MODIFIED) {
6638 			SET_PAGE_DIRTY(m2, TRUE);
6639 		}
6640 		offset = m1->vmp_offset;
6641 
6642 		/*
6643 		 * completely cleans up the state
6644 		 * of the page so that it is ready
6645 		 * to be put onto the free list, or
6646 		 * for this purpose it looks like it
6647 		 * just came off of the free list
6648 		 */
6649 		vm_page_free_prepare(m1);
6650 
6651 		/*
6652 		 * now put the substitute page on the object
6653 		 */
6654 		vm_page_insert_internal(m2, object, offset, VM_KERN_MEMORY_NONE, TRUE,
6655 		    TRUE, FALSE, FALSE, NULL);
6656 
6657 		/*
6658 		 * Return the relocated vm_page_t if the caller wants to know.
6659 		 */
6660 		if (new_page) {
6661 			*new_page = m2;
6662 		}
6663 
6664 		if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6665 			m2->vmp_pmapped = TRUE;
6666 			m2->vmp_wpmapped = TRUE;
6667 
6668 			kr = pmap_enter_check(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6669 			    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, TRUE);
6670 
6671 			assert(kr == KERN_SUCCESS);
6672 
6673 			if (compressed_pages) {
6674 				++*compressed_pages;
6675 			}
6676 		} else {
6677 			/* relocated page was not used by the compressor
6678 			 * put it on either the active or inactive lists */
6679 			if (m2->vmp_reference) {
6680 				vm_page_activate(m2);
6681 			} else {
6682 				vm_page_deactivate(m2);
6683 			}
6684 		}
6685 
6686 		/* unset the busy flag (pages on the free queue are busy) and notify if wanted */
6687 		vm_page_wakeup_done(object, m2);
6688 
6689 		return KERN_SUCCESS;
6690 	} else {
6691 		assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6692 
6693 		/*
6694 		 * completely cleans up the state
6695 		 * of the page so that it is ready
6696 		 * to be put onto the free list, or
6697 		 * for this purpose it looks like it
6698 		 * just came off of the free list
6699 		 */
6700 		vm_page_free_prepare(m1);
6701 
6702 		/* we're done here */
6703 		return KERN_SUCCESS;
6704 	}
6705 
6706 	return KERN_FAILURE;
6707 }
6708 
6709 /*
6710  *	CONTIGUOUS PAGE ALLOCATION
6711  *
6712  *	Find a region large enough to contain at least n pages
6713  *	of contiguous physical memory.
6714  *
6715  *	This is done by traversing the vm_page_t array in a linear fashion
6716  *	we assume that the vm_page_t array has the available physical pages in an
6717  *	ordered, ascending list... this is currently true of all our implementations
6718  *      and must remain so... there can be 'holes' in the array...  we also can
6719  *	no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
6720  *      which used to happen via 'vm_page_convert'... that function was no longer
6721  *      being called and was removed...
6722  *
6723  *	The basic flow consists of stabilizing some of the interesting state of
6724  *	a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
6725  *	sweep at the beginning of the array looking for pages that meet our criteria
6726  *	for a 'stealable' page... currently we are pretty conservative... if the page
6727  *	meets this criteria and is physically contiguous to the previous page in the 'run'
6728  *      we keep developing it.  If we hit a page that doesn't fit, we reset our state
6729  *	and start to develop a new run... if at this point we've already considered
6730  *      at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
6731  *	and mutex_pause (which will yield the processor), to keep the latency low w/r
6732  *	to other threads trying to acquire free pages (or move pages from q to q),
6733  *	and then continue from the spot we left off... we only make 1 pass through the
6734  *	array.  Once we have a 'run' that is long enough, we'll go into the loop
6735  *      which steals the pages from the queues they're currently on... pages on the free
6736  *	queue can be stolen directly... pages that are on any of the other queues
6737  *	must be removed from the object they are tabled on... this requires taking the
6738  *      object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
6739  *	or if the state of the page behind the vm_object lock is no longer viable, we'll
6740  *	dump the pages we've currently stolen back to the free list, and pick up our
6741  *	scan from the point where we aborted the 'current' run.
6742  *
6743  *
6744  *	Requirements:
6745  *		- neither vm_page_queue nor vm_free_list lock can be held on entry
6746  *
6747  *	Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
6748  *
6749  * Algorithm:
6750  */
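/*
 * Condensed sketch of the scan described above (illustrative pseudo-code;
 * the real loop below additionally handles alignment masks, yielding after
 * MAX_CONSIDERED_BEFORE_YIELD pages, and re-validating state after dropping
 * and re-taking the locks):
 *
 *	for (page_idx = last_idx;
 *	    npages < contig_pages && page_idx < vm_pages_count;
 *	    page_idx++) {
 *		m = vm_page_get(page_idx);
 *		if (vm_page_is_relocatable(m, VM_RELOCATE_REASON_CONTIGUOUS) &&
 *		    VM_PAGE_GET_PHYS_PAGE(m) == prevcontaddr + 1) {
 *			npages++;		// run keeps growing
 *		} else {
 *			RESET_STATE_OF_RUN();	// run broken; a new one may start here
 *		}
 *		prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
 *	}
 *	// with a long-enough run: steal free pages directly, and relocate
 *	// in-use pages via vm_page_relocate() under a 'try' of the object lock
 */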
6751 
6752 #define MAX_CONSIDERED_BEFORE_YIELD     1000
6753 
6754 
6755 #define RESET_STATE_OF_RUN()    \
6756 	MACRO_BEGIN             \
6757 	prevcontaddr = -2;      \
6758 	start_pnum = -1;        \
6759 	free_considered = 0;    \
6760 	substitute_needed = 0;  \
6761 	npages = 0;             \
6762 	MACRO_END
6763 
6764 /*
6765  * Can we steal in-use (i.e. not free) pages when searching for
6766  * physically-contiguous pages ?
6767  */
6768 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
6769 
6770 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
6771 #if DEBUG
6772 int vm_page_find_contig_debug = 0;
6773 #endif
6774 
6775 static vm_page_t
6776 vm_page_find_contiguous(
6777 	unsigned int    contig_pages,
6778 	ppnum_t         max_pnum,
6779 	ppnum_t         pnum_mask,
6780 	boolean_t       wire,
6781 	int             flags)
6782 {
6783 	vm_page_t       m = NULL;
6784 	ppnum_t         prevcontaddr = 0;
6785 	ppnum_t         start_pnum = 0;
6786 	unsigned int    npages = 0, considered = 0, scanned = 0;
6787 	unsigned int    page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
6788 	unsigned int    idx_last_contig_page_found = 0;
6789 	int             free_considered = 0, free_available = 0;
6790 	int             substitute_needed = 0;
6791 	int             zone_gc_called = 0;
6792 	boolean_t       wrapped;
6793 	kern_return_t   kr;
6794 #if DEBUG
6795 	clock_sec_t     tv_start_sec = 0, tv_end_sec = 0;
6796 	clock_usec_t    tv_start_usec = 0, tv_end_usec = 0;
6797 #endif
6798 
6799 	int             yielded = 0;
6800 	int             dumped_run = 0;
6801 	int             stolen_pages = 0;
6802 	int             compressed_pages = 0;
6803 
6804 
6805 	if (contig_pages == 0) {
6806 		return VM_PAGE_NULL;
6807 	}
6808 
6809 full_scan_again:
6810 
6811 #if MACH_ASSERT
6812 	vm_page_verify_free_lists();
6813 #endif
6814 #if DEBUG
6815 	clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
6816 #endif
6817 	PAGE_REPLACEMENT_ALLOWED(TRUE);
6818 
6819 #if XNU_VM_HAS_DELAYED_PAGES
6820 	/*
6821 	 * If there are still delayed pages, try to free up some that match.
6822 	 */
6823 	if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
6824 		vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
6825 	}
6826 #endif /* XNU_VM_HAS_DELAYED_PAGES */
6827 
6828 	vm_page_lock_queues();
6829 	vm_free_page_lock();
6830 
6831 	RESET_STATE_OF_RUN();
6832 
6833 	scanned = 0;
6834 	considered = 0;
6835 	free_available = vm_page_free_count - vm_page_free_reserved;
6836 
6837 	wrapped = FALSE;
6838 
6839 	if (flags & KMA_LOMEM) {
6840 		idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
6841 	} else {
6842 		idx_last_contig_page_found =  vm_page_find_contiguous_last_idx;
6843 	}
6844 
6845 	orig_last_idx = idx_last_contig_page_found;
6846 	last_idx = orig_last_idx;
6847 
6848 	for (page_idx = last_idx, start_idx = last_idx;
6849 	    npages < contig_pages && page_idx < vm_pages_count;
6850 	    page_idx++) {
6851 retry:
6852 		if (wrapped &&
6853 		    npages == 0 &&
6854 		    page_idx >= orig_last_idx) {
6855 			/*
6856 			 * We're back where we started and we haven't
6857 			 * found any suitable contiguous range.  Let's
6858 			 * give up.
6859 			 */
6860 			break;
6861 		}
6862 		scanned++;
6863 		m = vm_page_get(page_idx);
6864 
6865 		assert(vm_page_is_canonical(m));
6866 
6867 		if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
6868 			/* no more low pages... */
6869 			break;
6870 		}
6871 		if (!npages && ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
6872 			/*
6873 			 * not aligned
6874 			 */
6875 			RESET_STATE_OF_RUN();
6876 		} else if (!vm_page_is_relocatable(m,
6877 		    VM_RELOCATE_REASON_CONTIGUOUS)) {
6878 			/*
6879 			 * page is not relocatable */
6880 			RESET_STATE_OF_RUN();
6881 		} else {
6882 			if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
6883 				if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
6884 					RESET_STATE_OF_RUN();
6885 					goto did_consider;
6886 				} else {
6887 					npages = 1;
6888 					start_idx = page_idx;
6889 					start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6890 				}
6891 			} else {
6892 				npages++;
6893 			}
6894 			prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6895 
6896 			VM_PAGE_CHECK(m);
6897 			if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6898 				free_considered++;
6899 			} else {
6900 				/*
6901 				 * This page is not free.
6902 				 * If we can't steal used pages,
6903 				 * we have to give up this run
6904 				 * and keep looking.
6905 				 * Otherwise, we might need to
6906 				 * move the contents of this page
6907 				 * into a substitute page.
6908 				 */
6909 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6910 				if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6911 					substitute_needed++;
6912 				}
6913 #else
6914 				RESET_STATE_OF_RUN();
6915 #endif
6916 			}
6917 
6918 			if ((free_considered + substitute_needed) > free_available) {
6919 				/*
6920 				 * if we let this run continue
6921 				 * we will end up dropping the vm_page_free_count
6922 				 * below the reserve limit... we need to abort
6923 				 * this run, but we can at least re-consider this
6924 				 * page... thus the jump back to 'retry'
6925 				 */
6926 				RESET_STATE_OF_RUN();
6927 
6928 				if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6929 					considered++;
6930 					goto retry;
6931 				}
6932 				/*
6933 				 * free_available == 0
6934 				 * so can't consider any free pages... if
6935 				 * we went to retry in this case, we'd
6936 				 * get stuck looking at the same page
6937 				 * w/o making any forward progress
6938 				 * we also want to take this path if we've already
6939 				 * reached our limit that controls the lock latency
6940 				 */
6941 			}
6942 		}
6943 did_consider:
6944 		if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6945 			PAGE_REPLACEMENT_ALLOWED(FALSE);
6946 
6947 			vm_free_page_unlock();
6948 			vm_page_unlock_queues();
6949 
6950 			mutex_pause(0);
6951 
6952 			PAGE_REPLACEMENT_ALLOWED(TRUE);
6953 
6954 			vm_page_lock_queues();
6955 			vm_free_page_lock();
6956 
6957 			RESET_STATE_OF_RUN();
6958 			/*
6959 			 * reset our free page limit since we
6960 			 * dropped the lock protecting the vm_page_free_queue
6961 			 */
6962 			free_available = vm_page_free_count - vm_page_free_reserved;
6963 			considered = 0;
6964 
6965 			yielded++;
6966 
6967 			goto retry;
6968 		}
6969 		considered++;
6970 	} /* main for-loop end */
6971 
6972 	m = VM_PAGE_NULL;
6973 
6974 	if (npages != contig_pages) {
6975 		if (!wrapped) {
6976 			/*
6977 			 * We didn't find a contiguous range but we didn't
6978 			 * start from the very first page.
6979 			 * Start again from the very first page.
6980 			 */
6981 			RESET_STATE_OF_RUN();
6982 			if (flags & KMA_LOMEM) {
6983 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = 0;
6984 			} else {
6985 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6986 			}
6987 			last_idx = 0;
6988 			page_idx = last_idx;
6989 			wrapped = TRUE;
6990 			goto retry;
6991 		}
6992 		vm_free_page_unlock();
6993 	} else {
6994 		vm_page_t m1;
6995 		unsigned int cur_idx;
6996 		unsigned int tmp_start_idx;
6997 		vm_object_t locked_object = VM_OBJECT_NULL;
6998 		boolean_t abort_run = FALSE;
6999 
7000 		assert(page_idx - start_idx == contig_pages);
7001 
7002 		tmp_start_idx = start_idx;
7003 
7004 		/*
7005 		 * first pass through to pull the free pages
7006 		 * off of the free queue so that in case we
7007 		 * need substitute pages, we won't grab any
7008 		 * of the free pages in the run... we'll clear
7009 		 * the 'free' bit in the 2nd pass, and even in
7010 		 * an abort_run case, we'll collect all of the
7011 		 * free pages in this run and return them to the free list
7012 		 */
7013 		while (start_idx < page_idx) {
7014 			m1 = vm_page_get(start_idx++);
7015 
7016 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
7017 			assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
7018 #endif
7019 
7020 			if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
7021 #if MACH_ASSERT
7022 				unsigned int color = VM_PAGE_GET_COLOR(m1);
7023 				vm_memory_class_t memory_class = vm_page_get_memory_class(m1);
7024 
7025 				if (memory_class == VM_MEMORY_CLASS_REGULAR) {
7026 					vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
7027 				}
7028 #endif
7029 				vm_page_steal_free_page(m1, VM_REMOVE_REASON_USE);
7030 #if MACH_ASSERT
7031 				if (memory_class == VM_MEMORY_CLASS_REGULAR) {
7032 					vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
7033 				}
7034 #endif
7035 				/*
7036 				 * Clear the "free" bit so that this page
7037 				 * does not get considered for another
7038 				 * concurrent physically-contiguous allocation.
7039 				 */
7040 				m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
7041 				assert(m1->vmp_busy);
7042 			}
7043 		}
7044 		if (flags & KMA_LOMEM) {
7045 			vm_page_lomem_find_contiguous_last_idx = page_idx;
7046 		} else {
7047 			vm_page_find_contiguous_last_idx = page_idx;
7048 		}
7049 
7050 		/*
7051 		 * we can drop the free queue lock at this point since
7052 		 * we've pulled any 'free' candidates off of the list
7053 		 * we need it dropped so that we can do a vm_page_grab
7054 		 * when substituting for pmapped/dirty pages
7055 		 */
7056 		vm_free_page_unlock();
7057 
7058 		start_idx = tmp_start_idx;
7059 		cur_idx = page_idx - 1;
7060 
7061 		while (start_idx++ < page_idx) {
7062 			/*
7063 			 * must go through the list from back to front
7064 			 * so that the page list is created in the
7065 			 * correct order - low -> high phys addresses
7066 			 */
7067 			m1 = vm_page_get(cur_idx--);
7068 
7069 			if (m1->vmp_object == 0) {
7070 				/*
7071 				 * page has already been removed from
7072 				 * the free list in the 1st pass
7073 				 */
7074 				assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
7075 				assert(m1->vmp_offset == (vm_object_offset_t) -1);
7076 				assert(m1->vmp_busy);
7077 				assert(!m1->vmp_wanted);
7078 				assert(!m1->vmp_laundry);
7079 			} else {
7080 				/*
7081 				 * try to relocate/steal the page
7082 				 */
7083 				if (abort_run == TRUE) {
7084 					continue;
7085 				}
7086 
7087 				assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
7088 
7089 				vm_object_t object = VM_PAGE_OBJECT(m1);
7090 
7091 				if (object != locked_object) {
7092 					if (locked_object) {
7093 						vm_object_unlock(locked_object);
7094 						locked_object = VM_OBJECT_NULL;
7095 					}
7096 					if (vm_object_lock_try(object)) {
7097 						locked_object = object;
7098 					} else {
7099 						/* object must be locked to relocate its pages */
7100 						tmp_start_idx = cur_idx;
7101 						abort_run = TRUE;
7102 						continue;
7103 					}
7104 				}
7105 
7106 				kr = vm_page_relocate(m1, &compressed_pages, VM_RELOCATE_REASON_CONTIGUOUS, NULL);
7107 				if (kr != KERN_SUCCESS) {
7108 					if (locked_object) {
7109 						vm_object_unlock(locked_object);
7110 						locked_object = VM_OBJECT_NULL;
7111 					}
7112 					tmp_start_idx = cur_idx;
7113 					abort_run = TRUE;
7114 					continue;
7115 				}
7116 
7117 				stolen_pages++;
7118 			}
7119 
7120 			/* m1 is ours at this point ... */
7121 
7122 			if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
7123 				/*
7124 				 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
7125 				 * change it for pages marked as used-by-compressor.
7126 				 */
7127 				vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
7128 			}
7129 			VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
7130 			m1->vmp_snext = m;
7131 			m = m1;
7132 		}
7133 
7134 		if (locked_object) {
7135 			vm_object_unlock(locked_object);
7136 			locked_object = VM_OBJECT_NULL;
7137 		}
7138 
7139 		if (abort_run == TRUE) {
7140 			/*
7141 			 * want the index of the last
7142 			 * page in this run that was
7143 			 * successfully 'stolen', so back
7144 			 * it up 1 for the auto-decrement on use
7145 			 * and 1 more to bump back over this page
7146 			 */
7147 			page_idx = tmp_start_idx + 2;
7148 			if (page_idx >= vm_pages_count) {
7149 				if (wrapped) {
7150 					if (m != VM_PAGE_NULL) {
7151 						vm_page_unlock_queues();
7152 						vm_page_free_list(m, FALSE);
7153 						vm_page_lock_queues();
7154 						m = VM_PAGE_NULL;
7155 					}
7156 					dumped_run++;
7157 					goto done_scanning;
7158 				}
7159 				page_idx = last_idx = 0;
7160 				wrapped = TRUE;
7161 			}
7162 			abort_run = FALSE;
7163 
7164 			/*
7165 			 * this run was aborted... reset the state of the run and
7166 			 * resume the scan just past the page that caused the abort
7167 			 * (or from the very first page if we've wrapped around)
7168 			 */
7169 			RESET_STATE_OF_RUN();
7170 
7171 			if (flags & KMA_LOMEM) {
7172 				idx_last_contig_page_found  = vm_page_lomem_find_contiguous_last_idx = page_idx;
7173 			} else {
7174 				idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
7175 			}
7176 
7177 			last_idx = page_idx;
7178 
7179 			if (m != VM_PAGE_NULL) {
7180 				vm_page_unlock_queues();
7181 				vm_page_free_list(m, FALSE);
7182 				vm_page_lock_queues();
7183 				m = VM_PAGE_NULL;
7184 			}
7185 			dumped_run++;
7186 
7187 			vm_free_page_lock();
7188 			/*
7189 			 * reset our free page limit since we
7190 			 * dropped the lock protecting the vm_page_free_queue
7191 			 */
7192 			free_available = vm_page_free_count - vm_page_free_reserved;
7193 			goto retry;
7194 		}
7195 
7196 		for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
7197 			assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
7198 			assert(m1->vmp_wire_count == 0);
7199 
7200 			if (wire == TRUE) {
7201 				m1->vmp_wire_count++;
7202 				m1->vmp_q_state = VM_PAGE_IS_WIRED;
7203 
7204 			} else {
7205 				m1->vmp_gobbled = TRUE;
7206 			}
7207 		}
7208 		if (wire == FALSE) {
7209 			vm_page_gobble_count += npages;
7210 		}
7211 
7212 		/*
7213 		 * gobbled pages are also counted as wired pages
7214 		 */
7215 		vm_page_wire_count += npages;
7216 
7217 		assert(vm_page_verify_contiguous(m, npages));
7218 	}
7219 done_scanning:
7220 	PAGE_REPLACEMENT_ALLOWED(FALSE);
7221 
7222 	vm_page_unlock_queues();
7223 
7224 #if DEBUG
7225 	clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
7226 
7227 	tv_end_sec -= tv_start_sec;
7228 	if (tv_end_usec < tv_start_usec) {
7229 		tv_end_sec--;
7230 		tv_end_usec += 1000000;
7231 	}
7232 	tv_end_usec -= tv_start_usec;
7233 	if (tv_end_usec >= 1000000) {
7234 		tv_end_sec++;
7235 		tv_end_usec -= 1000000;
7236 	}
7237 	if (vm_page_find_contig_debug) {
7238 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds...  started at %d...  scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages\n",
7239 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
7240 		    (long)tv_end_sec, tv_end_usec, orig_last_idx,
7241 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages);
7242 	}
7243 
7244 #endif
7245 #if MACH_ASSERT
7246 	vm_page_verify_free_lists();
7247 #endif
7248 	if (m == NULL && zone_gc_called < 2) {
7249 		printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages...  yielded %d times...  dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
7250 		    __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
7251 		        scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
7252 
7253 		if (consider_buffer_cache_collect != NULL) {
7254 			(void)(*consider_buffer_cache_collect)(1);
7255 		}
7256 
7257 		zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
7258 
7259 		zone_gc_called++;
7260 
7261 		printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
7262 		goto full_scan_again;
7263 	}
7264 
7265 	return m;
7266 }
7267 
7268 /*
7269  *	Allocate a list of contiguous, wired pages.
7270  */
7271 kern_return_t
7272 cpm_allocate(
7273 	vm_size_t       size,
7274 	vm_page_t       *list,
7275 	ppnum_t         max_pnum,
7276 	ppnum_t         pnum_mask,
7277 	boolean_t       wire,
7278 	int             flags)
7279 {
7280 	vm_page_t               pages;
7281 	unsigned int            npages;
7282 
7283 	if (size % PAGE_SIZE != 0) {
7284 		return KERN_INVALID_ARGUMENT;
7285 	}
7286 
7287 	npages = (unsigned int) (size / PAGE_SIZE);
7288 	if (npages != size / PAGE_SIZE) {
7289 		/* 32-bit overflow */
7290 		return KERN_INVALID_ARGUMENT;
7291 	}
7292 
7293 	/*
7294 	 *	Obtain a pointer to a subset of the free
7295 	 *	list large enough to satisfy the request;
7296 	 *	the region will be physically contiguous.
7297 	 */
7298 	pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
7299 
7300 	if (pages == VM_PAGE_NULL) {
7301 		return KERN_NO_SPACE;
7302 	}
7303 	/*
7304 	 * determine need for wakeups
7305 	 */
7306 	if (vm_page_free_count < vm_page_free_min) {
7307 		vm_free_page_lock();
7308 		if (vm_pageout_running == FALSE) {
7309 			vm_free_page_unlock();
7310 			thread_wakeup((event_t) &vm_page_free_wanted);
7311 		} else {
7312 			vm_free_page_unlock();
7313 		}
7314 	}
7315 
7316 	VM_CHECK_MEMORYSTATUS;
7317 
7318 	/*
7319 	 *	The CPM pages should now be available and
7320 	 *	ordered by ascending physical address.
7321 	 */
7322 	assert(vm_page_verify_contiguous(pages, npages));
7323 
7324 	if (flags & KMA_ZERO) {
7325 		for (vm_page_t m = pages; m; m = NEXT_PAGE(m)) {
7326 			vm_page_zero_fill(m);
7329 		}
7330 	}
7331 
7332 	*list = pages;
7333 	return KERN_SUCCESS;
7334 }
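/*
 * Hypothetical caller sketch (illustration only, not part of this file): a
 * request for 64KB of wired, zero-filled, physically contiguous pages below
 * the 4GB line might look roughly like this; the flag choice and the DMA
 * programming step are assumptions made for the example.
 *
 *	vm_page_t       page_list = VM_PAGE_NULL;
 *	kern_return_t   kr;
 *
 *	kr = cpm_allocate(64 * 1024,              // size, must be a page multiple
 *	        &page_list,                       // out: pages in ascending phys order
 *	        atop_64(0xFFFFFFFFULL),           // max_pnum: stay below 4GB
 *	        0,                                // pnum_mask: no extra alignment
 *	        TRUE,                             // wire the pages
 *	        KMA_ZERO);                        // zero-fill the run
 *	if (kr == KERN_SUCCESS) {
 *		for (vm_page_t p = page_list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *			// e.g. program DMA with ptoa(VM_PAGE_GET_PHYS_PAGE(p))
 *		}
 *	}
 */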
7335 
7336 
7337 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
7338 
7339 /*
7340  * when working on a 'run' of pages, it is necessary to hold
7341  * the vm_page_queue_lock (a hot global lock) for certain operations
7342  * on the page... however, the majority of the work can be done
7343  * while merely holding the object lock... in fact there are certain
7344  * collections of pages that don't require any work brokered by the
7345  * vm_page_queue_lock... to mitigate the time spent behind the global
7346  * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
7347  * while doing all of the work that doesn't require the vm_page_queue_lock...
7348  * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
7349  * necessary work for each page... we will grab the busy bit on the page
7350  * if it's not already held so that vm_page_do_delayed_work can drop the object lock
7351  * if it can't immediately take the vm_page_queue_lock in order to compete
7352  * for the locks in the same order that vm_pageout_scan takes them.
7353  * the operation names are modeled after the names of the routines that
7354  * need to be called in order to make the changes very obvious in the
7355  * original loop
7356  */
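/*
 * Shape of a typical caller (illustrative sketch only; the local array, its
 * size, the chosen masks and the tag are assumptions made for the example,
 * the real callers live in vm_pageout.c and vm_fault.c):
 *
 *	struct vm_page_delayed_work     dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work     *dwp = &dw_array[0];
 *	int                             dw_count = 0;
 *
 *	vm_object_lock(object);
 *	// ... per-page work that needs only the object lock ...
 *	dwp->dw_m = m;
 *	dwp->dw_mask = DW_clear_busy | DW_PAGE_WAKEUP;
 *	dwp++;
 *	if (++dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
 *		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, &dw_array[0], dw_count);
 *		dwp = &dw_array[0];
 *		dw_count = 0;
 *	}
 *	// ... repeat for the rest of the run, flush any remainder, then ...
 *	vm_object_unlock(object);
 */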
7357 
7358 void
7359 vm_page_do_delayed_work(
7360 	vm_object_t     object,
7361 	vm_tag_t        tag,
7362 	struct vm_page_delayed_work *dwp,
7363 	int             dw_count)
7364 {
7365 	int             j;
7366 	vm_page_t       m;
7367 	vm_page_t       local_free_q = VM_PAGE_NULL;
7368 
7369 	/*
7370 	 * pageout_scan takes the vm_page_lock_queues first
7371 	 * then tries for the object lock... to avoid what
7372 	 * is effectively a lock inversion, we'll go to the
7373 	 * trouble of taking them in that same order... otherwise
7374 	 * if this object contains the majority of the pages resident
7375 	 * in the UBC (or a small set of large objects actively being
7376 	 * worked on contain the majority of the pages), we could
7377 	 * cause the pageout_scan thread to 'starve' in its attempt
7378 	 * to find pages to move to the free queue, since it has to
7379 	 * successfully acquire the object lock of any candidate page
7380 	 * before it can steal/clean it.
7381 	 */
7382 	if (!vm_page_trylock_queues()) {
7383 		vm_object_unlock(object);
7384 
7385 		/*
7386 		 * "Turnstile enabled vm_pageout_scan" can be runnable
7387 		 * for a very long time without getting on a core.
7388 		 * If this is a higher priority thread it could be
7389 		 * waiting here for a very long time respecting the fact
7390 		 * that pageout_scan would like its object after VPS does
7391 		 * a mutex_pause(0).
7392 		 * So we cap the number of yields in the vm_object_lock_avoid()
7393 		 * case to a single mutex_pause(0) which will give vm_pageout_scan
7394 		 * 10us to run and grab the object if needed.
7395 		 */
7396 		vm_page_lock_queues();
7397 
7398 		for (j = 0;; j++) {
7399 			if ((!vm_object_lock_avoid(object) ||
7400 			    (vps_dynamic_priority_enabled && (j > 0))) &&
7401 			    _vm_object_lock_try(object)) {
7402 				break;
7403 			}
7404 			vm_page_unlock_queues();
7405 			mutex_pause(j);
7406 			vm_page_lock_queues();
7407 		}
7408 	}
7409 	for (j = 0; j < dw_count; j++, dwp++) {
7410 		m = dwp->dw_m;
7411 
7412 		if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
7413 			vm_pageout_throttle_up(m);
7414 		}
7415 #if CONFIG_PHANTOM_CACHE
7416 		if (dwp->dw_mask & DW_vm_phantom_cache_update) {
7417 			vm_phantom_cache_update(m);
7418 		}
7419 #endif
7420 		if (dwp->dw_mask & DW_vm_page_wire) {
7421 			vm_page_wire(m, tag, FALSE);
7422 		} else if (dwp->dw_mask & DW_vm_page_unwire) {
7423 			boolean_t       queueit;
7424 
7425 			queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
7426 
7427 			vm_page_unwire(m, queueit);
7428 		}
7429 		if (dwp->dw_mask & DW_vm_page_free) {
7430 			vm_page_free_prepare_queues(m);
7431 
7432 			assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
7433 			/*
7434 			 * Add this page to our list of reclaimed pages,
7435 			 * to be freed later.
7436 			 */
7437 			m->vmp_snext = local_free_q;
7438 			local_free_q = m;
7439 		} else {
7440 			if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
7441 				vm_page_deactivate_internal(m, FALSE);
7442 			} else if (dwp->dw_mask & DW_vm_page_activate) {
7443 				if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
7444 					vm_page_activate(m);
7445 				}
7446 			} else if (dwp->dw_mask & DW_vm_page_speculate) {
7447 				vm_page_speculate(m, TRUE);
7448 			} else if (dwp->dw_mask & DW_enqueue_cleaned) {
7449 				/*
7450 				 * if we didn't hold the object lock and did this,
7451 				 * we might disconnect the page, then someone might
7452 				 * soft fault it back in, then we would put it on the
7453 				 * cleaned queue, and so we would have a referenced (maybe even dirty)
7454 				 * page on that queue, which we don't want
7455 				 */
7456 				int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7457 
7458 				if ((refmod_state & VM_MEM_REFERENCED)) {
7459 					/*
7460 					 * this page has been touched since it got cleaned; let's activate it
7461 					 * if it hasn't already been
7462 					 */
7463 					VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
7464 					VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
7465 
7466 					if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
7467 						vm_page_activate(m);
7468 					}
7469 				} else {
7470 					m->vmp_reference = FALSE;
7471 					vm_page_enqueue_cleaned(m);
7472 				}
7473 			} else if (dwp->dw_mask & DW_vm_page_lru) {
7474 				vm_page_lru(m);
7475 			} else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
7476 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7477 					vm_page_queues_remove(m, TRUE);
7478 				}
7479 			}
7480 			if (dwp->dw_mask & DW_set_reference) {
7481 				m->vmp_reference = TRUE;
7482 			} else if (dwp->dw_mask & DW_clear_reference) {
7483 				m->vmp_reference = FALSE;
7484 			}
7485 
7486 			if (dwp->dw_mask & DW_move_page) {
7487 				if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
7488 					vm_page_queues_remove(m, FALSE);
7489 
7490 					assert(!is_kernel_object(VM_PAGE_OBJECT(m)));
7491 
7492 					vm_page_enqueue_inactive(m, FALSE);
7493 				}
7494 			}
7495 			if (dwp->dw_mask & DW_clear_busy) {
7496 				m->vmp_busy = FALSE;
7497 			}
7498 
7499 			if (dwp->dw_mask & DW_PAGE_WAKEUP) {
7500 				vm_page_wakeup(object, m);
7501 			}
7502 		}
7503 	}
7504 	vm_page_unlock_queues();
7505 
7506 	if (local_free_q) {
7507 		vm_page_free_list(local_free_q, TRUE);
7508 	}
7509 
7510 	VM_CHECK_MEMORYSTATUS;
7511 }
7512 
7513 __abortlike
7514 static void
7515 __vm_page_alloc_list_failed_panic(
7516 	vm_size_t       page_count,
7517 	kma_flags_t     flags,
7518 	kern_return_t   kr)
7519 {
7520 	panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
7521 	    (size_t)page_count, flags, kr);
7522 }
7523 
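/*
 * Grab 'page_count' (not necessarily contiguous) pages and return them as a
 * singly-linked list chained through vmp_snext.  KMA_LOMEM requests low
 * pages, KMA_NOPAGEWAIT returns KERN_RESOURCE_SHORTAGE rather than blocking,
 * KMA_ZERO / KMA_NOENCRYPT zero-fill the pages, and KMA_NOFAIL panics
 * instead of returning an error.
 */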
7524 kern_return_t
7525 vm_page_alloc_list(
7526 	vm_size_t   page_count,
7527 	kma_flags_t flags,
7528 	vm_page_t  *list)
7529 {
7530 	vm_page_t       page_list = VM_PAGE_NULL;
7531 	vm_page_t       mem;
7532 	kern_return_t   kr = KERN_SUCCESS;
7533 	int             page_grab_count = 0;
7534 	task_t          task;
7535 
7536 	for (vm_size_t i = 0; i < page_count; i++) {
7537 		for (;;) {
7538 			if (flags & KMA_LOMEM) {
7539 				mem = vm_page_grablo();
7540 			} else {
7541 				uint_t options = VM_PAGE_GRAB_OPTIONS_NONE;
7542 				mem = vm_page_grab_options(options);
7543 			}
7544 
7545 			if (mem != VM_PAGE_NULL) {
7546 				break;
7547 			}
7548 
7549 			if (flags & KMA_NOPAGEWAIT) {
7550 				kr = KERN_RESOURCE_SHORTAGE;
7551 				goto out;
7552 			}
7553 			if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
7554 				kr = KERN_RESOURCE_SHORTAGE;
7555 				goto out;
7556 			}
7557 
7558 			/* VM privileged threads should have waited in vm_page_grab() and not get here. */
7559 			assert(!(current_thread()->options & TH_OPT_VMPRIV));
7560 
7561 			if ((flags & KMA_NOFAIL) == 0 && ptoa_64(page_count) > max_mem / 4) {
7562 				uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
7563 				if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
7564 					kr = KERN_RESOURCE_SHORTAGE;
7565 					goto out;
7566 				}
7567 			}
7568 			VM_PAGE_WAIT();
7569 		}
7570 
7571 		page_grab_count++;
7572 		mem->vmp_snext = page_list;
7573 		page_list = mem;
7574 	}
7575 
7576 	if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
7577 		for (mem = page_list; mem; mem = mem->vmp_snext) {
7578 			vm_page_zero_fill(mem);
7581 		}
7582 	}
7583 
7584 out:
7585 	task = current_task_early();
7586 	if (task != NULL) {
7587 		counter_add(&task->pages_grabbed_kern, page_grab_count);
7588 	}
7589 
7590 	if (kr == KERN_SUCCESS) {
7591 		*list = page_list;
7592 	} else if (flags & KMA_NOFAIL) {
7593 		__vm_page_alloc_list_failed_panic(page_count, flags, kr);
7594 	} else {
7595 		vm_page_free_list(page_list, FALSE);
7596 	}
7597 
7598 	return kr;
7599 }
7600 
7601 void
7602 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
7603 {
7604 	page->vmp_offset = offset;
7605 }
7606 
7607 vm_page_t
7608 vm_page_get_next(vm_page_t page)
7609 {
7610 	return page->vmp_snext;
7611 }
7612 
7613 vm_object_offset_t
7614 vm_page_get_offset(vm_page_t page)
7615 {
7616 	return page->vmp_offset;
7617 }
7618 
7619 ppnum_t
7620 vm_page_get_phys_page(vm_page_t page)
7621 {
7622 	return VM_PAGE_GET_PHYS_PAGE(page);
7623 }
7624 
7625 
7626 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
7627 
7628 #if HIBERNATION
7629 
7630 static vm_page_t hibernate_gobble_queue;
7631 
7632 static int  hibernate_drain_pageout_queue(struct vm_pageout_queue *);
7633 static int  hibernate_flush_dirty_pages(int);
7634 static int  hibernate_flush_queue(vm_page_queue_head_t *, int);
7635 
7636 void hibernate_flush_wait(void);
7637 void hibernate_mark_in_progress(void);
7638 void hibernate_clear_in_progress(void);
7639 
7640 void            hibernate_free_range(int, int);
7641 void            hibernate_hash_insert_page(vm_page_t);
7642 uint32_t        hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
7643 uint32_t        hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
7644 ppnum_t         hibernate_lookup_paddr(unsigned int);
7645 
7646 struct hibernate_statistics {
7647 	int hibernate_considered;
7648 	int hibernate_reentered_on_q;
7649 	int hibernate_found_dirty;
7650 	int hibernate_skipped_cleaning;
7651 	int hibernate_skipped_transient;
7652 	int hibernate_skipped_precious;
7653 	int hibernate_skipped_external;
7654 	int hibernate_queue_nolock;
7655 	int hibernate_queue_paused;
7656 	int hibernate_throttled;
7657 	int hibernate_throttle_timeout;
7658 	int hibernate_drained;
7659 	int hibernate_drain_timeout;
7660 	int cd_lock_failed;
7661 	int cd_found_precious;
7662 	int cd_found_wired;
7663 	int cd_found_busy;
7664 	int cd_found_unusual;
7665 	int cd_found_cleaning;
7666 	int cd_found_laundry;
7667 	int cd_found_dirty;
7668 	int cd_found_xpmapped;
7669 	int cd_skipped_xpmapped;
7670 	int cd_local_free;
7671 	int cd_total_free;
7672 	int cd_vm_page_wire_count;
7673 	int cd_vm_struct_pages_unneeded;
7674 	int cd_pages;
7675 	int cd_discarded;
7676 	int cd_count_wire;
7677 } hibernate_stats;
7678 
7679 #if CONFIG_SPTM
7680 /**
7681  * On SPTM-based systems don't save any executable pages into the hibernation
7682  * image. The SPTM has stronger guarantees around not allowing write access to
7683  * the executable pages than on older systems, which prevents XNU from being
7684  * able to restore any pages mapped as executable.
7685  */
7686 #define HIBERNATE_XPMAPPED_LIMIT        0ULL
7687 #else /* CONFIG_SPTM */
7688 /*
7689  * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
7690  * so that we don't overrun the estimated image size, which would
7691  * result in a hibernation failure.
7692  *
7693  * We use a size value instead of pages because we don't want to take up more space
7694  * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
7695  * to have that additional space available.
7696  *
7697  * Since this was set at 40000 pages on X86 we are going to use 160MB as our
7698  * xpmapped size.
7699  */
7700 #define HIBERNATE_XPMAPPED_LIMIT        ((160 * 1024 * 1024ULL) / PAGE_SIZE)
7701 #endif /* CONFIG_SPTM */
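/*
 * For reference, the 160MB clamp in the non-SPTM case works out to 40960
 * pages with a 4K page size (close to the historical 40000-page x86 limit
 * mentioned above) and 10240 pages with a 16K page size.
 */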
7702 
7703 static int
7704 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
7705 {
7706 	wait_result_t   wait_result;
7707 
7708 	vm_page_lock_queues();
7709 
7710 	while (!vm_page_queue_empty(&q->pgo_pending)) {
7711 		q->pgo_draining = TRUE;
7712 
7713 		assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
7714 
7715 		vm_page_unlock_queues();
7716 
7717 		wait_result = thread_block(THREAD_CONTINUE_NULL);
7718 
7719 		if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
7720 			hibernate_stats.hibernate_drain_timeout++;
7721 
7722 			if (q == &vm_pageout_queue_external) {
7723 				return 0;
7724 			}
7725 
7726 			return 1;
7727 		}
7728 		vm_page_lock_queues();
7729 
7730 		hibernate_stats.hibernate_drained++;
7731 	}
7732 	vm_page_unlock_queues();
7733 
7734 	return 0;
7735 }
7736 
7737 
7738 boolean_t hibernate_skip_external = FALSE;
7739 
7740 static int
7741 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7742 {
7743 	vm_page_t       m;
7744 	vm_object_t     l_object = NULL;
7745 	vm_object_t     m_object = NULL;
7746 	int             refmod_state = 0;
7747 	int             try_failed_count = 0;
7748 	int             retval = 0;
7749 	int             current_run = 0;
7750 	struct  vm_pageout_queue *iq;
7751 	struct  vm_pageout_queue *eq;
7752 	struct  vm_pageout_queue *tq;
7753 
7754 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7755 	    VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7756 
7757 	iq = &vm_pageout_queue_internal;
7758 	eq = &vm_pageout_queue_external;
7759 
7760 	vm_page_lock_queues();
7761 
7762 	while (qcount && !vm_page_queue_empty(q)) {
7763 		if (current_run++ == 1000) {
7764 			if (hibernate_should_abort()) {
7765 				retval = 1;
7766 				break;
7767 			}
7768 			current_run = 0;
7769 		}
7770 
7771 		m = (vm_page_t) vm_page_queue_first(q);
7772 		m_object = VM_PAGE_OBJECT(m);
7773 
7774 		/*
7775 		 * check to see if we currently are working
7776 		 * with the same object... if so, we've
7777 		 * already got the lock
7778 		 */
7779 		if (m_object != l_object) {
7780 			/*
7781 			 * the object associated with candidate page is
7782 			 * different from the one we were just working
7783 			 * with... dump the lock if we still own it
7784 			 */
7785 			if (l_object != NULL) {
7786 				vm_object_unlock(l_object);
7787 				l_object = NULL;
7788 			}
7789 			/*
7790 			 * Try to lock object; since we've already got the
7791 			 * page queues lock, we can only 'try' for this one.
7792 			 * if the 'try' fails, we need to do a mutex_pause
7793 			 * to allow the owner of the object lock a chance to
7794 			 * run...
7795 			 */
7796 			if (!vm_object_lock_try_scan(m_object)) {
7797 				if (try_failed_count > 20) {
7798 					hibernate_stats.hibernate_queue_nolock++;
7799 
7800 					goto reenter_pg_on_q;
7801 				}
7802 
7803 				vm_page_unlock_queues();
7804 				mutex_pause(try_failed_count++);
7805 				vm_page_lock_queues();
7806 
7807 				hibernate_stats.hibernate_queue_paused++;
7808 				continue;
7809 			} else {
7810 				l_object = m_object;
7811 			}
7812 		}
7813 		if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7814 			/*
7815 			 * page is not to be cleaned
7816 			 * put it back on the head of its queue
7817 			 */
7818 			if (m->vmp_cleaning) {
7819 				hibernate_stats.hibernate_skipped_cleaning++;
7820 			} else {
7821 				hibernate_stats.hibernate_skipped_transient++;
7822 			}
7823 
7824 			goto reenter_pg_on_q;
7825 		}
7826 		if (m_object->vo_copy == VM_OBJECT_NULL) {
7827 			if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7828 				/*
7829 				 * let the normal hibernate image path
7830 				 * deal with these
7831 				 */
7832 				goto reenter_pg_on_q;
7833 			}
7834 		}
7835 		if (!m->vmp_dirty && m->vmp_pmapped) {
7836 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7837 
7838 			if ((refmod_state & VM_MEM_MODIFIED)) {
7839 				SET_PAGE_DIRTY(m, FALSE);
7840 			}
7841 		} else {
7842 			refmod_state = 0;
7843 		}
7844 
7845 		if (!m->vmp_dirty) {
7846 			/*
7847 			 * page is not to be cleaned
7848 			 * put it back on the head of its queue
7849 			 */
7850 			if (m->vmp_precious) {
7851 				hibernate_stats.hibernate_skipped_precious++;
7852 			}
7853 
7854 			goto reenter_pg_on_q;
7855 		}
7856 
7857 		if (hibernate_skip_external == TRUE && !m_object->internal) {
7858 			hibernate_stats.hibernate_skipped_external++;
7859 
7860 			goto reenter_pg_on_q;
7861 		}
7862 		tq = NULL;
7863 
7864 		if (m_object->internal) {
7865 			if (VM_PAGE_Q_THROTTLED(iq)) {
7866 				tq = iq;
7867 			}
7868 		} else if (VM_PAGE_Q_THROTTLED(eq)) {
7869 			tq = eq;
7870 		}
7871 
7872 		if (tq != NULL) {
7873 			wait_result_t   wait_result;
7874 			int             wait_count = 5;
7875 
7876 			if (l_object != NULL) {
7877 				vm_object_unlock(l_object);
7878 				l_object = NULL;
7879 			}
7880 
7881 			while (retval == 0) {
7882 				tq->pgo_throttled = TRUE;
7883 
7884 				assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7885 
7886 				vm_page_unlock_queues();
7887 
7888 				wait_result = thread_block(THREAD_CONTINUE_NULL);
7889 
7890 				vm_page_lock_queues();
7891 
7892 				if (wait_result != THREAD_TIMED_OUT) {
7893 					break;
7894 				}
7895 				if (!VM_PAGE_Q_THROTTLED(tq)) {
7896 					break;
7897 				}
7898 
7899 				if (hibernate_should_abort()) {
7900 					retval = 1;
7901 				}
7902 
7903 				if (--wait_count == 0) {
7904 					hibernate_stats.hibernate_throttle_timeout++;
7905 
7906 					if (tq == eq) {
7907 						hibernate_skip_external = TRUE;
7908 						break;
7909 					}
7910 					retval = 1;
7911 				}
7912 			}
7913 			if (retval) {
7914 				break;
7915 			}
7916 
7917 			hibernate_stats.hibernate_throttled++;
7918 
7919 			continue;
7920 		}
7921 		/*
7922 		 * we've already factored out pages in the laundry which
7923 		 * means this page can't be on the pageout queue so it's
7924 		 * safe to do the vm_page_queues_remove
7925 		 */
7926 		vm_page_queues_remove(m, TRUE);
7927 
7928 		if (m_object->internal == TRUE) {
7929 			pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7930 		}
7931 
7932 		vm_pageout_cluster(m);
7933 
7934 		hibernate_stats.hibernate_found_dirty++;
7935 
7936 		goto next_pg;
7937 
7938 reenter_pg_on_q:
7939 		vm_page_queue_remove(q, m, vmp_pageq);
7940 		vm_page_queue_enter(q, m, vmp_pageq);
7941 
7942 		hibernate_stats.hibernate_reentered_on_q++;
7943 next_pg:
7944 		hibernate_stats.hibernate_considered++;
7945 
7946 		qcount--;
7947 		try_failed_count = 0;
7948 	}
7949 	if (l_object != NULL) {
7950 		vm_object_unlock(l_object);
7951 		l_object = NULL;
7952 	}
7953 
7954 	vm_page_unlock_queues();
7955 
7956 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7957 
7958 	return retval;
7959 }
7960 
7961 
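/*
 * Flush dirty pages to the compressor ahead of writing the hibernation image,
 * working through the queues roughly coldest-first: the speculative age
 * queues, then inactive, anonymous and cleaned, then active, draining the
 * internal pageout queue along the way.  Returns non-zero if the flush should
 * be abandoned (hibernation aborted or a queue failed to drain).
 */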
7962 static int
7963 hibernate_flush_dirty_pages(int pass)
7964 {
7965 	struct vm_speculative_age_q     *aq;
7966 	uint32_t        i;
7967 
7968 	if (vm_page_local_q) {
7969 		zpercpu_foreach_cpu(lid) {
7970 			vm_page_reactivate_local(lid, TRUE, FALSE);
7971 		}
7972 	}
7973 
7974 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
7975 		int             qcount;
7976 		vm_page_t       m;
7977 
7978 		aq = &vm_page_queue_speculative[i];
7979 
7980 		if (vm_page_queue_empty(&aq->age_q)) {
7981 			continue;
7982 		}
7983 		qcount = 0;
7984 
7985 		vm_page_lockspin_queues();
7986 
7987 		vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7988 			qcount++;
7989 		}
7990 		vm_page_unlock_queues();
7991 
7992 		if (qcount) {
7993 			if (hibernate_flush_queue(&aq->age_q, qcount)) {
7994 				return 1;
7995 			}
7996 		}
7997 	}
7998 	if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7999 		return 1;
8000 	}
8001 	/* XXX FBDP TODO: flush secluded queue */
8002 	if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
8003 		return 1;
8004 	}
8005 	if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
8006 		return 1;
8007 	}
8008 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
8009 		return 1;
8010 	}
8011 
8012 	if (pass == 1) {
8013 		vm_compressor_record_warmup_start();
8014 	}
8015 
8016 	if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
8017 		if (pass == 1) {
8018 			vm_compressor_record_warmup_end();
8019 		}
8020 		return 1;
8021 	}
8022 	if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
8023 		if (pass == 1) {
8024 			vm_compressor_record_warmup_end();
8025 		}
8026 		return 1;
8027 	}
8028 	if (pass == 1) {
8029 		vm_compressor_record_warmup_end();
8030 	}
8031 
8032 	if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
8033 		return 1;
8034 	}
8035 
8036 	return 0;
8037 }
8038 
8039 
8040 void
8041 hibernate_reset_stats()
8042 {
8043 	bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
8044 }
8045 
8046 
8047 int
8048 hibernate_flush_memory()
8049 {
8050 	int     retval;
8051 
8052 	assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
8053 
8054 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
8055 
8056 	hibernate_cleaning_in_progress = TRUE;
8057 	hibernate_skip_external = FALSE;
8058 
8059 	if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
8060 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
8061 
8062 		vm_compressor_flush();
8063 
8064 		KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
8065 
8066 		if (consider_buffer_cache_collect != NULL) {
8067 			unsigned int orig_wire_count;
8068 
8069 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
8070 			orig_wire_count = vm_page_wire_count;
8071 
8072 			(void)(*consider_buffer_cache_collect)(1);
8073 			zone_gc(ZONE_GC_DRAIN);
8074 
8075 			HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
8076 
8077 			KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
8078 		}
8079 	}
8080 	hibernate_cleaning_in_progress = FALSE;
8081 
8082 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
8083 
8084 	if (retval) {
8085 		HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
8086 	}
8087 
8088 
8089 	HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
8090 	    hibernate_stats.hibernate_considered,
8091 	    hibernate_stats.hibernate_reentered_on_q,
8092 	    hibernate_stats.hibernate_found_dirty);
8093 	HIBPRINT("   skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
8094 	    hibernate_stats.hibernate_skipped_cleaning,
8095 	    hibernate_stats.hibernate_skipped_transient,
8096 	    hibernate_stats.hibernate_skipped_precious,
8097 	    hibernate_stats.hibernate_skipped_external,
8098 	    hibernate_stats.hibernate_queue_nolock);
8099 	HIBPRINT("   queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
8100 	    hibernate_stats.hibernate_queue_paused,
8101 	    hibernate_stats.hibernate_throttled,
8102 	    hibernate_stats.hibernate_throttle_timeout,
8103 	    hibernate_stats.hibernate_drained,
8104 	    hibernate_stats.hibernate_drain_timeout);
8105 
8106 	return retval;
8107 }
8108 
8109 
8110 static void
8111 hibernate_page_list_zero(hibernate_page_list_t *list)
8112 {
8113 	uint32_t             bank;
8114 	hibernate_bitmap_t * bitmap;
8115 
8116 	bitmap = &list->bank_bitmap[0];
8117 	for (bank = 0; bank < list->bank_count; bank++) {
8118 		uint32_t last_bit;
8119 
8120 		bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
8121 		// set out-of-bound bits at end of bitmap.
8122 		last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
8123 		if (last_bit) {
8124 			bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
8125 		}
8126 
8127 		bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
8128 	}
8129 }
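/*
 * Example of the end-of-bank fixup above: a bank covering 70 pages leaves
 * last_bit = 70 & 31 = 6, so the final word is preset to 0xFFFFFFFF >> 6,
 * marking the 26 bit positions that don't correspond to real pages.
 */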
8130 
8131 void
8132 hibernate_free_gobble_pages(void)
8133 {
8134 	vm_page_t m, next;
8135 	uint32_t  count = 0;
8136 
8137 	m = (vm_page_t) hibernate_gobble_queue;
8138 	while (m) {
8139 		next = m->vmp_snext;
8140 		vm_page_free(m);
8141 		count++;
8142 		m = next;
8143 	}
8144 	hibernate_gobble_queue = VM_PAGE_NULL;
8145 
8146 	if (count) {
8147 		HIBLOG("Freed %d pages\n", count);
8148 	}
8149 }
8150 
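/*
 * Decide whether a page can simply be discarded, i.e. left out of the
 * hibernation image and reclaimed on wake: wired, precious, busy, unusual,
 * cleaning and laundry pages are kept; dirty pages are kept unless their
 * object is volatile or empty purgeable; and up to HIBERNATE_XPMAPPED_LIMIT
 * referenced, executable-mapped file-backed pages are kept even though they
 * are clean.  When 'preflight' is TRUE the bookkeeping counters are left
 * untouched.
 */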
8151 static boolean_t
8152 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
8153 {
8154 	vm_object_t object = NULL;
8155 	int                  refmod_state;
8156 	boolean_t            discard = FALSE;
8157 
8158 	do {
8159 		if (vm_page_is_private(m)) {
8160 			panic("hibernate_consider_discard: private");
8161 		}
8162 
8163 		object = VM_PAGE_OBJECT(m);
8164 
8165 		if (!vm_object_lock_try(object)) {
8166 			object = NULL;
8167 			if (!preflight) {
8168 				hibernate_stats.cd_lock_failed++;
8169 			}
8170 			break;
8171 		}
8172 		if (VM_PAGE_WIRED(m)) {
8173 			if (!preflight) {
8174 				hibernate_stats.cd_found_wired++;
8175 			}
8176 			break;
8177 		}
8178 		if (m->vmp_precious) {
8179 			if (!preflight) {
8180 				hibernate_stats.cd_found_precious++;
8181 			}
8182 			break;
8183 		}
8184 		if (m->vmp_busy || !object->alive) {
8185 			/*
8186 			 *	Somebody is playing with this page.
8187 			 */
8188 			if (!preflight) {
8189 				hibernate_stats.cd_found_busy++;
8190 			}
8191 			break;
8192 		}
8193 		if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
8194 			/*
8195 			 * If it's unusual in anyway, ignore it
8196 			 * If it's unusual in any way, ignore it
8197 			if (!preflight) {
8198 				hibernate_stats.cd_found_unusual++;
8199 			}
8200 			break;
8201 		}
8202 		if (m->vmp_cleaning) {
8203 			if (!preflight) {
8204 				hibernate_stats.cd_found_cleaning++;
8205 			}
8206 			break;
8207 		}
8208 		if (m->vmp_laundry) {
8209 			if (!preflight) {
8210 				hibernate_stats.cd_found_laundry++;
8211 			}
8212 			break;
8213 		}
8214 		if (!m->vmp_dirty) {
8215 			refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
8216 
8217 			if (refmod_state & VM_MEM_REFERENCED) {
8218 				m->vmp_reference = TRUE;
8219 			}
8220 			if (refmod_state & VM_MEM_MODIFIED) {
8221 				SET_PAGE_DIRTY(m, FALSE);
8222 			}
8223 		}
8224 
8225 		/*
8226 		 * If it's clean or purgeable we can discard the page on wakeup.
8227 		 */
8228 		discard = (!m->vmp_dirty)
8229 		    || (VM_PURGABLE_VOLATILE == object->purgable)
8230 		    || (VM_PURGABLE_EMPTY == object->purgable);
8231 
8232 
8233 		if (discard == FALSE) {
8234 			if (!preflight) {
8235 				hibernate_stats.cd_found_dirty++;
8236 			}
8237 		} else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
8238 			if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
8239 				if (!preflight) {
8240 					hibernate_stats.cd_found_xpmapped++;
8241 				}
8242 				discard = FALSE;
8243 			} else {
8244 				if (!preflight) {
8245 					hibernate_stats.cd_skipped_xpmapped++;
8246 				}
8247 			}
8248 		}
8249 	} while (FALSE);
8250 
8251 	if (object) {
8252 		vm_object_unlock(object);
8253 	}
8254 
8255 	return discard;
8256 }
8257 
8258 
8259 static void
8260 hibernate_discard_page(vm_page_t m)
8261 {
8262 	vm_object_t m_object;
8263 
8264 	if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
8265 		/*
8266 		 * If it's unusual in any way, ignore it
8267 		 */
8268 		return;
8269 	}
8270 
8271 	m_object = VM_PAGE_OBJECT(m);
8272 
8273 #if MACH_ASSERT || DEBUG
8274 	if (!vm_object_lock_try(m_object)) {
8275 		panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
8276 	}
8277 #else
8278 	/* No need to lock page queue for token delete, hibernate_vm_unlock()
8279 	 *  makes sure these locks are uncontended before sleep */
8280 #endif /* MACH_ASSERT || DEBUG */
8281 
8282 	if (m->vmp_pmapped == TRUE) {
8283 		__unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
8284 	}
8285 
8286 	if (m->vmp_laundry) {
8287 		panic("hibernate_discard_page(%p) laundry", m);
8288 	}
8289 	if (vm_page_is_private(m)) {
8290 		panic("hibernate_discard_page(%p) private", m);
8291 	}
8292 	if (vm_page_is_fictitious(m)) {
8293 		panic("hibernate_discard_page(%p) fictitious", m);
8294 	}
8295 
8296 	if (VM_PURGABLE_VOLATILE == m_object->purgable) {
8297 		/* object should be on a queue */
8298 		assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
8299 		purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
8300 		assert(old_queue);
8301 		if (m_object->purgeable_when_ripe) {
8302 			vm_purgeable_token_delete_first(old_queue);
8303 		}
8304 		vm_object_lock_assert_exclusive(m_object);
8305 		VM_OBJECT_SET_PURGABLE(m_object, VM_PURGABLE_EMPTY);
8306 
8307 		/*
8308 		 * Purgeable ledgers:  pages of VOLATILE and EMPTY objects are
8309 		 * accounted in the "volatile" ledger, so no change here.
8310 		 * We have to update vm_page_purgeable_count, though, since we're
8311 		 * effectively purging this object.
8312 		 */
8313 		unsigned int delta;
8314 		assert(m_object->resident_page_count >= m_object->wired_page_count);
8315 		delta = (m_object->resident_page_count - m_object->wired_page_count);
8316 		assert(vm_page_purgeable_count >= delta);
8317 		assert(delta > 0);
8318 		OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
8319 	}
8320 
8321 	vm_page_free(m);
8322 
8323 #if MACH_ASSERT || DEBUG
8324 	vm_object_unlock(m_object);
8325 #endif  /* MACH_ASSERT || DEBUG */
8326 }
8327 
8328 /*
8329  *  Grab locks for hibernate_page_list_setall()
8330  */
8331 void
8332 hibernate_vm_lock_queues(void)
8333 {
8334 	vm_object_lock(compressor_object);
8335 	vm_page_lock_queues();
8336 	vm_free_page_lock();
8337 	lck_mtx_lock(&vm_purgeable_queue_lock);
8338 
8339 	if (vm_page_local_q) {
8340 		zpercpu_foreach(lq, vm_page_local_q) {
8341 			VPL_LOCK(&lq->vpl_lock);
8342 		}
8343 	}
8344 }
8345 
8346 void
8347 hibernate_vm_unlock_queues(void)
8348 {
8349 	if (vm_page_local_q) {
8350 		zpercpu_foreach(lq, vm_page_local_q) {
8351 			VPL_UNLOCK(&lq->vpl_lock);
8352 		}
8353 	}
8354 	lck_mtx_unlock(&vm_purgeable_queue_lock);
8355 	vm_free_page_unlock();
8356 	vm_page_unlock_queues();
8357 	vm_object_unlock(compressor_object);
8358 }
8359 
8360 #if CONFIG_SPTM
8361 static bool
8362 hibernate_sptm_should_force_page_to_wired_pagelist(vm_page_t vmp)
8363 {
8364 	const sptm_paddr_t paddr = ptoa_64(VM_PAGE_GET_PHYS_PAGE(vmp));
8365 	const sptm_frame_type_t frame_type = sptm_get_frame_type(paddr);
8366 	const vm_object_t vmp_objp = VM_PAGE_OBJECT(vmp);
8367 
8368 	return frame_type == XNU_USER_JIT || frame_type == XNU_USER_DEBUG ||
8369 	       (frame_type == XNU_USER_EXEC && vmp_objp->internal == TRUE);
8370 }
8371 #endif
8372 
8373 /*
8374  *  Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
8375  *  pages known to VM to not need saving are subtracted.
8376  *  Wired pages to be saved are present in page_list_wired, pageable in page_list.
8377  */
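/*
 * Bitmap convention used throughout the walk below: calling
 * hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m)) sets the
 * page's bit and thereby removes it from the set of pages to be saved; a page
 * is only written into the image if its bit is still clear in the relevant
 * list.  Free pages, for example, are set in both page_list and
 * page_list_wired and are therefore saved in neither.
 */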
8378 
8379 void
8380 hibernate_page_list_setall(hibernate_page_list_t * page_list,
8381     hibernate_page_list_t * page_list_wired,
8382     hibernate_page_list_t * page_list_pal,
8383     boolean_t preflight,
8384     boolean_t will_discard,
8385     uint32_t * pagesOut)
8386 {
8387 	uint64_t start, end, nsec;
8388 	vm_page_t m;
8389 	vm_page_t next;
8390 	uint32_t pages = page_list->page_count;
8391 	uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
8392 	uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
8393 	uint32_t count_wire = pages;
8394 	uint32_t count_discard_active    = 0;
8395 	uint32_t count_discard_inactive  = 0;
8396 	uint32_t count_retired = 0;
8397 	uint32_t count_discard_cleaned   = 0;
8398 	uint32_t count_discard_purgeable = 0;
8399 	uint32_t count_discard_speculative = 0;
8400 	uint32_t count_discard_vm_struct_pages = 0;
8401 	uint32_t i;
8402 	uint32_t             bank;
8403 	hibernate_bitmap_t * bitmap;
8404 	hibernate_bitmap_t * bitmap_wired;
8405 	boolean_t                    discard_all;
8406 	boolean_t            discard = FALSE;
8407 
8408 	HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
8409 
8410 	if (preflight) {
8411 		page_list       = NULL;
8412 		page_list_wired = NULL;
8413 		page_list_pal   = NULL;
8414 		discard_all     = FALSE;
8415 	} else {
8416 		discard_all     = will_discard;
8417 	}
8418 
8419 #if MACH_ASSERT || DEBUG
8420 	if (!preflight) {
8421 		assert(hibernate_vm_locks_are_safe());
8422 		vm_page_lock_queues();
8423 		if (vm_page_local_q) {
8424 			zpercpu_foreach(lq, vm_page_local_q) {
8425 				VPL_LOCK(&lq->vpl_lock);
8426 			}
8427 		}
8428 	}
8429 #endif  /* MACH_ASSERT || DEBUG */
8430 
8431 
8432 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
8433 
8434 	clock_get_uptime(&start);
8435 
8436 	if (!preflight) {
8437 		hibernate_page_list_zero(page_list);
8438 		hibernate_page_list_zero(page_list_wired);
8439 		hibernate_page_list_zero(page_list_pal);
8440 
8441 		hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
8442 		hibernate_stats.cd_pages = pages;
8443 	}
8444 
8445 	if (vm_page_local_q) {
8446 		zpercpu_foreach_cpu(lid) {
8447 			vm_page_reactivate_local(lid, TRUE, !preflight);
8448 		}
8449 	}
8450 
8451 	if (preflight) {
8452 		vm_object_lock(compressor_object);
8453 		vm_page_lock_queues();
8454 		vm_free_page_lock();
8455 	}
8456 
8457 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8458 
8459 	hibernation_vmqueues_inspection = TRUE;
8460 
8461 	m = (vm_page_t) hibernate_gobble_queue;
8462 	while (m) {
8463 		pages--;
8464 		count_wire--;
8465 		if (!preflight) {
8466 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8467 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8468 		}
8469 		m = m->vmp_snext;
8470 	}
8471 
8472 	if (!preflight) {
8473 		percpu_foreach(free_pages_head, free_pages) {
8474 			for (m = *free_pages_head; m; m = m->vmp_snext) {
8475 				assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
8476 
8477 				pages--;
8478 				count_wire--;
8479 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8480 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8481 
8482 				hibernate_stats.cd_local_free++;
8483 				hibernate_stats.cd_total_free++;
8484 			}
8485 		}
8486 	}
8487 
8488 
8489 	for (i = 0; i < vm_colors; i++) {
8490 		vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
8491 			assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
8492 
8493 			pages--;
8494 			count_wire--;
8495 			if (!preflight) {
8496 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8497 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8498 
8499 				hibernate_stats.cd_total_free++;
8500 			}
8501 		}
8502 	}
8503 
8504 	vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
8505 		assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
8506 
8507 		pages--;
8508 		count_wire--;
8509 		if (!preflight) {
8510 			hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8511 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8512 
8513 			hibernate_stats.cd_total_free++;
8514 		}
8515 	}
8516 
8517 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
8518 	while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
8519 		assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
8520 
8521 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8522 		discard = FALSE;
8523 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
8524 		    && hibernate_consider_discard(m, preflight)) {
8525 			if (!preflight) {
8526 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8527 			}
8528 			count_discard_inactive++;
8529 			discard = discard_all;
8530 		} else {
8531 			count_throttled++;
8532 		}
8533 		count_wire--;
8534 		if (!preflight) {
8535 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8536 		}
8537 
8538 		if (discard) {
8539 			hibernate_discard_page(m);
8540 		}
8541 		m = next;
8542 	}
8543 
8544 	m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
8545 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8546 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8547 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8548 #if CONFIG_SPTM
8549 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8550 #endif
8551 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8552 		discard = FALSE;
8553 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8554 		    hibernate_consider_discard(m, preflight)) {
8555 			if (!preflight) {
8556 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8557 			}
8558 			if (m->vmp_dirty) {
8559 				count_discard_purgeable++;
8560 			} else {
8561 				count_discard_inactive++;
8562 			}
8563 			discard = discard_all;
8564 		} else {
8565 			/*
8566 			 * If the page must be force-added to the wired page list, prevent it from appearing
8567 			 * in the unwired page list.
8568 			 */
8569 			if (force_to_wired_list) {
8570 				if (!preflight) {
8571 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8572 				}
8573 			} else {
8574 				count_anonymous++;
8575 			}
8576 		}
8577 		/*
8578 		 * If the page is NOT being forced into the wired page list, remove it from the
8579 		 * wired page list here.
8580 		 */
8581 		if (!force_to_wired_list) {
8582 			count_wire--;
8583 			if (!preflight) {
8584 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8585 			}
8586 		}
8587 		if (discard) {
8588 			hibernate_discard_page(m);
8589 		}
8590 		m = next;
8591 	}
8592 
8593 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8594 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8595 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8596 
8597 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8598 		discard = FALSE;
8599 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8600 		    hibernate_consider_discard(m, preflight)) {
8601 			if (!preflight) {
8602 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8603 			}
8604 			if (m->vmp_dirty) {
8605 				count_discard_purgeable++;
8606 			} else {
8607 				count_discard_cleaned++;
8608 			}
8609 			discard = discard_all;
8610 		} else {
8611 			count_cleaned++;
8612 		}
8613 		count_wire--;
8614 		if (!preflight) {
8615 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8616 		}
8617 		if (discard) {
8618 			hibernate_discard_page(m);
8619 		}
8620 		m = next;
8621 	}
8622 
8623 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8624 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8625 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8626 		bool force_to_wired_list = false;       /* Default to NOT forcing page into the wired page list */
8627 #if CONFIG_SPTM
8628 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8629 #endif
8630 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8631 		discard = FALSE;
8632 		if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
8633 		    hibernate_consider_discard(m, preflight)) {
8634 			if (!preflight) {
8635 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8636 			}
8637 			if (m->vmp_dirty) {
8638 				count_discard_purgeable++;
8639 			} else {
8640 				count_discard_active++;
8641 			}
8642 			discard = discard_all;
8643 		} else {
8644 			/*
8645 			 * If the page must be force-added to the wired page list, prevent it from appearing
8646 			 * in the unwired page list.
8647 			 */
8648 			if (force_to_wired_list) {
8649 				if (!preflight) {
8650 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8651 				}
8652 			} else {
8653 				count_active++;
8654 			}
8655 		}
8656 		/*
8657 		 * If the page is NOT being forced into the wired page list, remove it from the
8658 		 * wired page list here.
8659 		 */
8660 		if (!force_to_wired_list) {
8661 			count_wire--;
8662 			if (!preflight) {
8663 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8664 			}
8665 		}
8666 		if (discard) {
8667 			hibernate_discard_page(m);
8668 		}
8669 		m = next;
8670 	}
8671 
8672 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8673 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8674 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8675 		bool force_to_wired_list = false;        /* Default to NOT forcing page into the wired page list */
8676 #if CONFIG_SPTM
8677 		force_to_wired_list = hibernate_sptm_should_force_page_to_wired_pagelist(m);
8678 #endif
8679 		next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8680 		discard = FALSE;
8681 		if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8682 		    hibernate_consider_discard(m, preflight)) {
8683 			if (!preflight) {
8684 				hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8685 			}
8686 			if (m->vmp_dirty) {
8687 				count_discard_purgeable++;
8688 			} else {
8689 				count_discard_inactive++;
8690 			}
8691 			discard = discard_all;
8692 		} else {
8693 			/*
8694 			 * If the page must be force-added to the wired page list, prevent it from appearing
8695 			 * in the unwired page list.
8696 			 */
8697 			if (force_to_wired_list) {
8698 				if (!preflight) {
8699 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8700 				}
8701 			} else {
8702 				count_inactive++;
8703 			}
8704 		}
8705 		/*
8706 		 * If the page is NOT being forced into the wired page list, remove it from the
8707 		 * wired page list here.
8708 		 */
8709 		if (!force_to_wired_list) {
8710 			count_wire--;
8711 			if (!preflight) {
8712 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8713 			}
8714 		}
8715 		if (discard) {
8716 			hibernate_discard_page(m);
8717 		}
8718 		m = next;
8719 	}
8720 	/* XXX FBDP TODO: secluded queue */
8721 
8722 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8723 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8724 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8725 			assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
8726 			    "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
8727 			    m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
8728 
8729 			next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8730 			discard = FALSE;
8731 			if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
8732 			    hibernate_consider_discard(m, preflight)) {
8733 				if (!preflight) {
8734 					hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8735 				}
8736 				count_discard_speculative++;
8737 				discard = discard_all;
8738 			} else {
8739 				count_speculative++;
8740 			}
8741 			count_wire--;
8742 			if (!preflight) {
8743 				hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8744 			}
8745 			if (discard) {
8746 				hibernate_discard_page(m);
8747 			}
8748 			m = next;
8749 		}
8750 	}
8751 
8752 	vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
8753 		assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
8754 
8755 		count_compressor++;
8756 		count_wire--;
8757 		if (!preflight) {
8758 			hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
8759 		}
8760 	}
8761 
8762 
8763 	if (preflight == FALSE && discard_all == TRUE) {
8764 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
8765 
8766 		HIBLOG("hibernate_teardown started\n");
8767 		count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
8768 		HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
8769 
8770 		pages -= count_discard_vm_struct_pages;
8771 		count_wire -= count_discard_vm_struct_pages;
8772 
8773 		hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
8774 
8775 		KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
8776 	}
8777 
8778 	if (!preflight) {
8779 		// pull wired from hibernate_bitmap
8780 		bitmap = &page_list->bank_bitmap[0];
8781 		bitmap_wired = &page_list_wired->bank_bitmap[0];
8782 		for (bank = 0; bank < page_list->bank_count; bank++) {
8783 			for (i = 0; i < bitmap->bitmapwords; i++) {
8784 				bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
8785 			}
8786 			bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
8787 			bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
8788 		}
8789 	}
8790 
8791 	// machine dependent adjustments
8792 	hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
8793 
8794 	if (!preflight) {
8795 		hibernate_stats.cd_count_wire = count_wire;
8796 		hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
8797 		    count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
8798 	}
8799 
8800 	clock_get_uptime(&end);
8801 	absolutetime_to_nanoseconds(end - start, &nsec);
8802 	HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
8803 
8804 	HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n  %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
8805 	    pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
8806 	    discard_all ? "did" : "could",
8807 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8808 
8809 	if (hibernate_stats.cd_skipped_xpmapped) {
8810 		HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8811 	}
8812 
8813 	*pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8814 
8815 	if (preflight && will_discard) {
8816 		*pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8817 		/*
8818 		 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8819 		 * even if they are clean, so we need to size the hibernation image accordingly.
8820 		 *
8821 		 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8822 		 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8823 		 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8824 		 * clean xpmapped pages.
8825 		 *
8826 		 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8827 		 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase.
8828 		 */
8829 		*pagesOut +=  HIBERNATE_XPMAPPED_LIMIT;
8830 	}
8831 
8832 	hibernation_vmqueues_inspection = FALSE;
8833 
8834 #if MACH_ASSERT || DEBUG
8835 	if (!preflight) {
8836 		if (vm_page_local_q) {
8837 			zpercpu_foreach(lq, vm_page_local_q) {
8838 				VPL_UNLOCK(&lq->vpl_lock);
8839 			}
8840 		}
8841 		vm_page_unlock_queues();
8842 	}
8843 #endif  /* MACH_ASSERT || DEBUG */
8844 
8845 	if (preflight) {
8846 		vm_free_page_unlock();
8847 		vm_page_unlock_queues();
8848 		vm_object_unlock(compressor_object);
8849 	}
8850 
8851 	KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8852 }
8853 
8854 void
8855 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8856 {
8857 	uint64_t  start, end, nsec;
8858 	vm_page_t m;
8859 	vm_page_t next;
8860 	uint32_t  i;
8861 	uint32_t  count_discard_active    = 0;
8862 	uint32_t  count_discard_inactive  = 0;
8863 	uint32_t  count_discard_purgeable = 0;
8864 	uint32_t  count_discard_cleaned   = 0;
8865 	uint32_t  count_discard_speculative = 0;
8866 
8867 
8868 #if MACH_ASSERT || DEBUG
8869 	vm_page_lock_queues();
8870 	if (vm_page_local_q) {
8871 		zpercpu_foreach(lq, vm_page_local_q) {
8872 			VPL_LOCK(&lq->vpl_lock);
8873 		}
8874 	}
8875 #endif  /* MACH_ASSERT || DEBUG */
8876 
8877 	clock_get_uptime(&start);
8878 
8879 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8880 	while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8881 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8882 
8883 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8884 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8885 			if (m->vmp_dirty) {
8886 				count_discard_purgeable++;
8887 			} else {
8888 				count_discard_inactive++;
8889 			}
8890 			hibernate_discard_page(m);
8891 		}
8892 		m = next;
8893 	}
8894 
8895 	for (i = 0; i <= vm_page_max_speculative_age_q; i++) {
8896 		m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8897 		while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8898 			assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8899 
8900 			next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8901 			if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8902 				count_discard_speculative++;
8903 				hibernate_discard_page(m);
8904 			}
8905 			m = next;
8906 		}
8907 	}
8908 
8909 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8910 	while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8911 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8912 
8913 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8914 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8915 			if (m->vmp_dirty) {
8916 				count_discard_purgeable++;
8917 			} else {
8918 				count_discard_inactive++;
8919 			}
8920 			hibernate_discard_page(m);
8921 		}
8922 		m = next;
8923 	}
8924 	/* XXX FBDP TODO: secluded queue */
8925 
8926 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8927 	while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8928 		assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8929 
8930 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8931 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8932 			if (m->vmp_dirty) {
8933 				count_discard_purgeable++;
8934 			} else {
8935 				count_discard_active++;
8936 			}
8937 			hibernate_discard_page(m);
8938 		}
8939 		m = next;
8940 	}
8941 
8942 	m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8943 	while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8944 		assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8945 
8946 		next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8947 		if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8948 			if (m->vmp_dirty) {
8949 				count_discard_purgeable++;
8950 			} else {
8951 				count_discard_cleaned++;
8952 			}
8953 			hibernate_discard_page(m);
8954 		}
8955 		m = next;
8956 	}
8957 
8958 #if MACH_ASSERT || DEBUG
8959 	if (vm_page_local_q) {
8960 		zpercpu_foreach(lq, vm_page_local_q) {
8961 			VPL_UNLOCK(&lq->vpl_lock);
8962 		}
8963 	}
8964 	vm_page_unlock_queues();
8965 #endif  /* MACH_ASSERT || DEBUG */
8966 
8967 	clock_get_uptime(&end);
8968 	absolutetime_to_nanoseconds(end - start, &nsec);
8969 	HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8970 	    nsec / 1000000ULL,
8971 	    count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8972 }
8973 
8974 boolean_t       hibernate_paddr_map_inited = FALSE;
8975 unsigned int    hibernate_teardown_last_valid_compact_indx = -1;
8976 vm_page_t       hibernate_rebuild_hash_list = NULL;
8977 
8978 unsigned int    hibernate_teardown_found_tabled_pages = 0;
8979 unsigned int    hibernate_teardown_found_created_pages = 0;
8980 unsigned int    hibernate_teardown_found_free_pages = 0;
8981 unsigned int    hibernate_teardown_vm_page_free_count;
8982 
8983 
8984 struct ppnum_mapping {
8985 	struct ppnum_mapping    *ppnm_next;
8986 	ppnum_t                 ppnm_base_paddr;
8987 	unsigned int            ppnm_sindx;
8988 	unsigned int            ppnm_eindx;
8989 };
8990 
8991 struct ppnum_mapping    *ppnm_head;
8992 struct ppnum_mapping    *ppnm_last_found = NULL;
8993 
8994 
8995 void
8996 hibernate_create_paddr_map(void)
8997 {
8998 	unsigned int    i;
8999 	ppnum_t         next_ppnum_in_run = 0;
9000 	struct ppnum_mapping *ppnm = NULL;
9001 
9002 	if (hibernate_paddr_map_inited == FALSE) {
9003 		for (i = 0; i < vm_pages_count; i++) {
9004 			if (ppnm) {
9005 				ppnm->ppnm_eindx = i;
9006 			}
9007 
9008 			if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(vm_page_get(i)) != next_ppnum_in_run) {
9009 				ppnm = zalloc_permanent_type(struct ppnum_mapping);
9010 
9011 				ppnm->ppnm_next = ppnm_head;
9012 				ppnm_head = ppnm;
9013 
9014 				ppnm->ppnm_sindx = i;
9015 				ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(vm_page_get(i));
9016 			}
9017 			next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(vm_page_get(i)) + 1;
9018 		}
9019 		ppnm->ppnm_eindx = vm_pages_count;
9020 
9021 		hibernate_paddr_map_inited = TRUE;
9022 	}
9023 }
9024 
9025 ppnum_t
9026 hibernate_lookup_paddr(unsigned int indx)
9027 {
9028 	struct ppnum_mapping *ppnm = NULL;
9029 
9030 	ppnm = ppnm_last_found;
9031 
9032 	if (ppnm) {
9033 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
9034 			goto done;
9035 		}
9036 	}
9037 	for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
9038 		if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
9039 			ppnm_last_found = ppnm;
9040 			break;
9041 		}
9042 	}
9043 	if (ppnm == NULL) {
9044 		panic("hibernate_lookup_paddr of %d failed", indx);
9045 	}
9046 done:
9047 	return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
9048 }
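/*
 * Illustrative sketch (added for exposition, not part of the original
 * source): the ppnum_mapping chain built by hibernate_create_paddr_map()
 * records runs of physically contiguous vm_pages[] entries, so translating
 * an index only needs the run's starting index and base physical page.
 * The stand-alone example below mirrors that arithmetic with a hypothetical
 * array of runs instead of the kernel's linked list; all names are invented.
 */
#if 0 /* example only, never compiled */
struct example_paddr_run {
	unsigned int sindx;        /* first vm_pages[] index in the run */
	unsigned int eindx;        /* one past the last index in the run */
	unsigned int base_paddr;   /* physical page number of vm_pages[sindx] */
};

static unsigned int
example_lookup_paddr(const struct example_paddr_run *runs, unsigned int nruns,
    unsigned int indx)
{
	for (unsigned int i = 0; i < nruns; i++) {
		if (indx >= runs[i].sindx && indx < runs[i].eindx) {
			/* same formula as hibernate_lookup_paddr() above */
			return runs[i].base_paddr + (indx - runs[i].sindx);
		}
	}
	return 0; /* a caller would treat this as "not found" */
}
#endif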
9049 
9050 
9051 uint32_t
9052 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
9053 {
9054 	addr64_t        saddr_aligned;
9055 	addr64_t        eaddr_aligned;
9056 	addr64_t        addr;
9057 	ppnum_t         paddr;
9058 	unsigned int    mark_as_unneeded_pages = 0;
9059 
9060 	saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
9061 	eaddr_aligned = eaddr & ~PAGE_MASK_64;
9062 
9063 	for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
9064 		paddr = pmap_find_phys(kernel_pmap, addr);
9065 
9066 		assert(paddr);
9067 
9068 		hibernate_page_bitset(page_list, TRUE, paddr);
9069 		hibernate_page_bitset(page_list_wired, TRUE, paddr);
9070 
9071 		mark_as_unneeded_pages++;
9072 	}
9073 	return mark_as_unneeded_pages;
9074 }
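/*
 * Illustrative note (added for exposition): the rounding above marks only
 * pages that lie entirely inside [saddr, eaddr).  With 16K pages, for
 * example, saddr = 0x10100 rounds up to 0x14000 and eaddr = 0x2c200 rounds
 * down to 0x2c000, so exactly six full pages get marked.  The sketch below
 * repeats that arithmetic with a hypothetical page size; it is not the
 * kernel's PAGE_MASK_64.
 */
#if 0 /* example only, never compiled */
#define EXAMPLE_PAGE_SIZE  16384ULL
#define EXAMPLE_PAGE_MASK  (EXAMPLE_PAGE_SIZE - 1)

static unsigned int
example_count_full_pages(addr64_t saddr, addr64_t eaddr)
{
	addr64_t first = (saddr + EXAMPLE_PAGE_MASK) & ~EXAMPLE_PAGE_MASK; /* round up */
	addr64_t last  = eaddr & ~EXAMPLE_PAGE_MASK;                       /* round down */

	return (first < last) ? (unsigned int)((last - first) / EXAMPLE_PAGE_SIZE) : 0;
}
#endif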
9075 
9076 
9077 void
9078 hibernate_hash_insert_page(vm_page_t mem)
9079 {
9080 	vm_page_bucket_t *bucket;
9081 	int             hash_id;
9082 	vm_object_t     m_object;
9083 
9084 	m_object = VM_PAGE_OBJECT(mem);
9085 
9086 	assert(mem->vmp_hashed);
9087 	assert(m_object);
9088 	assert(mem->vmp_offset != (vm_object_offset_t) -1);
9089 
9090 	/*
9091 	 *	Insert it into the object_object/offset hash table
9092 	 */
9093 	hash_id = vm_page_hash(m_object, mem->vmp_offset);
9094 	bucket = &vm_page_buckets[hash_id];
9095 
9096 	mem->vmp_next_m = bucket->page_list;
9097 	bucket->page_list = VM_PAGE_PACK_PTR(mem);
9098 }
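/*
 * Illustrative sketch (added for exposition): the insert above is a plain
 * push-front onto the hash bucket's singly linked chain, with the links
 * stored as packed pointers.  The simplified example below shows the same
 * push-front with ordinary pointers; the types are hypothetical.
 */
#if 0 /* example only, never compiled */
struct example_page   { struct example_page *next; };
struct example_bucket { struct example_page *head; };

static void
example_bucket_push(struct example_bucket *b, struct example_page *p)
{
	p->next = b->head;   /* new entry points at the old chain head */
	b->head = p;         /* bucket now starts at the new entry */
}
#endif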
9099 
9100 
9101 void
9102 hibernate_free_range(int sindx, int eindx)
9103 {
9104 	vm_page_t       mem;
9105 
9106 	while (sindx < eindx) {
9107 		mem = vm_page_get(sindx);
9108 
9109 		vm_page_init(mem, hibernate_lookup_paddr(sindx));
9110 
9111 		vm_page_put_list_on_free_queue(mem, false);
9112 
9113 		sindx++;
9114 	}
9115 }
9116 
9117 void
9118 hibernate_rebuild_vm_structs(void)
9119 {
9120 	int             i, cindx, sindx, eindx;
9121 	vm_page_t       mem, tmem, mem_next;
9122 	AbsoluteTime    startTime, endTime;
9123 	uint64_t        nsec;
9124 
9125 	if (hibernate_rebuild_needed == FALSE) {
9126 		return;
9127 	}
9128 
9129 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
9130 	HIBLOG("hibernate_rebuild started\n");
9131 
9132 	clock_get_uptime(&startTime);
9133 
9134 	pal_hib_rebuild_pmap_structs();
9135 
9136 	bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
9137 	eindx = vm_pages_count;
9138 
9139 	/*
9140 	 * Mark all the vm_pages[] that have not been initialized yet as being
9141 	 * transient. This is needed to ensure that the buddy page search is correct.
9142 	 * Without this, random data in these vm_pages[] can trip up the buddy search.
9143 	 */
9144 	for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
9145 		vm_page_get(i)->vmp_q_state = VM_PAGE_NOT_ON_Q;
9146 	}
9147 
9148 	for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
9149 		mem = vm_page_get(cindx);
9150 		assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
9151 		/*
9152 		 * hibernate_teardown_vm_structs leaves the location where
9153 		 * this vm_page_t must be located in "vmp_next_m".
9154 		 */
9155 		tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9156 		mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
9157 		assert(tmem >= mem);
9158 
9159 		sindx = (int)(tmem - vm_page_get(0));
9160 
9161 		if (mem != tmem) {
9162 			/*
9163 			 * this vm_page_t was moved by hibernate_teardown_vm_structs,
9164 			 * so move it back to its real location
9165 			 */
9166 			*tmem = *mem;
9167 			mem = tmem;
9168 		}
9169 		if (mem->vmp_hashed) {
9170 			hibernate_hash_insert_page(mem);
9171 		}
9172 		/*
9173 		 * the 'hole' between this vm_page_t and the previous
9174 		 * vm_page_t we moved needs to be initialized as
9175 		 * a range of free vm_page_t's
9176 		 */
9177 		hibernate_free_range(sindx + 1, eindx);
9178 
9179 		eindx = sindx;
9180 	}
9181 	if (sindx) {
9182 		hibernate_free_range(0, sindx);
9183 	}
9184 
9185 	assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
9186 
9187 	/*
9188 	 * process the list of vm_page_t's that were entered in the hash,
9189 	 * but were not located in the vm_pages array... these are
9190 	 * vm_page_t's that were created on the fly (i.e. fictitious)
9191 	 */
9192 	for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
9193 		mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9194 
9195 		mem->vmp_next_m = 0;
9196 		hibernate_hash_insert_page(mem);
9197 	}
9198 	hibernate_rebuild_hash_list = NULL;
9199 
9200 	clock_get_uptime(&endTime);
9201 	SUB_ABSOLUTETIME(&endTime, &startTime);
9202 	absolutetime_to_nanoseconds(endTime, &nsec);
9203 
9204 	HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
9205 
9206 	hibernate_rebuild_needed = FALSE;
9207 
9208 	KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
9209 }
9210 
9211 uint32_t
9212 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
9213 {
9214 	unsigned int    i;
9215 	unsigned int    compact_target_indx;
9216 	vm_page_t       mem, mem_next;
9217 	vm_page_bucket_t *bucket;
9218 	unsigned int    mark_as_unneeded_pages = 0;
9219 	unsigned int    unneeded_vm_page_bucket_pages = 0;
9220 	unsigned int    unneeded_vm_pages_pages = 0;
9221 	unsigned int    unneeded_pmap_pages = 0;
9222 	addr64_t        start_of_unneeded = 0;
9223 	addr64_t        end_of_unneeded = 0;
9224 
9225 
9226 	if (hibernate_should_abort()) {
9227 		return 0;
9228 	}
9229 
9230 	hibernate_rebuild_needed = TRUE;
9231 
9232 	HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
9233 	    vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
9234 	    vm_page_cleaned_count, compressor_object->resident_page_count);
9235 
9236 	for (i = 0; i < vm_page_bucket_count; i++) {
9237 		bucket = &vm_page_buckets[i];
9238 
9239 		for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
9240 			assert(mem->vmp_hashed);
9241 
9242 			mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
9243 
9244 			if (!vm_page_in_array(mem)) {
9245 				mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
9246 				hibernate_rebuild_hash_list = mem;
9247 			}
9248 		}
9249 	}
9250 	unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
9251 	mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
9252 
9253 	hibernate_teardown_vm_page_free_count = vm_page_free_count;
9254 
9255 	compact_target_indx = 0;
9256 
9257 	for (i = 0; i < vm_pages_count; i++) {
9258 		mem = vm_page_get(i);
9259 
9260 		if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
9261 			assert(mem->vmp_busy);
9262 			assert(!mem->vmp_lopage);
9263 
9264 			vm_page_steal_free_page(mem, VM_REMOVE_REASON_USE);
9265 			hibernate_teardown_found_free_pages++;
9266 
9267 
9268 			if (vm_page_get(compact_target_indx)->vmp_q_state != VM_PAGE_ON_FREE_Q) {
9269 				compact_target_indx = i;
9270 			}
9271 		} else {
9272 			/*
9273 			 * Record this vm_page_t's original location; we need
9274 			 * this even if it doesn't get moved, as an indicator
9275 			 * to the rebuild function that it does not have to be
9276 			 * moved back.
9277 			 */
9278 			mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
9279 
9280 			if (vm_page_get(compact_target_indx)->vmp_q_state == VM_PAGE_ON_FREE_Q) {
9281 				/*
9282 				 * we've got a hole to fill, so
9283 				 * move this vm_page_t to its new home
9284 				 */
9285 				*vm_page_get(compact_target_indx) = *mem;
9286 				mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
9287 
9288 				hibernate_teardown_last_valid_compact_indx = compact_target_indx;
9289 				compact_target_indx++;
9290 			} else {
9291 				hibernate_teardown_last_valid_compact_indx = i;
9292 			}
9293 		}
9294 	}
9295 
9296 	unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
9297 	    (addr64_t)vm_page_get(vm_pages_count - 1), page_list, page_list_wired);
9298 	mark_as_unneeded_pages += unneeded_vm_pages_pages;
9299 
9300 	pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
9301 
9302 	if (start_of_unneeded) {
9303 		unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
9304 		mark_as_unneeded_pages += unneeded_pmap_pages;
9305 	}
9306 	HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
9307 
9308 	return mark_as_unneeded_pages;
9309 }
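/*
 * Illustrative sketch (added for exposition): the loop above compacts
 * vm_pages[] by copying in-use entries down into slots vacated by free
 * pages, and every in-use entry remembers its original slot (in vmp_next_m)
 * so hibernate_rebuild_vm_structs() can undo the move.  The example below
 * shows the same compaction idea on a plain integer array where nonzero
 * means "in use"; the names and return value are invented for illustration.
 */
#if 0 /* example only, never compiled */
static unsigned int
example_compact(int *slots, unsigned int count)
{
	unsigned int target = 0;        /* next hole that can be filled */
	unsigned int last_valid = 0;    /* highest slot still holding live data */

	for (unsigned int i = 0; i < count; i++) {
		if (slots[i] == 0) {
			continue;               /* free slot: leaves a hole behind */
		}
		if (slots[target] == 0) {
			slots[target] = slots[i];   /* move the live entry down */
			slots[i] = 0;
			last_valid = target;
			target++;
		} else {
			last_valid = i;
			if (target == i) {
				target++;       /* no hole yet, keep tracking i */
			}
		}
	}
	/* analogous to hibernate_teardown_last_valid_compact_indx */
	return last_valid;
}
#endif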
9310 
9311 
9312 #endif /* HIBERNATION */
9313 
9314 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9315 
9316 #include <mach_vm_debug.h>
9317 #if     MACH_VM_DEBUG
9318 
9319 #include <mach_debug/hash_info.h>
9320 #include <vm/vm_debug_internal.h>
9321 
9322 /*
9323  *	Routine:	vm_page_info
9324  *	Purpose:
9325  *		Return information about the global VP table.
9326  *		Fills the buffer with as much information as possible
9327  *		and returns the desired size of the buffer.
9328  *	Conditions:
9329  *		Nothing locked.  The caller should provide
9330  *		possibly-pageable memory.
9331  */
9332 
9333 unsigned int
9334 vm_page_info(
9335 	hash_info_bucket_t *info,
9336 	unsigned int count)
9337 {
9338 	unsigned int i;
9339 	lck_spin_t      *bucket_lock;
9340 
9341 	if (vm_page_bucket_count < count) {
9342 		count = vm_page_bucket_count;
9343 	}
9344 
9345 	for (i = 0; i < count; i++) {
9346 		vm_page_bucket_t *bucket = &vm_page_buckets[i];
9347 		unsigned int bucket_count = 0;
9348 		vm_page_t m;
9349 
9350 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
9351 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
9352 
9353 		for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
9354 		    m != VM_PAGE_NULL;
9355 		    m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
9356 			bucket_count++;
9357 		}
9358 
9359 		lck_spin_unlock(bucket_lock);
9360 
9361 		/* don't touch pageable memory while holding locks */
9362 		info[i].hib_count = bucket_count;
9363 	}
9364 
9365 	return vm_page_bucket_count;
9366 }
9367 #endif  /* MACH_VM_DEBUG */
9368 
9369 #if VM_PAGE_BUCKETS_CHECK
9370 void
9371 vm_page_buckets_check(void)
9372 {
9373 	unsigned int i;
9374 	vm_page_t p;
9375 	unsigned int p_hash;
9376 	vm_page_bucket_t *bucket;
9377 	lck_spin_t      *bucket_lock;
9378 
9379 	if (!vm_page_buckets_check_ready) {
9380 		return;
9381 	}
9382 
9383 #if HIBERNATION
9384 	if (hibernate_rebuild_needed ||
9385 	    hibernate_rebuild_hash_list) {
9386 		panic("BUCKET_CHECK: hibernation in progress: "
9387 		    "rebuild_needed=%d rebuild_hash_list=%p\n",
9388 		    hibernate_rebuild_needed,
9389 		    hibernate_rebuild_hash_list);
9390 	}
9391 #endif /* HIBERNATION */
9392 
9393 #if VM_PAGE_FAKE_BUCKETS
9394 	char *cp;
9395 	for (cp = (char *) vm_page_fake_buckets_start;
9396 	    cp < (char *) vm_page_fake_buckets_end;
9397 	    cp++) {
9398 		if (*cp != 0x5a) {
9399 			panic("BUCKET_CHECK: corruption at %p in fake buckets "
9400 			    "[0x%llx:0x%llx]\n",
9401 			    cp,
9402 			    (uint64_t) vm_page_fake_buckets_start,
9403 			    (uint64_t) vm_page_fake_buckets_end);
9404 		}
9405 	}
9406 #endif /* VM_PAGE_FAKE_BUCKETS */
9407 
9408 	for (i = 0; i < vm_page_bucket_count; i++) {
9409 		vm_object_t     p_object;
9410 
9411 		bucket = &vm_page_buckets[i];
9412 		if (!bucket->page_list) {
9413 			continue;
9414 		}
9415 
9416 		bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
9417 		lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
9418 		p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
9419 
9420 		while (p != VM_PAGE_NULL) {
9421 			p_object = VM_PAGE_OBJECT(p);
9422 
9423 			if (!p->vmp_hashed) {
9424 				panic("BUCKET_CHECK: page %p (%p,0x%llx) "
9425 				    "hash %d in bucket %d at %p "
9426 				    "is not hashed\n",
9427 				    p, p_object, p->vmp_offset,
9428 				    p_hash, i, bucket);
9429 			}
9430 			p_hash = vm_page_hash(p_object, p->vmp_offset);
9431 			if (p_hash != i) {
9432 				panic("BUCKET_CHECK: corruption in bucket %d "
9433 				    "at %p: page %p object %p offset 0x%llx "
9434 				    "hash %d\n",
9435 				    i, bucket, p, p_object, p->vmp_offset,
9436 				    p_hash);
9437 			}
9438 			p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
9439 		}
9440 		lck_spin_unlock(bucket_lock);
9441 	}
9442 
9443 //	printf("BUCKET_CHECK: checked buckets\n");
9444 }
9445 #endif /* VM_PAGE_BUCKETS_CHECK */
9446 
9447 /*
9448  * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
9449  * local queues if they exist... it's the only spot in the system where we add pages
9450  * to those queues...  once on those queues, those pages can only move to one of the
9451  * global page queues or the free queues... they NEVER move from local q to local q.
9452  * the 'local' state is stable when vm_page_queues_remove is called since we're behind
9453  * the global vm_page_queue_lock at this point...  we still need to take the local lock
9454  * in case this operation is being run on a different CPU than the local queue's identity,
9455  * but we don't have to worry about the page moving to a global queue or becoming wired
9456  * while we're grabbing the local lock since those operations would require the global
9457  * vm_page_queue_lock to be held, and we already own it.
9458  *
9459  * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
9460  * 'wired' and local are ALWAYS mutually exclusive conditions.
9461  */
9462 
9463 void
9464 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
9465 {
9466 	boolean_t       was_pageable = TRUE;
9467 	vm_object_t     m_object;
9468 
9469 	m_object = VM_PAGE_OBJECT(mem);
9470 
9471 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9472 
9473 	if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
9474 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9475 		if (remove_from_specialq == TRUE) {
9476 			vm_page_remove_from_specialq(mem);
9477 		}
9478 		/*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
9479 		 *       assert(mem->vmp_specialq.next != 0);
9480 		 *       assert(mem->vmp_specialq.prev != 0);
9481 		 *  } else {*/
9482 		if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
9483 			assert(mem->vmp_specialq.next == 0);
9484 			assert(mem->vmp_specialq.prev == 0);
9485 		}
9486 		return;
9487 	}
9488 
9489 	if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
9490 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9491 		assert(mem->vmp_specialq.next == 0 &&
9492 		    mem->vmp_specialq.prev == 0 &&
9493 		    mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9494 		return;
9495 	}
9496 	if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
9497 		/*
9498 		 * might put these guys on a list for debugging purposes
9499 		 * if we do, we'll need to remove this assert
9500 		 */
9501 		assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
9502 		assert(mem->vmp_specialq.next == 0 &&
9503 		    mem->vmp_specialq.prev == 0);
9504 		/*
9505 		 * Recall that vmp_on_specialq also means a request to put
9506 		 * it on the special Q. So we don't want to reset that bit
9507 		 * just because a wiring request came in. We might want to
9508 		 * put it on the special queue post-unwiring.
9509 		 *
9510 		 * &&
9511 		 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
9512 		 */
9513 		return;
9514 	}
9515 
9516 	assert(m_object != compressor_object);
9517 	assert(!is_kernel_object(m_object));
9518 	assert(!vm_page_is_fictitious(mem));
9519 
9520 	switch (mem->vmp_q_state) {
9521 	case VM_PAGE_ON_ACTIVE_LOCAL_Q:
9522 	{
9523 		struct vpl      *lq;
9524 
9525 		lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
9526 		VPL_LOCK(&lq->vpl_lock);
9527 		vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
9528 		mem->vmp_local_id = 0;
9529 		lq->vpl_count--;
9530 		if (m_object->internal) {
9531 			lq->vpl_internal_count--;
9532 		} else {
9533 			lq->vpl_external_count--;
9534 		}
9535 		VPL_UNLOCK(&lq->vpl_lock);
9536 		was_pageable = FALSE;
9537 		break;
9538 	}
9539 	case VM_PAGE_ON_ACTIVE_Q:
9540 	{
9541 		vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
9542 		vm_page_active_count--;
9543 		break;
9544 	}
9545 
9546 	case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
9547 	{
9548 		assert(m_object->internal == TRUE);
9549 
9550 		vm_page_inactive_count--;
9551 		vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
9552 		vm_page_anonymous_count--;
9553 
9554 		vm_purgeable_q_advance_all();
9555 		vm_page_balance_inactive(3);
9556 		break;
9557 	}
9558 
9559 	case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
9560 	{
9561 		assert(m_object->internal == FALSE);
9562 
9563 		vm_page_inactive_count--;
9564 		vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
9565 		vm_purgeable_q_advance_all();
9566 		vm_page_balance_inactive(3);
9567 		break;
9568 	}
9569 
9570 	case VM_PAGE_ON_INACTIVE_CLEANED_Q:
9571 	{
9572 		assert(m_object->internal == FALSE);
9573 
9574 		vm_page_inactive_count--;
9575 		vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
9576 		vm_page_cleaned_count--;
9577 		vm_page_balance_inactive(3);
9578 		break;
9579 	}
9580 
9581 	case VM_PAGE_ON_THROTTLED_Q:
9582 	{
9583 		assert(m_object->internal == TRUE);
9584 
9585 		vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
9586 		vm_page_throttled_count--;
9587 		was_pageable = FALSE;
9588 		break;
9589 	}
9590 
9591 	case VM_PAGE_ON_SPECULATIVE_Q:
9592 	{
9593 		assert(m_object->internal == FALSE);
9594 
9595 		vm_page_remque(&mem->vmp_pageq);
9596 		vm_page_speculative_count--;
9597 		vm_page_balance_inactive(3);
9598 		break;
9599 	}
9600 
9601 #if CONFIG_SECLUDED_MEMORY
9602 	case VM_PAGE_ON_SECLUDED_Q:
9603 	{
9604 		vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
9605 		vm_page_secluded_count--;
9606 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9607 		if (m_object == VM_OBJECT_NULL) {
9608 			vm_page_secluded_count_free--;
9609 			was_pageable = FALSE;
9610 		} else {
9611 			assert(!m_object->internal);
9612 			vm_page_secluded_count_inuse--;
9613 			was_pageable = FALSE;
9614 //			was_pageable = TRUE;
9615 		}
9616 		break;
9617 	}
9618 #endif /* CONFIG_SECLUDED_MEMORY */
9619 
9620 	default:
9621 	{
9622 		/*
9623 		 *	if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
9624 		 *              NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
9625 		 *              the caller is responsible for determining if the page is on that queue, and if so, must
9626 		 *              either first remove it (it needs both the page queues lock and the object lock to do
9627 		 *              this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
9628 		 *
9629 		 *	we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
9630 		 *	or any of the undefined states
9631 		 */
9632 		panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
9633 		break;
9634 	}
9635 	}
9636 	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
9637 	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
9638 
9639 	if (remove_from_specialq == TRUE) {
9640 		vm_page_remove_from_specialq(mem);
9641 	}
9642 	if (was_pageable) {
9643 		if (m_object->internal) {
9644 			vm_page_pageable_internal_count--;
9645 		} else {
9646 			vm_page_pageable_external_count--;
9647 		}
9648 	}
9649 }
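/*
 * Illustrative sketch (added for exposition): as the block comment before
 * vm_page_queues_remove() explains, a page on a CPU-local active queue is
 * only manipulated with the global vm_page_queue_lock already held, and the
 * per-CPU vpl_lock is then taken just around the unlink.  The hypothetical
 * helper below isolates that lock-ordering pattern; every name in it is
 * invented and it is not the kernel's implementation.
 */
#if 0 /* example only, never compiled */
static void
example_remove_from_local_queue(example_page_t page)
{
	/* 1. caller already holds the global page-queue lock */
	example_assert_global_queue_lock_held();

	/* 2. find the per-CPU local queue the page was enqueued on */
	example_local_queue_t lq = example_local_queue_for(page->local_id);

	/* 3. the per-CPU lock is always taken after the global lock */
	example_local_lock(lq);
	example_unlink(lq, page);
	page->local_id = 0;
	example_local_unlock(lq);
}
#endif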
9650 
9651 void
9652 vm_page_remove_internal(vm_page_t page)
9653 {
9654 	vm_object_t __object = VM_PAGE_OBJECT(page);
9655 	if (page == __object->memq_hint) {
9656 		vm_page_t       __new_hint;
9657 		vm_page_queue_entry_t   __qe;
9658 		__qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
9659 		if (vm_page_queue_end(&__object->memq, __qe)) {
9660 			__qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
9661 			if (vm_page_queue_end(&__object->memq, __qe)) {
9662 				__qe = NULL;
9663 			}
9664 		}
9665 		__new_hint = (vm_page_t)((uintptr_t) __qe);
9666 		__object->memq_hint = __new_hint;
9667 	}
9668 	vm_page_queue_remove(&__object->memq, page, vmp_listq);
9669 #if CONFIG_SECLUDED_MEMORY
9670 	if (__object->eligible_for_secluded) {
9671 		vm_page_secluded.eligible_for_secluded--;
9672 	}
9673 #endif /* CONFIG_SECLUDED_MEMORY */
9674 }
9675 
9676 void
9677 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
9678 {
9679 	vm_object_t     m_object;
9680 
9681 	m_object = VM_PAGE_OBJECT(mem);
9682 
9683 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9684 	assert(!vm_page_is_fictitious(mem));
9685 	assert(!mem->vmp_laundry);
9686 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9687 	vm_page_check_pageable_safe(mem);
9688 
9689 	if (m_object->internal) {
9690 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
9691 
9692 		if (first == TRUE) {
9693 			vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
9694 		} else {
9695 			vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
9696 		}
9697 
9698 		vm_page_anonymous_count++;
9699 		vm_page_pageable_internal_count++;
9700 	} else {
9701 		mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
9702 
9703 		if (first == TRUE) {
9704 			vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
9705 		} else {
9706 			vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
9707 		}
9708 
9709 		vm_page_pageable_external_count++;
9710 	}
9711 	vm_page_inactive_count++;
9712 	token_new_pagecount++;
9713 
9714 	vm_page_add_to_specialq(mem, FALSE);
9715 }
9716 
9717 void
9718 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
9719 {
9720 	vm_object_t     m_object;
9721 
9722 	m_object = VM_PAGE_OBJECT(mem);
9723 
9724 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
9725 	assert(!vm_page_is_fictitious(mem));
9726 	assert(!mem->vmp_laundry);
9727 	assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
9728 	vm_page_check_pageable_safe(mem);
9729 
9730 	mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
9731 	if (first == TRUE) {
9732 		vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
9733 	} else {
9734 		vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
9735 	}
9736 	vm_page_active_count++;
9737 
9738 	if (m_object->internal) {
9739 		vm_page_pageable_internal_count++;
9740 	} else {
9741 		vm_page_pageable_external_count++;
9742 	}
9743 
9744 	vm_page_add_to_specialq(mem, FALSE);
9745 	vm_page_balance_inactive(3);
9746 }
9747 
9748 /*
9749  * Pages from special kernel objects shouldn't
9750  * be placed on pageable queues.
9751  */
9752 void
9753 vm_page_check_pageable_safe(vm_page_t page)
9754 {
9755 	vm_object_t     page_object;
9756 
9757 	page_object = VM_PAGE_OBJECT(page);
9758 
9759 	if (is_kernel_object(page_object)) {
9760 		panic("vm_page_check_pageable_safe: trying to add page"
9761 		    " from a kernel object to pageable queue");
9762 	}
9763 
9764 	if (page_object == compressor_object) {
9765 		panic("vm_page_check_pageable_safe: trying to add page"
9766 		    " from compressor object (%p) to pageable queue", compressor_object);
9767 	}
9768 }
9769 
9770 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
9771 * wired page diagnose
9772 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
9773 
9774 #include <libkern/OSKextLibPrivate.h>
9775 
9776 #define KA_SIZE(namelen, subtotalscount)        \
9777 	(sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
9778 
9779 #define KA_NAME(alloc)  \
9780 	((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
9781 
9782 #define KA_NAME_LEN(alloc)      \
9783     (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
9784 
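/*
 * Illustrative note (added for exposition): KA_SIZE()/KA_NAME() describe one
 * variable-length record laid out as
 *
 *     [ struct vm_allocation_site | subtotalscount totals | name + NUL ]
 *
 * so the name starts immediately after the last subtotal.  The sketch below
 * restates the layout arithmetic with hypothetical types.
 */
#if 0 /* example only, never compiled */
struct example_total { uint64_t total; uint16_t tag; };
struct example_site  { uint16_t flags; struct example_total subtotals[]; };

static size_t
example_record_size(size_t namelen, size_t subtotalscount)
{
	/* header + one entry per subtotal + name bytes + trailing NUL */
	return sizeof(struct example_site)
	       + subtotalscount * sizeof(struct example_total)
	       + namelen + 1;
}

static char *
example_record_name(struct example_site *site, size_t subtotalscount)
{
	/* the name lives right after the flexible subtotals array */
	return (char *)&site->subtotals[subtotalscount];
}
#endif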
9785 vm_tag_t
9786 vm_tag_bt(void)
9787 {
9788 	uintptr_t* frameptr;
9789 	uintptr_t* frameptr_next;
9790 	uintptr_t retaddr;
9791 	uintptr_t kstackb, kstackt;
9792 	const vm_allocation_site_t * site;
9793 	thread_t cthread;
9794 	kern_allocation_name_t name;
9795 
9796 	cthread = current_thread();
9797 	if (__improbable(cthread == NULL)) {
9798 		return VM_KERN_MEMORY_OSFMK;
9799 	}
9800 
9801 	if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9802 		if (!name->tag) {
9803 			vm_tag_alloc(name);
9804 		}
9805 		return name->tag;
9806 	}
9807 
9808 	kstackb = cthread->kernel_stack;
9809 	kstackt = kstackb + kernel_stack_size;
9810 
9811 	/* Load stack frame pointer (EBP on x86) into frameptr */
9812 	frameptr = __builtin_frame_address(0);
9813 	site = NULL;
9814 	while (frameptr != NULL) {
9815 		/* Verify thread stack bounds */
9816 		if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9817 			break;
9818 		}
9819 
9820 		/* Next frame pointer is pointed to by the previous one */
9821 		frameptr_next = (uintptr_t*) *frameptr;
9822 #if defined(HAS_APPLE_PAC)
9823 		frameptr_next = ptrauth_strip(frameptr_next, ptrauth_key_frame_pointer);
9824 #endif
9825 
9826 		/* Pull return address from one spot above the frame pointer */
9827 		retaddr = *(frameptr + 1);
9828 
9829 #if defined(HAS_APPLE_PAC)
9830 		retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9831 #endif
9832 
9833 		if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9834 		    || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9835 			site = OSKextGetAllocationSiteForCaller(retaddr);
9836 			break;
9837 		}
9838 		frameptr = frameptr_next;
9839 	}
9840 
9841 	if (site) {
9842 		return site->tag;
9843 	}
9844 
9845 #if MACH_ASSERT
9846 	/*
9847 	 * Kernel tests appear here as unrecognized call sites and would get
9848 	 * no memory tag. Give them a default tag to prevent panics later.
9849 	 */
9850 	if (thread_get_test_option(test_option_vm_prevent_wire_tag_panic)) {
9851 		return VM_KERN_MEMORY_OSFMK;
9852 	}
9853 #endif
9854 
9855 	return VM_KERN_MEMORY_NONE;
9856 }
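/*
 * Illustrative sketch (added for exposition): vm_tag_bt() above walks the
 * saved-frame-pointer chain, where each frame keeps the caller's frame
 * pointer at fp[0] and the return address at fp[1], and it stops as soon as
 * the chain leaves the current kernel stack.  The stand-alone walker below
 * shows the same pattern; the bounds, depth limit, and names are
 * hypothetical and it omits the pointer-authentication stripping.
 */
#if 0 /* example only, never compiled */
static unsigned int
example_backtrace(uintptr_t *out, unsigned int max_frames,
    uintptr_t stack_bottom, uintptr_t stack_top)
{
	uintptr_t *fp = (uintptr_t *)__builtin_frame_address(0);
	unsigned int depth = 0;

	while (fp != NULL && depth < max_frames) {
		/* stop if the frame no longer lies inside this stack */
		if ((uintptr_t)(fp + 2) > stack_top || (uintptr_t)fp < stack_bottom) {
			break;
		}
		out[depth++] = fp[1];          /* return address sits above the saved FP */
		fp = (uintptr_t *)fp[0];       /* follow the saved frame pointer */
	}
	return depth;
}
#endif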
9857 
9858 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9859 
9860 void
9861 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9862 {
9863 	vm_tag_t tag;
9864 	uint64_t avail;
9865 	uint32_t idx;
9866 	vm_allocation_site_t * prev;
9867 
9868 	if (site->tag) {
9869 		return;
9870 	}
9871 
9872 	idx = 0;
9873 	while (TRUE) {
9874 		avail = free_tag_bits[idx];
9875 		if (avail) {
9876 			tag = (vm_tag_t)__builtin_clzll(avail);
9877 			avail &= ~(1ULL << (63 - tag));
9878 			free_tag_bits[idx] = avail;
9879 			tag += (idx << 6);
9880 			break;
9881 		}
9882 		idx++;
9883 		if (idx >= ARRAY_COUNT(free_tag_bits)) {
9884 			for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9885 				prev = vm_allocation_sites[idx];
9886 				if (!prev) {
9887 					continue;
9888 				}
9889 				if (!KA_NAME_LEN(prev)) {
9890 					continue;
9891 				}
9892 				if (!prev->tag) {
9893 					continue;
9894 				}
9895 				if (prev->total) {
9896 					continue;
9897 				}
9898 				if (1 != prev->refcount) {
9899 					continue;
9900 				}
9901 
9902 				assert(idx == prev->tag);
9903 				tag = (vm_tag_t)idx;
9904 				prev->tag = VM_KERN_MEMORY_NONE;
9905 				*releasesiteP = prev;
9906 				break;
9907 			}
9908 			if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9909 				tag = VM_KERN_MEMORY_ANY;
9910 			}
9911 			break;
9912 		}
9913 	}
9914 	site->tag = tag;
9915 
9916 	OSAddAtomic16(1, &site->refcount);
9917 
9918 	if (VM_KERN_MEMORY_ANY != tag) {
9919 		vm_allocation_sites[tag] = site;
9920 	}
9921 
9922 	if (tag > vm_allocation_tag_highest) {
9923 		vm_allocation_tag_highest = tag;
9924 	}
9925 }
9926 
9927 static void
9928 vm_tag_free_locked(vm_tag_t tag)
9929 {
9930 	uint64_t avail;
9931 	uint32_t idx;
9932 	uint64_t bit;
9933 
9934 	if (VM_KERN_MEMORY_ANY == tag) {
9935 		return;
9936 	}
9937 
9938 	idx = (tag >> 6);
9939 	avail = free_tag_bits[idx];
9940 	tag &= 63;
9941 	bit = (1ULL << (63 - tag));
9942 	assert(!(avail & bit));
9943 	free_tag_bits[idx] = (avail | bit);
9944 }
9945 
9946 static void
9947 vm_tag_init(void)
9948 {
9949 	vm_tag_t tag;
9950 	for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9951 		vm_tag_free_locked(tag);
9952 	}
9953 
9954 	for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9955 		vm_tag_free_locked(tag);
9956 	}
9957 }
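/*
 * Illustrative note (added for exposition): free_tag_bits[] keeps 64 tags
 * per word, and a *free* tag is recorded by setting bit (63 - (tag & 63))
 * of word (tag >> 6).  That ordering is what lets vm_tag_alloc_locked()
 * find the lowest free tag in a word with __builtin_clzll().  The helpers
 * below restate the same bit arithmetic on a caller-supplied bitmap; the
 * names are invented.
 */
#if 0 /* example only, never compiled */
static void
example_mark_tag_free(uint64_t *bits, unsigned int tag)
{
	bits[tag >> 6] |= 1ULL << (63 - (tag & 63));
}

static int
example_alloc_lowest_tag(uint64_t *bits, unsigned int nwords)
{
	for (unsigned int idx = 0; idx < nwords; idx++) {
		if (bits[idx] == 0) {
			continue;
		}
		/* leading zeros of the word give the lowest free tag in it */
		unsigned int tag = (unsigned int)__builtin_clzll(bits[idx]);
		bits[idx] &= ~(1ULL << (63 - tag));    /* mark it allocated */
		return (int)(tag + (idx << 6));
	}
	return -1;                                     /* no free tag left */
}
#endif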
9958 
9959 vm_tag_t
9960 vm_tag_alloc(vm_allocation_site_t * site)
9961 {
9962 	vm_allocation_site_t * releasesite;
9963 
9964 	if (!site->tag) {
9965 		releasesite = NULL;
9966 		lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
9967 		vm_tag_alloc_locked(site, &releasesite);
9968 		lck_ticket_unlock(&vm_allocation_sites_lock);
9969 		if (releasesite) {
9970 			kern_allocation_name_release(releasesite);
9971 		}
9972 	}
9973 
9974 	return site->tag;
9975 }
9976 
9977 #if VM_BTLOG_TAGS
9978 #define VM_KERN_MEMORY_STR_MAX_LEN (32)
9979 TUNABLE_STR(vmtaglog, VM_KERN_MEMORY_STR_MAX_LEN, "vmtaglog", "");
9980 #define VM_TAG_BTLOG_SIZE (16u << 10)
9981 
9982 btlog_t vmtaglog_btlog;
9983 vm_tag_t vmtaglog_tag;
9984 
9985 static void
9986 vm_tag_log(vm_object_t object, int64_t delta, void *fp)
9987 {
9988 	if (is_kernel_object(object)) {
9989 		/* kernel object backtraces are tracked in vm entries */
9990 		return;
9991 	}
9992 	if (delta > 0) {
9993 		btref_t ref = btref_get(fp, BTREF_GET_NOWAIT);
9994 		btlog_record(vmtaglog_btlog, object, 0, ref);
9995 	} else if (object->wired_page_count == 0) {
9996 		btlog_erase(vmtaglog_btlog, object);
9997 	}
9998 }
9999 
10000 #ifndef ARRAY_SIZE
10001 #define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))
10002 #endif /* ARRAY_SIZE */
10003 #define VM_KERN_MEMORY_ELEM(name) [VM_KERN_MEMORY_##name] = #name
10004 const char *vm_kern_memory_strs[] = {
10005 	VM_KERN_MEMORY_ELEM(OSFMK),
10006 	VM_KERN_MEMORY_ELEM(BSD),
10007 	VM_KERN_MEMORY_ELEM(IOKIT),
10008 	VM_KERN_MEMORY_ELEM(LIBKERN),
10009 	VM_KERN_MEMORY_ELEM(OSKEXT),
10010 	VM_KERN_MEMORY_ELEM(KEXT),
10011 	VM_KERN_MEMORY_ELEM(IPC),
10012 	VM_KERN_MEMORY_ELEM(STACK),
10013 	VM_KERN_MEMORY_ELEM(CPU),
10014 	VM_KERN_MEMORY_ELEM(PMAP),
10015 	VM_KERN_MEMORY_ELEM(PTE),
10016 	VM_KERN_MEMORY_ELEM(ZONE),
10017 	VM_KERN_MEMORY_ELEM(KALLOC),
10018 	VM_KERN_MEMORY_ELEM(COMPRESSOR),
10019 	VM_KERN_MEMORY_ELEM(COMPRESSED_DATA),
10020 	VM_KERN_MEMORY_ELEM(PHANTOM_CACHE),
10021 	VM_KERN_MEMORY_ELEM(WAITQ),
10022 	VM_KERN_MEMORY_ELEM(DIAG),
10023 	VM_KERN_MEMORY_ELEM(LOG),
10024 	VM_KERN_MEMORY_ELEM(FILE),
10025 	VM_KERN_MEMORY_ELEM(MBUF),
10026 	VM_KERN_MEMORY_ELEM(UBC),
10027 	VM_KERN_MEMORY_ELEM(SECURITY),
10028 	VM_KERN_MEMORY_ELEM(MLOCK),
10029 	VM_KERN_MEMORY_ELEM(REASON),
10030 	VM_KERN_MEMORY_ELEM(SKYWALK),
10031 	VM_KERN_MEMORY_ELEM(LTABLE),
10032 	VM_KERN_MEMORY_ELEM(HV),
10033 	VM_KERN_MEMORY_ELEM(KALLOC_DATA),
10034 	VM_KERN_MEMORY_ELEM(RETIRED),
10035 	VM_KERN_MEMORY_ELEM(KALLOC_TYPE),
10036 	VM_KERN_MEMORY_ELEM(TRIAGE),
10037 	VM_KERN_MEMORY_ELEM(RECOUNT),
10038 };
10039 
10040 static vm_tag_t
10041 vm_tag_str_to_idx(char tagstr[VM_KERN_MEMORY_STR_MAX_LEN])
10042 {
10043 	for (vm_tag_t i = VM_KERN_MEMORY_OSFMK; i < ARRAY_SIZE(vm_kern_memory_strs); i++) {
10044 		if (!strncmp(vm_kern_memory_strs[i], tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10045 			return i;
10046 		}
10047 	}
10048 
10049 	if (!strncmp("dynamic", tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10050 		return VM_KERN_MEMORY_FIRST_DYNAMIC;
10051 	}
10052 
10053 	if (!strncmp("any", tagstr, VM_KERN_MEMORY_STR_MAX_LEN)) {
10054 		return VM_KERN_MEMORY_ANY;
10055 	}
10056 
10057 	printf("Unable to find vm tag %s for btlog\n", tagstr);
10058 	return VM_KERN_MEMORY_NONE;
10059 }
10060 
10061 __startup_func
10062 static void
10063 vm_btlog_init(void)
10064 {
10065 	vmtaglog_tag = vm_tag_str_to_idx(vmtaglog);
10066 
10067 	if (vmtaglog_tag != VM_KERN_MEMORY_NONE) {
10068 		vmtaglog_btlog = btlog_create(BTLOG_HASH, VM_TAG_BTLOG_SIZE, 0);
10069 	}
10070 }
10071 STARTUP(ZALLOC, STARTUP_RANK_FIRST, vm_btlog_init);
10072 #endif /* VM_BTLOG_TAGS */
10073 
10074 void
10075 vm_tag_update_size(vm_tag_t tag, int64_t delta, vm_object_t object)
10076 {
10077 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
10078 
10079 	kern_allocation_update_size(vm_allocation_sites[tag], delta, object);
10080 }
10081 
10082 uint64_t
10083 vm_tag_get_size(vm_tag_t tag)
10084 {
10085 	vm_allocation_site_t *allocation;
10086 
10087 	assert(VM_KERN_MEMORY_NONE != tag && tag < VM_MAX_TAG_VALUE);
10088 
10089 	allocation = vm_allocation_sites[tag];
10090 	return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
10091 }
10092 
10093 void
10094 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta, __unused vm_object_t object)
10095 {
10096 	uint64_t value;
10097 
10098 	value = os_atomic_add(&allocation->total, delta, relaxed);
10099 	if (delta < 0) {
10100 		assertf(value + (uint64_t)-delta > value,
10101 		    "tag %d, site %p", allocation->tag, allocation);
10102 	}
10103 
10104 #if DEBUG || DEVELOPMENT
10105 	if (value > allocation->peak) {
10106 		os_atomic_max(&allocation->peak, value, relaxed);
10107 	}
10108 #endif /* DEBUG || DEVELOPMENT */
10109 
10110 	if (value == (uint64_t)delta && !allocation->tag) {
10111 		vm_tag_alloc(allocation);
10112 	}
10113 
10114 #if VM_BTLOG_TAGS
10115 	if (vmtaglog_matches(allocation->tag) && object) {
10116 		vm_tag_log(object, delta, __builtin_frame_address(0));
10117 	}
10118 #endif /* VM_BTLOG_TAGS */
10119 }
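/*
 * Illustrative sketch (added for exposition): the accounting above is a
 * relaxed atomic add, an underflow assertion for negative deltas, and a
 * monotonically tracked peak.  The stand-alone version below uses C11
 * atomics instead of the kernel's os_atomic wrappers; it is an
 * approximation, not the kernel implementation.
 */
#if 0 /* example only, never compiled */
#include <stdatomic.h>
#include <stdint.h>
#include <assert.h>

struct example_counter {
	_Atomic uint64_t total;
	_Atomic uint64_t peak;
};

static void
example_counter_update(struct example_counter *c, int64_t delta)
{
	uint64_t value = atomic_fetch_add_explicit(&c->total, (uint64_t)delta,
	    memory_order_relaxed) + (uint64_t)delta;

	/* a negative delta must never take the counter below zero */
	assert(delta >= 0 || value + (uint64_t)-delta > value);

	/* racy but monotonic peak update, analogous to os_atomic_max() */
	uint64_t peak = atomic_load_explicit(&c->peak, memory_order_relaxed);
	while (value > peak &&
	    !atomic_compare_exchange_weak_explicit(&c->peak, &peak, value,
	    memory_order_relaxed, memory_order_relaxed)) {
		/* 'peak' was reloaded by the failed CAS; retry */
	}
}
#endif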
10120 
10121 #if VM_TAG_SIZECLASSES
10122 
10123 void
10124 vm_allocation_zones_init(void)
10125 {
10126 	vm_offset_t   addr;
10127 	vm_size_t     size;
10128 
10129 	const vm_tag_t early_tags[] = {
10130 		VM_KERN_MEMORY_DIAG,
10131 		VM_KERN_MEMORY_KALLOC,
10132 		VM_KERN_MEMORY_KALLOC_DATA,
10133 		VM_KERN_MEMORY_KALLOC_TYPE,
10134 		VM_KERN_MEMORY_LIBKERN,
10135 		VM_KERN_MEMORY_OSFMK,
10136 		VM_KERN_MEMORY_RECOUNT,
10137 	};
10138 
10139 	size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
10140 	    + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
10141 
10142 	kmem_alloc(kernel_map, &addr, round_page(size),
10143 	    KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
10144 	    VM_KERN_MEMORY_DIAG);
10145 
10146 	vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
10147 	addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
10148 
10149 	// prepopulate early tag ranges so allocations
10150 	// in vm_tag_update_zone_size() and early boot won't recurse
10151 	for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
10152 		vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
10153 		addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
10154 	}
10155 }
10156 
10157 __attribute__((noinline))
10158 static vm_tag_t
10159 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
10160 {
10161 	vm_allocation_zone_total_t *stats;
10162 	vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
10163 
10164 	flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
10165 	stats = kalloc_data(size, flags);
10166 	if (!stats) {
10167 		return VM_KERN_MEMORY_NONE;
10168 	}
10169 	if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
10170 		kfree_data(stats, size);
10171 	}
10172 	return tag;
10173 }
10174 
10175 vm_tag_t
10176 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zflags)
10177 {
10178 	assert(VM_KERN_MEMORY_NONE != tag);
10179 	assert(tag < VM_MAX_TAG_VALUE);
10180 
10181 	if (__probable(vm_allocation_zone_totals[tag])) {
10182 		return tag;
10183 	}
10184 	return vm_tag_zone_stats_alloc(tag, zflags);
10185 }
10186 
10187 void
10188 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
10189 {
10190 	vm_allocation_zone_total_t *stats;
10191 	vm_size_t value;
10192 
10193 	assert(VM_KERN_MEMORY_NONE != tag);
10194 	assert(tag < VM_MAX_TAG_VALUE);
10195 
10196 	if (zidx >= VM_TAG_SIZECLASSES) {
10197 		return;
10198 	}
10199 
10200 	stats = vm_allocation_zone_totals[tag];
10201 	assert(stats);
10202 	stats += zidx;
10203 
10204 	value = os_atomic_add(&stats->vazt_total, delta, relaxed);
10205 	if (delta < 0) {
10206 		assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
10207 		return;
10208 	} else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
10209 		os_atomic_max(&stats->vazt_peak, value, relaxed);
10210 	}
10211 }
10212 
10213 #endif /* VM_TAG_SIZECLASSES */
10214 
10215 void
10216 kern_allocation_update_subtotal(kern_allocation_name_t allocation, vm_tag_t subtag, int64_t delta)
10217 {
10218 	kern_allocation_name_t other;
10219 	struct vm_allocation_total * total;
10220 	uint32_t subidx;
10221 
10222 	assert(VM_KERN_MEMORY_NONE != subtag);
10223 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10224 	for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
10225 		total = &allocation->subtotals[subidx];
10226 		if (subtag == total->tag) {
10227 			break;
10228 		}
10229 	}
10230 	if (subidx >= allocation->subtotalscount) {
10231 		for (subidx = 0; subidx < allocation->subtotalscount; subidx++) {
10232 			total = &allocation->subtotals[subidx];
10233 			if ((VM_KERN_MEMORY_NONE == total->tag)
10234 			    || !total->total) {
10235 				total->tag = (vm_tag_t)subtag;
10236 				break;
10237 			}
10238 		}
10239 	}
10240 	assert(subidx < allocation->subtotalscount);
10241 	if (subidx >= allocation->subtotalscount) {
10242 		lck_ticket_unlock(&vm_allocation_sites_lock);
10243 		return;
10244 	}
10245 	if (delta < 0) {
10246 		assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
10247 	}
10248 	OSAddAtomic64(delta, &total->total);
10249 	lck_ticket_unlock(&vm_allocation_sites_lock);
10250 
10251 	other = vm_allocation_sites[subtag];
10252 	assert(other);
10253 	if (delta < 0) {
10254 		assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
10255 	}
10256 	OSAddAtomic64(delta, &other->mapped);
10257 }
10258 
10259 const char *
10260 kern_allocation_get_name(kern_allocation_name_t allocation)
10261 {
10262 	return KA_NAME(allocation);
10263 }
10264 
10265 kern_allocation_name_t
10266 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
10267 {
10268 	kern_allocation_name_t allocation;
10269 	uint16_t namelen;
10270 
10271 	namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
10272 
10273 	allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
10274 	allocation->refcount       = 1;
10275 	allocation->subtotalscount = subtotalscount;
10276 	allocation->flags          = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
10277 	strlcpy(KA_NAME(allocation), name, namelen + 1);
10278 
10279 	vm_tag_alloc(allocation);
10280 	return allocation;
10281 }
10282 
10283 void
10284 kern_allocation_name_release(kern_allocation_name_t allocation)
10285 {
10286 	assert(allocation->refcount > 0);
10287 	if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
10288 		kfree_data(allocation,
10289 		    KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
10290 	}
10291 }
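/*
 * Illustrative usage sketch (added for exposition): a subsystem that wants
 * its wired memory reported under its own name typically allocates a named
 * tag once, feeds size deltas through kern_allocation_update_size(), and
 * drops its reference when it is done.  The call sites and the
 * "com.example.cache" name below are hypothetical.
 */
#if 0 /* example only, never compiled */
static kern_allocation_name_t example_cache_name;

static void
example_cache_init(void)
{
	/* reserve one subtotal slot in case a sub-account is reported later */
	example_cache_name = kern_allocation_name_allocate("com.example.cache", 1);
}

static void
example_cache_grew(vm_object_t object, uint64_t bytes)
{
	kern_allocation_update_size(example_cache_name, (int64_t)bytes, object);
}

static void
example_cache_fini(void)
{
	kern_allocation_name_release(example_cache_name);
}
#endif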
10292 
10293 #if !VM_TAG_ACTIVE_UPDATE
10294 static void
10295 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
10296 {
10297 	if (!object->wired_page_count) {
10298 		return;
10299 	}
10300 	if (!is_kernel_object(object)) {
10301 		assert(object->wire_tag < num_info);
10302 		info[object->wire_tag].size += ptoa_64(object->wired_page_count);
10303 	}
10304 }
10305 
10306 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
10307     unsigned int num_info, vm_object_t object);
10308 
10309 static void
10310 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
10311     vm_page_iterate_proc proc, purgeable_q_t queue,
10312     int group)
10313 {
10314 	vm_object_t object;
10315 
10316 	for (object = (vm_object_t) queue_first(&queue->objq[group]);
10317 	    !queue_end(&queue->objq[group], (queue_entry_t) object);
10318 	    object = (vm_object_t) queue_next(&object->objq)) {
10319 		proc(info, num_info, object);
10320 	}
10321 }
10322 
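/*
 * Apply "proc" to every object on the wired-objects queue, holding the
 * queue's spin lock across the walk.
 */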
10323 static void
10324 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
10325     vm_page_iterate_proc proc)
10326 {
10327 	vm_object_t     object;
10328 
10329 	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
10330 	queue_iterate(&vm_objects_wired,
10331 	    object,
10332 	    vm_object_t,
10333 	    wired_objq)
10334 	{
10335 		proc(info, num_info, object);
10336 	}
10337 	lck_spin_unlock(&vm_objects_wired_lock);
10338 }
10339 #endif /* ! VM_TAG_ACTIVE_UPDATE */
10340 
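/*
 * Convert the allocation site table into the caller's mach_memory_info_t
 * array: copy per-tag totals and flags, expand per-sizeclass zone totals
 * into additional entries, and move each site's subtotals out of the
 * donor tags so the same memory is not reported twice.
 */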
10341 static uint64_t
10342 process_account(mach_memory_info_t * info, unsigned int num_info,
10343     uint64_t zones_collectable_bytes, boolean_t iterated, bool redact_info __unused)
10344 {
10345 	size_t                 namelen;
10346 	unsigned int           idx, count, nextinfo;
10347 	vm_allocation_site_t * site;
10348 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10349 
10350 	for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
10351 		site = vm_allocation_sites[idx];
10352 		if (!site) {
10353 			continue;
10354 		}
10355 		info[idx].mapped = site->mapped;
10356 		info[idx].tag    = site->tag;
10357 		if (!iterated) {
10358 			info[idx].size = site->total;
10359 #if DEBUG || DEVELOPMENT
10360 			info[idx].peak = site->peak;
10361 #endif /* DEBUG || DEVELOPMENT */
10362 		} else {
10363 			if (!site->subtotalscount && (site->total != info[idx].size)) {
10364 				printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
10365 				info[idx].size = site->total;
10366 			}
10367 		}
10368 		info[idx].flags |= VM_KERN_SITE_WIRED;
10369 		if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
10370 			info[idx].site   = idx;
10371 			info[idx].flags |= VM_KERN_SITE_TAG;
10372 			if (VM_KERN_MEMORY_ZONE == idx) {
10373 				info[idx].flags |= VM_KERN_SITE_HIDE;
10374 				info[idx].flags &= ~VM_KERN_SITE_WIRED;
10375 				info[idx].collectable_bytes = zones_collectable_bytes;
10376 			}
10377 		} else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
10378 			info[idx].site   = 0;
10379 			info[idx].flags |= VM_KERN_SITE_NAMED;
10380 			if (namelen > sizeof(info[idx].name)) {
10381 				namelen = sizeof(info[idx].name);
10382 			}
10383 			strncpy(&info[idx].name[0], KA_NAME(site), namelen);
10384 		} else if (VM_TAG_KMOD & site->flags) {
10385 			info[idx].site   = OSKextGetKmodIDForSite(site, NULL, 0);
10386 			info[idx].flags |= VM_KERN_SITE_KMOD;
10387 		} else {
10388 			info[idx].site   = VM_KERNEL_UNSLIDE(site);
10389 			info[idx].flags |= VM_KERN_SITE_KERNEL;
10390 		}
10391 	}
10392 
10393 	nextinfo = (vm_allocation_tag_highest + 1);
10394 	count    = nextinfo;
10395 	if (count >= num_info) {
10396 		count = num_info;
10397 	}
10398 
10399 	for (idx = 0; idx < count; idx++) {
10400 		site = vm_allocation_sites[idx];
10401 		if (!site) {
10402 			continue;
10403 		}
10404 #if VM_TAG_SIZECLASSES
10405 		vm_allocation_zone_total_t * zone;
10406 		unsigned int                 zidx;
10407 
10408 		if (!redact_info
10409 		    && vm_allocation_zone_totals
10410 		    && (zone = vm_allocation_zone_totals[idx])
10411 		    && (nextinfo < num_info)) {
10412 			for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
10413 				if (!zone[zidx].vazt_peak) {
10414 					continue;
10415 				}
10416 				info[nextinfo]        = info[idx];
10417 				info[nextinfo].zone   = zone_index_from_tag_index(zidx);
10418 				info[nextinfo].flags  &= ~VM_KERN_SITE_WIRED;
10419 				info[nextinfo].flags  |= VM_KERN_SITE_ZONE;
10420 				info[nextinfo].flags  |= VM_KERN_SITE_KALLOC;
10421 				info[nextinfo].size   = zone[zidx].vazt_total;
10422 				info[nextinfo].peak   = zone[zidx].vazt_peak;
10423 				info[nextinfo].mapped = 0;
10424 				nextinfo++;
10425 			}
10426 		}
10427 #endif /* VM_TAG_SIZECLASSES */
10428 		if (site->subtotalscount) {
10429 			uint64_t mapped, mapcost, take;
10430 			uint32_t sub;
10431 			vm_tag_t alloctag;
10432 
10433 			info[idx].size = site->total;
10434 			mapped = info[idx].size;
10435 			info[idx].mapped = mapped;
10436 			mapcost = 0;
10437 			for (sub = 0; sub < site->subtotalscount; sub++) {
10438 				alloctag = site->subtotals[sub].tag;
10439 				assert(alloctag < num_info);
10440 				if (info[alloctag].name[0]) {
10441 					continue;
10442 				}
10443 				take = site->subtotals[sub].total;
10444 				if (take > info[alloctag].size) {
10445 					take = info[alloctag].size;
10446 				}
10447 				if (take > mapped) {
10448 					take = mapped;
10449 				}
10450 				info[alloctag].mapped  -= take;
10451 				info[alloctag].size    -= take;
10452 				mapped                 -= take;
10453 				mapcost                += take;
10454 			}
10455 			info[idx].size = mapcost;
10456 		}
10457 	}
10458 	lck_ticket_unlock(&vm_allocation_sites_lock);
10459 
10460 	return 0;
10461 }
10462 
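/*
 * Return an upper bound on the number of mach_memory_info_t entries that
 * vm_page_diagnose() can produce: one per allocation site, one per
 * non-empty sizeclass, one per zone view, plus the fixed counters and a
 * little slop for tags created after this estimate is taken.
 */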
10463 uint32_t
10464 vm_page_diagnose_estimate(void)
10465 {
10466 	vm_allocation_site_t * site;
10467 	uint32_t               count = zone_view_count;
10468 	uint32_t               idx;
10469 
10470 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10471 	for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
10472 		site = vm_allocation_sites[idx];
10473 		if (!site) {
10474 			continue;
10475 		}
10476 		count++;
10477 #if VM_TAG_SIZECLASSES
10478 		if (vm_allocation_zone_totals) {
10479 			vm_allocation_zone_total_t * zone;
10480 			zone = vm_allocation_zone_totals[idx];
10481 			if (!zone) {
10482 				continue;
10483 			}
10484 			for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
10485 				count += (zone[zidx].vazt_peak != 0);
10486 			}
10487 		}
10488 #endif
10489 	}
10490 	lck_ticket_unlock(&vm_allocation_sites_lock);
10491 
10492 	/* some slop for new tags created */
10493 	count += 8;
10494 	count += VM_KERN_COUNTER_COUNT;
10495 
10496 	return count;
10497 }
10498 
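/*
 * Sum a zone's per-CPU allocated-minus-freed byte counts into "info",
 * scaling by the CPU count for per-cpu zones, and mark the entry as a
 * named zone view.
 */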
10499 static void
10500 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
10501     bool percpu)
10502 {
10503 	zpercpu_foreach(zs, zstats) {
10504 		info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
10505 	}
10506 	if (percpu) {
10507 		info->size *= zpercpu_count();
10508 	}
10509 	info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
10510 }
10511 
10512 static void
10513 vm_page_add_info(
10514 	mach_memory_info_t     *info,
10515 	zone_stats_t            stats,
10516 	bool                    per_cpu,
10517 	const char             *parent_heap_name,
10518 	const char             *parent_zone_name,
10519 	const char             *view_name)
10520 {
10521 	vm_page_diagnose_zone_stats(info, stats, per_cpu);
10522 	snprintf(info->name, sizeof(info->name),
10523 	    "%s%s[%s]", parent_heap_name, parent_zone_name, view_name);
10524 }
10525 
10526 static void
10527 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
10528 {
10529 	vm_page_add_info(info, z->z_stats, z->z_percpu, zone_heap_name(z),
10530 	    z->z_name, "raw");
10531 }
10532 
10533 static void
10534 vm_page_add_view(
10535 	mach_memory_info_t     *info,
10536 	zone_stats_t            stats,
10537 	const char             *parent_heap_name,
10538 	const char             *parent_zone_name,
10539 	const char             *view_name)
10540 {
10541 	vm_page_add_info(info, stats, false, parent_heap_name, parent_zone_name,
10542 	    view_name);
10543 }
10544 
10545 static uint32_t
10546 vm_page_diagnose_heap_views(
10547 	mach_memory_info_t     *info,
10548 	kalloc_heap_t           kh,
10549 	const char             *parent_heap_name,
10550 	const char             *parent_zone_name)
10551 {
10552 	uint32_t i = 0;
10553 
10554 	while (kh) {
10555 		vm_page_add_view(info + i, kh->kh_stats, parent_heap_name,
10556 		    parent_zone_name, kh->kh_name);
10557 		kh = kh->kh_views;
10558 		i++;
10559 	}
10560 	return i;
10561 }
10562 
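/*
 * Emit one entry per zone backing the given kalloc heap, followed by one
 * entry per view hanging off that heap.
 */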
10563 static uint32_t
10564 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
10565 {
10566 	uint32_t i = 0;
10567 
10568 	for (; i < KHEAP_NUM_ZONES; i++) {
10569 		vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
10570 	}
10571 
10572 	i += vm_page_diagnose_heap_views(info + i, kheap->kh_views, kheap->kh_name,
10573 	    NULL);
10574 	return i;
10575 }
10576 
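/*
 * Emit entries for the kalloc_type variable heap: its shared raw stats,
 * each kalloc_type view that carries its own statistics, and each
 * per-heap view.
 */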
10577 static int
10578 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
10579 {
10580 	uint32_t idx = 0;
10581 	vm_page_add_view(info + idx, KHEAP_KT_VAR->kh_stats, KHEAP_KT_VAR->kh_name,
10582 	    "", "raw");
10583 	idx++;
10584 
10585 	for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
10586 		struct kheap_info heap = kalloc_type_heap_array[i];
10587 		char heap_num_tmp[MAX_ZONE_NAME] = "";
10588 		const char *heap_num;
10589 
10590 		snprintf(&heap_num_tmp[0], MAX_ZONE_NAME, "%u", i);
10591 		heap_num = &heap_num_tmp[0];
10592 
10593 		for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
10594 		    ktv = (kalloc_type_var_view_t) ktv->kt_next) {
10595 			if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
10596 				vm_page_add_view(info + idx, ktv->kt_stats, KHEAP_KT_VAR->kh_name,
10597 				    heap_num, ktv->kt_name);
10598 				idx++;
10599 			}
10600 		}
10601 
10602 		idx += vm_page_diagnose_heap_views(info + idx, heap.kh_views,
10603 		    KHEAP_KT_VAR->kh_name, heap_num);
10604 	}
10605 
10606 	return idx;
10607 }
10608 
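/*
 * Fill "info" with kernel wired-memory diagnostics: global page counters,
 * kernel and zone map sizes, zone and heap views (unless redacted),
 * per-tag wired totals (walking kernel_map when tags are not actively
 * updated), and the reconciled per-site accounting from process_account().
 */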
10609 kern_return_t
10610 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes, bool redact_info)
10611 {
10612 	uint64_t                 wired_size;
10613 	uint64_t                 wired_managed_size;
10614 	uint64_t                 wired_reserved_size;
10615 	boolean_t                iterate;
10616 	mach_memory_info_t     * counts;
10617 	uint32_t                 i;
10618 
10619 	bzero(info, num_info * sizeof(mach_memory_info_t));
10620 
10621 	if (!vm_page_wire_count_initial) {
10622 		return KERN_ABORTED;
10623 	}
10624 
10625 #if !XNU_TARGET_OS_OSX
10626 	wired_size          = ptoa_64(vm_page_wire_count);
10627 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
10628 #else /* !XNU_TARGET_OS_OSX */
10629 	wired_size          = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
10630 	wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
10631 #endif /* !XNU_TARGET_OS_OSX */
10632 	wired_managed_size  = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
10633 
10634 	wired_size += booter_size;
10635 
10636 	assert(num_info >= VM_KERN_COUNTER_COUNT);
10637 	num_info -= VM_KERN_COUNTER_COUNT;
10638 	counts = &info[num_info];
10639 
10640 #define SET_COUNT(xcount, xsize, xflags)                        \
10641     counts[xcount].tag   = VM_MAX_TAG_VALUE + xcount;   \
10642     counts[xcount].site  = (xcount);                            \
10643     counts[xcount].size  = (xsize);                                 \
10644     counts[xcount].mapped  = (xsize);                           \
10645     counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
10646 
10647 	SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
10648 	SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
10649 	SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
10650 	SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
10651 	SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
10652 	SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
10653 	SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
10654 	SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
10655 	SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
10656 #if CONFIG_SPTM
10657 	SET_COUNT(VM_KERN_COUNT_EXCLAVES_CARVEOUT, SPTMArgs->sk_carveout_size, 0);
10658 #endif
10659 
10660 #define SET_MAP(xcount, xsize, xfree, xlargest) \
10661     counts[xcount].site    = (xcount);                  \
10662     counts[xcount].size    = (xsize);                   \
10663     counts[xcount].mapped  = (xsize);                   \
10664     counts[xcount].free    = (xfree);                   \
10665     counts[xcount].largest = (xlargest);                \
10666     counts[xcount].flags   = VM_KERN_SITE_COUNTER;
10667 
10668 	vm_map_size_t map_size, map_free, map_largest;
10669 
10670 	vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
10671 	SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
10672 
10673 	zone_map_sizes(&map_size, &map_free, &map_largest);
10674 	SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
10675 
10676 	assert(num_info >= zone_view_count);
10677 	num_info -= zone_view_count;
10678 	counts = &info[num_info];
10679 	i = 0;
10680 
10681 	if (!redact_info) {
10682 		if (zone_is_data_kheap(KHEAP_DATA_BUFFERS->kh_heap_id)) {
10683 			i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
10684 		}
10685 		if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
10686 			i += vm_page_diagnose_kt_heaps(counts + i);
10687 		}
10688 		assert(i <= zone_view_count);
10689 
10690 		zone_index_foreach(zidx) {
10691 			zone_t z = &zone_array[zidx];
10692 			zone_security_flags_t zsflags = zone_security_array[zidx];
10693 			zone_view_t zv = z->z_views;
10694 
10695 			if (zv == NULL) {
10696 				continue;
10697 			}
10698 
10699 			zone_stats_t zv_stats_head = z->z_stats;
10700 			bool has_raw_view = false;
10701 
10702 			for (; zv; zv = zv->zv_next) {
10703 				/*
10704 				 * kalloc_types that allocate from the same zone are linked
10705 				 * as views. Only print the ones that have their own stats.
10706 				 */
10707 				if (zv->zv_stats == zv_stats_head) {
10708 					continue;
10709 				}
10710 				has_raw_view = true;
10711 				vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
10712 				    z->z_percpu);
10713 				snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
10714 				    zone_heap_name(z), z->z_name, zv->zv_name);
10715 				i++;
10716 				assert(i <= zone_view_count);
10717 			}
10718 
10719 			/*
10720 			 * Print raw views for non kalloc or kalloc_type zones
10721 			 * Print raw views for non-kalloc or kalloc_type zones
10722 			bool kalloc_type = zsflags.z_kalloc_type;
10723 			if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
10724 			    (kalloc_type && has_raw_view)) {
10725 				vm_page_diagnose_zone(counts + i, z);
10726 				i++;
10727 				assert(i <= zone_view_count);
10728 			}
10729 		}
10730 	}
10731 
10732 	iterate = !VM_TAG_ACTIVE_UPDATE;
10733 	if (iterate) {
10734 		enum                       { kMaxKernelDepth = 1 };
10735 		vm_map_t                     maps[kMaxKernelDepth];
10736 		vm_map_entry_t               entries[kMaxKernelDepth];
10737 		vm_map_t                     map;
10738 		vm_map_entry_t               entry;
10739 		vm_object_offset_t           offset;
10740 		vm_page_t                    page;
10741 		int                          stackIdx, count;
10742 
10743 #if !VM_TAG_ACTIVE_UPDATE
10744 		vm_page_iterate_objects(info, num_info, &vm_page_count_object);
10745 #endif /* ! VM_TAG_ACTIVE_UPDATE */
10746 
10747 		map = kernel_map;
10748 		stackIdx = 0;
10749 		while (map) {
10750 			vm_map_lock(map);
10751 			for (entry = map->hdr.links.next; map; entry = entry->vme_next) {
10752 				if (entry->is_sub_map) {
10753 					assert(stackIdx < kMaxKernelDepth);
10754 					maps[stackIdx] = map;
10755 					entries[stackIdx] = entry;
10756 					stackIdx++;
10757 					map = VME_SUBMAP(entry);
10758 					entry = NULL;
10759 					break;
10760 				}
10761 				if (is_kernel_object(VME_OBJECT(entry))) {
10762 					count = 0;
10763 					vm_object_lock(VME_OBJECT(entry));
10764 					for (offset = entry->vme_start; offset < entry->vme_end; offset += page_size) {
10765 						page = vm_page_lookup(VME_OBJECT(entry), offset);
10766 						if (page && VM_PAGE_WIRED(page)) {
10767 							count++;
10768 						}
10769 					}
10770 					vm_object_unlock(VME_OBJECT(entry));
10771 
10772 					if (count) {
10773 						assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
10774 						assert(VME_ALIAS(entry) < num_info);
10775 						info[VME_ALIAS(entry)].size += ptoa_64(count);
10776 					}
10777 				}
10778 				while (map && (entry == vm_map_last_entry(map))) {
10779 					vm_map_unlock(map);
10780 					if (!stackIdx) {
10781 						map = NULL;
10782 					} else {
10783 						--stackIdx;
10784 						map = maps[stackIdx];
10785 						entry = entries[stackIdx];
10786 					}
10787 				}
10788 			}
10789 		}
10790 	}
10791 
10792 	process_account(info, num_info, zones_collectable_bytes, iterate, redact_info);
10793 
10794 	return KERN_SUCCESS;
10795 }
10796 
10797 #if DEBUG || DEVELOPMENT
10798 
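/*
 * Report the size and tag of the allocation containing "addr": first try
 * to resolve it as a zone element, otherwise look it up as a kernel_map
 * (or kernel submap) entry that starts exactly at "addr".
 */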
10799 kern_return_t
10800 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
10801 {
10802 	kern_return_t  ret;
10803 	vm_size_t      zsize;
10804 	vm_map_t       map;
10805 	vm_map_entry_t entry;
10806 
10807 	zsize = zone_element_info((void *) addr, tag);
10808 	if (zsize) {
10809 		*zone_size = *size = zsize;
10810 		return KERN_SUCCESS;
10811 	}
10812 
10813 	*zone_size = 0;
10814 	ret = KERN_INVALID_ADDRESS;
10815 	for (map = kernel_map; map;) {
10816 		vm_map_lock(map);
10817 		if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
10818 			break;
10819 		}
10820 		if (entry->is_sub_map) {
10821 			if (map != kernel_map) {
10822 				break;
10823 			}
10824 			map = VME_SUBMAP(entry);
10825 			continue;
10826 		}
10827 		if (entry->vme_start != addr) {
10828 			break;
10829 		}
10830 		*tag = (vm_tag_t)VME_ALIAS(entry);
10831 		*size = (entry->vme_end - addr);
10832 		ret = KERN_SUCCESS;
10833 		break;
10834 	}
10835 	if (map != kernel_map) {
10836 		vm_map_unlock(map);
10837 	}
10838 	vm_map_unlock(kernel_map);
10839 
10840 	return ret;
10841 }
10842 
10843 #endif /* DEBUG || DEVELOPMENT */
10844 
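/*
 * If "tag" belongs to a kext allocation site, return its kmod id and copy
 * its name into the supplied buffer; return 0 otherwise.
 */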
10845 uint32_t
10846 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
10847 {
10848 	vm_allocation_site_t * site;
10849 	uint32_t               kmodId;
10850 
10851 	kmodId = 0;
10852 	lck_ticket_lock(&vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
10853 	if ((site = vm_allocation_sites[tag])) {
10854 		if (VM_TAG_KMOD & site->flags) {
10855 			kmodId = OSKextGetKmodIDForSite(site, name, namelen);
10856 		}
10857 	}
10858 	lck_ticket_unlock(&vm_allocation_sites_lock);
10859 
10860 	return kmodId;
10861 }
10862 
10863 
10864 #if CONFIG_SECLUDED_MEMORY
10865 /*
10866  * Note that there's no locking around other accesses to vm_page_secluded_target.
10867  * That should be OK, since these are the only places where it can be changed after
10868  * initialization. Other users (like vm_pageout) may see the wrong value briefly,
10869  * but will eventually get the correct value. This brief mismatch is OK as pageout
10870  * and page freeing will auto-adjust the vm_page_secluded_count to match the target
10871  * over time.
10872  */
10873 unsigned int vm_page_secluded_suppress_cnt = 0;
10874 unsigned int vm_page_secluded_save_target;
10875 
10876 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
10877 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
10878 
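/*
 * Suppress use of the secluded pool while at least one task asks for it:
 * the first suppressor saves vm_page_secluded_target and zeroes it; the
 * matching stop_secluded_suppression() by the last suppressor restores it.
 */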
10879 void
10880 start_secluded_suppression(task_t task)
10881 {
10882 	if (task->task_suppressed_secluded) {
10883 		return;
10884 	}
10885 	lck_spin_lock(&secluded_suppress_slock);
10886 	if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
10887 		task->task_suppressed_secluded = TRUE;
10888 		vm_page_secluded_save_target = vm_page_secluded_target;
10889 		vm_page_secluded_target = 0;
10890 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10891 	}
10892 	lck_spin_unlock(&secluded_suppress_slock);
10893 }
10894 
10895 void
10896 stop_secluded_suppression(task_t task)
10897 {
10898 	lck_spin_lock(&secluded_suppress_slock);
10899 	if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
10900 		task->task_suppressed_secluded = FALSE;
10901 		vm_page_secluded_target = vm_page_secluded_save_target;
10902 		VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
10903 	}
10904 	lck_spin_unlock(&secluded_suppress_slock);
10905 }
10906 
10907 #endif /* CONFIG_SECLUDED_MEMORY */
10908 
10909 /*
10910  * Move the list of retired pages from vm_page_queue_retired to
10911  * their final resting place in retired_pages_object.
10912  */
10913 void
10914 vm_retire_boot_pages(void)
10915 {
10916 }
10917 
10918 /*
10919  * This holds the reported physical address if an ECC error leads to a panic.
10920  * SMC will store it in PMU SRAM under the 'sECC' key.
10921  */
10922 uint64_t ecc_panic_physical_address = 0;
10923 
10924