1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm/vm_page.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Resident memory management module.
63 */
64
65 #include <debug.h>
66 #include <libkern/OSAtomic.h>
67 #include <libkern/OSDebug.h>
68
69 #include <mach/clock_types.h>
70 #include <mach/vm_prot.h>
71 #include <mach/vm_statistics.h>
72 #include <mach/sdt.h>
73 #include <kern/counter.h>
74 #include <kern/host_statistics.h>
75 #include <kern/sched_prim.h>
76 #include <kern/policy_internal.h>
77 #include <kern/task.h>
78 #include <kern/thread.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc_internal.h>
81 #include <kern/ledger.h>
82 #include <kern/ecc.h>
83 #include <vm/pmap.h>
84 #include <vm/vm_init.h>
85 #include <vm/vm_map.h>
86 #include <vm/vm_page.h>
87 #include <vm/vm_pageout.h>
88 #include <vm/vm_kern.h> /* kmem_alloc() */
89 #include <kern/misc_protos.h>
90 #include <mach_debug/zone_info.h>
91 #include <vm/cpm.h>
92 #include <pexpert/pexpert.h>
93 #include <pexpert/device_tree.h>
94 #include <san/kasan.h>
95
96 #include <vm/vm_protos.h>
97 #include <vm/memory_object.h>
98 #include <vm/vm_purgeable_internal.h>
99 #include <vm/vm_compressor.h>
100 #if defined (__x86_64__)
101 #include <i386/misc_protos.h>
102 #endif
103
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache.h>
106 #endif
107
108 #if HIBERNATION
109 #include <IOKit/IOHibernatePrivate.h>
110 #include <machine/pal_hibernate.h>
111 #endif /* HIBERNATION */
112
113 #include <sys/kdebug.h>
114
115 #if defined(HAS_APPLE_PAC)
116 #include <ptrauth.h>
117 #endif
118 #if defined(__arm64__)
119 #include <arm/cpu_internal.h>
120 #endif /* defined(__arm64__) */
121
122 #if MACH_ASSERT
123
124 #define ASSERT_PMAP_FREE(mem) pmap_assert_free(VM_PAGE_GET_PHYS_PAGE(mem))
125
126 #else /* MACH_ASSERT */
127
128 #define ASSERT_PMAP_FREE(mem) /* nothing */
129
130 #endif /* MACH_ASSERT */
131
132 extern boolean_t vm_pageout_running;
133 extern thread_t vm_pageout_scan_thread;
134 extern boolean_t vps_dynamic_priority_enabled;
135
136 char vm_page_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
137 char vm_page_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
138 char vm_page_non_speculative_pageable_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
139 char vm_page_active_or_inactive_states[VM_PAGE_Q_STATE_ARRAY_SIZE];
140
141 #if CONFIG_SECLUDED_MEMORY
142 struct vm_page_secluded_data vm_page_secluded;
143 #endif /* CONFIG_SECLUDED_MEMORY */
144
145 #if DEVELOPMENT || DEBUG
146 extern struct memory_object_pager_ops shared_region_pager_ops;
147 unsigned int shared_region_pagers_resident_count = 0;
148 unsigned int shared_region_pagers_resident_peak = 0;
149 #endif /* DEVELOPMENT || DEBUG */
150
151
152
153 int PERCPU_DATA(start_color);
154 vm_page_t PERCPU_DATA(free_pages);
155 boolean_t hibernate_cleaning_in_progress = FALSE;
156 boolean_t vm_page_free_verify = TRUE;
157
158 uint32_t vm_lopage_free_count = 0;
159 uint32_t vm_lopage_free_limit = 0;
160 uint32_t vm_lopage_lowater = 0;
161 boolean_t vm_lopage_refill = FALSE;
162 boolean_t vm_lopage_needed = FALSE;
163
164 int speculative_age_index = 0;
165 int speculative_steal_index = 0;
166 struct vm_speculative_age_q vm_page_queue_speculative[VM_PAGE_MAX_SPECULATIVE_AGE_Q + 1];
167
168 boolean_t hibernation_vmqueues_inspection = FALSE; /* Tracks if the hibernation code is looking at the VM queues.
169 * Updated and checked behind the vm_page_queues_lock. */
170
171 static void vm_page_free_prepare(vm_page_t page);
172 static vm_page_t vm_page_grab_fictitious_common(ppnum_t, boolean_t);
173
174 static void vm_tag_init(void);
175
176 /* for debugging purposes */
177 SECURITY_READ_ONLY_EARLY(uint32_t) vm_packed_from_vm_pages_array_mask =
178 VM_PAGE_PACKED_FROM_ARRAY;
179 SECURITY_READ_ONLY_EARLY(vm_packing_params_t) vm_page_packing_params =
180 VM_PACKING_PARAMS(VM_PAGE_PACKED_PTR);
181
182 /*
183  * Associated with each page of user-allocatable memory is a
184 * page structure.
185 */
186
187 /*
188 * These variables record the values returned by vm_page_bootstrap,
189 * for debugging purposes. The implementation of pmap_steal_memory
190 * and pmap_startup here also uses them internally.
191 */
192
193 vm_offset_t virtual_space_start;
194 vm_offset_t virtual_space_end;
195 uint32_t vm_page_pages;
196
197 /*
198 * The vm_page_lookup() routine, which provides for fast
199 * (virtual memory object, offset) to page lookup, employs
200 * the following hash table. The vm_page_{insert,remove}
201 * routines install and remove associations in the table.
202 * [This table is often called the virtual-to-physical,
203 * or VP, table.]
204 */
205 typedef struct {
206 vm_page_packed_t page_list;
207 #if MACH_PAGE_HASH_STATS
208 int cur_count; /* current count */
209 int hi_count; /* high water mark */
210 #endif /* MACH_PAGE_HASH_STATS */
211 } vm_page_bucket_t;
212
213
214 #define BUCKETS_PER_LOCK 16
215
216 SECURITY_READ_ONLY_LATE(vm_page_bucket_t *) vm_page_buckets; /* Array of buckets */
217 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_count = 0; /* How big is array? */
218 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_mask; /* Mask for hash function */
219 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_hash_shift; /* Shift for hash function */
220 SECURITY_READ_ONLY_LATE(uint32_t) vm_page_bucket_hash; /* Basic bucket hash */
221 SECURITY_READ_ONLY_LATE(unsigned int) vm_page_bucket_lock_count = 0; /* How big is array of locks? */
222
223 #ifndef VM_TAG_ACTIVE_UPDATE
224 #error VM_TAG_ACTIVE_UPDATE
225 #endif
226 #ifndef VM_TAG_SIZECLASSES
227 #error VM_TAG_SIZECLASSES
228 #endif
229
230 /* for debugging */
231 SECURITY_READ_ONLY_LATE(bool) vm_tag_active_update = VM_TAG_ACTIVE_UPDATE;
232 SECURITY_READ_ONLY_LATE(lck_spin_t *) vm_page_bucket_locks;
233
234 vm_allocation_site_t vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC + 1];
235 vm_allocation_site_t * vm_allocation_sites[VM_MAX_TAG_VALUE];
236 #if VM_TAG_SIZECLASSES
237 static vm_allocation_zone_total_t **vm_allocation_zone_totals;
238 #endif /* VM_TAG_SIZECLASSES */
239
240 vm_tag_t vm_allocation_tag_highest;
241
242 #if VM_PAGE_BUCKETS_CHECK
243 boolean_t vm_page_buckets_check_ready = FALSE;
244 #if VM_PAGE_FAKE_BUCKETS
245 vm_page_bucket_t *vm_page_fake_buckets; /* decoy buckets */
246 vm_map_offset_t vm_page_fake_buckets_start, vm_page_fake_buckets_end;
247 #endif /* VM_PAGE_FAKE_BUCKETS */
248 #endif /* VM_PAGE_BUCKETS_CHECK */
249
250 #if MACH_PAGE_HASH_STATS
251 /* This routine is only for debug. It is intended to be called by
252 * hand by a developer using a kernel debugger. This routine prints
253 * out vm_page_hash table statistics to the kernel debug console.
254 */
255 void
256 hash_debug(void)
257 {
258 int i;
259 int numbuckets = 0;
260 int highsum = 0;
261 int maxdepth = 0;
262
263 for (i = 0; i < vm_page_bucket_count; i++) {
264 if (vm_page_buckets[i].hi_count) {
265 numbuckets++;
266 highsum += vm_page_buckets[i].hi_count;
267 if (vm_page_buckets[i].hi_count > maxdepth) {
268 maxdepth = vm_page_buckets[i].hi_count;
269 }
270 }
271 }
272 printf("Total number of buckets: %d\n", vm_page_bucket_count);
273 printf("Number used buckets: %d = %d%%\n",
274 numbuckets, 100 * numbuckets / vm_page_bucket_count);
275 printf("Number unused buckets: %d = %d%%\n",
276 vm_page_bucket_count - numbuckets,
277 100 * (vm_page_bucket_count - numbuckets) / vm_page_bucket_count);
278 printf("Sum of bucket max depth: %d\n", highsum);
279 printf("Average bucket depth: %d.%2d\n",
280 highsum / vm_page_bucket_count,
281 highsum % vm_page_bucket_count);
282 printf("Maximum bucket depth: %d\n", maxdepth);
283 }
284 #endif /* MACH_PAGE_HASH_STATS */
285
286 /*
287 * The virtual page size is currently implemented as a runtime
288 * variable, but is constant once initialized using vm_set_page_size.
289 * This initialization must be done in the machine-dependent
290 * bootstrap sequence, before calling other machine-independent
291 * initializations.
292 *
293 * All references to the virtual page size outside this
294 * module must use the PAGE_SIZE, PAGE_MASK and PAGE_SHIFT
295 * constants.
296 */
297 #if defined(__arm64__)
298 vm_size_t page_size;
299 vm_size_t page_mask;
300 int page_shift;
301 #else
302 vm_size_t page_size = PAGE_SIZE;
303 vm_size_t page_mask = PAGE_MASK;
304 int page_shift = PAGE_SHIFT;
305 #endif
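/*
 * Illustrative sketch (not part of the build): callers typically use these
 * constants for page truncation and rounding. The helper names below are
 * hypothetical and assume PAGE_MASK == PAGE_SIZE - 1:
 *
 *   static inline vm_offset_t
 *   example_trunc_page(vm_offset_t addr)
 *   {
 *           return addr & ~((vm_offset_t)PAGE_MASK);               // drop the offset within the page
 *   }
 *
 *   static inline vm_offset_t
 *   example_round_page(vm_offset_t addr)
 *   {
 *           return (addr + PAGE_MASK) & ~((vm_offset_t)PAGE_MASK); // bump to the next page boundary
 *   }
 */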
306
307 SECURITY_READ_ONLY_LATE(vm_page_t) vm_pages = VM_PAGE_NULL;
308 SECURITY_READ_ONLY_LATE(vm_page_t) vm_page_array_beginning_addr;
309 vm_page_t vm_page_array_ending_addr;
310
311 unsigned int vm_pages_count = 0;
312
313 /*
314 * Resident pages that represent real memory
315 * are allocated from a set of free lists,
316 * one per color.
317 */
318 unsigned int vm_colors;
319 unsigned int vm_color_mask; /* mask is == (vm_colors-1) */
320 unsigned int vm_cache_geometry_colors = 0; /* set by hw dependent code during startup */
321 unsigned int vm_free_magazine_refill_limit = 0;
322
323
324 struct vm_page_queue_free_head {
325 vm_page_queue_head_t qhead;
326 } VM_PAGE_PACKED_ALIGNED;
327
328 struct vm_page_queue_free_head vm_page_queue_free[MAX_COLORS];
329
330
331 unsigned int vm_page_free_wanted;
332 unsigned int vm_page_free_wanted_privileged;
333 #if CONFIG_SECLUDED_MEMORY
334 unsigned int vm_page_free_wanted_secluded;
335 #endif /* CONFIG_SECLUDED_MEMORY */
336 unsigned int vm_page_free_count;
337
338 unsigned int vm_page_realtime_count;
339
340 /*
341 * Occasionally, the virtual memory system uses
342 * resident page structures that do not refer to
343 * real pages, for example to leave a page with
344 * important state information in the VP table.
345 *
346 * These page structures are allocated the way
347 * most other kernel structures are.
348 */
349 SECURITY_READ_ONLY_LATE(zone_t) vm_page_zone;
350 vm_locks_array_t vm_page_locks;
351
352 LCK_ATTR_DECLARE(vm_page_lck_attr, 0, 0);
353 LCK_GRP_DECLARE(vm_page_lck_grp_free, "vm_page_free");
354 LCK_GRP_DECLARE(vm_page_lck_grp_queue, "vm_page_queue");
355 LCK_GRP_DECLARE(vm_page_lck_grp_local, "vm_page_queue_local");
356 LCK_GRP_DECLARE(vm_page_lck_grp_purge, "vm_page_purge");
357 LCK_GRP_DECLARE(vm_page_lck_grp_alloc, "vm_page_alloc");
358 LCK_GRP_DECLARE(vm_page_lck_grp_bucket, "vm_page_bucket");
359 LCK_SPIN_DECLARE_ATTR(vm_objects_wired_lock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
360 LCK_TICKET_DECLARE(vm_allocation_sites_lock, &vm_page_lck_grp_bucket);
361
362 unsigned int vm_page_local_q_soft_limit = 250;
363 unsigned int vm_page_local_q_hard_limit = 500;
364 struct vpl *__zpercpu vm_page_local_q;
365
366 /* N.B. Guard and fictitious pages must not
367 * be assigned a zero phys_page value.
368 */
369 /*
370 * Fictitious pages don't have a physical address,
371 * but we must initialize phys_page to something.
372 * For debugging, this should be a strange value
373 * that the pmap module can recognize in assertions.
374 */
375 const ppnum_t vm_page_fictitious_addr = (ppnum_t) -1;
376
377 /*
378 * Guard pages are not accessible so they don't
379 * need a physical address, but we need to enter
380 * one in the pmap.
381 * Let's make it recognizable and make sure that
382 * we don't use a real physical page with that
383 * physical address.
384 */
385 const ppnum_t vm_page_guard_addr = (ppnum_t) -2;
386
387 /*
388 * Resident page structures are also chained on
389 * queues that are used by the page replacement
390 * system (pageout daemon). These queues are
391 * defined here, but are shared by the pageout
392 * module. The inactive queue is broken into
393  * file-backed and anonymous queues for convenience, as the
394  * pageout daemon often assigns a higher
395  * importance to anonymous pages (they are less likely to be picked).
396 */
397 vm_page_queue_head_t vm_page_queue_active VM_PAGE_PACKED_ALIGNED;
398 vm_page_queue_head_t vm_page_queue_inactive VM_PAGE_PACKED_ALIGNED;
399 #if CONFIG_SECLUDED_MEMORY
400 vm_page_queue_head_t vm_page_queue_secluded VM_PAGE_PACKED_ALIGNED;
401 #endif /* CONFIG_SECLUDED_MEMORY */
402 vm_page_queue_head_t vm_page_queue_anonymous VM_PAGE_PACKED_ALIGNED; /* inactive memory queue for anonymous pages */
403 vm_page_queue_head_t vm_page_queue_throttled VM_PAGE_PACKED_ALIGNED;
404
405 queue_head_t vm_objects_wired;
406
407 void vm_update_darkwake_mode(boolean_t);
408
409 vm_page_queue_head_t vm_page_queue_donate VM_PAGE_PACKED_ALIGNED;
410 uint32_t vm_page_donate_mode;
411 uint32_t vm_page_donate_target, vm_page_donate_target_high, vm_page_donate_target_low;
412 uint32_t vm_page_donate_count;
413 bool vm_page_donate_queue_ripe;
414
415
416 vm_page_queue_head_t vm_page_queue_background VM_PAGE_PACKED_ALIGNED;
417 uint32_t vm_page_background_target;
418 uint32_t vm_page_background_target_snapshot;
419 uint32_t vm_page_background_count;
420 uint64_t vm_page_background_promoted_count;
421
422 uint32_t vm_page_background_internal_count;
423 uint32_t vm_page_background_external_count;
424
425 uint32_t vm_page_background_mode;
426 uint32_t vm_page_background_exclude_external;
427
428 unsigned int vm_page_active_count;
429 unsigned int vm_page_inactive_count;
430 unsigned int vm_page_kernelcache_count;
431 #if CONFIG_SECLUDED_MEMORY
432 unsigned int vm_page_secluded_count;
433 unsigned int vm_page_secluded_count_free;
434 unsigned int vm_page_secluded_count_inuse;
435 unsigned int vm_page_secluded_count_over_target;
436 #endif /* CONFIG_SECLUDED_MEMORY */
437 unsigned int vm_page_anonymous_count;
438 unsigned int vm_page_throttled_count;
439 unsigned int vm_page_speculative_count;
440
441 unsigned int vm_page_wire_count;
442 unsigned int vm_page_wire_count_on_boot = 0;
443 unsigned int vm_page_stolen_count = 0;
444 unsigned int vm_page_wire_count_initial;
445 unsigned int vm_page_gobble_count = 0;
446 unsigned int vm_page_kern_lpage_count = 0;
447
448 uint64_t booter_size; /* external so it can be found in core dumps */
449
450 #define VM_PAGE_WIRE_COUNT_WARNING 0
451 #define VM_PAGE_GOBBLE_COUNT_WARNING 0
452
453 unsigned int vm_page_purgeable_count = 0; /* # of pages purgeable now */
454 unsigned int vm_page_purgeable_wired_count = 0; /* # of purgeable pages that are wired now */
455 uint64_t vm_page_purged_count = 0; /* total count of purged pages */
456
457 unsigned int vm_page_xpmapped_external_count = 0;
458 unsigned int vm_page_external_count = 0;
459 unsigned int vm_page_internal_count = 0;
460 unsigned int vm_page_pageable_external_count = 0;
461 unsigned int vm_page_pageable_internal_count = 0;
462
463 #if DEVELOPMENT || DEBUG
464 unsigned int vm_page_speculative_recreated = 0;
465 unsigned int vm_page_speculative_created = 0;
466 unsigned int vm_page_speculative_used = 0;
467 #endif
468
469 vm_page_queue_head_t vm_page_queue_cleaned VM_PAGE_PACKED_ALIGNED;
470
471 unsigned int vm_page_cleaned_count = 0;
472
473 uint64_t max_valid_dma_address = 0xffffffffffffffffULL;
474 ppnum_t max_valid_low_ppnum = PPNUM_MAX;
475
476
477 /*
478 * Several page replacement parameters are also
479 * shared with this module, so that page allocation
480 * (done here in vm_page_alloc) can trigger the
481 * pageout daemon.
482 */
483 unsigned int vm_page_free_target = 0;
484 unsigned int vm_page_free_min = 0;
485 unsigned int vm_page_throttle_limit = 0;
486 unsigned int vm_page_inactive_target = 0;
487 #if CONFIG_SECLUDED_MEMORY
488 unsigned int vm_page_secluded_target = 0;
489 #endif /* CONFIG_SECLUDED_MEMORY */
490 unsigned int vm_page_anonymous_min = 0;
491 unsigned int vm_page_free_reserved = 0;
492
493
494 /*
495 * The VM system has a couple of heuristics for deciding
496 * that pages are "uninteresting" and should be placed
497 * on the inactive queue as likely candidates for replacement.
498 * These variables let the heuristics be controlled at run-time
499 * to make experimentation easier.
500 */
501
502 boolean_t vm_page_deactivate_hint = TRUE;
503
504 struct vm_page_stats_reusable vm_page_stats_reusable;
505
506 /*
507 * vm_set_page_size:
508 *
509 * Sets the page size, perhaps based upon the memory
510 * size. Must be called before any use of page-size
511 * dependent functions.
512 *
513 * Sets page_shift and page_mask from page_size.
514 */
515 void
516 vm_set_page_size(void)
517 {
518 page_size = PAGE_SIZE;
519 page_mask = PAGE_MASK;
520 page_shift = PAGE_SHIFT;
521
522 if ((page_mask & page_size) != 0) {
523 panic("vm_set_page_size: page size not a power of two");
524 }
525
526 for (page_shift = 0;; page_shift++) {
527 if ((1U << page_shift) == page_size) {
528 break;
529 }
530 }
531 }
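/*
 * Worked example (illustrative, assuming PAGE_MASK == PAGE_SIZE - 1): with a
 * 16 KB page, this leaves page_size = 16384, page_mask = 0x3fff, and the loop
 * exits with page_shift = 14 since (1U << 14) == 16384. A page size that is
 * not a power of two trips the panic above because (page_mask & page_size)
 * would be non-zero.
 */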
532
533 #if defined (__x86_64__)
534
535 #define MAX_CLUMP_SIZE 16
536 #define DEFAULT_CLUMP_SIZE 4
537
538 unsigned int vm_clump_size, vm_clump_mask, vm_clump_shift, vm_clump_promote_threshold;
539
540 #if DEVELOPMENT || DEBUG
541 unsigned long vm_clump_stats[MAX_CLUMP_SIZE + 1];
542 unsigned long vm_clump_allocs, vm_clump_inserts, vm_clump_inrange, vm_clump_promotes;
543
544 static inline void
545 vm_clump_update_stats(unsigned int c)
546 {
547 assert(c <= vm_clump_size);
548 if (c > 0 && c <= vm_clump_size) {
549 vm_clump_stats[c] += c;
550 }
551 vm_clump_allocs += c;
552 }
553 #endif /* if DEVELOPMENT || DEBUG */
554
555 /* Called once to set up the VM clump knobs */
556 static void
557 vm_page_setup_clump( void )
558 {
559 unsigned int override, n;
560
561 vm_clump_size = DEFAULT_CLUMP_SIZE;
562 if (PE_parse_boot_argn("clump_size", &override, sizeof(override))) {
563 vm_clump_size = override;
564 }
565
566 if (vm_clump_size > MAX_CLUMP_SIZE) {
567 panic("vm_page_setup_clump:: clump_size is too large!");
568 }
569 if (vm_clump_size < 1) {
570 panic("vm_page_setup_clump:: clump_size must be >= 1");
571 }
572 if ((vm_clump_size & (vm_clump_size - 1)) != 0) {
573 panic("vm_page_setup_clump:: clump_size must be a power of 2");
574 }
575
576 vm_clump_promote_threshold = vm_clump_size;
577 vm_clump_mask = vm_clump_size - 1;
578 for (vm_clump_shift = 0, n = vm_clump_size; n > 1; n >>= 1, vm_clump_shift++) {
579 ;
580 }
581
582 #if DEVELOPMENT || DEBUG
583 bzero(vm_clump_stats, sizeof(vm_clump_stats));
584 vm_clump_allocs = vm_clump_inserts = vm_clump_inrange = vm_clump_promotes = 0;
585 #endif /* if DEVELOPMENT || DEBUG */
586 }
587
588 #endif /* #if defined (__x86_64__) */
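/*
 * Clump math, as an illustrative sketch (x86_64 only): with the default
 * clump_size of 4 pages, vm_page_setup_clump() leaves vm_clump_mask = 3 and
 * vm_clump_shift = 2. For a physical page number pn (a hypothetical variable,
 * just for illustration):
 *
 *   pn >> vm_clump_shift    selects the clump the page belongs to
 *   pn &  vm_clump_mask     is the page's position within that clump
 */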
589
590 #define COLOR_GROUPS_TO_STEAL 4
591
592 /* Called once during startup, once the cache geometry is known.
593 */
594 static void
595 vm_page_set_colors( void )
596 {
597 unsigned int n, override;
598
599 #if defined (__x86_64__)
600 /* adjust #colors because we need to color outside the clump boundary */
601 vm_cache_geometry_colors >>= vm_clump_shift;
602 #endif
603 if (PE_parse_boot_argn("colors", &override, sizeof(override))) { /* colors specified as a boot-arg? */
604 n = override;
605 } else if (vm_cache_geometry_colors) { /* do we know what the cache geometry is? */
606 n = vm_cache_geometry_colors;
607 } else {
608 n = DEFAULT_COLORS; /* use default if all else fails */
609 }
610 if (n == 0) {
611 n = 1;
612 }
613 if (n > MAX_COLORS) {
614 n = MAX_COLORS;
615 }
616
617 /* the count must be a power of 2 */
618 if ((n & (n - 1)) != 0) {
619 n = DEFAULT_COLORS; /* use default if all else fails */
620 }
621 vm_colors = n;
622 vm_color_mask = n - 1;
623
624 vm_free_magazine_refill_limit = vm_colors * COLOR_GROUPS_TO_STEAL;
625
626 #if defined (__x86_64__)
627 /* adjust for reduction in colors due to clumping and multiple cores */
628 if (real_ncpus) {
629 vm_free_magazine_refill_limit *= (vm_clump_size * real_ncpus);
630 }
631 #endif
632 }
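/*
 * Illustrative sketch (not part of the build): with vm_colors a power of two
 * and vm_color_mask == vm_colors - 1, a freed page is steered to a per-color
 * free queue by the low bits of its physical page number, roughly:
 *
 *   unsigned int color = VM_PAGE_GET_PHYS_PAGE(mem) & vm_color_mask;
 *   vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
 *
 * The real enqueue path lives in the page release/grab code; this only shows
 * how the mask computed here is intended to be used.
 */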
633
634 /*
635  * During single-threaded early boot we don't initialize all pages.
636  * This avoids some delay during boot. The remaining pages are initialized and
637  * added to the free list as needed, or, once we are multithreaded, by
638  * what becomes the pageout thread.
639 */
640 static boolean_t fill = FALSE;
641 static unsigned int fillval;
642 uint_t vm_delayed_count = 0; /* when non-zero, indicates we may have more pages to init */
643 ppnum_t delay_above_pnum = PPNUM_MAX;
644
645 /*
646  * On x86 the first 8 GB initializes quickly and gives us plenty of low memory, plus memory above it, to start off with.
647 * If ARM ever uses delayed page initialization, this value may need to be quite different.
648 */
649 #define DEFAULT_DELAY_ABOVE_PHYS_GB (8)
650
651 /*
652 * When we have to dip into more delayed pages due to low memory, free up
653  * a large chunk to get things back to normal. This avoids repeatedly hitting
654  * the delayed-allocation code one page at a time.
655 */
656 #define VM_DELAY_PAGE_CHUNK ((1024 * 1024 * 1024) / PAGE_SIZE)
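/*
 * For scale (illustrative): VM_DELAY_PAGE_CHUNK is 1 GB worth of pages, i.e.
 * 262144 pages with 4 KB pages or 65536 pages with 16 KB pages, so one dip
 * into the delayed pool replenishes the free list in a single large step.
 */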
657
658 /*
659 * Get and initialize the next delayed page.
660 */
661 static vm_page_t
662 vm_get_delayed_page(int grab_options)
663 {
664 vm_page_t p;
665 ppnum_t pnum;
666
667 /*
668 * Get a new page if we have one.
669 */
670 vm_free_page_lock();
671 if (vm_delayed_count == 0) {
672 vm_free_page_unlock();
673 return NULL;
674 }
675
676 if (!pmap_next_page(&pnum)) {
677 vm_delayed_count = 0;
678 vm_free_page_unlock();
679 return NULL;
680 }
681
682
683 assert(vm_delayed_count > 0);
684 --vm_delayed_count;
685
686 #if defined(__x86_64__)
687 /* x86 cluster code requires increasing phys_page in vm_pages[] */
688 if (vm_pages_count > 0) {
689 assert(pnum > vm_pages[vm_pages_count - 1].vmp_phys_page);
690 }
691 #endif
692 p = &vm_pages[vm_pages_count];
693 assert(p < vm_page_array_ending_addr);
694 vm_page_init(p, pnum, FALSE);
695 ++vm_pages_count;
696 ++vm_page_pages;
697 vm_free_page_unlock();
698
699 /*
700 * These pages were initially counted as wired, undo that now.
701 */
702 if (grab_options & VM_PAGE_GRAB_Q_LOCK_HELD) {
703 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
704 } else {
705 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
706 vm_page_lockspin_queues();
707 }
708 --vm_page_wire_count;
709 --vm_page_wire_count_initial;
710 if (vm_page_wire_count_on_boot != 0) {
711 --vm_page_wire_count_on_boot;
712 }
713 if (!(grab_options & VM_PAGE_GRAB_Q_LOCK_HELD)) {
714 vm_page_unlock_queues();
715 }
716
717
718 if (fill) {
719 fillPage(pnum, fillval);
720 }
721 return p;
722 }
723
724 static void vm_page_module_init_delayed(void);
725
726 /*
727 * Free all remaining delayed pages to the free lists.
728 */
729 void
730 vm_free_delayed_pages(void)
731 {
732 vm_page_t p;
733 vm_page_t list = NULL;
734 uint_t cnt = 0;
735 vm_offset_t start_free_va;
736 int64_t free_size;
737
738 while ((p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE)) != NULL) {
739 if (vm_himemory_mode) {
740 vm_page_release(p, FALSE);
741 } else {
742 p->vmp_snext = list;
743 list = p;
744 }
745 ++cnt;
746 }
747
748 /*
749  * If not in himemory mode, free the pages in reverse order
750  * so that the low memory pages end up first on the free lists. (LIFO)
751 */
752 while (list != NULL) {
753 p = list;
754 list = p->vmp_snext;
755 p->vmp_snext = NULL;
756 vm_page_release(p, FALSE);
757 }
758 #if DEVELOPMENT || DEBUG
759 kprintf("vm_free_delayed_pages: initialized %d free pages\n", cnt);
760 #endif
761
762 /*
763 * Free up any unused full pages at the end of the vm_pages[] array
764 */
765 start_free_va = round_page((vm_offset_t)&vm_pages[vm_pages_count]);
766
767 #if defined(__x86_64__)
768 /*
769 * Since x86 might have used large pages for vm_pages[], we can't
770 * free starting in the middle of a partially used large page.
771 */
772 if (pmap_query_pagesize(kernel_pmap, start_free_va) == I386_LPGBYTES) {
773 start_free_va = ((start_free_va + I386_LPGMASK) & ~I386_LPGMASK);
774 }
775 #endif
776 if (start_free_va < (vm_offset_t)vm_page_array_ending_addr) {
777 free_size = trunc_page((vm_offset_t)vm_page_array_ending_addr - start_free_va);
778 if (free_size > 0) {
779 ml_static_mfree(start_free_va, (vm_offset_t)free_size);
780 vm_page_array_ending_addr = (void *)start_free_va;
781
782 /*
783 * Note there's no locking here, as only this thread will ever change this value.
784 * The reader, vm_page_diagnose, doesn't grab any locks for the counts it looks at.
785 */
786 vm_page_stolen_count -= (free_size >> PAGE_SHIFT);
787
788 #if DEVELOPMENT || DEBUG
789 kprintf("Freeing final unused %ld bytes from vm_pages[] at 0x%lx\n",
790 (long)free_size, (long)start_free_va);
791 #endif
792 }
793 }
794
795
796 /*
797 * now we can create the VM page array zone
798 */
799 vm_page_module_init_delayed();
800 }
801
802 /*
803  * Try to free up enough delayed pages to satisfy a contiguous memory allocation.
804 */
805 static void
806 vm_free_delayed_pages_contig(
807 uint_t npages,
808 ppnum_t max_pnum,
809 ppnum_t pnum_mask)
810 {
811 vm_page_t p;
812 ppnum_t pnum;
813 uint_t cnt = 0;
814
815 /*
816 * Treat 0 as the absolute max page number.
817 */
818 if (max_pnum == 0) {
819 max_pnum = PPNUM_MAX;
820 }
821
822 /*
823  * Free pages until we get a properly aligned start page.
824 */
825 for (;;) {
826 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
827 if (p == NULL) {
828 return;
829 }
830 pnum = VM_PAGE_GET_PHYS_PAGE(p);
831 vm_page_release(p, FALSE);
832 if (pnum >= max_pnum) {
833 return;
834 }
835 if ((pnum & pnum_mask) == 0) {
836 break;
837 }
838 }
839
840 /*
841 * Having a healthy pool of free pages will help performance. We don't
842 * want to fall back to the delayed code for every page allocation.
843 */
844 if (vm_page_free_count < VM_DELAY_PAGE_CHUNK) {
845 npages += VM_DELAY_PAGE_CHUNK;
846 }
847
848 /*
849 * Now free up the pages
850 */
851 for (cnt = 1; cnt < npages; ++cnt) {
852 p = vm_get_delayed_page(VM_PAGE_GRAB_OPTIONS_NONE);
853 if (p == NULL) {
854 return;
855 }
856 vm_page_release(p, FALSE);
857 }
858 }
859
860 #define ROUNDUP_NEXTP2(X) (1U << (32 - __builtin_clz((X) - 1)))
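/*
 * ROUNDUP_NEXTP2 examples (illustrative): ROUNDUP_NEXTP2(5) == 8,
 * ROUNDUP_NEXTP2(8) == 8, ROUNDUP_NEXTP2(9) == 16. The expression assumes
 * X >= 2, since __builtin_clz(0) is undefined for X == 1.
 */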
861
862 void
863 vm_page_init_local_q(unsigned int num_cpus)
864 {
865 struct vpl *t_local_q;
866
867 /*
868 * no point in this for a uni-processor system
869 */
870 if (num_cpus >= 2) {
871 ml_cpu_info_t cpu_info;
872
873 /*
874 * Force the allocation alignment to a cacheline,
875 * because the `vpl` struct has a lock and will be taken
876 * cross CPU so we want to isolate the rest of the per-CPU
877 * data to avoid false sharing due to this lock being taken.
878 */
879
880 ml_cpu_get_info(&cpu_info);
881
882 t_local_q = zalloc_percpu_permanent(sizeof(struct vpl),
883 cpu_info.cache_line_size - 1);
884
885 zpercpu_foreach(lq, t_local_q) {
886 VPL_LOCK_INIT(lq, &vm_page_lck_grp_local, &vm_page_lck_attr);
887 vm_page_queue_init(&lq->vpl_queue);
888 }
889
890 /* make the initialization visible to all cores */
891 os_atomic_store(&vm_page_local_q, t_local_q, release);
892 }
893 }
894
895 /*
896 * vm_init_before_launchd
897 *
898 * This should be called right before launchd is loaded.
899 */
900 void
901 vm_init_before_launchd()
902 {
903 vm_page_lockspin_queues();
904 vm_page_wire_count_on_boot = vm_page_wire_count;
905 vm_page_unlock_queues();
906 }
907
908
909 /*
910 * vm_page_bootstrap:
911 *
912 * Initializes the resident memory module.
913 *
914 * Allocates memory for the page cells, and
915 * for the object/offset-to-page hash table headers.
916 * Each page cell is initialized and placed on the free list.
917 * Returns the range of available kernel virtual memory.
918 */
919 __startup_func
920 void
921 vm_page_bootstrap(
922 vm_offset_t *startp,
923 vm_offset_t *endp)
924 {
925 unsigned int i;
926 unsigned int log1;
927 unsigned int log2;
928 unsigned int size;
929
930 /*
931 * Initialize the page queues.
932 */
933
934 lck_mtx_init(&vm_page_queue_free_lock, &vm_page_lck_grp_free, &vm_page_lck_attr);
935 lck_mtx_init(&vm_page_queue_lock, &vm_page_lck_grp_queue, &vm_page_lck_attr);
936 lck_mtx_init(&vm_purgeable_queue_lock, &vm_page_lck_grp_purge, &vm_page_lck_attr);
937
938 for (i = 0; i < PURGEABLE_Q_TYPE_MAX; i++) {
939 int group;
940
941 purgeable_queues[i].token_q_head = 0;
942 purgeable_queues[i].token_q_tail = 0;
943 for (group = 0; group < NUM_VOLATILE_GROUPS; group++) {
944 queue_init(&purgeable_queues[i].objq[group]);
945 }
946
947 purgeable_queues[i].type = i;
948 purgeable_queues[i].new_pages = 0;
949 #if MACH_ASSERT
950 purgeable_queues[i].debug_count_tokens = 0;
951 purgeable_queues[i].debug_count_objects = 0;
952 #endif
953 }
954 ;
955 purgeable_nonvolatile_count = 0;
956 queue_init(&purgeable_nonvolatile_queue);
957
958 for (i = 0; i < MAX_COLORS; i++) {
959 vm_page_queue_init(&vm_page_queue_free[i].qhead);
960 }
961
962 vm_page_queue_init(&vm_lopage_queue_free);
963 vm_page_queue_init(&vm_page_queue_active);
964 vm_page_queue_init(&vm_page_queue_inactive);
965 #if CONFIG_SECLUDED_MEMORY
966 vm_page_queue_init(&vm_page_queue_secluded);
967 #endif /* CONFIG_SECLUDED_MEMORY */
968 vm_page_queue_init(&vm_page_queue_cleaned);
969 vm_page_queue_init(&vm_page_queue_throttled);
970 vm_page_queue_init(&vm_page_queue_anonymous);
971 queue_init(&vm_objects_wired);
972
973 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
974 vm_page_queue_init(&vm_page_queue_speculative[i].age_q);
975
976 vm_page_queue_speculative[i].age_ts.tv_sec = 0;
977 vm_page_queue_speculative[i].age_ts.tv_nsec = 0;
978 }
979
980 vm_page_queue_init(&vm_page_queue_donate);
981 vm_page_queue_init(&vm_page_queue_background);
982
983 vm_page_background_count = 0;
984 vm_page_background_internal_count = 0;
985 vm_page_background_external_count = 0;
986 vm_page_background_promoted_count = 0;
987
988 vm_page_background_target = (unsigned int)(atop_64(max_mem) / 25);
989
990 if (vm_page_background_target > VM_PAGE_BACKGROUND_TARGET_MAX) {
991 vm_page_background_target = VM_PAGE_BACKGROUND_TARGET_MAX;
992 }
993
994 #if defined(__LP64__)
995 vm_page_background_mode = VM_PAGE_BG_ENABLED;
996 vm_page_donate_mode = VM_PAGE_DONATE_ENABLED;
997 #else
998 vm_page_background_mode = VM_PAGE_BG_DISABLED;
999 vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1000 #endif
1001 vm_page_background_exclude_external = 0;
1002
1003 PE_parse_boot_argn("vm_page_bg_mode", &vm_page_background_mode, sizeof(vm_page_background_mode));
1004 PE_parse_boot_argn("vm_page_bg_exclude_external", &vm_page_background_exclude_external, sizeof(vm_page_background_exclude_external));
1005 PE_parse_boot_argn("vm_page_bg_target", &vm_page_background_target, sizeof(vm_page_background_target));
1006
1007 if (vm_page_background_mode != VM_PAGE_BG_DISABLED && vm_page_background_mode != VM_PAGE_BG_ENABLED) {
1008 vm_page_background_mode = VM_PAGE_BG_DISABLED;
1009 }
1010
1011 PE_parse_boot_argn("vm_page_donate_mode", &vm_page_donate_mode, sizeof(vm_page_donate_mode));
1012 if (vm_page_donate_mode != VM_PAGE_DONATE_DISABLED && vm_page_donate_mode != VM_PAGE_DONATE_ENABLED) {
1013 vm_page_donate_mode = VM_PAGE_DONATE_DISABLED;
1014 }
1015
1016 vm_page_donate_target_high = VM_PAGE_DONATE_TARGET_HIGHWATER;
1017 vm_page_donate_target_low = VM_PAGE_DONATE_TARGET_LOWWATER;
1018 vm_page_donate_target = vm_page_donate_target_high;
1019 vm_page_donate_count = 0;
1020
1021 vm_page_free_wanted = 0;
1022 vm_page_free_wanted_privileged = 0;
1023 #if CONFIG_SECLUDED_MEMORY
1024 vm_page_free_wanted_secluded = 0;
1025 #endif /* CONFIG_SECLUDED_MEMORY */
1026
1027 #if defined (__x86_64__)
1028 /* this must be called before vm_page_set_colors() */
1029 vm_page_setup_clump();
1030 #endif
1031
1032 vm_page_set_colors();
1033
1034 bzero(vm_page_inactive_states, sizeof(vm_page_inactive_states));
1035 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1036 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1037 vm_page_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1038
1039 bzero(vm_page_pageable_states, sizeof(vm_page_pageable_states));
1040 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1041 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1042 vm_page_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1043 vm_page_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1044 vm_page_pageable_states[VM_PAGE_ON_SPECULATIVE_Q] = 1;
1045 vm_page_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1046 #if CONFIG_SECLUDED_MEMORY
1047 vm_page_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1048 #endif /* CONFIG_SECLUDED_MEMORY */
1049
1050 bzero(vm_page_non_speculative_pageable_states, sizeof(vm_page_non_speculative_pageable_states));
1051 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1052 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1053 vm_page_non_speculative_pageable_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1054 vm_page_non_speculative_pageable_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1055 vm_page_non_speculative_pageable_states[VM_PAGE_ON_THROTTLED_Q] = 1;
1056 #if CONFIG_SECLUDED_MEMORY
1057 vm_page_non_speculative_pageable_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1058 #endif /* CONFIG_SECLUDED_MEMORY */
1059
1060 bzero(vm_page_active_or_inactive_states, sizeof(vm_page_active_or_inactive_states));
1061 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_INTERNAL_Q] = 1;
1062 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_EXTERNAL_Q] = 1;
1063 vm_page_active_or_inactive_states[VM_PAGE_ON_INACTIVE_CLEANED_Q] = 1;
1064 vm_page_active_or_inactive_states[VM_PAGE_ON_ACTIVE_Q] = 1;
1065 #if CONFIG_SECLUDED_MEMORY
1066 vm_page_active_or_inactive_states[VM_PAGE_ON_SECLUDED_Q] = 1;
1067 #endif /* CONFIG_SECLUDED_MEMORY */
1068
1069 for (vm_tag_t t = 0; t < VM_KERN_MEMORY_FIRST_DYNAMIC; t++) {
1070 vm_allocation_sites_static[t].refcount = 2;
1071 vm_allocation_sites_static[t].tag = t;
1072 vm_allocation_sites[t] = &vm_allocation_sites_static[t];
1073 }
1074 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].refcount = 2;
1075 vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC].tag = VM_KERN_MEMORY_ANY;
1076 vm_allocation_sites[VM_KERN_MEMORY_ANY] = &vm_allocation_sites_static[VM_KERN_MEMORY_FIRST_DYNAMIC];
1077
1078 /*
1079 * Steal memory for the map and zone subsystems.
1080 */
1081 kernel_startup_initialize_upto(STARTUP_SUB_PMAP_STEAL);
1082
1083 /*
1084 * Allocate (and initialize) the virtual-to-physical
1085 * table hash buckets.
1086 *
1087 * The number of buckets should be a power of two to
1088 * get a good hash function. The following computation
1089 * chooses the first power of two that is greater
1090 * than the number of physical pages in the system.
1091 */
1092
1093 if (vm_page_bucket_count == 0) {
1094 unsigned int npages = pmap_free_pages();
1095
1096 vm_page_bucket_count = 1;
1097 while (vm_page_bucket_count < npages) {
1098 vm_page_bucket_count <<= 1;
1099 }
1100 }
1101 vm_page_bucket_lock_count = (vm_page_bucket_count + BUCKETS_PER_LOCK - 1) / BUCKETS_PER_LOCK;
1102
1103 vm_page_hash_mask = vm_page_bucket_count - 1;
1104
1105 /*
1106 * Calculate object shift value for hashing algorithm:
1107 * O = log2(sizeof(struct vm_object))
1108 * B = log2(vm_page_bucket_count)
1109 * hash shifts the object left by
1110 * B/2 - O
1111 */
1112 size = vm_page_bucket_count;
1113 for (log1 = 0; size > 1; log1++) {
1114 size /= 2;
1115 }
1116 size = sizeof(struct vm_object);
1117 for (log2 = 0; size > 1; log2++) {
1118 size /= 2;
1119 }
1120 vm_page_hash_shift = log1 / 2 - log2 + 1;
1121
1122 vm_page_bucket_hash = 1 << ((log1 + 1) >> 1); /* Get (ceiling of sqrt of table size) */
1123 vm_page_bucket_hash |= 1 << ((log1 + 1) >> 2); /* Get (ceiling of quadroot of table size) */
1124 vm_page_bucket_hash |= 1; /* Set bit and add 1 - always must be 1 to ensure a unique series */
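/*
 * Worked example (illustrative): with 2^20 buckets, log1 == 20 above, so
 * vm_page_bucket_hash == (1 << 10) | (1 << 5) | 1 == 0x421. If
 * sizeof(struct vm_object) were 256 bytes (an assumed value, purely for
 * illustration), log2 == 8 and vm_page_hash_shift == 20/2 - 8 + 1 == 3.
 */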
1125
1126 if (vm_page_hash_mask & vm_page_bucket_count) {
1127 printf("vm_page_bootstrap: WARNING -- strange page hash\n");
1128 }
1129
1130 #if VM_PAGE_BUCKETS_CHECK
1131 #if VM_PAGE_FAKE_BUCKETS
1132 /*
1133 * Allocate a decoy set of page buckets, to detect
1134 * any stomping there.
1135 */
1136 vm_page_fake_buckets = (vm_page_bucket_t *)
1137 pmap_steal_memory(vm_page_bucket_count *
1138 sizeof(vm_page_bucket_t));
1139 vm_page_fake_buckets_start = (vm_map_offset_t) vm_page_fake_buckets;
1140 vm_page_fake_buckets_end =
1141 vm_map_round_page((vm_page_fake_buckets_start +
1142 (vm_page_bucket_count *
1143 sizeof(vm_page_bucket_t))),
1144 PAGE_MASK);
1145 char *cp;
1146 for (cp = (char *)vm_page_fake_buckets_start;
1147 cp < (char *)vm_page_fake_buckets_end;
1148 cp++) {
1149 *cp = 0x5a;
1150 }
1151 #endif /* VM_PAGE_FAKE_BUCKETS */
1152 #endif /* VM_PAGE_BUCKETS_CHECK */
1153
1154 kernel_debug_string_early("vm_page_buckets");
1155 vm_page_buckets = (vm_page_bucket_t *)
1156 pmap_steal_memory(vm_page_bucket_count *
1157 sizeof(vm_page_bucket_t));
1158
1159 kernel_debug_string_early("vm_page_bucket_locks");
1160 vm_page_bucket_locks = (lck_spin_t *)
1161 pmap_steal_memory(vm_page_bucket_lock_count *
1162 sizeof(lck_spin_t));
1163
1164 for (i = 0; i < vm_page_bucket_count; i++) {
1165 vm_page_bucket_t *bucket = &vm_page_buckets[i];
1166
1167 bucket->page_list = VM_PAGE_PACK_PTR(VM_PAGE_NULL);
1168 #if MACH_PAGE_HASH_STATS
1169 bucket->cur_count = 0;
1170 bucket->hi_count = 0;
1171 #endif /* MACH_PAGE_HASH_STATS */
1172 }
1173
1174 for (i = 0; i < vm_page_bucket_lock_count; i++) {
1175 lck_spin_init(&vm_page_bucket_locks[i], &vm_page_lck_grp_bucket, &vm_page_lck_attr);
1176 }
1177
1178 vm_tag_init();
1179
1180 #if VM_PAGE_BUCKETS_CHECK
1181 vm_page_buckets_check_ready = TRUE;
1182 #endif /* VM_PAGE_BUCKETS_CHECK */
1183
1184 /*
1185 * Machine-dependent code allocates the resident page table.
1186 * It uses vm_page_init to initialize the page frames.
1187 * The code also returns to us the virtual space available
1188 * to the kernel. We don't trust the pmap module
1189 * to get the alignment right.
1190 */
1191
1192 kernel_debug_string_early("pmap_startup");
1193 pmap_startup(&virtual_space_start, &virtual_space_end);
1194 virtual_space_start = round_page(virtual_space_start);
1195 virtual_space_end = trunc_page(virtual_space_end);
1196
1197 *startp = virtual_space_start;
1198 *endp = virtual_space_end;
1199
1200 /*
1201 * Compute the initial "wire" count.
1202 * Up until now, the pages which have been set aside are not under
1203 * the VM system's control, so although they aren't explicitly
1204 * wired, they nonetheless can't be moved. At this moment,
1205 * all VM managed pages are "free", courtesy of pmap_startup.
1206 */
1207 assert((unsigned int) atop_64(max_mem) == atop_64(max_mem));
1208 vm_page_wire_count = ((unsigned int) atop_64(max_mem)) -
1209 vm_page_free_count - vm_lopage_free_count;
1210 #if CONFIG_SECLUDED_MEMORY
1211 vm_page_wire_count -= vm_page_secluded_count;
1212 #endif
1213 vm_page_wire_count_initial = vm_page_wire_count;
1214
1215 /* capture this for later use */
1216 booter_size = ml_get_booter_memory_size();
1217
1218 printf("vm_page_bootstrap: %d free pages, %d wired pages, (up to %d of which are delayed free)\n",
1219 vm_page_free_count, vm_page_wire_count, vm_delayed_count);
1220
1221 kernel_debug_string_early("vm_page_bootstrap complete");
1222 }
1223
1224 #ifndef MACHINE_PAGES
1225 /*
1226 * This is the early boot time allocator for data structures needed to bootstrap the VM system.
1227 * On x86 it will allocate large pages if size is sufficiently large. We don't need to do this
1228 * on ARM yet, due to the combination of a large base page size and smaller RAM devices.
1229 */
1230 static void *
1231 pmap_steal_memory_internal(
1232 vm_size_t size,
1233 boolean_t might_free)
1234 {
1235 kern_return_t kr;
1236 vm_offset_t addr;
1237 vm_offset_t map_addr;
1238 ppnum_t phys_page;
1239
1240 /*
1241 * Size needs to be aligned to word size.
1242 */
1243 size = (size + sizeof(void *) - 1) & ~(sizeof(void *) - 1);
1244
1245 /*
1246 * On the first call, get the initial values for virtual address space
1247 * and page align them.
1248 */
1249 if (virtual_space_start == virtual_space_end) {
1250 pmap_virtual_space(&virtual_space_start, &virtual_space_end);
1251 virtual_space_start = round_page(virtual_space_start);
1252 virtual_space_end = trunc_page(virtual_space_end);
1253
1254 #if defined(__x86_64__)
1255 /*
1256 * Release remaining unused section of preallocated KVA and the 4K page tables
1257 * that map it. This makes the VA available for large page mappings.
1258 */
1259 Idle_PTs_release(virtual_space_start, virtual_space_end);
1260 #endif
1261 }
1262
1263 /*
1264 * Allocate the virtual space for this request. On x86, we'll align to a large page
1265 * address if the size is big enough to back with at least 1 large page.
1266 */
1267 #if defined(__x86_64__)
1268 if (size >= I386_LPGBYTES) {
1269 virtual_space_start = ((virtual_space_start + I386_LPGMASK) & ~I386_LPGMASK);
1270 }
1271 #endif
1272 addr = virtual_space_start;
1273 virtual_space_start += size;
1274
1275 //kprintf("pmap_steal_memory: %08lX - %08lX; size=%08lX\n", (long)addr, (long)virtual_space_start, (long)size); /* (TEST/DEBUG) */
1276
1277 /*
1278 * Allocate and map physical pages to back the new virtual space.
1279 */
1280 map_addr = round_page(addr);
1281 while (map_addr < addr + size) {
1282 #if defined(__x86_64__)
1283 /*
1284 * Back with a large page if properly aligned on x86
1285 */
1286 if ((map_addr & I386_LPGMASK) == 0 &&
1287 map_addr + I386_LPGBYTES <= addr + size &&
1288 pmap_pre_expand_large(kernel_pmap, map_addr) == KERN_SUCCESS &&
1289 pmap_next_page_large(&phys_page) == KERN_SUCCESS) {
1290 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1291 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1292 VM_WIMG_USE_DEFAULT | VM_MEM_SUPERPAGE, FALSE);
1293
1294 if (kr != KERN_SUCCESS) {
1295 panic("pmap_steal_memory: pmap_enter() large failed, new_addr=%#lx, phys_page=%u",
1296 (unsigned long)map_addr, phys_page);
1297 }
1298 map_addr += I386_LPGBYTES;
1299 vm_page_wire_count += I386_LPGBYTES >> PAGE_SHIFT;
1300 vm_page_stolen_count += I386_LPGBYTES >> PAGE_SHIFT;
1301 vm_page_kern_lpage_count++;
1302 continue;
1303 }
1304 #endif
1305
1306 if (!pmap_next_page_hi(&phys_page, might_free)) {
1307 panic("pmap_steal_memory() size: 0x%llx", (uint64_t)size);
1308 }
1309
1310 #if defined(__x86_64__)
1311 pmap_pre_expand(kernel_pmap, map_addr);
1312 #endif
1313
1314 kr = pmap_enter(kernel_pmap, map_addr, phys_page,
1315 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE,
1316 VM_WIMG_USE_DEFAULT, FALSE);
1317
1318 if (kr != KERN_SUCCESS) {
1319 panic("pmap_steal_memory() pmap_enter failed, map_addr=%#lx, phys_page=%u",
1320 (unsigned long)map_addr, phys_page);
1321 }
1322 map_addr += PAGE_SIZE;
1323
1324 /*
1325 * Account for newly stolen memory
1326 */
1327 vm_page_wire_count++;
1328 vm_page_stolen_count++;
1329 }
1330
1331 #if defined(__x86_64__)
1332 /*
1333 * The call with might_free is currently the last use of pmap_steal_memory*().
1334 * Notify the pmap layer to record which high pages were allocated so far.
1335 */
1336 if (might_free) {
1337 pmap_hi_pages_done();
1338 }
1339 #endif
1340 #if KASAN
1341 kasan_notify_address(round_page(addr), size);
1342 #endif
1343 return (void *) addr;
1344 }
1345
1346 void *
1347 pmap_steal_memory(
1348 vm_size_t size)
1349 {
1350 return pmap_steal_memory_internal(size, FALSE);
1351 }
1352
1353 void *
1354 pmap_steal_freeable_memory(
1355 vm_size_t size)
1356 {
1357 return pmap_steal_memory_internal(size, TRUE);
1358 }
1359
1360
1361 #if CONFIG_SECLUDED_MEMORY
1362 /* boot-args to control secluded memory */
1363 unsigned int secluded_mem_mb = 0; /* # of MBs of RAM to seclude */
1364 int secluded_for_iokit = 1; /* IOKit can use secluded memory */
1365 int secluded_for_apps = 1; /* apps can use secluded memory */
1366 int secluded_for_filecache = 2; /* filecache can use secluded memory */
1367 #if 11
1368 int secluded_for_fbdp = 0;
1369 #endif
1370 uint64_t secluded_shutoff_trigger = 0;
1371 uint64_t secluded_shutoff_headroom = 150 * 1024 * 1024; /* original value from N56 */
1372 #endif /* CONFIG_SECLUDED_MEMORY */
1373
1374
1375 #if defined(__arm64__)
1376 extern void patch_low_glo_vm_page_info(void *, void *, uint32_t);
1377 unsigned int vm_first_phys_ppnum = 0;
1378 #endif
1379
1380 void vm_page_release_startup(vm_page_t mem);
1381 void
1382 pmap_startup(
1383 vm_offset_t *startp,
1384 vm_offset_t *endp)
1385 {
1386 unsigned int i, npages;
1387 ppnum_t phys_page;
1388 uint64_t mem_sz;
1389 uint64_t start_ns;
1390 uint64_t now_ns;
1391 uint_t low_page_count = 0;
1392
1393 #if defined(__LP64__)
1394 /*
1395 * make sure we are aligned on a 64 byte boundary
1396 * for VM_PAGE_PACK_PTR (it clips off the low-order
1397 * 6 bits of the pointer)
1398 */
1399 if (virtual_space_start != virtual_space_end) {
1400 virtual_space_start = round_page(virtual_space_start);
1401 }
1402 #endif
1403
1404 /*
1405 * We calculate how many page frames we will have
1406 * and then allocate the page structures in one chunk.
1407 *
1408 * Note that the calculation here doesn't take into account
1409 * the memory needed to map what's being allocated, i.e. the page
1410 * table entries. So the actual number of pages we get will be
1411 * less than this. To do someday: include that in the computation.
1412 *
1413 * Also for ARM, we don't use the count of free_pages, but rather the
1414 * range from last page to first page (ignore holes due to retired pages).
1415 */
1416 #if defined(__arm64__)
1417 mem_sz = pmap_free_pages_span() * (uint64_t)PAGE_SIZE;
1418 #else /* defined(__arm64__) */
1419 mem_sz = pmap_free_pages() * (uint64_t)PAGE_SIZE;
1420 #endif /* defined(__arm64__) */
1421 mem_sz += round_page(virtual_space_start) - virtual_space_start; /* Account for any slop */
1422 npages = (uint_t)(mem_sz / (PAGE_SIZE + sizeof(*vm_pages))); /* scaled to include the vm_page_ts */
1423
1424
1425 vm_pages = (vm_page_t) pmap_steal_freeable_memory(npages * sizeof *vm_pages);
1426
1427 /*
1428 * Check if we want to initialize pages to a known value
1429 */
1430 if (PE_parse_boot_argn("fill", &fillval, sizeof(fillval))) {
1431 fill = TRUE;
1432 }
1433 #if DEBUG
1434 /* This slows down booting the DEBUG kernel, particularly on
1435 * large memory systems, but is worthwhile in deterministically
1436 * trapping uninitialized memory usage.
1437 */
1438 if (!fill) {
1439 fill = TRUE;
1440 fillval = 0xDEB8F177;
1441 }
1442 #endif
1443 if (fill) {
1444 kprintf("Filling vm_pages with pattern: 0x%x\n", fillval);
1445 }
1446
1447 #if CONFIG_SECLUDED_MEMORY
1448 /*
1449  * Figure out how much secluded memory to have before we start
1450  * releasing pages to the free lists.
1451  * The default, if specified nowhere else, is no secluded memory.
1452 */
1453 secluded_mem_mb = 0;
1454 if (max_mem > 1 * 1024 * 1024 * 1024) {
1455 /* default to 90MB for devices with > 1GB of RAM */
1456 secluded_mem_mb = 90;
1457 }
1458 /* override with value from device tree, if provided */
1459 PE_get_default("kern.secluded_mem_mb",
1460 &secluded_mem_mb, sizeof(secluded_mem_mb));
1461 /* override with value from boot-args, if provided */
1462 PE_parse_boot_argn("secluded_mem_mb",
1463 &secluded_mem_mb,
1464 sizeof(secluded_mem_mb));
1465
1466 vm_page_secluded_target = (unsigned int)
1467 ((secluded_mem_mb * 1024ULL * 1024ULL) / PAGE_SIZE);
1468 PE_parse_boot_argn("secluded_for_iokit",
1469 &secluded_for_iokit,
1470 sizeof(secluded_for_iokit));
1471 PE_parse_boot_argn("secluded_for_apps",
1472 &secluded_for_apps,
1473 sizeof(secluded_for_apps));
1474 PE_parse_boot_argn("secluded_for_filecache",
1475 &secluded_for_filecache,
1476 sizeof(secluded_for_filecache));
1477 #if 11
1478 PE_parse_boot_argn("secluded_for_fbdp",
1479 &secluded_for_fbdp,
1480 sizeof(secluded_for_fbdp));
1481 #endif
1482
1483 /*
1484 * Allow a really large app to effectively use secluded memory until it exits.
1485 */
1486 if (vm_page_secluded_target != 0) {
1487 /*
1488 * Get an amount from boot-args, else use 1/2 of max_mem.
1489 * 1/2 max_mem was chosen from a Peace daemon tentpole test which
1490 * used munch to induce jetsam thrashing of false idle daemons on N56.
1491 */
1492 int secluded_shutoff_mb;
1493 if (PE_parse_boot_argn("secluded_shutoff_mb", &secluded_shutoff_mb,
1494 sizeof(secluded_shutoff_mb))) {
1495 secluded_shutoff_trigger = (uint64_t)secluded_shutoff_mb * 1024 * 1024;
1496 } else {
1497 secluded_shutoff_trigger = max_mem / 2;
1498 }
1499
1500 /* ensure the headroom value is sensible and avoid underflows */
1501 assert(secluded_shutoff_trigger == 0 || secluded_shutoff_trigger > secluded_shutoff_headroom);
1502 }
1503
1504 #endif /* CONFIG_SECLUDED_MEMORY */
1505
1506 #if defined(__x86_64__)
1507
1508 /*
1509 * Decide how much memory we delay freeing at boot time.
1510 */
1511 uint32_t delay_above_gb;
1512 if (!PE_parse_boot_argn("delay_above_gb", &delay_above_gb, sizeof(delay_above_gb))) {
1513 delay_above_gb = DEFAULT_DELAY_ABOVE_PHYS_GB;
1514 }
1515
1516 if (delay_above_gb == 0) {
1517 delay_above_pnum = PPNUM_MAX;
1518 } else {
1519 delay_above_pnum = delay_above_gb * (1024 * 1024 * 1024 / PAGE_SIZE);
1520 }
1521
1522 /* make sure we have sane breathing room: 1G above low memory */
1523 if (delay_above_pnum <= max_valid_low_ppnum) {
1524 delay_above_pnum = max_valid_low_ppnum + ((1024 * 1024 * 1024) >> PAGE_SHIFT);
1525 }
1526
1527 if (delay_above_pnum < PPNUM_MAX) {
1528 printf("pmap_startup() delaying init/free of page nums > 0x%x\n", delay_above_pnum);
1529 }
1530
1531 #endif /* defined(__x86_64__) */
1532
1533 /*
1534 * Initialize and release the page frames.
1535 */
1536 kernel_debug_string_early("page_frame_init");
1537
1538 vm_page_array_beginning_addr = &vm_pages[0];
1539 vm_page_array_ending_addr = &vm_pages[npages]; /* used by ptr packing/unpacking code */
1540 #if VM_PAGE_PACKED_FROM_ARRAY
1541 if (npages >= VM_PAGE_PACKED_FROM_ARRAY) {
1542 panic("pmap_startup(): too many pages to support vm_page packing");
1543 }
1544 #endif
1545
1546 vm_delayed_count = 0;
1547
1548 absolutetime_to_nanoseconds(mach_absolute_time(), &start_ns);
1549 vm_pages_count = 0;
1550 for (i = 0; i < npages; i++) {
1551 /* Did we run out of pages? */
1552 if (!pmap_next_page(&phys_page)) {
1553 break;
1554 }
1555
1556 if (phys_page < max_valid_low_ppnum) {
1557 ++low_page_count;
1558 }
1559
1560 /* Are we at high enough pages to delay the rest? */
1561 if (low_page_count > vm_lopage_free_limit && phys_page > delay_above_pnum) {
1562 vm_delayed_count = pmap_free_pages();
1563 break;
1564 }
1565
1566 #if defined(__arm64__)
1567 if (i == 0) {
1568 vm_first_phys_ppnum = phys_page;
1569 patch_low_glo_vm_page_info((void *)vm_page_array_beginning_addr,
1570 (void *)vm_page_array_ending_addr, vm_first_phys_ppnum);
1571 }
1572 #endif /* defined(__arm64__) */
1573
1574 #if defined(__x86_64__)
1575 /* The x86 clump freeing code requires increasing ppn's to work correctly */
1576 if (i > 0) {
1577 assert(phys_page > vm_pages[i - 1].vmp_phys_page);
1578 }
1579 #endif
1580 ++vm_pages_count;
1581 vm_page_init(&vm_pages[i], phys_page, FALSE);
1582 if (fill) {
1583 fillPage(phys_page, fillval);
1584 }
1585 if (vm_himemory_mode) {
1586 vm_page_release_startup(&vm_pages[i]);
1587 }
1588 }
1589 vm_page_pages = vm_pages_count; /* used to report to user space */
1590
1591 if (!vm_himemory_mode) {
1592 do {
1593 if (!VMP_ERROR_GET(&vm_pages[--i])) { /* skip retired pages */
1594 vm_page_release_startup(&vm_pages[i]);
1595 }
1596 } while (i != 0);
1597 }
1598
1599 absolutetime_to_nanoseconds(mach_absolute_time(), &now_ns);
1600 printf("pmap_startup() init/release time: %lld microsec\n", (now_ns - start_ns) / NSEC_PER_USEC);
1601 printf("pmap_startup() delayed init/release of %d pages\n", vm_delayed_count);
1602
1603 #if defined(__LP64__)
1604 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[0]))) != &vm_pages[0]) {
1605 panic("VM_PAGE_PACK_PTR failed on &vm_pages[0] - %p", (void *)&vm_pages[0]);
1606 }
1607
1608 if ((vm_page_t)(VM_PAGE_UNPACK_PTR(VM_PAGE_PACK_PTR(&vm_pages[vm_pages_count - 1]))) != &vm_pages[vm_pages_count - 1]) {
1609 panic("VM_PAGE_PACK_PTR failed on &vm_pages[vm_pages_count-1] - %p", (void *)&vm_pages[vm_pages_count - 1]);
1610 }
1611 #endif
1612
1613 VM_CHECK_MEMORYSTATUS;
1614
1615 /*
1616 * We have to re-align virtual_space_start,
1617 * because pmap_steal_memory has been using it.
1618 */
1619 virtual_space_start = round_page(virtual_space_start);
1620 *startp = virtual_space_start;
1621 *endp = virtual_space_end;
1622 }
1623 #endif /* MACHINE_PAGES */
1624
1625 /*
1626  * Create the zone that represents the vm_pages[] array. Nothing ever allocates from
1627  * or frees to this zone. It's just here for reporting purposes via the zprint command.
1628 * This needs to be done after all initially delayed pages are put on the free lists.
1629 */
1630 static void
1631 vm_page_module_init_delayed(void)
1632 {
1633 (void)zone_create_ext("vm pages array", sizeof(struct vm_page),
1634 ZC_NONE, ZONE_ID_VM_PAGES, ^(zone_t z) {
1635 uint64_t vm_page_zone_pages, vm_page_array_zone_data_size;
1636
1637 zone_set_exhaustible(z, 0);
1638 /*
1639 * Reflect size and usage information for vm_pages[].
1640 */
1641
1642 z->z_elems_avail = (uint32_t)(vm_page_array_ending_addr - vm_pages);
1643 z->z_elems_free = z->z_elems_avail - vm_pages_count;
1644 zpercpu_get_cpu(z->z_stats, 0)->zs_mem_allocated =
1645 vm_pages_count * sizeof(struct vm_page);
1646 vm_page_array_zone_data_size = (uint64_t)vm_page_array_ending_addr - (uint64_t)vm_pages;
1647 vm_page_zone_pages = atop(round_page((vm_offset_t)vm_page_array_zone_data_size));
1648 z->z_wired_cur += vm_page_zone_pages;
1649 z->z_wired_hwm = z->z_wired_cur;
1650 z->z_va_cur = z->z_wired_cur;
1651 /* since zone accounts for these, take them out of stolen */
1652 VM_PAGE_MOVE_STOLEN(vm_page_zone_pages);
1653 });
1654 }
1655
1656 /*
1657 * Create the vm_pages zone. This is used for the vm_page structures for the pages
1658  * that are scavenged from other boot time usages by ml_static_mfree(). As such,
1659 * this needs to happen in early VM bootstrap.
1660 */
1661
1662 __startup_func
1663 static void
1664 vm_page_module_init(void)
1665 {
1666 vm_size_t vm_page_with_ppnum_size;
1667
1668 /*
1669 * Since the pointers to elements in this zone will be packed, they
1670  * must have an appropriate size, which is not necessarily what sizeof() reports.
1671 */
1672 vm_page_with_ppnum_size =
1673 (sizeof(struct vm_page_with_ppnum) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
1674 ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
1675
1676 vm_page_zone = zone_create_ext("vm pages", vm_page_with_ppnum_size,
1677 ZC_ALIGNMENT_REQUIRED | ZC_VM_LP64 | ZC_NOTBITAG,
1678 ZONE_ID_ANY, ^(zone_t z) {
1679 /*
1680 * The number "10" is a small number that is larger than the number
1681 * of fictitious pages that any single caller will attempt to allocate
1682 * without blocking.
1683 *
1684 * The largest such number at the moment is kmem_alloc()
1685 * when 2 guard pages are asked for. 10 is simply a somewhat larger number,
1686 * taking into account the 50% hysteresis the zone allocator uses.
1687 *
1688 * Note: this works at all because the zone allocator
1689 * doesn't ever allocate fictitious pages.
1690 */
1691 zone_raise_reserve(z, 10);
1692 });
1693 }
1694 STARTUP(ZALLOC, STARTUP_RANK_SECOND, vm_page_module_init);
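
/*
 * The element size computed in vm_page_module_init() above uses the standard
 * power-of-2 align-up idiom: (size + (align - 1)) & ~(align - 1). A minimal
 * sketch of that idiom follows, kept out of the build; the helper name is
 * hypothetical and exists only to illustrate the computation (for example,
 * a size of 100 with an alignment of 64 rounds up to 128).
 */
#if 0
static vm_size_t
vm_align_up_sketch(vm_size_t size, vm_size_t alignment)
{
	/* the mask trick is only valid when alignment is a power of 2 */
	assert((alignment & (alignment - 1)) == 0);
	return (size + (alignment - 1)) & ~(alignment - 1);
}
#endif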
1695
1696 /*
1697 * Routine: vm_page_create
1698 * Purpose:
1699 * After the VM system is up, machine-dependent code
1700 * may stumble across more physical memory, for example
1701 * memory that it had been reserving for a frame buffer.
1702 * vm_page_create turns this memory into available pages.
1703 */
1704
1705 void
1706 vm_page_create(
1707 ppnum_t start,
1708 ppnum_t end)
1709 {
1710 ppnum_t phys_page;
1711 vm_page_t m;
1712
1713 for (phys_page = start;
1714 phys_page < end;
1715 phys_page++) {
1716 m = vm_page_grab_fictitious_common(phys_page, TRUE);
1717 m->vmp_fictitious = FALSE;
1718 pmap_clear_noencrypt(phys_page);
1719
1720
1721 vm_free_page_lock();
1722 vm_page_pages++;
1723 vm_free_page_unlock();
1724 vm_page_release(m, FALSE);
1725 }
1726 }
1727
1728
1729 /*
1730 * vm_page_hash:
1731 *
1732 * Distributes the object/offset key pair among hash buckets.
1733 *
1734 * NOTE: The bucket count must be a power of 2
1735 */
1736 #define vm_page_hash(object, offset) (\
1737 ( (natural_t)((uintptr_t)object * vm_page_bucket_hash) + ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash))\
1738 & vm_page_hash_mask)
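
/*
 * A minimal sketch (not compiled) of why the bucket count must be a power
 * of 2: with vm_page_hash_mask == bucket_count - 1, the final "& mask" is
 * equivalent to "% bucket_count" but avoids an integer divide. The helper
 * below is hypothetical and only restates the macro above as a function.
 */
#if 0
static natural_t
vm_page_hash_sketch(vm_object_t object, vm_object_offset_t offset)
{
	natural_t bucket_count = vm_page_hash_mask + 1;

	/* the mask is only a valid modulo when the bucket count is a power of 2 */
	assert((bucket_count & (bucket_count - 1)) == 0);

	return ((natural_t)((uintptr_t)object * vm_page_bucket_hash) +
	       ((uint32_t)atop_64(offset) ^ vm_page_bucket_hash)) & vm_page_hash_mask;
}
#endif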
1739
1740
1741 /*
1742 * vm_page_insert: [ internal use only ]
1743 *
1744 * Inserts the given mem entry into the object/object-page
1745 * table and object list.
1746 *
1747 * The object must be locked.
1748 */
1749 void
1750 vm_page_insert(
1751 vm_page_t mem,
1752 vm_object_t object,
1753 vm_object_offset_t offset)
1754 {
1755 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, TRUE, FALSE, FALSE, NULL);
1756 }
1757
1758 void
1759 vm_page_insert_wired(
1760 vm_page_t mem,
1761 vm_object_t object,
1762 vm_object_offset_t offset,
1763 vm_tag_t tag)
1764 {
1765 vm_page_insert_internal(mem, object, offset, tag, FALSE, TRUE, FALSE, FALSE, NULL);
1766 }
1767
1768 void
1769 vm_page_insert_internal(
1770 vm_page_t mem,
1771 vm_object_t object,
1772 vm_object_offset_t offset,
1773 vm_tag_t tag,
1774 boolean_t queues_lock_held,
1775 boolean_t insert_in_hash,
1776 boolean_t batch_pmap_op,
1777 boolean_t batch_accounting,
1778 uint64_t *delayed_ledger_update)
1779 {
1780 vm_page_bucket_t *bucket;
1781 lck_spin_t *bucket_lock;
1782 int hash_id;
1783 task_t owner;
1784 int ledger_idx_volatile;
1785 int ledger_idx_nonvolatile;
1786 int ledger_idx_volatile_compressed;
1787 int ledger_idx_nonvolatile_compressed;
1788 boolean_t do_footprint;
1789
1790 #if 0
1791 /*
1792 * we may not hold the page queue lock
1793 * so this check isn't safe to make
1794 */
1795 VM_PAGE_CHECK(mem);
1796 #endif
1797
1798 assertf(page_aligned(offset), "0x%llx\n", offset);
1799
1800 assert(!VM_PAGE_WIRED(mem) || mem->vmp_private || mem->vmp_fictitious || (tag != VM_KERN_MEMORY_NONE));
1801
1802 vm_object_lock_assert_exclusive(object);
1803 LCK_MTX_ASSERT(&vm_page_queue_lock,
1804 queues_lock_held ? LCK_MTX_ASSERT_OWNED
1805 : LCK_MTX_ASSERT_NOTOWNED);
1806
1807 if (queues_lock_held == FALSE) {
1808 assert(!VM_PAGE_PAGEABLE(mem));
1809 }
1810
1811 if (insert_in_hash == TRUE) {
1812 #if DEBUG || VM_PAGE_BUCKETS_CHECK
1813 if (mem->vmp_tabled || mem->vmp_object) {
1814 panic("vm_page_insert: page %p for (obj=%p,off=0x%llx) "
1815 "already in (obj=%p,off=0x%llx)",
1816 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
1817 }
1818 #endif
1819 if (object->internal && (offset >= object->vo_size)) {
1820 panic("vm_page_insert_internal: (page=%p,obj=%p,off=0x%llx,size=0x%llx) inserted at offset past object bounds",
1821 mem, object, offset, object->vo_size);
1822 }
1823
1824 assert(vm_page_lookup(object, offset) == VM_PAGE_NULL);
1825
1826 /*
1827 * Record the object/offset pair in this page
1828 */
1829
1830 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
1831 mem->vmp_offset = offset;
1832
1833 #if CONFIG_SECLUDED_MEMORY
1834 if (object->eligible_for_secluded) {
1835 vm_page_secluded.eligible_for_secluded++;
1836 }
1837 #endif /* CONFIG_SECLUDED_MEMORY */
1838
1839 /*
1840 * Insert it into the object/offset hash table
1841 */
1842 hash_id = vm_page_hash(object, offset);
1843 bucket = &vm_page_buckets[hash_id];
1844 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
1845
1846 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
1847
1848 mem->vmp_next_m = bucket->page_list;
1849 bucket->page_list = VM_PAGE_PACK_PTR(mem);
1850 assert(mem == (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)));
1851
1852 #if MACH_PAGE_HASH_STATS
1853 if (++bucket->cur_count > bucket->hi_count) {
1854 bucket->hi_count = bucket->cur_count;
1855 }
1856 #endif /* MACH_PAGE_HASH_STATS */
1857 mem->vmp_hashed = TRUE;
1858 lck_spin_unlock(bucket_lock);
1859 }
1860
1861 {
1862 unsigned int cache_attr;
1863
1864 cache_attr = object->wimg_bits & VM_WIMG_MASK;
1865
1866 if (cache_attr != VM_WIMG_USE_DEFAULT) {
1867 PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op);
1868 }
1869 }
1870 /*
1871 * Now link into the object's list of backed pages.
1872 */
1873 vm_page_queue_enter(&object->memq, mem, vmp_listq);
1874 object->memq_hint = mem;
1875 mem->vmp_tabled = TRUE;
1876
1877 /*
1878 * Show that the object has one more resident page.
1879 */
1880
1881 object->resident_page_count++;
1882 if (VM_PAGE_WIRED(mem)) {
1883 assert(mem->vmp_wire_count > 0);
1884 VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
1885 VM_OBJECT_WIRED_PAGE_ADD(object, mem);
1886 VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
1887 }
1888 assert(object->resident_page_count >= object->wired_page_count);
1889
1890 #if DEVELOPMENT || DEBUG
1891 if (object->object_is_shared_cache &&
1892 object->pager != NULL &&
1893 object->pager->mo_pager_ops == &shared_region_pager_ops) {
1894 int new, old;
1895 assert(!object->internal);
1896 new = OSAddAtomic(+1, &shared_region_pagers_resident_count);
1897 do {
1898 old = shared_region_pagers_resident_peak;
1899 } while (old < new &&
1900 !OSCompareAndSwap(old, new, &shared_region_pagers_resident_peak));
1901 }
1902 #endif /* DEVELOPMENT || DEBUG */
1903
1904 if (batch_accounting == FALSE) {
1905 if (object->internal) {
1906 OSAddAtomic(1, &vm_page_internal_count);
1907 } else {
1908 OSAddAtomic(1, &vm_page_external_count);
1909 }
1910 }
1911
1912 /*
1913 * It wouldn't make sense to insert a "reusable" page in
1914 * an object (the page would have been marked "reusable" only
1915 * at the time of a madvise(MADV_FREE_REUSABLE) if it was already
1916 * in the object at that time).
1917 * But a page could be inserted into an "all_reusable" object, if
1918 * something faults it in (a vm_read() from another task or a
1919 * "use-after-free" issue in user space, for example). It can
1920 * also happen if we're relocating a page from that object to
1921 * a different physical page during a physically-contiguous
1922 * allocation.
1923 */
1924 assert(!mem->vmp_reusable);
1925 if (object->all_reusable) {
1926 OSAddAtomic(+1, &vm_page_stats_reusable.reusable_count);
1927 }
1928
1929 if (object->purgable == VM_PURGABLE_DENY &&
1930 !object->vo_ledger_tag) {
1931 owner = TASK_NULL;
1932 } else {
1933 owner = VM_OBJECT_OWNER(object);
1934 vm_object_ledger_tag_ledgers(object,
1935 &ledger_idx_volatile,
1936 &ledger_idx_nonvolatile,
1937 &ledger_idx_volatile_compressed,
1938 &ledger_idx_nonvolatile_compressed,
1939 &do_footprint);
1940 }
1941 if (owner &&
1942 (object->purgable == VM_PURGABLE_NONVOLATILE ||
1943 object->purgable == VM_PURGABLE_DENY ||
1944 VM_PAGE_WIRED(mem))) {
1945 if (delayed_ledger_update) {
1946 *delayed_ledger_update += PAGE_SIZE;
1947 } else {
1948 /* more non-volatile bytes */
1949 ledger_credit(owner->ledger,
1950 ledger_idx_nonvolatile,
1951 PAGE_SIZE);
1952 if (do_footprint) {
1953 /* more footprint */
1954 ledger_credit(owner->ledger,
1955 task_ledgers.phys_footprint,
1956 PAGE_SIZE);
1957 }
1958 }
1959 } else if (owner &&
1960 (object->purgable == VM_PURGABLE_VOLATILE ||
1961 object->purgable == VM_PURGABLE_EMPTY)) {
1962 assert(!VM_PAGE_WIRED(mem));
1963 /* more volatile bytes */
1964 ledger_credit(owner->ledger,
1965 ledger_idx_volatile,
1966 PAGE_SIZE);
1967 }
1968
1969 if (object->purgable == VM_PURGABLE_VOLATILE) {
1970 if (VM_PAGE_WIRED(mem)) {
1971 OSAddAtomic(+1, &vm_page_purgeable_wired_count);
1972 } else {
1973 OSAddAtomic(+1, &vm_page_purgeable_count);
1974 }
1975 } else if (object->purgable == VM_PURGABLE_EMPTY &&
1976 mem->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
1977 /*
1978 * This page belongs to a purged VM object but hasn't
1979 * been purged (because it was "busy").
1980 * It's in the "throttled" queue and hence not
1981 * visible to vm_pageout_scan(). Move it to a pageable
1982 * queue, so that it can eventually be reclaimed, instead
1983 * of lingering in the "empty" object.
1984 */
1985 if (queues_lock_held == FALSE) {
1986 vm_page_lockspin_queues();
1987 }
1988 vm_page_deactivate(mem);
1989 if (queues_lock_held == FALSE) {
1990 vm_page_unlock_queues();
1991 }
1992 }
1993
1994 #if VM_OBJECT_TRACKING_OP_MODIFIED
1995 if (vm_object_tracking_btlog &&
1996 object->internal &&
1997 object->resident_page_count == 0 &&
1998 object->pager == NULL &&
1999 object->shadow != NULL &&
2000 object->shadow->copy == object) {
2001 btlog_record(vm_object_tracking_btlog, object,
2002 VM_OBJECT_TRACKING_OP_MODIFIED,
2003 btref_get(__builtin_frame_address(0), 0));
2004 }
2005 #endif /* VM_OBJECT_TRACKING_OP_MODIFIED */
2006 }
2007
2008 /*
2009 * vm_page_replace:
2010 *
2011 * Exactly like vm_page_insert, except that we first
2012 * remove any existing page at the given offset in object.
2013 *
2014 * The object must be locked.
2015 */
2016 void
2017 vm_page_replace(
2018 vm_page_t mem,
2019 vm_object_t object,
2020 vm_object_offset_t offset)
2021 {
2022 vm_page_bucket_t *bucket;
2023 vm_page_t found_m = VM_PAGE_NULL;
2024 lck_spin_t *bucket_lock;
2025 int hash_id;
2026
2027 #if 0
2028 /*
2029 * we don't hold the page queue lock
2030 * so this check isn't safe to make
2031 */
2032 VM_PAGE_CHECK(mem);
2033 #endif
2034 vm_object_lock_assert_exclusive(object);
2035 #if DEBUG || VM_PAGE_BUCKETS_CHECK
2036 if (mem->vmp_tabled || mem->vmp_object) {
2037 panic("vm_page_replace: page %p for (obj=%p,off=0x%llx) "
2038 "already in (obj=%p,off=0x%llx)",
2039 mem, object, offset, VM_PAGE_OBJECT(mem), mem->vmp_offset);
2040 }
2041 #endif
2042 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2043
2044 assert(!VM_PAGE_PAGEABLE(mem));
2045
2046 /*
2047 * Record the object/offset pair in this page
2048 */
2049 mem->vmp_object = VM_PAGE_PACK_OBJECT(object);
2050 mem->vmp_offset = offset;
2051
2052 /*
2053 * Insert it into the object/offset hash table,
2054 * replacing any page that might have been there.
2055 */
2056
2057 hash_id = vm_page_hash(object, offset);
2058 bucket = &vm_page_buckets[hash_id];
2059 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2060
2061 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2062
2063 if (bucket->page_list) {
2064 vm_page_packed_t *mp = &bucket->page_list;
2065 vm_page_t m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp));
2066
2067 do {
2068 /*
2069 * compare packed object pointers
2070 */
2071 if (m->vmp_object == mem->vmp_object && m->vmp_offset == offset) {
2072 /*
2073 * Remove old page from hash list
2074 */
2075 *mp = m->vmp_next_m;
2076 m->vmp_hashed = FALSE;
2077 m->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2078
2079 found_m = m;
2080 break;
2081 }
2082 mp = &m->vmp_next_m;
2083 } while ((m = (vm_page_t)(VM_PAGE_UNPACK_PTR(*mp))));
2084
2085 mem->vmp_next_m = bucket->page_list;
2086 } else {
2087 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2088 }
2089 /*
2090 * insert new page at head of hash list
2091 */
2092 bucket->page_list = VM_PAGE_PACK_PTR(mem);
2093 mem->vmp_hashed = TRUE;
2094
2095 lck_spin_unlock(bucket_lock);
2096
2097 if (found_m) {
2098 /*
2099 * there was already a page at the specified
2100 * offset for this object... remove it from
2101 * the object and free it back to the free list
2102 */
2103 vm_page_free_unlocked(found_m, FALSE);
2104 }
2105 vm_page_insert_internal(mem, object, offset, VM_KERN_MEMORY_NONE, FALSE, FALSE, FALSE, FALSE, NULL);
2106 }
2107
2108 /*
2109 * vm_page_remove: [ internal use only ]
2110 *
2111 * Removes the given mem entry from the object/offset-page
2112 * table and the object page list.
2113 *
2114 * The object must be locked.
2115 */
2116
2117 void
2118 vm_page_remove(
2119 vm_page_t mem,
2120 boolean_t remove_from_hash)
2121 {
2122 vm_page_bucket_t *bucket;
2123 vm_page_t this;
2124 lck_spin_t *bucket_lock;
2125 int hash_id;
2126 task_t owner;
2127 vm_object_t m_object;
2128 int ledger_idx_volatile;
2129 int ledger_idx_nonvolatile;
2130 int ledger_idx_volatile_compressed;
2131 int ledger_idx_nonvolatile_compressed;
2132 int do_footprint;
2133
2134 m_object = VM_PAGE_OBJECT(mem);
2135
2136 vm_object_lock_assert_exclusive(m_object);
2137 assert(mem->vmp_tabled);
2138 assert(!mem->vmp_cleaning);
2139 assert(!mem->vmp_laundry);
2140
2141 if (VM_PAGE_PAGEABLE(mem)) {
2142 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2143 }
2144 #if 0
2145 /*
2146 * we don't hold the page queue lock
2147 * so this check isn't safe to make
2148 */
2149 VM_PAGE_CHECK(mem);
2150 #endif
2151 if (remove_from_hash == TRUE) {
2152 /*
2153 * Remove from the object/offset hash table
2154 */
2155 hash_id = vm_page_hash(m_object, mem->vmp_offset);
2156 bucket = &vm_page_buckets[hash_id];
2157 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2158
2159 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2160
2161 if ((this = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list))) == mem) {
2162 /* optimize for common case */
2163
2164 bucket->page_list = mem->vmp_next_m;
2165 } else {
2166 vm_page_packed_t *prev;
2167
2168 for (prev = &this->vmp_next_m;
2169 (this = (vm_page_t)(VM_PAGE_UNPACK_PTR(*prev))) != mem;
2170 prev = &this->vmp_next_m) {
2171 continue;
2172 }
2173 *prev = this->vmp_next_m;
2174 }
2175 #if MACH_PAGE_HASH_STATS
2176 bucket->cur_count--;
2177 #endif /* MACH_PAGE_HASH_STATS */
2178 mem->vmp_hashed = FALSE;
2179 this->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
2180 lck_spin_unlock(bucket_lock);
2181 }
2182 /*
2183 * Now remove from the object's list of backed pages.
2184 */
2185
2186 vm_page_remove_internal(mem);
2187
2188 /*
2189 * And show that the object has one fewer resident
2190 * page.
2191 */
2192
2193 assert(m_object->resident_page_count > 0);
2194 m_object->resident_page_count--;
2195
2196 #if DEVELOPMENT || DEBUG
2197 if (m_object->object_is_shared_cache &&
2198 m_object->pager != NULL &&
2199 m_object->pager->mo_pager_ops == &shared_region_pager_ops) {
2200 assert(!m_object->internal);
2201 OSAddAtomic(-1, &shared_region_pagers_resident_count);
2202 }
2203 #endif /* DEVELOPMENT || DEBUG */
2204
2205 if (m_object->internal) {
2206 #if DEBUG
2207 assert(vm_page_internal_count);
2208 #endif /* DEBUG */
2209
2210 OSAddAtomic(-1, &vm_page_internal_count);
2211 } else {
2212 assert(vm_page_external_count);
2213 OSAddAtomic(-1, &vm_page_external_count);
2214
2215 if (mem->vmp_xpmapped) {
2216 assert(vm_page_xpmapped_external_count);
2217 OSAddAtomic(-1, &vm_page_xpmapped_external_count);
2218 }
2219 }
2220 if (!m_object->internal &&
2221 m_object->cached_list.next &&
2222 m_object->cached_list.prev) {
2223 if (m_object->resident_page_count == 0) {
2224 vm_object_cache_remove(m_object);
2225 }
2226 }
2227
2228 if (VM_PAGE_WIRED(mem)) {
2229 assert(mem->vmp_wire_count > 0);
2230 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
2231 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
2232 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
2233 }
2234 assert(m_object->resident_page_count >=
2235 m_object->wired_page_count);
2236 if (mem->vmp_reusable) {
2237 assert(m_object->reusable_page_count > 0);
2238 m_object->reusable_page_count--;
2239 assert(m_object->reusable_page_count <=
2240 m_object->resident_page_count);
2241 mem->vmp_reusable = FALSE;
2242 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2243 vm_page_stats_reusable.reused_remove++;
2244 } else if (m_object->all_reusable) {
2245 OSAddAtomic(-1, &vm_page_stats_reusable.reusable_count);
2246 vm_page_stats_reusable.reused_remove++;
2247 }
2248
2249 if (m_object->purgable == VM_PURGABLE_DENY &&
2250 !m_object->vo_ledger_tag) {
2251 owner = TASK_NULL;
2252 } else {
2253 owner = VM_OBJECT_OWNER(m_object);
2254 vm_object_ledger_tag_ledgers(m_object,
2255 &ledger_idx_volatile,
2256 &ledger_idx_nonvolatile,
2257 &ledger_idx_volatile_compressed,
2258 &ledger_idx_nonvolatile_compressed,
2259 &do_footprint);
2260 }
2261 if (owner &&
2262 (m_object->purgable == VM_PURGABLE_NONVOLATILE ||
2263 m_object->purgable == VM_PURGABLE_DENY ||
2264 VM_PAGE_WIRED(mem))) {
2265 /* less non-volatile bytes */
2266 ledger_debit(owner->ledger,
2267 ledger_idx_nonvolatile,
2268 PAGE_SIZE);
2269 if (do_footprint) {
2270 /* less footprint */
2271 ledger_debit(owner->ledger,
2272 task_ledgers.phys_footprint,
2273 PAGE_SIZE);
2274 }
2275 } else if (owner &&
2276 (m_object->purgable == VM_PURGABLE_VOLATILE ||
2277 m_object->purgable == VM_PURGABLE_EMPTY)) {
2278 assert(!VM_PAGE_WIRED(mem));
2279 /* less volatile bytes */
2280 ledger_debit(owner->ledger,
2281 ledger_idx_volatile,
2282 PAGE_SIZE);
2283 }
2284 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
2285 if (VM_PAGE_WIRED(mem)) {
2286 assert(vm_page_purgeable_wired_count > 0);
2287 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
2288 } else {
2289 assert(vm_page_purgeable_count > 0);
2290 OSAddAtomic(-1, &vm_page_purgeable_count);
2291 }
2292 }
2293
2294 if (m_object->set_cache_attr == TRUE) {
2295 pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), 0);
2296 }
2297
2298 mem->vmp_tabled = FALSE;
2299 mem->vmp_object = 0;
2300 mem->vmp_offset = (vm_object_offset_t) -1;
2301 }
2302
2303
2304 /*
2305 * vm_page_lookup:
2306 *
2307 * Returns the page associated with the object/offset
2308 * pair specified; if none is found, VM_PAGE_NULL is returned.
2309 *
2310 * The object must be locked. No side effects.
2311 */
2312
2313 #define VM_PAGE_HASH_LOOKUP_THRESHOLD 10
2314
2315 #if DEBUG_VM_PAGE_LOOKUP
2316
2317 struct {
2318 uint64_t vpl_total;
2319 uint64_t vpl_empty_obj;
2320 uint64_t vpl_bucket_NULL;
2321 uint64_t vpl_hit_hint;
2322 uint64_t vpl_hit_hint_next;
2323 uint64_t vpl_hit_hint_prev;
2324 uint64_t vpl_fast;
2325 uint64_t vpl_slow;
2326 uint64_t vpl_hit;
2327 uint64_t vpl_miss;
2328
2329 uint64_t vpl_fast_elapsed;
2330 uint64_t vpl_slow_elapsed;
2331 } vm_page_lookup_stats __attribute__((aligned(8)));
2332
2333 #endif
2334
2335 #define KDP_VM_PAGE_WALK_MAX 1000
2336
2337 vm_page_t
2338 kdp_vm_page_lookup(
2339 vm_object_t object,
2340 vm_object_offset_t offset)
2341 {
2342 vm_page_t cur_page;
2343 int num_traversed = 0;
2344
2345 if (not_in_kdp) {
2346 panic("panic: kdp_vm_page_lookup done outside of kernel debugger");
2347 }
2348
2349 vm_page_queue_iterate(&object->memq, cur_page, vmp_listq) {
2350 if (cur_page->vmp_offset == offset) {
2351 return cur_page;
2352 }
2353 num_traversed++;
2354
2355 if (num_traversed >= KDP_VM_PAGE_WALK_MAX) {
2356 return VM_PAGE_NULL;
2357 }
2358 }
2359
2360 return VM_PAGE_NULL;
2361 }
2362
2363 vm_page_t
2364 vm_page_lookup(
2365 vm_object_t object,
2366 vm_object_offset_t offset)
2367 {
2368 vm_page_t mem;
2369 vm_page_bucket_t *bucket;
2370 vm_page_queue_entry_t qe;
2371 lck_spin_t *bucket_lock = NULL;
2372 int hash_id;
2373 #if DEBUG_VM_PAGE_LOOKUP
2374 uint64_t start, elapsed;
2375
2376 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_total);
2377 #endif
2378
2379 #if CONFIG_KERNEL_TBI
2380 if (VM_KERNEL_ADDRESS(offset)) {
2381 offset = VM_KERNEL_STRIP_UPTR(offset);
2382 }
2383 #endif /* CONFIG_KERNEL_TBI */
2384
2385 vm_object_lock_assert_held(object);
2386 assertf(page_aligned(offset), "offset 0x%llx\n", offset);
2387
2388 if (object->resident_page_count == 0) {
2389 #if DEBUG_VM_PAGE_LOOKUP
2390 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_empty_obj);
2391 #endif
2392 return VM_PAGE_NULL;
2393 }
2394
2395 mem = object->memq_hint;
2396
2397 if (mem != VM_PAGE_NULL) {
2398 assert(VM_PAGE_OBJECT(mem) == object);
2399
2400 if (mem->vmp_offset == offset) {
2401 #if DEBUG_VM_PAGE_LOOKUP
2402 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint);
2403 #endif
2404 return mem;
2405 }
2406 qe = (vm_page_queue_entry_t)vm_page_queue_next(&mem->vmp_listq);
2407
2408 if (!vm_page_queue_end(&object->memq, qe)) {
2409 vm_page_t next_page;
2410
2411 next_page = (vm_page_t)((uintptr_t)qe);
2412 assert(VM_PAGE_OBJECT(next_page) == object);
2413
2414 if (next_page->vmp_offset == offset) {
2415 object->memq_hint = next_page; /* new hint */
2416 #if DEBUG_VM_PAGE_LOOKUP
2417 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_next);
2418 #endif
2419 return next_page;
2420 }
2421 }
2422 qe = (vm_page_queue_entry_t)vm_page_queue_prev(&mem->vmp_listq);
2423
2424 if (!vm_page_queue_end(&object->memq, qe)) {
2425 vm_page_t prev_page;
2426
2427 prev_page = (vm_page_t)((uintptr_t)qe);
2428 assert(VM_PAGE_OBJECT(prev_page) == object);
2429
2430 if (prev_page->vmp_offset == offset) {
2431 object->memq_hint = prev_page; /* new hint */
2432 #if DEBUG_VM_PAGE_LOOKUP
2433 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit_hint_prev);
2434 #endif
2435 return prev_page;
2436 }
2437 }
2438 }
2439 /*
2440 * Search the hash table for this object/offset pair
2441 */
2442 hash_id = vm_page_hash(object, offset);
2443 bucket = &vm_page_buckets[hash_id];
2444
2445 /*
2446 * since we hold the object lock, we are guaranteed that no
2447 * new pages can be inserted into this object... this in turn
2448 * guarantees that the page we're looking for can't exist
2449 * if the bucket it hashes to is currently NULL even when looked
2450 * at outside the scope of the hash bucket lock... this is a
2451 * really cheap optimization to avoid taking the lock
2452 */
2453 if (!bucket->page_list) {
2454 #if DEBUG_VM_PAGE_LOOKUP
2455 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_bucket_NULL);
2456 #endif
2457 return VM_PAGE_NULL;
2458 }
2459
2460 #if DEBUG_VM_PAGE_LOOKUP
2461 start = mach_absolute_time();
2462 #endif
2463 if (object->resident_page_count <= VM_PAGE_HASH_LOOKUP_THRESHOLD) {
2464 /*
2465 * on average, it's roughly 3 times faster to run a short memq list
2466 * than to take the spin lock and go through the hash list
2467 */
2468 mem = (vm_page_t)vm_page_queue_first(&object->memq);
2469
2470 while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2471 if (mem->vmp_offset == offset) {
2472 break;
2473 }
2474
2475 mem = (vm_page_t)vm_page_queue_next(&mem->vmp_listq);
2476 }
2477 if (vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)mem)) {
2478 mem = NULL;
2479 }
2480 } else {
2481 vm_page_object_t packed_object;
2482
2483 packed_object = VM_PAGE_PACK_OBJECT(object);
2484
2485 bucket_lock = &vm_page_bucket_locks[hash_id / BUCKETS_PER_LOCK];
2486
2487 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
2488
2489 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
2490 mem != VM_PAGE_NULL;
2491 mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m))) {
2492 #if 0
2493 /*
2494 * we don't hold the page queue lock
2495 * so this check isn't safe to make
2496 */
2497 VM_PAGE_CHECK(mem);
2498 #endif
2499 if ((mem->vmp_object == packed_object) && (mem->vmp_offset == offset)) {
2500 break;
2501 }
2502 }
2503 lck_spin_unlock(bucket_lock);
2504 }
2505
2506 #if DEBUG_VM_PAGE_LOOKUP
2507 elapsed = mach_absolute_time() - start;
2508
2509 if (bucket_lock) {
2510 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_slow);
2511 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_slow_elapsed);
2512 } else {
2513 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_fast);
2514 OSAddAtomic64(elapsed, &vm_page_lookup_stats.vpl_fast_elapsed);
2515 }
2516 if (mem != VM_PAGE_NULL) {
2517 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_hit);
2518 } else {
2519 OSAddAtomic64(1, &vm_page_lookup_stats.vpl_miss);
2520 }
2521 #endif
2522 if (mem != VM_PAGE_NULL) {
2523 assert(VM_PAGE_OBJECT(mem) == object);
2524
2525 object->memq_hint = mem;
2526 }
2527 return mem;
2528 }
2529
2530
2531 /*
2532 * vm_page_rename:
2533 *
2534 * Move the given memory entry from its
2535 * current object to the specified target object/offset.
2536 *
2537 * The object must be locked.
2538 */
2539 void
2540 vm_page_rename(
2541 vm_page_t mem,
2542 vm_object_t new_object,
2543 vm_object_offset_t new_offset)
2544 {
2545 boolean_t internal_to_external, external_to_internal;
2546 vm_tag_t tag;
2547 vm_object_t m_object;
2548
2549 m_object = VM_PAGE_OBJECT(mem);
2550
2551 assert(m_object != new_object);
2552 assert(m_object);
2553
2554 /*
2555 * Changes to mem->vmp_object require the page lock because
2556 * the pageout daemon uses that lock to get the object.
2557 */
2558 vm_page_lockspin_queues();
2559
2560 internal_to_external = FALSE;
2561 external_to_internal = FALSE;
2562
2563 if (mem->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
2564 /*
2565 * it's much easier to get the vm_page_pageable_xxx accounting correct
2566 * if we first move the page to the active queue... it's going to end
2567 * up there anyway, and we don't do vm_page_rename's frequently enough
2568 * for this to matter.
2569 */
2570 vm_page_queues_remove(mem, FALSE);
2571 vm_page_activate(mem);
2572 }
2573 if (VM_PAGE_PAGEABLE(mem)) {
2574 if (m_object->internal && !new_object->internal) {
2575 internal_to_external = TRUE;
2576 }
2577 if (!m_object->internal && new_object->internal) {
2578 external_to_internal = TRUE;
2579 }
2580 }
2581
2582 tag = m_object->wire_tag;
2583 vm_page_remove(mem, TRUE);
2584 vm_page_insert_internal(mem, new_object, new_offset, tag, TRUE, TRUE, FALSE, FALSE, NULL);
2585
2586 if (internal_to_external) {
2587 vm_page_pageable_internal_count--;
2588 vm_page_pageable_external_count++;
2589 } else if (external_to_internal) {
2590 vm_page_pageable_external_count--;
2591 vm_page_pageable_internal_count++;
2592 }
2593
2594 vm_page_unlock_queues();
2595 }
2596
2597 /*
2598 * vm_page_init:
2599 *
2600 * Initialize the fields in a new page.
2601 * This takes a structure with random values and initializes it
2602 * so that it can be given to vm_page_release or vm_page_insert.
2603 */
2604 void
2605 vm_page_init(
2606 vm_page_t mem,
2607 ppnum_t phys_page,
2608 boolean_t lopage)
2609 {
2610 uint_t i;
2611 uintptr_t *p;
2612
2613 assert(phys_page);
2614
2615 #if DEBUG
2616 if ((phys_page != vm_page_fictitious_addr) && (phys_page != vm_page_guard_addr)) {
2617 if (!(pmap_valid_page(phys_page))) {
2618 panic("vm_page_init: non-DRAM phys_page 0x%x", phys_page);
2619 }
2620 }
2621 #endif /* DEBUG */
2622
2623 /*
2624 * Initialize the fields of the vm_page. If adding any new fields to vm_page,
2625 * try to use initial values which match 0. This minimizes the number of writes
2626 * needed for boot-time initialization.
2627 *
2628 * Kernel bzero() isn't an inline yet, so do it by hand for performance.
2629 */
2630 assert(VM_PAGE_NOT_ON_Q == 0);
2631 assert(sizeof(*mem) % sizeof(uintptr_t) == 0);
2632 for (p = (uintptr_t *)(void *)mem, i = sizeof(*mem) / sizeof(uintptr_t); i != 0; --i) {
2633 *p++ = 0;
2634 }
2635 mem->vmp_offset = (vm_object_offset_t)-1;
2636 mem->vmp_busy = TRUE;
2637 mem->vmp_lopage = lopage;
2638
2639 VM_PAGE_SET_PHYS_PAGE(mem, phys_page);
2640 #if 0
2641 /*
2642 * we're leaving this turned off for now... currently pages
2643 * come off the free list and are either immediately dirtied/referenced
2644 * due to zero-fill or COW faults, or are used to read or write files...
2645 * in the file I/O case, the UPL mechanism takes care of clearing
2646 * the state of the HW ref/mod bits in a somewhat fragile way.
2647 * Since we may change the way this works in the future (to toughen it up),
2648 * I'm leaving this as a reminder of where these bits could get cleared
2649 */
2650
2651 /*
2652 * make sure both the h/w referenced and modified bits are
2653 * clear at this point... we are especially dependent on
2654 * not finding a 'stale' h/w modified in a number of spots
2655 * once this page goes back into use
2656 */
2657 pmap_clear_refmod(phys_page, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
2658 #endif
2659 }
2660
2661 /*
2662 * vm_page_grab_fictitious:
2663 *
2664 * Remove a fictitious page from the free list.
2665 * Returns VM_PAGE_NULL if there are no free pages.
2666 */
2667
2668 static vm_page_t
2669 vm_page_grab_fictitious_common(ppnum_t phys_addr, boolean_t canwait)
2670 {
2671 vm_page_t m;
2672
2673 m = zalloc_flags(vm_page_zone, canwait ? Z_WAITOK : Z_NOWAIT);
2674 if (m) {
2675 vm_page_init(m, phys_addr, FALSE);
2676 m->vmp_fictitious = TRUE;
2677 }
2678 return m;
2679 }
2680
2681 vm_page_t
2682 vm_page_grab_fictitious(boolean_t canwait)
2683 {
2684 return vm_page_grab_fictitious_common(vm_page_fictitious_addr, canwait);
2685 }
2686
2687 int vm_guard_count;
2688
2689
2690 vm_page_t
2691 vm_page_grab_guard(boolean_t canwait)
2692 {
2693 vm_page_t page;
2694 page = vm_page_grab_fictitious_common(vm_page_guard_addr, canwait);
2695 if (page) {
2696 OSAddAtomic(1, &vm_guard_count);
2697 }
2698 return page;
2699 }
2700
2701
2702 /*
2703 * vm_page_release_fictitious:
2704 *
2705 * Release a fictitious page to the zone pool
2706 */
2707 void
2708 vm_page_release_fictitious(
2709 vm_page_t m)
2710 {
2711 assert((m->vmp_q_state == VM_PAGE_NOT_ON_Q) || (m->vmp_q_state == VM_PAGE_IS_WIRED));
2712 assert(m->vmp_fictitious);
2713 assert(VM_PAGE_GET_PHYS_PAGE(m) == vm_page_fictitious_addr ||
2714 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr);
2715 assert(!m->vmp_realtime);
2716
2717 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
2718 OSAddAtomic(-1, &vm_guard_count);
2719 }
2720
2721 zfree(vm_page_zone, m);
2722 }
2723
2724 /*
2725 * vm_pool_low():
2726 *
2727 * Return true if it is not likely that a non-vm_privileged thread
2728 * can get memory without blocking. Advisory only, since the
2729 * situation may change under us.
2730 */
2731 bool
2732 vm_pool_low(void)
2733 {
2734 /* No locking, at worst we will fib. */
2735 return vm_page_free_count <= vm_page_free_reserved;
2736 }
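
/*
 * Illustrative sketch only (the caller below is hypothetical and not part
 * of this file): vm_pool_low() is advisory, so a typical use is to shrink
 * an optional request rather than to rely on the answer for correctness.
 */
#if 0
static vm_size_t
example_choose_request_size(vm_size_t preferred, vm_size_t minimum)
{
	/* fall back to the minimum when a non-privileged grab may block */
	return vm_pool_low() ? minimum : preferred;
}
#endif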
2737
2738 boolean_t vm_darkwake_mode = FALSE;
2739
2740 /*
2741 * vm_update_darkwake_mode():
2742 *
2743 * Tells the VM that the system is in / out of darkwake.
2744 *
2745 * Today, the VM only lowers/raises the background queue target
2746 * so as to favor consuming more/less background pages when
2747 * darkwake is ON/OFF.
2748 *
2749 * We might need to do more things in the future.
2750 */
2751
2752 void
2753 vm_update_darkwake_mode(boolean_t darkwake_mode)
2754 {
2755 #if XNU_TARGET_OS_OSX && defined(__arm64__)
2756 #pragma unused(darkwake_mode)
2757 assert(vm_darkwake_mode == FALSE);
2758 /*
2759 * Darkwake mode isn't supported on Apple Silicon macOS.
2760 */
2761 return;
2762 #else /* XNU_TARGET_OS_OSX && __arm64__ */
2763 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
2764
2765 vm_page_lockspin_queues();
2766
2767 if (vm_darkwake_mode == darkwake_mode) {
2768 /*
2769 * No change.
2770 */
2771 vm_page_unlock_queues();
2772 return;
2773 }
2774
2775 vm_darkwake_mode = darkwake_mode;
2776
2777 if (vm_darkwake_mode == TRUE) {
2778 /* save background target to restore later */
2779 vm_page_background_target_snapshot = vm_page_background_target;
2780
2781 /* target is set to 0...no protection for background pages */
2782 vm_page_background_target = 0;
2783 } else if (vm_darkwake_mode == FALSE) {
2784 if (vm_page_background_target_snapshot) {
2785 vm_page_background_target = vm_page_background_target_snapshot;
2786 }
2787 }
2788 vm_page_unlock_queues();
2789 #endif
2790 }
2791
2792 void
2793 vm_page_update_special_state(vm_page_t mem)
2794 {
2795 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR || mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
2796 return;
2797 }
2798
2799 int mode = mem->vmp_on_specialq;
2800
2801 switch (mode) {
2802 case VM_PAGE_SPECIAL_Q_BG:
2803 {
2804 task_t my_task = current_task_early();
2805
2806 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2807 return;
2808 }
2809
2810 if (my_task) {
2811 if (task_get_darkwake_mode(my_task)) {
2812 return;
2813 }
2814 }
2815
2816 if (my_task) {
2817 if (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG)) {
2818 return;
2819 }
2820 }
2821 vm_page_lockspin_queues();
2822
2823 vm_page_background_promoted_count++;
2824
2825 vm_page_remove_from_specialq(mem);
2826 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2827
2828 vm_page_unlock_queues();
2829 break;
2830 }
2831
2832 case VM_PAGE_SPECIAL_Q_DONATE:
2833 {
2834 task_t my_task = current_task_early();
2835
2836 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2837 return;
2838 }
2839
2840 if (my_task->donates_own_pages == false) {
2841 vm_page_lockspin_queues();
2842
2843 vm_page_remove_from_specialq(mem);
2844 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
2845
2846 vm_page_unlock_queues();
2847 }
2848 break;
2849 }
2850
2851 default:
2852 {
2853 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2854 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2855 break;
2856 }
2857 }
2858 }
2859
2860
2861 void
2862 vm_page_assign_special_state(vm_page_t mem, int mode)
2863 {
2864 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
2865 return;
2866 }
2867
2868 switch (mode) {
2869 case VM_PAGE_SPECIAL_Q_BG:
2870 {
2871 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2872 return;
2873 }
2874
2875 task_t my_task = current_task_early();
2876
2877 if (my_task) {
2878 if (task_get_darkwake_mode(my_task)) {
2879 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2880 return;
2881 }
2882 }
2883
2884 if (my_task) {
2885 mem->vmp_on_specialq = (proc_get_effective_task_policy(my_task, TASK_POLICY_DARWIN_BG) ? VM_PAGE_SPECIAL_Q_BG : VM_PAGE_SPECIAL_Q_EMPTY);
2886 }
2887 break;
2888 }
2889
2890 case VM_PAGE_SPECIAL_Q_DONATE:
2891 {
2892 if (vm_page_donate_mode == VM_PAGE_DONATE_DISABLED) {
2893 return;
2894 }
2895 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
2896 break;
2897 }
2898
2899 default:
2900 break;
2901 }
2902 }
2903
2904
2905 void
2906 vm_page_remove_from_specialq(
2907 vm_page_t mem)
2908 {
2909 vm_object_t m_object;
2910 unsigned short mode;
2911
2912 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2913
2914 mode = mem->vmp_on_specialq;
2915
2916 switch (mode) {
2917 case VM_PAGE_SPECIAL_Q_BG:
2918 {
2919 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2920 vm_page_queue_remove(&vm_page_queue_background, mem, vmp_specialq);
2921
2922 mem->vmp_specialq.next = 0;
2923 mem->vmp_specialq.prev = 0;
2924
2925 vm_page_background_count--;
2926
2927 m_object = VM_PAGE_OBJECT(mem);
2928
2929 if (m_object->internal) {
2930 vm_page_background_internal_count--;
2931 } else {
2932 vm_page_background_external_count--;
2933 }
2934 }
2935 break;
2936 }
2937
2938 case VM_PAGE_SPECIAL_Q_DONATE:
2939 {
2940 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2941 vm_page_queue_remove((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
2942 mem->vmp_specialq.next = 0;
2943 mem->vmp_specialq.prev = 0;
2944 vm_page_donate_count--;
2945 if (vm_page_donate_queue_ripe && (vm_page_donate_count < vm_page_donate_target)) {
2946 assert(vm_page_donate_target == vm_page_donate_target_low);
2947 vm_page_donate_target = vm_page_donate_target_high;
2948 vm_page_donate_queue_ripe = false;
2949 }
2950 }
2951
2952 break;
2953 }
2954
2955 default:
2956 {
2957 assert(VM_PAGE_UNPACK_PTR(mem->vmp_specialq.next) == (uintptr_t)NULL &&
2958 VM_PAGE_UNPACK_PTR(mem->vmp_specialq.prev) == (uintptr_t)NULL);
2959 break;
2960 }
2961 }
2962 }
2963
2964
2965 void
2966 vm_page_add_to_specialq(
2967 vm_page_t mem,
2968 boolean_t first)
2969 {
2970 vm_object_t m_object;
2971
2972 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
2973
2974 if (mem->vmp_specialq.next && mem->vmp_specialq.prev) {
2975 return;
2976 }
2977
2978 int mode = mem->vmp_on_specialq;
2979
2980 switch (mode) {
2981 case VM_PAGE_SPECIAL_Q_BG:
2982 {
2983 if (vm_page_background_mode == VM_PAGE_BG_DISABLED) {
2984 return;
2985 }
2986
2987 m_object = VM_PAGE_OBJECT(mem);
2988
2989 if (vm_page_background_exclude_external && !m_object->internal) {
2990 return;
2991 }
2992
2993 if (first == TRUE) {
2994 vm_page_queue_enter_first(&vm_page_queue_background, mem, vmp_specialq);
2995 } else {
2996 vm_page_queue_enter(&vm_page_queue_background, mem, vmp_specialq);
2997 }
2998 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_BG;
2999
3000 vm_page_background_count++;
3001
3002 if (m_object->internal) {
3003 vm_page_background_internal_count++;
3004 } else {
3005 vm_page_background_external_count++;
3006 }
3007 break;
3008 }
3009
3010 case VM_PAGE_SPECIAL_Q_DONATE:
3011 {
3012 if (first == TRUE) {
3013 vm_page_queue_enter_first((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3014 } else {
3015 vm_page_queue_enter((vm_page_queue_head_t*)&vm_page_queue_donate, mem, vmp_specialq);
3016 }
3017 vm_page_donate_count++;
3018 if (!vm_page_donate_queue_ripe && (vm_page_donate_count > vm_page_donate_target)) {
3019 assert(vm_page_donate_target == vm_page_donate_target_high);
3020 vm_page_donate_target = vm_page_donate_target_low;
3021 vm_page_donate_queue_ripe = true;
3022 }
3023 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_DONATE;
3024 break;
3025 }
3026
3027 default:
3028 break;
3029 }
3030 }
3031
3032 /*
3033 * This can be switched to FALSE to help debug drivers
3034 * that are having problems with memory > 4G.
3035 */
3036 boolean_t vm_himemory_mode = TRUE;
3037
3038 /*
3039 * this interface exists to support hardware controllers
3040 * incapable of generating DMAs with more than 32 bits
3041 * of address on platforms with physical memory > 4G...
3042 */
3043 unsigned int vm_lopages_allocated_q = 0;
3044 unsigned int vm_lopages_allocated_cpm_success = 0;
3045 unsigned int vm_lopages_allocated_cpm_failed = 0;
3046 vm_page_queue_head_t vm_lopage_queue_free VM_PAGE_PACKED_ALIGNED;
3047
3048 vm_page_t
3049 vm_page_grablo(void)
3050 {
3051 vm_page_t mem;
3052
3053 if (vm_lopage_needed == FALSE) {
3054 return vm_page_grab();
3055 }
3056
3057 vm_free_page_lock_spin();
3058
3059 if (!vm_page_queue_empty(&vm_lopage_queue_free)) {
3060 vm_page_queue_remove_first(&vm_lopage_queue_free, mem, vmp_pageq);
3061 assert(vm_lopage_free_count);
3062 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
3063 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3064
3065 vm_lopage_free_count--;
3066 vm_lopages_allocated_q++;
3067
3068 if (vm_lopage_free_count < vm_lopage_lowater) {
3069 vm_lopage_refill = TRUE;
3070 }
3071
3072 vm_free_page_unlock();
3073
3074 if (current_task()->donates_own_pages) {
3075 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3076 } else {
3077 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3078 }
3079 } else {
3080 vm_free_page_unlock();
3081
3082 if (cpm_allocate(PAGE_SIZE, &mem, atop(PPNUM_MAX), 0, FALSE, KMA_LOMEM) != KERN_SUCCESS) {
3083 vm_free_page_lock_spin();
3084 vm_lopages_allocated_cpm_failed++;
3085 vm_free_page_unlock();
3086
3087 return VM_PAGE_NULL;
3088 }
3089 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3090
3091 mem->vmp_busy = TRUE;
3092
3093 vm_page_lockspin_queues();
3094
3095 mem->vmp_gobbled = FALSE;
3096 vm_page_gobble_count--;
3097 vm_page_wire_count--;
3098
3099 vm_lopages_allocated_cpm_success++;
3100 vm_page_unlock_queues();
3101 }
3102 assert(mem->vmp_busy);
3103 assert(!mem->vmp_pmapped);
3104 assert(!mem->vmp_wpmapped);
3105 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3106
3107 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3108
3109 counter_inc(&vm_page_grab_count);
3110 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, 0, 1, 0, 0);
3111
3112 return mem;
3113 }
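
/*
 * Illustrative sketch only (hypothetical caller, not part of this file):
 * a path that must hand a 32-bit physical address to a DMA engine could use
 * vm_page_grablo() above, which prefers the low free queue on platforms
 * where vm_lopage_needed is set.
 */
#if 0
static kern_return_t
example_grab_dma32_page(ppnum_t *ppnum_out)
{
	vm_page_t m = vm_page_grablo();

	if (m == VM_PAGE_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}
	/* on lopage platforms this physical page number is below max_valid_low_ppnum */
	*ppnum_out = VM_PAGE_GET_PHYS_PAGE(m);
	return KERN_SUCCESS;
}
#endif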
3114
3115 /*
3116 * vm_page_grab:
3117 *
3118 * first try to grab a page from the per-cpu free list...
3119 * this must be done while pre-emption is disabled... if
3120 * a page is available, we're done...
3121 * if no page is available, grab the vm_page_queue_free_lock
3122 * and see if current number of free pages would allow us
3123 * to grab at least 1... if not, return VM_PAGE_NULL as before...
3124 * if there are pages available, disable preemption and
3125 * recheck the state of the per-cpu free list... we could
3126 * have been preempted and moved to a different cpu, or
3127 * some other thread could have re-filled it... if still
3128 * empty, figure out how many pages we can steal from the
3129 * global free queue and move to the per-cpu queue...
3130 * return 1 of these pages when done... only wake up the
3131 * pageout_scan thread if we moved pages from the global
3132 * list... no need for the wakeup if we've satisfied the
3133 * request from the per-cpu queue.
3134 */
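
/*
 * A minimal sketch (kept out of the build) of the flow described above,
 * assuming the simplest case: no secluded memory, no delayed pages, no
 * per-cpu refill batching and no page-coloring heuristics. The helper name
 * is hypothetical; see vm_page_grab_options() below for the real logic.
 */
#if 0
static vm_page_t
vm_page_grab_sketch(void)
{
	vm_page_t mem;
	unsigned int color;

	/* fast path: pop the head of this cpu's free list with preemption disabled */
	disable_preemption();
	if ((mem = *PERCPU_GET(free_pages)) != VM_PAGE_NULL) {
		*PERCPU_GET(free_pages) = mem->vmp_snext;
		VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
		mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
		enable_preemption();
		return mem;
	}
	enable_preemption();

	/* slow path: take one page straight from the global free queues */
	vm_free_page_lock_spin();
	if (vm_page_free_count < vm_page_free_reserved &&
	    !(current_thread()->options & TH_OPT_VMPRIV)) {
		/* only privileged (pageout-related) threads may dip into the reserve */
		vm_free_page_unlock();
		return VM_PAGE_NULL;
	}
	while (vm_page_free_count == 0) {
		/* a privileged thread waits for pages rather than failing */
		vm_free_page_unlock();
		VM_PAGE_WAIT();
		vm_free_page_lock_spin();
	}
	color = 0;
	while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
		color = (color + 1) & vm_color_mask;
	}
	vm_page_queue_remove_first(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
	vm_page_free_count--;
	VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
	mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
	vm_free_page_unlock();

	return mem;
}
#endif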
3135
3136 #if CONFIG_SECLUDED_MEMORY
3137 vm_page_t vm_page_grab_secluded(void);
3138 #endif /* CONFIG_SECLUDED_MEMORY */
3139
3140 static inline void
3141 vm_page_grab_diags(void);
3142
3143 vm_page_t
3144 vm_page_grab(void)
3145 {
3146 return vm_page_grab_options(VM_PAGE_GRAB_OPTIONS_NONE);
3147 }
3148
3149 #if HIBERNATION
3150 boolean_t hibernate_rebuild_needed = FALSE;
3151 #endif /* HIBERNATION */
3152
3153 vm_page_t
3154 vm_page_grab_options(
3155 int grab_options)
3156 {
3157 vm_page_t mem;
3158
3159 restart:
3160 disable_preemption();
3161
3162 if ((mem = *PERCPU_GET(free_pages))) {
3163 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
3164
3165 #if HIBERNATION
3166 if (hibernate_rebuild_needed) {
3167 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3168 }
3169 #endif /* HIBERNATION */
3170
3171 vm_page_grab_diags();
3172
3173 vm_offset_t pcpu_base = current_percpu_base();
3174 counter_inc_preemption_disabled(&vm_page_grab_count);
3175 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = mem->vmp_snext;
3176 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3177
3178 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3179 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
3180 enable_preemption();
3181
3182 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3183 assert(mem->vmp_tabled == FALSE);
3184 assert(mem->vmp_object == 0);
3185 assert(!mem->vmp_laundry);
3186 ASSERT_PMAP_FREE(mem);
3187 assert(mem->vmp_busy);
3188 assert(!mem->vmp_pmapped);
3189 assert(!mem->vmp_wpmapped);
3190 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3191 assert(!mem->vmp_realtime);
3192
3193 task_t cur_task = current_task_early();
3194 if (cur_task && cur_task != kernel_task) {
3195 if (cur_task->donates_own_pages) {
3196 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3197 } else {
3198 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3199 }
3200 }
3201 return mem;
3202 }
3203 enable_preemption();
3204
3205
3206 /*
3207 * Optionally produce warnings if the wire or gobble
3208 * counts exceed some threshold.
3209 */
3210 #if VM_PAGE_WIRE_COUNT_WARNING
3211 if (vm_page_wire_count >= VM_PAGE_WIRE_COUNT_WARNING) {
3212 printf("mk: vm_page_grab(): high wired page count of %d\n",
3213 vm_page_wire_count);
3214 }
3215 #endif
3216 #if VM_PAGE_GOBBLE_COUNT_WARNING
3217 if (vm_page_gobble_count >= VM_PAGE_GOBBLE_COUNT_WARNING) {
3218 printf("mk: vm_page_grab(): high gobbled page count of %d\n",
3219 vm_page_gobble_count);
3220 }
3221 #endif
3222
3223 /*
3224 * If free count is low and we have delayed pages from early boot,
3225 * get one of those instead.
3226 */
3227 if (__improbable(vm_delayed_count > 0 &&
3228 vm_page_free_count <= vm_page_free_target &&
3229 (mem = vm_get_delayed_page(grab_options)) != NULL)) {
3230 assert(!mem->vmp_realtime);
3231 return mem;
3232 }
3233
3234 vm_free_page_lock_spin();
3235
3236 /*
3237 * Only let privileged threads (involved in pageout)
3238 * dip into the reserved pool.
3239 */
3240 if ((vm_page_free_count < vm_page_free_reserved) &&
3241 !(current_thread()->options & TH_OPT_VMPRIV)) {
3242 /* no page for us in the free queue... */
3243 vm_free_page_unlock();
3244 mem = VM_PAGE_NULL;
3245
3246 #if CONFIG_SECLUDED_MEMORY
3247 /* ... but can we try and grab from the secluded queue? */
3248 if (vm_page_secluded_count > 0 &&
3249 ((grab_options & VM_PAGE_GRAB_SECLUDED) ||
3250 task_can_use_secluded_mem(current_task(), TRUE))) {
3251 mem = vm_page_grab_secluded();
3252 if (grab_options & VM_PAGE_GRAB_SECLUDED) {
3253 vm_page_secluded.grab_for_iokit++;
3254 if (mem) {
3255 vm_page_secluded.grab_for_iokit_success++;
3256 }
3257 }
3258 if (mem) {
3259 VM_CHECK_MEMORYSTATUS;
3260
3261 vm_page_grab_diags();
3262 counter_inc(&vm_page_grab_count);
3263 VM_DEBUG_EVENT(vm_page_grab, VM_PAGE_GRAB, DBG_FUNC_NONE, grab_options, 0, 0, 0);
3264
3265 assert(!mem->vmp_realtime);
3266 return mem;
3267 }
3268 }
3269 #else /* CONFIG_SECLUDED_MEMORY */
3270 (void) grab_options;
3271 #endif /* CONFIG_SECLUDED_MEMORY */
3272 } else {
3273 vm_page_t head;
3274 vm_page_t tail;
3275 unsigned int pages_to_steal;
3276 unsigned int color;
3277 unsigned int clump_end, sub_count;
3278
3279 while (vm_page_free_count == 0) {
3280 vm_free_page_unlock();
3281 /*
3282 * must be a privileged thread to be
3283 * in this state since a non-privileged
3284 * thread would have bailed if we were
3285 * under the vm_page_free_reserved mark
3286 */
3287 VM_PAGE_WAIT();
3288 vm_free_page_lock_spin();
3289 }
3290
3291 /*
3292 * Need to repopulate the per-CPU free list from the global free list.
3293 * Note we don't do any processing of pending retirement pages here.
3294 * That'll happen in the code above when the page comes off the per-CPU list.
3295 */
3296 disable_preemption();
3297
3298 /*
3299 * If we got preempted the cache might now have pages.
3300 */
3301 if ((mem = *PERCPU_GET(free_pages))) {
3302 vm_free_page_unlock();
3303 enable_preemption();
3304 goto restart;
3305 }
3306
3307 if (vm_page_free_count <= vm_page_free_reserved) {
3308 pages_to_steal = 1;
3309 } else {
3310 if (vm_free_magazine_refill_limit <= (vm_page_free_count - vm_page_free_reserved)) {
3311 pages_to_steal = vm_free_magazine_refill_limit;
3312 } else {
3313 pages_to_steal = (vm_page_free_count - vm_page_free_reserved);
3314 }
3315 }
3316 color = *PERCPU_GET(start_color);
3317 head = tail = NULL;
3318
3319 vm_page_free_count -= pages_to_steal;
3320 clump_end = sub_count = 0;
3321
3322 while (pages_to_steal--) {
3323 while (vm_page_queue_empty(&vm_page_queue_free[color].qhead)) {
3324 color = (color + 1) & vm_color_mask;
3325 }
3326 #if defined(__x86_64__)
3327 vm_page_queue_remove_first_with_clump(&vm_page_queue_free[color].qhead,
3328 mem, clump_end);
3329 #else
3330 vm_page_queue_remove_first(&vm_page_queue_free[color].qhead,
3331 mem, vmp_pageq);
3332 #endif
3333
3334 assert(mem->vmp_q_state == VM_PAGE_ON_FREE_Q);
3335
3336 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
3337
3338 #if defined(__arm64__)
3339 color = (color + 1) & vm_color_mask;
3340 #else
3341
3342 #if DEVELOPMENT || DEBUG
3343
3344 sub_count++;
3345 if (clump_end) {
3346 vm_clump_update_stats(sub_count);
3347 sub_count = 0;
3348 color = (color + 1) & vm_color_mask;
3349 }
3350 #else
3351 if (clump_end) {
3352 color = (color + 1) & vm_color_mask;
3353 }
3354
3355 #endif /* if DEVELOPMENT || DEBUG */
3356
3357 #endif /* if defined(__arm64__) */
3358
3359 if (head == NULL) {
3360 head = mem;
3361 } else {
3362 tail->vmp_snext = mem;
3363 }
3364 tail = mem;
3365
3366 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3367 assert(mem->vmp_tabled == FALSE);
3368 assert(mem->vmp_object == 0);
3369 assert(!mem->vmp_laundry);
3370
3371 mem->vmp_q_state = VM_PAGE_ON_FREE_LOCAL_Q;
3372
3373 ASSERT_PMAP_FREE(mem);
3374 assert(mem->vmp_busy);
3375 assert(!mem->vmp_pmapped);
3376 assert(!mem->vmp_wpmapped);
3377 assert(!pmap_is_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem)));
3378 assert(!mem->vmp_realtime);
3379 }
3380 #if defined (__x86_64__) && (DEVELOPMENT || DEBUG)
3381 vm_clump_update_stats(sub_count);
3382 #endif
3383
3384 #if HIBERNATION
3385 if (hibernate_rebuild_needed) {
3386 panic("%s:%d should not modify cpu->free_pages while hibernating", __FUNCTION__, __LINE__);
3387 }
3388 #endif /* HIBERNATION */
3389 vm_offset_t pcpu_base = current_percpu_base();
3390 *PERCPU_GET_WITH_BASE(pcpu_base, free_pages) = head;
3391 *PERCPU_GET_WITH_BASE(pcpu_base, start_color) = color;
3392
3393 vm_free_page_unlock();
3394 enable_preemption();
3395 goto restart;
3396 }
3397
3398 /*
3399 * Decide if we should poke the pageout daemon.
3400 * We do this if the free count is less than the low
3401 * water mark. VM Pageout Scan will keep running till
3402 * the free_count > free_target (& hence above free_min).
3403 * This wakeup is to catch the possibility of the counts
3404 * dropping between VM Pageout Scan parking and this check.
3405 *
3406 * We don't have the counts locked ... if they change a little,
3407 * it doesn't really matter.
3408 */
3409 if (vm_page_free_count < vm_page_free_min) {
3410 vm_free_page_lock();
3411 if (vm_pageout_running == FALSE) {
3412 vm_free_page_unlock();
3413 thread_wakeup((event_t) &vm_page_free_wanted);
3414 } else {
3415 vm_free_page_unlock();
3416 }
3417 }
3418
3419 VM_CHECK_MEMORYSTATUS;
3420
3421 if (mem) {
3422 assert(!mem->vmp_realtime);
3423 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 4); /* (TEST/DEBUG) */
3424
3425 task_t cur_task = current_task_early();
3426 if (cur_task && cur_task != kernel_task) {
3427 if (cur_task->donates_own_pages) {
3428 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_DONATE);
3429 } else {
3430 vm_page_assign_special_state(mem, VM_PAGE_SPECIAL_Q_BG);
3431 }
3432 }
3433 }
3434 return mem;
3435 }
3436
3437 #if CONFIG_SECLUDED_MEMORY
3438 vm_page_t
3439 vm_page_grab_secluded(void)
3440 {
3441 vm_page_t mem;
3442 vm_object_t object;
3443 int refmod_state;
3444
3445 if (vm_page_secluded_count == 0) {
3446 /* no secluded pages to grab... */
3447 return VM_PAGE_NULL;
3448 }
3449
3450 /* secluded queue is protected by the VM page queue lock */
3451 vm_page_lock_queues();
3452
3453 if (vm_page_secluded_count == 0) {
3454 /* no secluded pages to grab... */
3455 vm_page_unlock_queues();
3456 return VM_PAGE_NULL;
3457 }
3458
3459 #if 00
3460 /* can we grab from the secluded queue? */
3461 if (vm_page_secluded_count > vm_page_secluded_target ||
3462 (vm_page_secluded_count > 0 &&
3463 task_can_use_secluded_mem(current_task(), TRUE))) {
3464 /* OK */
3465 } else {
3466 /* can't grab from secluded queue... */
3467 vm_page_unlock_queues();
3468 return VM_PAGE_NULL;
3469 }
3470 #endif
3471
3472 /* we can grab a page from secluded queue! */
3473 assert((vm_page_secluded_count_free +
3474 vm_page_secluded_count_inuse) ==
3475 vm_page_secluded_count);
3476 if (current_task()->task_can_use_secluded_mem) {
3477 assert(num_tasks_can_use_secluded_mem > 0);
3478 }
3479 assert(!vm_page_queue_empty(&vm_page_queue_secluded));
3480 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3481 mem = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3482 assert(mem->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3483 vm_page_queues_remove(mem, TRUE);
3484
3485 object = VM_PAGE_OBJECT(mem);
3486
3487 assert(!mem->vmp_fictitious);
3488 assert(!VM_PAGE_WIRED(mem));
3489 if (object == VM_OBJECT_NULL) {
3490 /* free for grab! */
3491 vm_page_unlock_queues();
3492 vm_page_secluded.grab_success_free++;
3493
3494 assert(mem->vmp_busy);
3495 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3496 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3497 assert(mem->vmp_pageq.next == 0);
3498 assert(mem->vmp_pageq.prev == 0);
3499 assert(mem->vmp_listq.next == 0);
3500 assert(mem->vmp_listq.prev == 0);
3501 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3502 assert(mem->vmp_specialq.next == 0);
3503 assert(mem->vmp_specialq.prev == 0);
3504 return mem;
3505 }
3506
3507 assert(!object->internal);
3508 // vm_page_pageable_external_count--;
3509
3510 if (!vm_object_lock_try(object)) {
3511 // printf("SECLUDED: page %p: object %p locked\n", mem, object);
3512 vm_page_secluded.grab_failure_locked++;
3513 reactivate_secluded_page:
3514 vm_page_activate(mem);
3515 vm_page_unlock_queues();
3516 return VM_PAGE_NULL;
3517 }
3518 if (mem->vmp_busy ||
3519 mem->vmp_cleaning ||
3520 mem->vmp_laundry) {
3521 /* can't steal page in this state... */
3522 vm_object_unlock(object);
3523 vm_page_secluded.grab_failure_state++;
3524 goto reactivate_secluded_page;
3525 }
3526 if (mem->vmp_realtime) {
3527 /* don't steal pages used by realtime threads... */
3528 vm_object_unlock(object);
3529 vm_page_secluded.grab_failure_realtime++;
3530 goto reactivate_secluded_page;
3531 }
3532
3533 mem->vmp_busy = TRUE;
3534 refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(mem));
3535 if (refmod_state & VM_MEM_REFERENCED) {
3536 mem->vmp_reference = TRUE;
3537 }
3538 if (refmod_state & VM_MEM_MODIFIED) {
3539 SET_PAGE_DIRTY(mem, FALSE);
3540 }
3541 if (mem->vmp_dirty || mem->vmp_precious) {
3542 /* can't grab a dirty page; re-activate */
3543 // printf("SECLUDED: dirty page %p\n", mem);
3544 PAGE_WAKEUP_DONE(mem);
3545 vm_page_secluded.grab_failure_dirty++;
3546 vm_object_unlock(object);
3547 goto reactivate_secluded_page;
3548 }
3549 if (mem->vmp_reference) {
3550 /* it's been used but we do need to grab a page... */
3551 }
3552
3553 vm_page_unlock_queues();
3554
3555
3556 /* finish what vm_page_free() would have done... */
3557 vm_page_free_prepare_object(mem, TRUE);
3558 vm_object_unlock(object);
3559 object = VM_OBJECT_NULL;
3560 if (vm_page_free_verify) {
3561 ASSERT_PMAP_FREE(mem);
3562 }
3563 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3564 vm_page_secluded.grab_success_other++;
3565
3566 assert(mem->vmp_busy);
3567 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3568 assert(VM_PAGE_OBJECT(mem) == VM_OBJECT_NULL);
3569 assert(mem->vmp_pageq.next == 0);
3570 assert(mem->vmp_pageq.prev == 0);
3571 assert(mem->vmp_listq.next == 0);
3572 assert(mem->vmp_listq.prev == 0);
3573 assert(mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
3574 assert(mem->vmp_specialq.next == 0);
3575 assert(mem->vmp_specialq.prev == 0);
3576
3577 return mem;
3578 }
3579
3580 uint64_t
3581 vm_page_secluded_drain(void)
3582 {
3583 vm_page_t local_freeq;
3584 int local_freed;
3585 uint64_t num_reclaimed;
3586 unsigned int saved_secluded_count, saved_secluded_target;
3587
3588 num_reclaimed = 0;
3589 local_freeq = NULL;
3590 local_freed = 0;
3591
3592 vm_page_lock_queues();
3593
3594 saved_secluded_count = vm_page_secluded_count;
3595 saved_secluded_target = vm_page_secluded_target;
3596 vm_page_secluded_target = 0;
3597 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3598 while (vm_page_secluded_count) {
3599 vm_page_t secluded_page;
3600
3601 assert((vm_page_secluded_count_free +
3602 vm_page_secluded_count_inuse) ==
3603 vm_page_secluded_count);
3604 secluded_page = (vm_page_t)vm_page_queue_first(&vm_page_queue_secluded);
3605 assert(secluded_page->vmp_q_state == VM_PAGE_ON_SECLUDED_Q);
3606
3607 vm_page_queues_remove(secluded_page, FALSE);
3608 assert(!secluded_page->vmp_fictitious);
3609 assert(!VM_PAGE_WIRED(secluded_page));
3610
3611 if (secluded_page->vmp_object == 0) {
3612 /* transfer to free queue */
3613 assert(secluded_page->vmp_busy);
3614 secluded_page->vmp_snext = local_freeq;
3615 local_freeq = secluded_page;
3616 local_freed += 1;
3617 } else {
3618 /* transfer to head of active queue */
3619 vm_page_enqueue_active(secluded_page, FALSE);
3620 secluded_page = VM_PAGE_NULL;
3621 }
3622 num_reclaimed++;
3623 }
3624 vm_page_secluded_target = saved_secluded_target;
3625 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3626
3627 // printf("FBDP %s:%d secluded_count %d->%d, target %d, reclaimed %lld\n", __FUNCTION__, __LINE__, saved_secluded_count, vm_page_secluded_count, vm_page_secluded_target, num_reclaimed);
3628
3629 vm_page_unlock_queues();
3630
3631 if (local_freed) {
3632 vm_page_free_list(local_freeq, TRUE);
3633 local_freeq = NULL;
3634 local_freed = 0;
3635 }
3636
3637 return num_reclaimed;
3638 }
3639 #endif /* CONFIG_SECLUDED_MEMORY */
3640
3641
3642 static inline void
3643 vm_page_grab_diags()
3644 {
3645 #if DEVELOPMENT || DEBUG
3646 task_t task = current_task_early();
3647 if (task == NULL) {
3648 return;
3649 }
3650
3651 ledger_credit(task->ledger, task_ledgers.pages_grabbed, 1);
3652 #endif /* DEVELOPMENT || DEBUG */
3653 }
3654
3655 /*
3656 * vm_page_release:
3657 *
3658 * Return a page to the free list.
3659 */
3660
3661 void
3662 vm_page_release(
3663 vm_page_t mem,
3664 boolean_t page_queues_locked)
3665 {
3666 unsigned int color;
3667 int need_wakeup = 0;
3668 int need_priv_wakeup = 0;
3669 #if CONFIG_SECLUDED_MEMORY
3670 int need_secluded_wakeup = 0;
3671 #endif /* CONFIG_SECLUDED_MEMORY */
3672 event_t wakeup_event = NULL;
3673
3674 if (page_queues_locked) {
3675 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3676 } else {
3677 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3678 }
3679
3680 assert(!mem->vmp_private && !mem->vmp_fictitious);
3681 if (vm_page_free_verify) {
3682 ASSERT_PMAP_FREE(mem);
3683 }
3684 // dbgLog(VM_PAGE_GET_PHYS_PAGE(mem), vm_page_free_count, vm_page_wire_count, 5); /* (TEST/DEBUG) */
3685
3686 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
3687
3688 if (__improbable(mem->vmp_realtime)) {
3689 if (!page_queues_locked) {
3690 vm_page_lock_queues();
3691 }
3692 if (mem->vmp_realtime) {
3693 mem->vmp_realtime = false;
3694 vm_page_realtime_count--;
3695 }
3696 if (!page_queues_locked) {
3697 vm_page_unlock_queues();
3698 }
3699 }
3700
3701 vm_free_page_lock_spin();
3702
3703 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
3704 assert(mem->vmp_busy);
3705 assert(!mem->vmp_laundry);
3706 assert(mem->vmp_object == 0);
3707 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
3708 assert(mem->vmp_listq.next == 0 && mem->vmp_listq.prev == 0);
3709 assert(mem->vmp_specialq.next == 0 && mem->vmp_specialq.prev == 0);
3710
3711 /* Clear any specialQ hints before releasing the page to the free pool */
3712 mem->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
3713
3714 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
3715 vm_lopage_free_count < vm_lopage_free_limit &&
3716 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3717 /*
3718 * this exists to support hardware controllers
3719 * incapable of generating DMAs with more than 32 bits
3720 * of address on platforms with physical memory > 4G...
3721 */
3722 vm_page_queue_enter_first(&vm_lopage_queue_free, mem, vmp_pageq);
3723 vm_lopage_free_count++;
3724
3725 if (vm_lopage_free_count >= vm_lopage_free_limit) {
3726 vm_lopage_refill = FALSE;
3727 }
3728
3729 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3730 mem->vmp_lopage = TRUE;
3731 #if CONFIG_SECLUDED_MEMORY
3732 } else if (vm_page_free_count > vm_page_free_reserved &&
3733 vm_page_secluded_count < vm_page_secluded_target &&
3734 num_tasks_can_use_secluded_mem == 0) {
3735 /*
3736 * XXX FBDP TODO: also avoid refilling secluded queue
3737 * when some IOKit objects are already grabbing from it...
3738 */
3739 if (!page_queues_locked) {
3740 if (!vm_page_trylock_queues()) {
3741 /* take locks in right order */
3742 vm_free_page_unlock();
3743 vm_page_lock_queues();
3744 vm_free_page_lock_spin();
3745 }
3746 }
3747 mem->vmp_lopage = FALSE;
3748 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
3749 vm_page_queue_enter_first(&vm_page_queue_secluded, mem, vmp_pageq);
3750 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3751 vm_page_secluded_count++;
3752 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3753 vm_page_secluded_count_free++;
3754 if (!page_queues_locked) {
3755 vm_page_unlock_queues();
3756 }
3757 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_OWNED);
3758 if (vm_page_free_wanted_secluded > 0) {
3759 vm_page_free_wanted_secluded--;
3760 need_secluded_wakeup = 1;
3761 }
3762 #endif /* CONFIG_SECLUDED_MEMORY */
3763 } else {
3764 mem->vmp_lopage = FALSE;
3765 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3766
3767 color = VM_PAGE_GET_COLOR(mem);
3768 #if defined(__x86_64__)
3769 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
3770 #else
3771 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
3772 #endif
3773 vm_page_free_count++;
3774 /*
3775 * Check if we should wake up someone waiting for a page.
3776 * But don't bother waking them unless they can allocate.
3777 *
3778 * We wakeup only one thread, to prevent starvation.
3779 * Because the scheduling system handles wait queues FIFO,
3780 * if we wake up all waiting threads, one greedy thread
3781 * can starve multiple niceguy threads. When the threads
3782 * all wake up, the greedy thread runs first, grabs the page,
3783 * and waits for another page. It will be the first to run
3784 * when the next page is freed.
3785 *
3786 * However, there is a slight danger here.
3787 * The thread we wake might not use the free page.
3788 * Then the other threads could wait indefinitely
3789 * while the page goes unused. To forestall this,
3790 * the pageout daemon will keep making free pages
3791 * as long as vm_page_free_wanted is non-zero.
3792 */
3793
3794 assert(vm_page_free_count > 0);
3795 if (vm_page_free_wanted_privileged > 0) {
3796 vm_page_free_wanted_privileged--;
3797 need_priv_wakeup = 1;
3798 #if CONFIG_SECLUDED_MEMORY
3799 } else if (vm_page_free_wanted_secluded > 0 &&
3800 vm_page_free_count > vm_page_free_reserved) {
3801 vm_page_free_wanted_secluded--;
3802 need_secluded_wakeup = 1;
3803 #endif /* CONFIG_SECLUDED_MEMORY */
3804 } else if (vm_page_free_wanted > 0 &&
3805 vm_page_free_count > vm_page_free_reserved) {
3806 vm_page_free_wanted--;
3807 need_wakeup = 1;
3808 }
3809 }
3810 vm_pageout_vminfo.vm_page_pages_freed++;
3811
3812 vm_free_page_unlock();
3813
3814 VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, 1, 0, 0, 0);
3815
3816 if (need_priv_wakeup) {
3817 wakeup_event = &vm_page_free_wanted_privileged;
3818 }
3819 #if CONFIG_SECLUDED_MEMORY
3820 else if (need_secluded_wakeup) {
3821 wakeup_event = &vm_page_free_wanted_secluded;
3822 }
3823 #endif /* CONFIG_SECLUDED_MEMORY */
3824 else if (need_wakeup) {
3825 wakeup_event = &vm_page_free_count;
3826 }
3827
3828 if (wakeup_event) {
3829 if (vps_dynamic_priority_enabled == TRUE) {
3830 thread_t thread_woken = NULL;
3831 wakeup_one_with_inheritor((event_t) wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
3832 /*
3833 * (80947592) if this is the last reference on this
3834 * thread, calling thread_deallocate() here
3835 * might take the tasks_threads_lock,
3836 * sadly thread_create_internal is doing several
3837 * allocations under this lock, which can result in
3838 * deadlocks with the pageout scan daemon.
3839 *
3840 * FIXME: we should disallow allocations under the
3841 * tasks_threads_lock, but that is a larger fix to make.
3842 */
3843 thread_deallocate_safe(thread_woken);
3844 } else {
3845 thread_wakeup_one((event_t) wakeup_event);
3846 }
3847 }
3848
3849 VM_CHECK_MEMORYSTATUS;
3850 }
3851
3852 /*
3853 * This version of vm_page_release() is used only at startup
3854 * when we are single-threaded and pages are being released
3855 * for the first time. Hence, no locking is needed and unnecessary checks are skipped.
3856 * Note: VM_CHECK_MEMORYSTATUS is invoked by the caller.
3857 */
3858 void
3859 vm_page_release_startup(
3860 vm_page_t mem)
3861 {
3862 vm_page_queue_t queue_free;
3863
3864 if (vm_lopage_free_count < vm_lopage_free_limit &&
3865 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
3866 mem->vmp_lopage = TRUE;
3867 mem->vmp_q_state = VM_PAGE_ON_FREE_LOPAGE_Q;
3868 vm_lopage_free_count++;
3869 queue_free = &vm_lopage_queue_free;
3870 #if CONFIG_SECLUDED_MEMORY
3871 } else if (vm_page_secluded_count < vm_page_secluded_target) {
3872 mem->vmp_lopage = FALSE;
3873 mem->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
3874 vm_page_secluded_count++;
3875 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
3876 vm_page_secluded_count_free++;
3877 queue_free = &vm_page_queue_secluded;
3878 #endif /* CONFIG_SECLUDED_MEMORY */
3879 } else {
3880 mem->vmp_lopage = FALSE;
3881 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
3882 vm_page_free_count++;
3883 queue_free = &vm_page_queue_free[VM_PAGE_GET_COLOR(mem)].qhead;
3884 }
3885 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
3886 #if defined(__x86_64__)
3887 vm_page_queue_enter_clump(queue_free, mem);
3888 #else
3889 vm_page_queue_enter(queue_free, mem, vmp_pageq);
3890 #endif
3891 } else {
3892 vm_page_queue_enter_first(queue_free, mem, vmp_pageq);
3893 }
3894 }
3895
3896 /*
3897 * vm_page_wait:
3898 *
3899 * Wait for a page to become available.
3900 * If there are plenty of free pages, then we don't sleep.
3901 *
3902 * Returns:
3903 * TRUE: There may be another page, try again
3904 * FALSE: We were interrupted out of our wait, don't try again
3905 */
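/*
 * Illustrative sketch (not part of the original code): a caller that must
 * obtain a page typically alternates vm_page_grab() with vm_page_wait(),
 * giving up only if the wait is interrupted; the same pattern appears in
 * vm_page_part_zero_fill() later in this file.  "interruptible" is whatever
 * wait mode the caller chooses (e.g. THREAD_UNINT).
 *
 *	for (;;) {
 *		mem = vm_page_grab();
 *		if (mem != VM_PAGE_NULL) {
 *			break;
 *		}
 *		if (!vm_page_wait(interruptible)) {
 *			return VM_PAGE_NULL;
 *		}
 *	}
 *
 * The early return covers the interrupted-wait case reported by the FALSE
 * return value documented above.
 */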
3906
3907 boolean_t
3908 vm_page_wait(
3909 int interruptible )
3910 {
3911 /*
3912 * We can't use vm_page_free_reserved to make this
3913 * determination. Consider: some thread might
3914 * need to allocate two pages. The first allocation
3915 * succeeds, the second fails. After the first page is freed,
3916 * a call to vm_page_wait must really block.
3917 */
3918 kern_return_t wait_result;
3919 int need_wakeup = 0;
3920 int is_privileged = current_thread()->options & TH_OPT_VMPRIV;
3921 event_t wait_event = NULL;
3922
3923 vm_free_page_lock_spin();
3924
3925 if (is_privileged && vm_page_free_count) {
3926 vm_free_page_unlock();
3927 return TRUE;
3928 }
3929
3930 if (vm_page_free_count >= vm_page_free_target) {
3931 vm_free_page_unlock();
3932 return TRUE;
3933 }
3934
3935 if (is_privileged) {
3936 if (vm_page_free_wanted_privileged++ == 0) {
3937 need_wakeup = 1;
3938 }
3939 wait_event = (event_t)&vm_page_free_wanted_privileged;
3940 #if CONFIG_SECLUDED_MEMORY
3941 } else if (secluded_for_apps &&
3942 task_can_use_secluded_mem(current_task(), FALSE)) {
3943 #if 00
3944 /* XXX FBDP: need pageq lock for this... */
3945 /* XXX FBDP: might wait even if pages available, */
3946 /* XXX FBDP: hopefully not for too long... */
3947 if (vm_page_secluded_count > 0) {
3948 vm_free_page_unlock();
3949 return TRUE;
3950 }
3951 #endif
3952 if (vm_page_free_wanted_secluded++ == 0) {
3953 need_wakeup = 1;
3954 }
3955 wait_event = (event_t)&vm_page_free_wanted_secluded;
3956 #endif /* CONFIG_SECLUDED_MEMORY */
3957 } else {
3958 if (vm_page_free_wanted++ == 0) {
3959 need_wakeup = 1;
3960 }
3961 wait_event = (event_t)&vm_page_free_count;
3962 }
3963
3964 /*
3965 * We don't do a vm_pageout_scan wakeup if we already have
3966 * some waiters because vm_pageout_scan checks for waiters
3967 * before it returns and does so behind the vm_page_queue_free_lock,
3968 * which we own when we bump the waiter counts.
3969 */
3970
3971 if (vps_dynamic_priority_enabled == TRUE) {
3972 /*
3973 * We are waking up vm_pageout_scan here. If it needs
3974 * the vm_page_queue_free_lock before we unlock it
3975 * we'll end up just blocking and incur an extra
3976 * context switch. Could be a perf. issue.
3977 */
3978
3979 if (need_wakeup) {
3980 thread_wakeup((event_t)&vm_page_free_wanted);
3981 }
3982
3983 /*
3984 * LD: This event is going to get recorded every time because
3985 * we don't get back THREAD_WAITING from lck_mtx_sleep_with_inheritor.
3986 * We just block in that routine.
3987 */
3988 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
3989 vm_page_free_wanted_privileged,
3990 vm_page_free_wanted,
3991 #if CONFIG_SECLUDED_MEMORY
3992 vm_page_free_wanted_secluded,
3993 #else /* CONFIG_SECLUDED_MEMORY */
3994 0,
3995 #endif /* CONFIG_SECLUDED_MEMORY */
3996 0);
3997 wait_result = lck_mtx_sleep_with_inheritor(&vm_page_queue_free_lock,
3998 LCK_SLEEP_UNLOCK,
3999 wait_event,
4000 vm_pageout_scan_thread,
4001 interruptible,
4002 0);
4003 } else {
4004 wait_result = assert_wait(wait_event, interruptible);
4005
4006 vm_free_page_unlock();
4007
4008 if (need_wakeup) {
4009 thread_wakeup((event_t)&vm_page_free_wanted);
4010 }
4011
4012 if (wait_result == THREAD_WAITING) {
4013 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block, VM_PAGE_WAIT_BLOCK, DBG_FUNC_START,
4014 vm_page_free_wanted_privileged,
4015 vm_page_free_wanted,
4016 #if CONFIG_SECLUDED_MEMORY
4017 vm_page_free_wanted_secluded,
4018 #else /* CONFIG_SECLUDED_MEMORY */
4019 0,
4020 #endif /* CONFIG_SECLUDED_MEMORY */
4021 0);
4022 wait_result = thread_block(THREAD_CONTINUE_NULL);
4023 VM_DEBUG_CONSTANT_EVENT(vm_page_wait_block,
4024 VM_PAGE_WAIT_BLOCK, DBG_FUNC_END, 0, 0, 0, 0);
4025 }
4026 }
4027
4028 return (wait_result == THREAD_AWAKENED) || (wait_result == THREAD_NOT_WAITING);
4029 }
4030
4031 /*
4032 * vm_page_alloc:
4033 *
4034 * Allocate and return a memory cell associated
4035 * with this VM object/offset pair.
4036 *
4037 * Object must be locked.
4038 */
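/*
 * Illustrative sketch (an assumption, not taken from this file): a caller
 * that needs a page resident at "offset" typically retries vm_page_alloc()
 * around vm_page_wait(), dropping the object lock while it sleeps.
 * "object", "offset" and "m" are the caller's own variables.
 *
 *	vm_object_lock(object);
 *	while ((m = vm_page_alloc(object, offset)) == VM_PAGE_NULL) {
 *		vm_object_unlock(object);
 *		(void) vm_page_wait(THREAD_UNINT);
 *		vm_object_lock(object);
 *	}
 */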
4039
4040 vm_page_t
4041 vm_page_alloc(
4042 vm_object_t object,
4043 vm_object_offset_t offset)
4044 {
4045 vm_page_t mem;
4046 int grab_options;
4047
4048 vm_object_lock_assert_exclusive(object);
4049 grab_options = 0;
4050 #if CONFIG_SECLUDED_MEMORY
4051 if (object->can_grab_secluded) {
4052 grab_options |= VM_PAGE_GRAB_SECLUDED;
4053 }
4054 #endif /* CONFIG_SECLUDED_MEMORY */
4055 mem = vm_page_grab_options(grab_options);
4056 if (mem == VM_PAGE_NULL) {
4057 return VM_PAGE_NULL;
4058 }
4059
4060 vm_page_insert(mem, object, offset);
4061
4062 return mem;
4063 }
4064
4065 /*
4066 * vm_page_free_prepare:
4067 *
4068 * Removes page from any queue it may be on
4069 * and disassociates it from its VM object.
4070 *
4071 * Object and page queues must be locked prior to entry.
4072 */
4073 static void
4074 vm_page_free_prepare(
4075 vm_page_t mem)
4076 {
4077 vm_page_free_prepare_queues(mem);
4078 vm_page_free_prepare_object(mem, TRUE);
4079 }
4080
4081
4082 void
4083 vm_page_free_prepare_queues(
4084 vm_page_t mem)
4085 {
4086 vm_object_t m_object;
4087
4088 VM_PAGE_CHECK(mem);
4089
4090 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
4091 assert(!mem->vmp_cleaning);
4092 m_object = VM_PAGE_OBJECT(mem);
4093
4094 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4095 if (m_object) {
4096 vm_object_lock_assert_exclusive(m_object);
4097 }
4098 if (mem->vmp_laundry) {
4099 /*
4100 * We may have to free a page while it's being laundered
4101 * if we lost its pager (due to a forced unmount, for example).
4102 * We need to call vm_pageout_steal_laundry() before removing
4103 * the page from its VM object, so that we can remove it
4104 * from its pageout queue and adjust the laundry accounting
4105 */
4106 vm_pageout_steal_laundry(mem, TRUE);
4107 }
4108
4109 vm_page_queues_remove(mem, TRUE);
4110
4111 if (__improbable(mem->vmp_realtime)) {
4112 mem->vmp_realtime = false;
4113 vm_page_realtime_count--;
4114 }
4115
4116 if (VM_PAGE_WIRED(mem)) {
4117 assert(mem->vmp_wire_count > 0);
4118
4119 if (m_object) {
4120 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4121 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4122 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4123
4124 assert(m_object->resident_page_count >=
4125 m_object->wired_page_count);
4126
4127 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4128 OSAddAtomic(+1, &vm_page_purgeable_count);
4129 assert(vm_page_purgeable_wired_count > 0);
4130 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4131 }
4132 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4133 m_object->purgable == VM_PURGABLE_EMPTY) &&
4134 m_object->vo_owner != TASK_NULL) {
4135 task_t owner;
4136 int ledger_idx_volatile;
4137 int ledger_idx_nonvolatile;
4138 int ledger_idx_volatile_compressed;
4139 int ledger_idx_nonvolatile_compressed;
4140 boolean_t do_footprint;
4141
4142 owner = VM_OBJECT_OWNER(m_object);
4143 vm_object_ledger_tag_ledgers(
4144 m_object,
4145 &ledger_idx_volatile,
4146 &ledger_idx_nonvolatile,
4147 &ledger_idx_volatile_compressed,
4148 &ledger_idx_nonvolatile_compressed,
4149 &do_footprint);
4150 /*
4151 * While wired, this page was accounted
4152 * as "non-volatile" but it should now
4153 * be accounted as "volatile".
4154 */
4155 /* one less "non-volatile"... */
4156 ledger_debit(owner->ledger,
4157 ledger_idx_nonvolatile,
4158 PAGE_SIZE);
4159 if (do_footprint) {
4160 /* ... and "phys_footprint" */
4161 ledger_debit(owner->ledger,
4162 task_ledgers.phys_footprint,
4163 PAGE_SIZE);
4164 }
4165 /* one more "volatile" */
4166 ledger_credit(owner->ledger,
4167 ledger_idx_volatile,
4168 PAGE_SIZE);
4169 }
4170 }
4171 if (!mem->vmp_private && !mem->vmp_fictitious) {
4172 vm_page_wire_count--;
4173 }
4174
4175 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4176 mem->vmp_wire_count = 0;
4177 assert(!mem->vmp_gobbled);
4178 } else if (mem->vmp_gobbled) {
4179 if (!mem->vmp_private && !mem->vmp_fictitious) {
4180 vm_page_wire_count--;
4181 }
4182 vm_page_gobble_count--;
4183 }
4184 }
4185
4186
4187 void
4188 vm_page_free_prepare_object(
4189 vm_page_t mem,
4190 boolean_t remove_from_hash)
4191 {
4192 assert(!mem->vmp_realtime);
4193 if (mem->vmp_tabled) {
4194 vm_page_remove(mem, remove_from_hash); /* clears tabled, object, offset */
4195 }
4196 PAGE_WAKEUP(mem); /* clears wanted */
4197
4198 if (mem->vmp_private) {
4199 mem->vmp_private = FALSE;
4200 mem->vmp_fictitious = TRUE;
4201 VM_PAGE_SET_PHYS_PAGE(mem, vm_page_fictitious_addr);
4202 }
4203 if (!mem->vmp_fictitious) {
4204 assert(mem->vmp_pageq.next == 0);
4205 assert(mem->vmp_pageq.prev == 0);
4206 assert(mem->vmp_listq.next == 0);
4207 assert(mem->vmp_listq.prev == 0);
4208 assert(mem->vmp_specialq.next == 0);
4209 assert(mem->vmp_specialq.prev == 0);
4210 assert(mem->vmp_next_m == 0);
4211 ASSERT_PMAP_FREE(mem);
4212 {
4213 vm_page_init(mem, VM_PAGE_GET_PHYS_PAGE(mem), mem->vmp_lopage);
4214 }
4215 }
4216 }
4217
4218
4219 /*
4220 * vm_page_free:
4221 *
4222 * Returns the given page to the free list,
4223 * disassociating it from any VM object.
4224 *
4225 * Object and page queues must be locked prior to entry.
4226 */
4227 void
4228 vm_page_free(
4229 vm_page_t mem)
4230 {
4231 vm_page_free_prepare(mem);
4232
4233 if (mem->vmp_fictitious) {
4234 vm_page_release_fictitious(mem);
4235 } else {
4236 vm_page_release(mem, TRUE); /* page queues are locked */
4237 }
4238 }
4239
4240
4241 void
4242 vm_page_free_unlocked(
4243 vm_page_t mem,
4244 boolean_t remove_from_hash)
4245 {
4246 vm_page_lockspin_queues();
4247 vm_page_free_prepare_queues(mem);
4248 vm_page_unlock_queues();
4249
4250 vm_page_free_prepare_object(mem, remove_from_hash);
4251
4252 if (mem->vmp_fictitious) {
4253 vm_page_release_fictitious(mem);
4254 } else {
4255 vm_page_release(mem, FALSE); /* page queues are not locked */
4256 }
4257 }
4258
4259
4260 /*
4261 * Free a list of pages. The list can be up to several hundred pages,
4262 * as blocked up by vm_pageout_scan().
4263 * The big win is not having to take the free list lock once
4264 * per page.
4265 *
4266 * The VM page queues lock (vm_page_queue_lock) should NOT be held.
4267 * The VM page free queues lock (vm_page_queue_free_lock) should NOT be held.
4268 */
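/*
 * Illustrative sketch (mirroring the local_freeq pattern used elsewhere in
 * this file, e.g. vm_page_secluded_drain()): callers chain pages through
 * vmp_snext and hand the whole chain over in a single call:
 *
 *	mem->vmp_snext = local_freeq;
 *	local_freeq = mem;
 *	local_freed++;
 *	...
 *	if (local_freed) {
 *		vm_page_free_list(local_freeq, TRUE);
 *	}
 */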
4269 void
4270 vm_page_free_list(
4271 vm_page_t freeq,
4272 boolean_t prepare_object)
4273 {
4274 vm_page_t mem;
4275 vm_page_t nxt;
4276 vm_page_t local_freeq;
4277 int pg_count;
4278
4279 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
4280 LCK_MTX_ASSERT(&vm_page_queue_free_lock, LCK_MTX_ASSERT_NOTOWNED);
4281
4282 while (freeq) {
4283 pg_count = 0;
4284 local_freeq = VM_PAGE_NULL;
4285 mem = freeq;
4286
4287 /*
4288 * break up the processing into smaller chunks so
4289 * that we can 'pipeline' the pages onto the
4290 * free list w/o introducing too much
4291 * contention on the global free queue lock
4292 */
4293 while (mem && pg_count < 64) {
4294 assert((mem->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
4295 (mem->vmp_q_state == VM_PAGE_IS_WIRED));
4296 assert(mem->vmp_specialq.next == 0 &&
4297 mem->vmp_specialq.prev == 0);
4298 /*
4299 * &&
4300 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
4301 */
4302 nxt = mem->vmp_snext;
4303 mem->vmp_snext = NULL;
4304 assert(mem->vmp_pageq.prev == 0);
4305
4306 if (vm_page_free_verify && !mem->vmp_fictitious && !mem->vmp_private) {
4307 ASSERT_PMAP_FREE(mem);
4308 }
4309
4310 if (__improbable(mem->vmp_realtime)) {
4311 vm_page_lock_queues();
4312 if (mem->vmp_realtime) {
4313 mem->vmp_realtime = false;
4314 vm_page_realtime_count--;
4315 }
4316 vm_page_unlock_queues();
4317 }
4318
4319 if (prepare_object == TRUE) {
4320 vm_page_free_prepare_object(mem, TRUE);
4321 }
4322
4323 if (!mem->vmp_fictitious) {
4324 assert(mem->vmp_busy);
4325
4326 if ((mem->vmp_lopage == TRUE || vm_lopage_refill == TRUE) &&
4327 vm_lopage_free_count < vm_lopage_free_limit &&
4328 VM_PAGE_GET_PHYS_PAGE(mem) < max_valid_low_ppnum) {
4329 vm_page_release(mem, FALSE); /* page queues are not locked */
4330 #if CONFIG_SECLUDED_MEMORY
4331 } else if (vm_page_secluded_count < vm_page_secluded_target &&
4332 num_tasks_can_use_secluded_mem == 0) {
4333 vm_page_release(mem,
4334 FALSE); /* page queues are not locked */
4335 #endif /* CONFIG_SECLUDED_MEMORY */
4336 } else {
4337 /*
4338 * IMPORTANT: we can't set the page "free" here
4339 * because that would make the page eligible for
4340 * a physically-contiguous allocation (see
4341 * vm_page_find_contiguous()) right away (we don't
4342 * hold the vm_page_queue_free lock). That would
4343 * cause trouble because the page is not actually
4344 * in the free queue yet...
4345 */
4346 mem->vmp_snext = local_freeq;
4347 local_freeq = mem;
4348 pg_count++;
4349
4350 pmap_clear_noencrypt(VM_PAGE_GET_PHYS_PAGE(mem));
4351 }
4352 } else {
4353 assert(VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_fictitious_addr ||
4354 VM_PAGE_GET_PHYS_PAGE(mem) == vm_page_guard_addr);
4355 vm_page_release_fictitious(mem);
4356 }
4357 mem = nxt;
4358 }
4359 freeq = mem;
4360
4361 if ((mem = local_freeq)) {
4362 unsigned int avail_free_count;
4363 unsigned int need_wakeup = 0;
4364 unsigned int need_priv_wakeup = 0;
4365 #if CONFIG_SECLUDED_MEMORY
4366 unsigned int need_wakeup_secluded = 0;
4367 #endif /* CONFIG_SECLUDED_MEMORY */
4368 event_t priv_wakeup_event, secluded_wakeup_event, normal_wakeup_event;
4369 boolean_t priv_wakeup_all, secluded_wakeup_all, normal_wakeup_all;
4370
4371 vm_free_page_lock_spin();
4372
4373 while (mem) {
4374 int color;
4375
4376 nxt = mem->vmp_snext;
4377
4378 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
4379 assert(mem->vmp_busy);
4380 assert(!mem->vmp_realtime);
4381 mem->vmp_lopage = FALSE;
4382 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
4383
4384 color = VM_PAGE_GET_COLOR(mem);
4385 #if defined(__x86_64__)
4386 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
4387 #else
4388 vm_page_queue_enter(&vm_page_queue_free[color].qhead,
4389 mem, vmp_pageq);
4390 #endif
4391 mem = nxt;
4392 }
4393 vm_pageout_vminfo.vm_page_pages_freed += pg_count;
4394 vm_page_free_count += pg_count;
4395 avail_free_count = vm_page_free_count;
4396
4397 VM_DEBUG_CONSTANT_EVENT(vm_page_release, VM_PAGE_RELEASE, DBG_FUNC_NONE, pg_count, 0, 0, 0);
4398
4399 if (vm_page_free_wanted_privileged > 0 && avail_free_count > 0) {
4400 if (avail_free_count < vm_page_free_wanted_privileged) {
4401 need_priv_wakeup = avail_free_count;
4402 vm_page_free_wanted_privileged -= avail_free_count;
4403 avail_free_count = 0;
4404 } else {
4405 need_priv_wakeup = vm_page_free_wanted_privileged;
4406 avail_free_count -= vm_page_free_wanted_privileged;
4407 vm_page_free_wanted_privileged = 0;
4408 }
4409 }
4410 #if CONFIG_SECLUDED_MEMORY
4411 if (vm_page_free_wanted_secluded > 0 &&
4412 avail_free_count > vm_page_free_reserved) {
4413 unsigned int available_pages;
4414 available_pages = (avail_free_count -
4415 vm_page_free_reserved);
4416 if (available_pages <
4417 vm_page_free_wanted_secluded) {
4418 need_wakeup_secluded = available_pages;
4419 vm_page_free_wanted_secluded -=
4420 available_pages;
4421 avail_free_count -= available_pages;
4422 } else {
4423 need_wakeup_secluded =
4424 vm_page_free_wanted_secluded;
4425 avail_free_count -=
4426 vm_page_free_wanted_secluded;
4427 vm_page_free_wanted_secluded = 0;
4428 }
4429 }
4430 #endif /* CONFIG_SECLUDED_MEMORY */
4431 if (vm_page_free_wanted > 0 && avail_free_count > vm_page_free_reserved) {
4432 unsigned int available_pages;
4433
4434 available_pages = avail_free_count - vm_page_free_reserved;
4435
4436 if (available_pages >= vm_page_free_wanted) {
4437 need_wakeup = vm_page_free_wanted;
4438 vm_page_free_wanted = 0;
4439 } else {
4440 need_wakeup = available_pages;
4441 vm_page_free_wanted -= available_pages;
4442 }
4443 }
4444 vm_free_page_unlock();
4445
4446 priv_wakeup_event = NULL;
4447 secluded_wakeup_event = NULL;
4448 normal_wakeup_event = NULL;
4449
4450 priv_wakeup_all = FALSE;
4451 secluded_wakeup_all = FALSE;
4452 normal_wakeup_all = FALSE;
4453
4454
4455 if (need_priv_wakeup != 0) {
4456 /*
4457 * There shouldn't be that many VM-privileged threads,
4458 * so let's wake them all up, even if we don't quite
4459 * have enough pages to satisfy them all.
4460 */
4461 priv_wakeup_event = (event_t)&vm_page_free_wanted_privileged;
4462 priv_wakeup_all = TRUE;
4463 }
4464 #if CONFIG_SECLUDED_MEMORY
4465 if (need_wakeup_secluded != 0 &&
4466 vm_page_free_wanted_secluded == 0) {
4467 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4468 secluded_wakeup_all = TRUE;
4469 need_wakeup_secluded = 0;
4470 } else {
4471 secluded_wakeup_event = (event_t)&vm_page_free_wanted_secluded;
4472 }
4473 #endif /* CONFIG_SECLUDED_MEMORY */
4474 if (need_wakeup != 0 && vm_page_free_wanted == 0) {
4475 /*
4476 * We don't expect to have any more waiters
4477 * after this, so let's wake them all up at
4478 * once.
4479 */
4480 normal_wakeup_event = (event_t) &vm_page_free_count;
4481 normal_wakeup_all = TRUE;
4482 need_wakeup = 0;
4483 } else {
4484 normal_wakeup_event = (event_t) &vm_page_free_count;
4485 }
4486
4487 if (priv_wakeup_event ||
4488 #if CONFIG_SECLUDED_MEMORY
4489 secluded_wakeup_event ||
4490 #endif /* CONFIG_SECLUDED_MEMORY */
4491 normal_wakeup_event) {
4492 if (vps_dynamic_priority_enabled == TRUE) {
4493 thread_t thread_woken = NULL;
4494
4495 if (priv_wakeup_all == TRUE) {
4496 wakeup_all_with_inheritor(priv_wakeup_event, THREAD_AWAKENED);
4497 }
4498
4499 #if CONFIG_SECLUDED_MEMORY
4500 if (secluded_wakeup_all == TRUE) {
4501 wakeup_all_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED);
4502 }
4503
4504 while (need_wakeup_secluded-- != 0) {
4505 /*
4506 * Wake up one waiter per page we just released.
4507 */
4508 wakeup_one_with_inheritor(secluded_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4509 thread_deallocate(thread_woken);
4510 }
4511 #endif /* CONFIG_SECLUDED_MEMORY */
4512
4513 if (normal_wakeup_all == TRUE) {
4514 wakeup_all_with_inheritor(normal_wakeup_event, THREAD_AWAKENED);
4515 }
4516
4517 while (need_wakeup-- != 0) {
4518 /*
4519 * Wake up one waiter per page we just released.
4520 */
4521 wakeup_one_with_inheritor(normal_wakeup_event, THREAD_AWAKENED, LCK_WAKE_DO_NOT_TRANSFER_PUSH, &thread_woken);
4522 thread_deallocate(thread_woken);
4523 }
4524 } else {
4525 /*
4526 * Non-priority-aware wakeups.
4527 */
4528
4529 if (priv_wakeup_all == TRUE) {
4530 thread_wakeup(priv_wakeup_event);
4531 }
4532
4533 #if CONFIG_SECLUDED_MEMORY
4534 if (secluded_wakeup_all == TRUE) {
4535 thread_wakeup(secluded_wakeup_event);
4536 }
4537
4538 while (need_wakeup_secluded-- != 0) {
4539 /*
4540 * Wake up one waiter per page we just released.
4541 */
4542 thread_wakeup_one(secluded_wakeup_event);
4543 }
4544
4545 #endif /* CONFIG_SECLUDED_MEMORY */
4546 if (normal_wakeup_all == TRUE) {
4547 thread_wakeup(normal_wakeup_event);
4548 }
4549
4550 while (need_wakeup-- != 0) {
4551 /*
4552 * Wake up one waiter per page we just released.
4553 */
4554 thread_wakeup_one(normal_wakeup_event);
4555 }
4556 }
4557 }
4558
4559 VM_CHECK_MEMORYSTATUS;
4560 }
4561 }
4562 }
4563
4564
4565 /*
4566 * vm_page_wire:
4567 *
4568 * Mark this page as wired down by yet
4569 * another map, removing it from paging queues
4570 * as necessary.
4571 *
4572 * The page's object and the page queues must be locked.
4573 */
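/*
 * Illustrative sketch (an assumption, not taken from this file): wiring is
 * normally paired with a later vm_page_unwire() under the same locks.
 * "object", "m" and "tag" are the caller's variables; "tag" is the vm_tag_t
 * used for wired-memory accounting.
 *
 *	vm_object_lock(object);
 *	vm_page_lockspin_queues();
 *	vm_page_wire(m, tag, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 *	...
 *	vm_object_lock(object);
 *	vm_page_lockspin_queues();
 *	vm_page_unwire(m, TRUE);
 *	vm_page_unlock_queues();
 *	vm_object_unlock(object);
 */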
4574
4575
4576 void
4577 vm_page_wire(
4578 vm_page_t mem,
4579 vm_tag_t tag,
4580 boolean_t check_memorystatus)
4581 {
4582 vm_object_t m_object;
4583
4584 m_object = VM_PAGE_OBJECT(mem);
4585
4586 // dbgLog(current_thread(), mem->vmp_offset, m_object, 1); /* (TEST/DEBUG) */
4587
4588 VM_PAGE_CHECK(mem);
4589 if (m_object) {
4590 vm_object_lock_assert_exclusive(m_object);
4591 } else {
4592 /*
4593 * In theory, the page should be in an object before it
4594 * gets wired, since we need to hold the object lock
4595 * to update some fields in the page structure.
4596 * However, some code (i386 pmap, for example) might want
4597 * to wire a page before it gets inserted into an object.
4598 * That's somewhat OK, as long as nobody else can get to
4599 * that page and update it at the same time.
4600 */
4601 }
4602 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4603 if (!VM_PAGE_WIRED(mem)) {
4604 if (mem->vmp_laundry) {
4605 vm_pageout_steal_laundry(mem, TRUE);
4606 }
4607
4608 vm_page_queues_remove(mem, TRUE);
4609
4610 assert(mem->vmp_wire_count == 0);
4611 mem->vmp_q_state = VM_PAGE_IS_WIRED;
4612
4613 if (m_object) {
4614 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4615 VM_OBJECT_WIRED_PAGE_ADD(m_object, mem);
4616 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, tag);
4617
4618 assert(m_object->resident_page_count >=
4619 m_object->wired_page_count);
4620 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4621 assert(vm_page_purgeable_count > 0);
4622 OSAddAtomic(-1, &vm_page_purgeable_count);
4623 OSAddAtomic(1, &vm_page_purgeable_wired_count);
4624 }
4625 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4626 m_object->purgable == VM_PURGABLE_EMPTY) &&
4627 m_object->vo_owner != TASK_NULL) {
4628 task_t owner;
4629 int ledger_idx_volatile;
4630 int ledger_idx_nonvolatile;
4631 int ledger_idx_volatile_compressed;
4632 int ledger_idx_nonvolatile_compressed;
4633 boolean_t do_footprint;
4634
4635 owner = VM_OBJECT_OWNER(m_object);
4636 vm_object_ledger_tag_ledgers(
4637 m_object,
4638 &ledger_idx_volatile,
4639 &ledger_idx_nonvolatile,
4640 &ledger_idx_volatile_compressed,
4641 &ledger_idx_nonvolatile_compressed,
4642 &do_footprint);
4643 /* less volatile bytes */
4644 ledger_debit(owner->ledger,
4645 ledger_idx_volatile,
4646 PAGE_SIZE);
4647 /* more not-quite-volatile bytes */
4648 ledger_credit(owner->ledger,
4649 ledger_idx_nonvolatile,
4650 PAGE_SIZE);
4651 if (do_footprint) {
4652 /* more footprint */
4653 ledger_credit(owner->ledger,
4654 task_ledgers.phys_footprint,
4655 PAGE_SIZE);
4656 }
4657 }
4658 if (m_object->all_reusable) {
4659 /*
4660 * Wired pages are not counted as "re-usable"
4661 * in "all_reusable" VM objects, so nothing
4662 * to do here.
4663 */
4664 } else if (mem->vmp_reusable) {
4665 /*
4666 * This page is not "re-usable" when it's
4667 * wired, so adjust its state and the
4668 * accounting.
4669 */
4670 vm_object_reuse_pages(m_object,
4671 mem->vmp_offset,
4672 mem->vmp_offset + PAGE_SIZE_64,
4673 FALSE);
4674 }
4675 }
4676 assert(!mem->vmp_reusable);
4677
4678 if (!mem->vmp_private && !mem->vmp_fictitious && !mem->vmp_gobbled) {
4679 vm_page_wire_count++;
4680 }
4681 if (mem->vmp_gobbled) {
4682 vm_page_gobble_count--;
4683 }
4684 mem->vmp_gobbled = FALSE;
4685
4686 if (check_memorystatus == TRUE) {
4687 VM_CHECK_MEMORYSTATUS;
4688 }
4689 }
4690 assert(!mem->vmp_gobbled);
4691 assert(mem->vmp_q_state == VM_PAGE_IS_WIRED);
4692 mem->vmp_wire_count++;
4693 if (__improbable(mem->vmp_wire_count == 0)) {
4694 panic("vm_page_wire(%p): wire_count overflow", mem);
4695 }
4696 VM_PAGE_CHECK(mem);
4697 }
4698
4699 /*
4700 * vm_page_unwire:
4701 *
4702 * Release one wiring of this page, potentially
4703 * enabling it to be paged again.
4704 *
4705 * The page's object and the page queues must be locked.
4706 */
4707 void
4708 vm_page_unwire(
4709 vm_page_t mem,
4710 boolean_t queueit)
4711 {
4712 vm_object_t m_object;
4713
4714 m_object = VM_PAGE_OBJECT(mem);
4715
4716 // dbgLog(current_thread(), mem->vmp_offset, m_object, 0); /* (TEST/DEBUG) */
4717
4718 VM_PAGE_CHECK(mem);
4719 assert(VM_PAGE_WIRED(mem));
4720 assert(mem->vmp_wire_count > 0);
4721 assert(!mem->vmp_gobbled);
4722 assert(m_object != VM_OBJECT_NULL);
4723 vm_object_lock_assert_exclusive(m_object);
4724 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4725 if (--mem->vmp_wire_count == 0) {
4726 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
4727
4728 VM_OBJECT_WIRED_PAGE_UPDATE_START(m_object);
4729 VM_OBJECT_WIRED_PAGE_REMOVE(m_object, mem);
4730 VM_OBJECT_WIRED_PAGE_UPDATE_END(m_object, m_object->wire_tag);
4731 if (!mem->vmp_private && !mem->vmp_fictitious) {
4732 vm_page_wire_count--;
4733 }
4734
4735 assert(m_object->resident_page_count >=
4736 m_object->wired_page_count);
4737 if (m_object->purgable == VM_PURGABLE_VOLATILE) {
4738 OSAddAtomic(+1, &vm_page_purgeable_count);
4739 assert(vm_page_purgeable_wired_count > 0);
4740 OSAddAtomic(-1, &vm_page_purgeable_wired_count);
4741 }
4742 if ((m_object->purgable == VM_PURGABLE_VOLATILE ||
4743 m_object->purgable == VM_PURGABLE_EMPTY) &&
4744 m_object->vo_owner != TASK_NULL) {
4745 task_t owner;
4746 int ledger_idx_volatile;
4747 int ledger_idx_nonvolatile;
4748 int ledger_idx_volatile_compressed;
4749 int ledger_idx_nonvolatile_compressed;
4750 boolean_t do_footprint;
4751
4752 owner = VM_OBJECT_OWNER(m_object);
4753 vm_object_ledger_tag_ledgers(
4754 m_object,
4755 &ledger_idx_volatile,
4756 &ledger_idx_nonvolatile,
4757 &ledger_idx_volatile_compressed,
4758 &ledger_idx_nonvolatile_compressed,
4759 &do_footprint);
4760 /* more volatile bytes */
4761 ledger_credit(owner->ledger,
4762 ledger_idx_volatile,
4763 PAGE_SIZE);
4764 /* less not-quite-volatile bytes */
4765 ledger_debit(owner->ledger,
4766 ledger_idx_nonvolatile,
4767 PAGE_SIZE);
4768 if (do_footprint) {
4769 /* less footprint */
4770 ledger_debit(owner->ledger,
4771 task_ledgers.phys_footprint,
4772 PAGE_SIZE);
4773 }
4774 }
4775 assert(m_object != kernel_object);
4776 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
4777
4778 if (queueit == TRUE) {
4779 if (m_object->purgable == VM_PURGABLE_EMPTY) {
4780 vm_page_deactivate(mem);
4781 } else {
4782 vm_page_activate(mem);
4783 }
4784 }
4785
4786 VM_CHECK_MEMORYSTATUS;
4787 }
4788 VM_PAGE_CHECK(mem);
4789 }
4790
4791 /*
4792 * vm_page_deactivate:
4793 *
4794 * Returns the given page to the inactive list,
4795 * indicating that no physical maps have access
4796 * to this page. [Used by the physical mapping system.]
4797 *
4798 * The page queues must be locked.
4799 */
4800 void
4801 vm_page_deactivate(
4802 vm_page_t m)
4803 {
4804 vm_page_deactivate_internal(m, TRUE);
4805 }
4806
4807
4808 void
4809 vm_page_deactivate_internal(
4810 vm_page_t m,
4811 boolean_t clear_hw_reference)
4812 {
4813 vm_object_t m_object;
4814
4815 m_object = VM_PAGE_OBJECT(m);
4816
4817 VM_PAGE_CHECK(m);
4818 assert(m_object != kernel_object);
4819 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4820
4821 // dbgLog(VM_PAGE_GET_PHYS_PAGE(m), vm_page_free_count, vm_page_wire_count, 6); /* (TEST/DEBUG) */
4822 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4823 /*
4824 * This page is no longer very interesting. If it was
4825 * interesting (active or inactive/referenced), then we
4826 * clear the reference bit and (re)enter it in the
4827 * inactive queue. Note wired pages should not have
4828 * their reference bit cleared.
4829 */
4830 assert( !(m->vmp_absent && !m->vmp_unusual));
4831
4832 if (m->vmp_gobbled) { /* can this happen? */
4833 assert( !VM_PAGE_WIRED(m));
4834
4835 if (!m->vmp_private && !m->vmp_fictitious) {
4836 vm_page_wire_count--;
4837 }
4838 vm_page_gobble_count--;
4839 m->vmp_gobbled = FALSE;
4840 }
4841 /*
4842 * if this page is currently on the pageout queue, we can't do the
4843 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4844 * and we can't remove it manually since we would need the object lock
4845 * (which is not required here) to decrement the activity_in_progress
4846 * reference which is held on the object while the page is in the pageout queue...
4847 * just let the normal laundry processing proceed
4848 */
4849 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4850 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4851 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
4852 VM_PAGE_WIRED(m)) {
4853 return;
4854 }
4855 if (!m->vmp_absent && clear_hw_reference == TRUE) {
4856 pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(m));
4857 }
4858
4859 m->vmp_reference = FALSE;
4860 m->vmp_no_cache = FALSE;
4861
4862 if (!VM_PAGE_INACTIVE(m)) {
4863 vm_page_queues_remove(m, FALSE);
4864
4865 if (!VM_DYNAMIC_PAGING_ENABLED() &&
4866 m->vmp_dirty && m_object->internal &&
4867 (m_object->purgable == VM_PURGABLE_DENY ||
4868 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
4869 m_object->purgable == VM_PURGABLE_VOLATILE)) {
4870 vm_page_check_pageable_safe(m);
4871 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
4872 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
4873 vm_page_throttled_count++;
4874 } else {
4875 if (m_object->named && m_object->ref_count == 1) {
4876 vm_page_speculate(m, FALSE);
4877 #if DEVELOPMENT || DEBUG
4878 vm_page_speculative_recreated++;
4879 #endif
4880 } else {
4881 vm_page_enqueue_inactive(m, FALSE);
4882 }
4883 }
4884 }
4885 }
4886
4887 /*
4888 * vm_page_enqueue_cleaned
4889 *
4890 * Put the page on the cleaned queue, mark it cleaned, etc.
4891 * Being on the cleaned queue (and having m->clean_queue set)
4892 * does ** NOT ** guarantee that the page is clean!
4893 *
4894 * Call with the queues lock held.
4895 */
4896
4897 void
4898 vm_page_enqueue_cleaned(vm_page_t m)
4899 {
4900 vm_object_t m_object;
4901
4902 m_object = VM_PAGE_OBJECT(m);
4903
4904 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4905 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4906 assert( !(m->vmp_absent && !m->vmp_unusual));
4907
4908 if (VM_PAGE_WIRED(m)) {
4909 return;
4910 }
4911
4912 if (m->vmp_gobbled) {
4913 if (!m->vmp_private && !m->vmp_fictitious) {
4914 vm_page_wire_count--;
4915 }
4916 vm_page_gobble_count--;
4917 m->vmp_gobbled = FALSE;
4918 }
4919 /*
4920 * if this page is currently on the pageout queue, we can't do the
4921 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4922 * and we can't remove it manually since we would need the object lock
4923 * (which is not required here) to decrement the activity_in_progress
4924 * reference which is held on the object while the page is in the pageout queue...
4925 * just let the normal laundry processing proceed
4926 */
4927 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4928 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
4929 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4930 return;
4931 }
4932 vm_page_queues_remove(m, FALSE);
4933
4934 vm_page_check_pageable_safe(m);
4935 vm_page_queue_enter(&vm_page_queue_cleaned, m, vmp_pageq);
4936 m->vmp_q_state = VM_PAGE_ON_INACTIVE_CLEANED_Q;
4937 vm_page_cleaned_count++;
4938
4939 vm_page_inactive_count++;
4940 if (m_object->internal) {
4941 vm_page_pageable_internal_count++;
4942 } else {
4943 vm_page_pageable_external_count++;
4944 }
4945 vm_page_add_to_specialq(m, TRUE);
4946 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
4947 }
4948
4949 /*
4950 * vm_page_activate:
4951 *
4952 * Put the specified page on the active list (if appropriate).
4953 *
4954 * The page queues must be locked.
4955 */
4956
4957 void
4958 vm_page_activate(
4959 vm_page_t m)
4960 {
4961 vm_object_t m_object;
4962
4963 m_object = VM_PAGE_OBJECT(m);
4964
4965 VM_PAGE_CHECK(m);
4966 #ifdef FIXME_4778297
4967 assert(m_object != kernel_object);
4968 #endif
4969 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
4970 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
4971 assert( !(m->vmp_absent && !m->vmp_unusual));
4972
4973 if (m->vmp_gobbled) {
4974 assert( !VM_PAGE_WIRED(m));
4975 if (!m->vmp_private && !m->vmp_fictitious) {
4976 vm_page_wire_count--;
4977 }
4978 vm_page_gobble_count--;
4979 m->vmp_gobbled = FALSE;
4980 }
4981 /*
4982 * if this page is currently on the pageout queue, we can't do the
4983 * vm_page_queues_remove (which doesn't handle the pageout queue case)
4984 * and we can't remove it manually since we would need the object lock
4985 * (which is not required here) to decrement the activity_in_progress
4986 * reference which is held on the object while the page is in the pageout queue...
4987 * just let the normal laundry processing proceed
4988 */
4989 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
4990 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
4991 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
4992 return;
4993 }
4994
4995 #if DEBUG
4996 if (m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q) {
4997 panic("vm_page_activate: already active");
4998 }
4999 #endif
5000
5001 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
5002 DTRACE_VM2(pgrec, int, 1, (uint64_t *), NULL);
5003 DTRACE_VM2(pgfrec, int, 1, (uint64_t *), NULL);
5004 }
5005
5006 /*
5007 * A freshly activated page should be promoted in the donation queue,
5008 * so we remove it here while preserving its hint; it will be enqueued
5009 * again in vm_page_enqueue_active.
5010 */
5011 vm_page_queues_remove(m, ((m->vmp_on_specialq == VM_PAGE_SPECIAL_Q_DONATE) ? TRUE : FALSE));
5012
5013 if (!VM_PAGE_WIRED(m)) {
5014 vm_page_check_pageable_safe(m);
5015 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5016 m->vmp_dirty && m_object->internal &&
5017 (m_object->purgable == VM_PURGABLE_DENY ||
5018 m_object->purgable == VM_PURGABLE_NONVOLATILE ||
5019 m_object->purgable == VM_PURGABLE_VOLATILE)) {
5020 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
5021 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
5022 vm_page_throttled_count++;
5023 } else {
5024 #if CONFIG_SECLUDED_MEMORY
5025 if (secluded_for_filecache &&
5026 vm_page_secluded_target != 0 &&
5027 num_tasks_can_use_secluded_mem == 0 &&
5028 m_object->eligible_for_secluded &&
5029 !m->vmp_realtime) {
5030 vm_page_queue_enter(&vm_page_queue_secluded, m, vmp_pageq);
5031 m->vmp_q_state = VM_PAGE_ON_SECLUDED_Q;
5032 vm_page_secluded_count++;
5033 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
5034 vm_page_secluded_count_inuse++;
5035 assert(!m_object->internal);
5036 // vm_page_pageable_external_count++;
5037 } else
5038 #endif /* CONFIG_SECLUDED_MEMORY */
5039 vm_page_enqueue_active(m, FALSE);
5040 }
5041 m->vmp_reference = TRUE;
5042 m->vmp_no_cache = FALSE;
5043 }
5044 VM_PAGE_CHECK(m);
5045 }
5046
5047
5048 /*
5049 * vm_page_speculate:
5050 *
5051 * Put the specified page on the speculative list (if appropriate).
5052 *
5053 * The page queues must be locked.
5054 */
5055 void
5056 vm_page_speculate(
5057 vm_page_t m,
5058 boolean_t new)
5059 {
5060 struct vm_speculative_age_q *aq;
5061 vm_object_t m_object;
5062
5063 m_object = VM_PAGE_OBJECT(m);
5064
5065 VM_PAGE_CHECK(m);
5066 vm_page_check_pageable_safe(m);
5067
5068 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5069 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5070 assert( !(m->vmp_absent && !m->vmp_unusual));
5071 assert(m_object->internal == FALSE);
5072
5073 /*
5074 * if this page is currently on the pageout queue, we can't do the
5075 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5076 * and we can't remove it manually since we would need the object lock
5077 * (which is not required here) to decrement the activity_in_progress
5078 * reference which is held on the object while the page is in the pageout queue...
5079 * just let the normal laundry processing proceed
5080 */
5081 if (m->vmp_laundry || m->vmp_private || m->vmp_fictitious ||
5082 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5083 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5084 return;
5085 }
5086
5087 vm_page_queues_remove(m, FALSE);
5088
5089 if (!VM_PAGE_WIRED(m)) {
5090 mach_timespec_t ts;
5091 clock_sec_t sec;
5092 clock_nsec_t nsec;
5093
5094 clock_get_system_nanotime(&sec, &nsec);
5095 ts.tv_sec = (unsigned int) sec;
5096 ts.tv_nsec = nsec;
5097
5098 if (vm_page_speculative_count == 0) {
5099 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5100 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5101
5102 aq = &vm_page_queue_speculative[speculative_age_index];
5103
5104 /*
5105 * set the timer to begin a new group
5106 */
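/*
 * Worked example (illustrative values, not a fixed default): with
 * vm_page_speculative_q_age_ms == 1500, the lines below yield
 * age_ts = { .tv_sec = 1, .tv_nsec = 500 * 1000 * NSEC_PER_USEC },
 * i.e. 1.5 seconds, which is then added to "ts" (the current time)
 * to form the absolute expiry time of this age group.
 */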
5107 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5108 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5109
5110 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5111 } else {
5112 aq = &vm_page_queue_speculative[speculative_age_index];
5113
5114 if (CMP_MACH_TIMESPEC(&ts, &aq->age_ts) >= 0) {
5115 speculative_age_index++;
5116
5117 if (speculative_age_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5118 speculative_age_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5119 }
5120 if (speculative_age_index == speculative_steal_index) {
5121 speculative_steal_index = speculative_age_index + 1;
5122
5123 if (speculative_steal_index > VM_PAGE_MAX_SPECULATIVE_AGE_Q) {
5124 speculative_steal_index = VM_PAGE_MIN_SPECULATIVE_AGE_Q;
5125 }
5126 }
5127 aq = &vm_page_queue_speculative[speculative_age_index];
5128
5129 if (!vm_page_queue_empty(&aq->age_q)) {
5130 vm_page_speculate_ageit(aq);
5131 }
5132
5133 aq->age_ts.tv_sec = vm_pageout_state.vm_page_speculative_q_age_ms / 1000;
5134 aq->age_ts.tv_nsec = (vm_pageout_state.vm_page_speculative_q_age_ms % 1000) * 1000 * NSEC_PER_USEC;
5135
5136 ADD_MACH_TIMESPEC(&aq->age_ts, &ts);
5137 }
5138 }
5139 vm_page_enqueue_tail(&aq->age_q, &m->vmp_pageq);
5140 m->vmp_q_state = VM_PAGE_ON_SPECULATIVE_Q;
5141 vm_page_speculative_count++;
5142 vm_page_pageable_external_count++;
5143
5144 if (new == TRUE) {
5145 vm_object_lock_assert_exclusive(m_object);
5146
5147 m_object->pages_created++;
5148 #if DEVELOPMENT || DEBUG
5149 vm_page_speculative_created++;
5150 #endif
5151 }
5152 }
5153 VM_PAGE_CHECK(m);
5154 }
5155
5156
5157 /*
5158 * move pages from the specified aging bin to
5159 * the speculative bin that pageout_scan claims from
5160 *
5161 * The page queues must be locked.
5162 */
5163 void
5164 vm_page_speculate_ageit(struct vm_speculative_age_q *aq)
5165 {
5166 struct vm_speculative_age_q *sq;
5167 vm_page_t t;
5168
5169 sq = &vm_page_queue_speculative[VM_PAGE_SPECULATIVE_AGED_Q];
5170
5171 if (vm_page_queue_empty(&sq->age_q)) {
5172 sq->age_q.next = aq->age_q.next;
5173 sq->age_q.prev = aq->age_q.prev;
5174
5175 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.next);
5176 t->vmp_pageq.prev = VM_PAGE_PACK_PTR(&sq->age_q);
5177
5178 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5179 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5180 } else {
5181 t = (vm_page_t)VM_PAGE_UNPACK_PTR(sq->age_q.prev);
5182 t->vmp_pageq.next = aq->age_q.next;
5183
5184 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.next);
5185 t->vmp_pageq.prev = sq->age_q.prev;
5186
5187 t = (vm_page_t)VM_PAGE_UNPACK_PTR(aq->age_q.prev);
5188 t->vmp_pageq.next = VM_PAGE_PACK_PTR(&sq->age_q);
5189
5190 sq->age_q.prev = aq->age_q.prev;
5191 }
5192 vm_page_queue_init(&aq->age_q);
5193 }
5194
5195
5196 void
5197 vm_page_lru(
5198 vm_page_t m)
5199 {
5200 VM_PAGE_CHECK(m);
5201 assert(VM_PAGE_OBJECT(m) != kernel_object);
5202 assert(VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr);
5203
5204 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
5205
5206 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q) {
5207 /*
5208 * we don't need to do all the other work that
5209 * vm_page_queues_remove and vm_page_enqueue_inactive
5210 * bring along for the ride
5211 */
5212 assert(!m->vmp_laundry);
5213 assert(!m->vmp_private);
5214
5215 m->vmp_no_cache = FALSE;
5216
5217 vm_page_queue_remove(&vm_page_queue_inactive, m, vmp_pageq);
5218 vm_page_queue_enter(&vm_page_queue_inactive, m, vmp_pageq);
5219
5220 return;
5221 }
5222 /*
5223 * if this page is currently on the pageout queue, we can't do the
5224 * vm_page_queues_remove (which doesn't handle the pageout queue case)
5225 * and we can't remove it manually since we would need the object lock
5226 * (which is not required here) to decrement the activity_in_progress
5227 * reference which is held on the object while the page is in the pageout queue...
5228 * just let the normal laundry processing proceed
5229 */
5230 if (m->vmp_laundry || m->vmp_private ||
5231 (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) ||
5232 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) ||
5233 VM_PAGE_WIRED(m)) {
5234 return;
5235 }
5236
5237 m->vmp_no_cache = FALSE;
5238
5239 vm_page_queues_remove(m, FALSE);
5240
5241 vm_page_enqueue_inactive(m, FALSE);
5242 }
5243
5244
5245 void
5246 vm_page_reactivate_all_throttled(void)
5247 {
5248 vm_page_t first_throttled, last_throttled;
5249 vm_page_t first_active;
5250 vm_page_t m;
5251 int extra_active_count;
5252 int extra_internal_count, extra_external_count;
5253 vm_object_t m_object;
5254
5255 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5256 return;
5257 }
5258
5259 extra_active_count = 0;
5260 extra_internal_count = 0;
5261 extra_external_count = 0;
5262 vm_page_lock_queues();
5263 if (!vm_page_queue_empty(&vm_page_queue_throttled)) {
5264 /*
5265 * Switch "throttled" pages to "active".
5266 */
5267 vm_page_queue_iterate(&vm_page_queue_throttled, m, vmp_pageq) {
5268 VM_PAGE_CHECK(m);
5269 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
5270
5271 m_object = VM_PAGE_OBJECT(m);
5272
5273 extra_active_count++;
5274 if (m_object->internal) {
5275 extra_internal_count++;
5276 } else {
5277 extra_external_count++;
5278 }
5279
5280 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5281 VM_PAGE_CHECK(m);
5282 vm_page_add_to_specialq(m, FALSE);
5283 }
5284
5285 /*
5286 * Transfer the entire throttled queue to the regular LRU page queues.
5287 * We insert it at the head of the active queue, so that these pages
5288 * get re-evaluated by the LRU algorithm first, since they've been
5289 * completely out of it until now.
5290 */
5291 first_throttled = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
5292 last_throttled = (vm_page_t) vm_page_queue_last(&vm_page_queue_throttled);
5293 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5294 if (vm_page_queue_empty(&vm_page_queue_active)) {
5295 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5296 } else {
5297 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_throttled);
5298 }
5299 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_throttled);
5300 first_throttled->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5301 last_throttled->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5302
5303 #if DEBUG
5304 printf("reactivated %d throttled pages\n", vm_page_throttled_count);
5305 #endif
5306 vm_page_queue_init(&vm_page_queue_throttled);
5307 /*
5308 * Adjust the global page counts.
5309 */
5310 vm_page_active_count += extra_active_count;
5311 vm_page_pageable_internal_count += extra_internal_count;
5312 vm_page_pageable_external_count += extra_external_count;
5313 vm_page_throttled_count = 0;
5314 }
5315 assert(vm_page_throttled_count == 0);
5316 assert(vm_page_queue_empty(&vm_page_queue_throttled));
5317 vm_page_unlock_queues();
5318 }
5319
5320
5321 /*
5322 * move pages from the indicated local queue to the global active queue
5323 * it's OK to fail if we're below the hard limit and force == FALSE.
5324 * The nolocks == TRUE case allows this function to be run on
5325 * the hibernate path.
5326 */
5327
5328 void
5329 vm_page_reactivate_local(uint32_t lid, boolean_t force, boolean_t nolocks)
5330 {
5331 struct vpl *lq;
5332 vm_page_t first_local, last_local;
5333 vm_page_t first_active;
5334 vm_page_t m;
5335 uint32_t count = 0;
5336
5337 if (vm_page_local_q == NULL) {
5338 return;
5339 }
5340
5341 lq = zpercpu_get_cpu(vm_page_local_q, lid);
5342
5343 if (nolocks == FALSE) {
5344 if (lq->vpl_count < vm_page_local_q_hard_limit && force == FALSE) {
5345 if (!vm_page_trylockspin_queues()) {
5346 return;
5347 }
5348 } else {
5349 vm_page_lockspin_queues();
5350 }
5351
5352 VPL_LOCK(&lq->vpl_lock);
5353 }
5354 if (lq->vpl_count) {
5355 /*
5356 * Switch "local" pages to "active".
5357 */
5358 assert(!vm_page_queue_empty(&lq->vpl_queue));
5359
5360 vm_page_queue_iterate(&lq->vpl_queue, m, vmp_pageq) {
5361 VM_PAGE_CHECK(m);
5362 vm_page_check_pageable_safe(m);
5363 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q);
5364 assert(!m->vmp_fictitious);
5365
5366 if (m->vmp_local_id != lid) {
5367 panic("vm_page_reactivate_local: found vm_page_t(%p) with wrong cpuid", m);
5368 }
5369
5370 m->vmp_local_id = 0;
5371 m->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
5372 VM_PAGE_CHECK(m);
5373 vm_page_add_to_specialq(m, FALSE);
5374 count++;
5375 }
5376 if (count != lq->vpl_count) {
5377 panic("vm_page_reactivate_local: count = %d, vm_page_local_count = %d", count, lq->vpl_count);
5378 }
5379
5380 /*
5381 * Transfer the entire local queue to the regular LRU page queues.
5382 */
5383 first_local = (vm_page_t) vm_page_queue_first(&lq->vpl_queue);
5384 last_local = (vm_page_t) vm_page_queue_last(&lq->vpl_queue);
5385 first_active = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
5386
5387 if (vm_page_queue_empty(&vm_page_queue_active)) {
5388 vm_page_queue_active.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5389 } else {
5390 first_active->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(last_local);
5391 }
5392 vm_page_queue_active.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_local);
5393 first_local->vmp_pageq.prev = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(&vm_page_queue_active);
5394 last_local->vmp_pageq.next = VM_PAGE_CONVERT_TO_QUEUE_ENTRY(first_active);
5395
5396 vm_page_queue_init(&lq->vpl_queue);
5397 /*
5398 * Adjust the global page counts.
5399 */
5400 vm_page_active_count += lq->vpl_count;
5401 vm_page_pageable_internal_count += lq->vpl_internal_count;
5402 vm_page_pageable_external_count += lq->vpl_external_count;
5403 lq->vpl_count = 0;
5404 lq->vpl_internal_count = 0;
5405 lq->vpl_external_count = 0;
5406 }
5407 assert(vm_page_queue_empty(&lq->vpl_queue));
5408
5409 if (nolocks == FALSE) {
5410 VPL_UNLOCK(&lq->vpl_lock);
5411
5412 vm_page_balance_inactive(count / 4);
5413 vm_page_unlock_queues();
5414 }
5415 }
5416
5417 /*
5418 * vm_page_part_zero_fill:
5419 *
5420 * Zero-fill a part of the page.
5421 */
5422 #define PMAP_ZERO_PART_PAGE_IMPLEMENTED
5423 void
5424 vm_page_part_zero_fill(
5425 vm_page_t m,
5426 vm_offset_t m_pa,
5427 vm_size_t len)
5428 {
5429 #if 0
5430 /*
5431 * we don't hold the page queue lock
5432 * so this check isn't safe to make
5433 */
5434 VM_PAGE_CHECK(m);
5435 #endif
5436
5437 #ifdef PMAP_ZERO_PART_PAGE_IMPLEMENTED
5438 pmap_zero_part_page(VM_PAGE_GET_PHYS_PAGE(m), m_pa, len);
5439 #else
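/*
 * Fallback when pmap can't zero part of a page directly: grab a
 * scratch page, zero it, copy the bytes of 'm' that lie outside
 * [m_pa, m_pa + len) over it, then copy the scratch page back onto 'm'.
 */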
5440 vm_page_t tmp;
5441 while (1) {
5442 tmp = vm_page_grab();
5443 if (tmp == VM_PAGE_NULL) {
5444 vm_page_wait(THREAD_UNINT);
5445 continue;
5446 }
5447 break;
5448 }
5449 vm_page_zero_fill(tmp);
5450 if (m_pa != 0) {
5451 vm_page_part_copy(m, 0, tmp, 0, m_pa);
5452 }
5453 if ((m_pa + len) < PAGE_SIZE) {
5454 vm_page_part_copy(m, m_pa + len, tmp,
5455 m_pa + len, PAGE_SIZE - (m_pa + len));
5456 }
5457 vm_page_copy(tmp, m);
5458 VM_PAGE_FREE(tmp);
5459 #endif
5460 }
5461
5462 /*
5463 * vm_page_zero_fill:
5464 *
5465 * Zero-fill the specified page.
5466 */
5467 void
5468 vm_page_zero_fill(
5469 vm_page_t m)
5470 {
5471 #if 0
5472 /*
5473 * we don't hold the page queue lock
5474 * so this check isn't safe to make
5475 */
5476 VM_PAGE_CHECK(m);
5477 #endif
5478
5479 // dbgTrace(0xAEAEAEAE, VM_PAGE_GET_PHYS_PAGE(m), 0); /* (BRINGUP) */
5480 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
5481 }
5482
5483 /*
5484 * vm_page_part_copy:
5485 *
5486 * copy part of one page to another
5487 */
5488
5489 void
5490 vm_page_part_copy(
5491 vm_page_t src_m,
5492 vm_offset_t src_pa,
5493 vm_page_t dst_m,
5494 vm_offset_t dst_pa,
5495 vm_size_t len)
5496 {
5497 #if 0
5498 /*
5499 * we don't hold the page queue lock
5500 * so this check isn't safe to make
5501 */
5502 VM_PAGE_CHECK(src_m);
5503 VM_PAGE_CHECK(dst_m);
5504 #endif
5505 pmap_copy_part_page(VM_PAGE_GET_PHYS_PAGE(src_m), src_pa,
5506 VM_PAGE_GET_PHYS_PAGE(dst_m), dst_pa, len);
5507 }
5508
5509 /*
5510 * vm_page_copy:
5511 *
5512 * Copy one page to another
5513 */
5514
5515 int vm_page_copy_cs_validations = 0;
5516 int vm_page_copy_cs_tainted = 0;
5517
5518 void
5519 vm_page_copy(
5520 vm_page_t src_m,
5521 vm_page_t dest_m)
5522 {
5523 vm_object_t src_m_object;
5524
5525 src_m_object = VM_PAGE_OBJECT(src_m);
5526
5527 #if 0
5528 /*
5529 * we don't hold the page queue lock
5530 * so this check isn't safe to make
5531 */
5532 VM_PAGE_CHECK(src_m);
5533 VM_PAGE_CHECK(dest_m);
5534 #endif
5535 vm_object_lock_assert_held(src_m_object);
5536
5537 if (src_m_object != VM_OBJECT_NULL &&
5538 src_m_object->code_signed) {
5539 /*
5540 * We're copying a page from a code-signed object.
5541 * Whoever ends up mapping the copy page might care about
5542 * the original page's integrity, so let's validate the
5543 * source page now.
5544 */
5545 vm_page_copy_cs_validations++;
5546 vm_page_validate_cs(src_m, PAGE_SIZE, 0);
5547 #if DEVELOPMENT || DEBUG
5548 DTRACE_VM4(codesigned_copy,
5549 vm_object_t, src_m_object,
5550 vm_object_offset_t, src_m->vmp_offset,
5551 int, src_m->vmp_cs_validated,
5552 int, src_m->vmp_cs_tainted);
5553 #endif /* DEVELOPMENT || DEBUG */
5554 }
5555
5556 /*
5557 * Propagate the cs_tainted bit to the copy page. Do not propagate
5558 * the cs_validated bit.
5559 */
5560 dest_m->vmp_cs_tainted = src_m->vmp_cs_tainted;
5561 dest_m->vmp_cs_nx = src_m->vmp_cs_nx;
5562 if (dest_m->vmp_cs_tainted) {
5563 vm_page_copy_cs_tainted++;
5564 }
5565 dest_m->vmp_error = VMP_ERROR_GET(src_m); /* sliding src_m might have failed... */
5566 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(src_m), VM_PAGE_GET_PHYS_PAGE(dest_m));
5567 }
5568
5569 #if MACH_ASSERT
5570 static void
5571 _vm_page_print(
5572 vm_page_t p)
5573 {
5574 printf("vm_page %p: \n", p);
5575 printf(" pageq: next=%p prev=%p\n",
5576 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.next),
5577 (vm_page_t)VM_PAGE_UNPACK_PTR(p->vmp_pageq.prev));
5578 printf(" listq: next=%p prev=%p\n",
5579 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.next)),
5580 (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_listq.prev)));
5581 printf(" next=%p\n", (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m)));
5582 printf(" object=%p offset=0x%llx\n", VM_PAGE_OBJECT(p), p->vmp_offset);
5583 printf(" wire_count=%u\n", p->vmp_wire_count);
5584 printf(" q_state=%u\n", p->vmp_q_state);
5585
5586 printf(" %slaundry, %sref, %sgobbled, %sprivate\n",
5587 (p->vmp_laundry ? "" : "!"),
5588 (p->vmp_reference ? "" : "!"),
5589 (p->vmp_gobbled ? "" : "!"),
5590 (p->vmp_private ? "" : "!"));
5591 printf(" %sbusy, %swanted, %stabled, %sfictitious, %spmapped, %swpmapped\n",
5592 (p->vmp_busy ? "" : "!"),
5593 (p->vmp_wanted ? "" : "!"),
5594 (p->vmp_tabled ? "" : "!"),
5595 (p->vmp_fictitious ? "" : "!"),
5596 (p->vmp_pmapped ? "" : "!"),
5597 (p->vmp_wpmapped ? "" : "!"));
5598 printf(" %sfree_when_done, %sabsent, %serror, %sdirty, %scleaning, %sprecious, %sclustered\n",
5599 (p->vmp_free_when_done ? "" : "!"),
5600 (p->vmp_absent ? "" : "!"),
5601 (VMP_ERROR_GET(p) ? "" : "!"),
5602 (p->vmp_dirty ? "" : "!"),
5603 (p->vmp_cleaning ? "" : "!"),
5604 (p->vmp_precious ? "" : "!"),
5605 (p->vmp_clustered ? "" : "!"));
5606 printf(" %soverwriting, %srestart, %sunusual\n",
5607 (p->vmp_overwriting ? "" : "!"),
5608 (p->vmp_restart ? "" : "!"),
5609 (p->vmp_unusual ? "" : "!"));
5610 printf(" cs_validated=%d, cs_tainted=%d, cs_nx=%d, %sno_cache\n",
5611 p->vmp_cs_validated,
5612 p->vmp_cs_tainted,
5613 p->vmp_cs_nx,
5614 (p->vmp_no_cache ? "" : "!"));
5615
5616 printf("phys_page=0x%x\n", VM_PAGE_GET_PHYS_PAGE(p));
5617 }
5618
5619 /*
5620 * Check that the list of pages is ordered by
5621 * ascending physical address and has no holes.
5622 */
5623 static int
5624 vm_page_verify_contiguous(
5625 vm_page_t pages,
5626 unsigned int npages)
5627 {
5628 vm_page_t m;
5629 unsigned int page_count;
5630 vm_offset_t prev_addr;
5631
5632 prev_addr = VM_PAGE_GET_PHYS_PAGE(pages);
5633 page_count = 1;
5634 for (m = NEXT_PAGE(pages); m != VM_PAGE_NULL; m = NEXT_PAGE(m)) {
5635 if (VM_PAGE_GET_PHYS_PAGE(m) != prev_addr + 1) {
5636 printf("m %p prev_addr 0x%lx, current addr 0x%x\n",
5637 m, (long)prev_addr, VM_PAGE_GET_PHYS_PAGE(m));
5638 printf("pages %p page_count %d npages %d\n", pages, page_count, npages);
5639 panic("vm_page_verify_contiguous: not contiguous!");
5640 }
5641 prev_addr = VM_PAGE_GET_PHYS_PAGE(m);
5642 ++page_count;
5643 }
5644 if (page_count != npages) {
5645 printf("pages %p actual count 0x%x but requested 0x%x\n",
5646 pages, page_count, npages);
5647 panic("vm_page_verify_contiguous: count error");
5648 }
5649 return 1;
5650 }
5651
5652
5653 /*
5654 * Check the free lists for proper length etc.
5655 */
5656 static boolean_t vm_page_verify_this_free_list_enabled = FALSE;
5657 static unsigned int
5658 vm_page_verify_free_list(
5659 vm_page_queue_head_t *vm_page_queue,
5660 unsigned int color,
5661 vm_page_t look_for_page,
5662 boolean_t expect_page)
5663 {
5664 unsigned int npages;
5665 vm_page_t m;
5666 vm_page_t prev_m;
5667 boolean_t found_page;
5668
5669 if (!vm_page_verify_this_free_list_enabled) {
5670 return 0;
5671 }
5672
5673 found_page = FALSE;
5674 npages = 0;
5675 prev_m = (vm_page_t)((uintptr_t)vm_page_queue);
5676
5677 vm_page_queue_iterate(vm_page_queue, m, vmp_pageq) {
5678 if (m == look_for_page) {
5679 found_page = TRUE;
5680 }
5681 if ((vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev) != prev_m) {
5682 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p corrupted prev ptr %p instead of %p",
5683 color, npages, m, (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.prev), prev_m);
5684 }
5685 if (!m->vmp_busy) {
5686 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p not busy",
5687 color, npages, m);
5688 }
5689 if (color != (unsigned int) -1) {
5690 if (VM_PAGE_GET_COLOR(m) != color) {
5691 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p wrong color %u instead of %u",
5692 color, npages, m, VM_PAGE_GET_COLOR(m), color);
5693 }
5694 if (m->vmp_q_state != VM_PAGE_ON_FREE_Q) {
5695 panic("vm_page_verify_free_list(color=%u, npages=%u): page %p - expecting q_state == VM_PAGE_ON_FREE_Q, found %d",
5696 color, npages, m, m->vmp_q_state);
5697 }
5698 } else {
5699 if (m->vmp_q_state != VM_PAGE_ON_FREE_LOCAL_Q) {
5700 panic("vm_page_verify_free_list(npages=%u): local page %p - expecting q_state == VM_PAGE_ON_FREE_LOCAL_Q, found %d",
5701 npages, m, m->vmp_q_state);
5702 }
5703 }
5704 ++npages;
5705 prev_m = m;
5706 }
5707 if (look_for_page != VM_PAGE_NULL) {
5708 unsigned int other_color;
5709
5710 if (expect_page && !found_page) {
5711 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p not found phys=%u\n",
5712 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5713 _vm_page_print(look_for_page);
5714 for (other_color = 0;
5715 other_color < vm_colors;
5716 other_color++) {
5717 if (other_color == color) {
5718 continue;
5719 }
5720 vm_page_verify_free_list(&vm_page_queue_free[other_color].qhead,
5721 other_color, look_for_page, FALSE);
5722 }
5723 if (color == (unsigned int) -1) {
5724 vm_page_verify_free_list(&vm_lopage_queue_free,
5725 (unsigned int) -1, look_for_page, FALSE);
5726 }
5727 panic("vm_page_verify_free_list(color=%u)", color);
5728 }
5729 if (!expect_page && found_page) {
5730 printf("vm_page_verify_free_list(color=%u, npages=%u): page %p found phys=%u\n",
5731 color, npages, look_for_page, VM_PAGE_GET_PHYS_PAGE(look_for_page));
5732 }
5733 }
5734 return npages;
5735 }
5736
5737 static boolean_t vm_page_verify_all_free_lists_enabled = FALSE;
5738 static void
5739 vm_page_verify_free_lists(void)
5740 {
5741 unsigned int color, npages, nlopages;
5742 boolean_t toggle = TRUE;
5743
5744 if (!vm_page_verify_all_free_lists_enabled) {
5745 return;
5746 }
5747
5748 npages = 0;
5749
5750 vm_free_page_lock();
5751
5752 if (vm_page_verify_this_free_list_enabled == TRUE) {
5753 /*
5754 * This variable has been set globally for extra checking of
5755 * each free list Q. Since we didn't set it, we don't own it
5756 * and we shouldn't toggle it.
5757 */
5758 toggle = FALSE;
5759 }
5760
5761 if (toggle == TRUE) {
5762 vm_page_verify_this_free_list_enabled = TRUE;
5763 }
5764
5765 for (color = 0; color < vm_colors; color++) {
5766 npages += vm_page_verify_free_list(&vm_page_queue_free[color].qhead,
5767 color, VM_PAGE_NULL, FALSE);
5768 }
5769 nlopages = vm_page_verify_free_list(&vm_lopage_queue_free,
5770 (unsigned int) -1,
5771 VM_PAGE_NULL, FALSE);
5772 if (npages != vm_page_free_count || nlopages != vm_lopage_free_count) {
5773 panic("vm_page_verify_free_lists: "
5774 "npages %u free_count %d nlopages %u lo_free_count %u",
5775 npages, vm_page_free_count, nlopages, vm_lopage_free_count);
5776 }
5777
5778 if (toggle == TRUE) {
5779 vm_page_verify_this_free_list_enabled = FALSE;
5780 }
5781
5782 vm_free_page_unlock();
5783 }
5784
5785 #endif /* MACH_ASSERT */
5786
5787
5788 extern boolean_t(*volatile consider_buffer_cache_collect)(int);
5789
5790 /*
5791 * CONTIGUOUS PAGE ALLOCATION
5792 *
5793 * Find a region large enough to contain at least n pages
5794 * of contiguous physical memory.
5795 *
5796 * This is done by traversing the vm_page_t array in a linear fashion
5797 * we assume that the vm_page_t array has the available physical pages in an
5798 * ordered, ascending list... this is currently true of all our implementations
5799 * and must remain so... there can be 'holes' in the array... we also can
5800 * no longer tolerate the vm_page_t's in the list being 'freed' and reclaimed
5801 * which used to happen via 'vm_page_convert'... that function was no longer
5802 * being called and was removed...
5803 *
5804 * The basic flow consists of stabilizing some of the interesting state of
5805 * a vm_page_t behind the vm_page_queue and vm_page_free locks... we start our
5806 * sweep at the beginning of the array looking for pages that meet our criteria
5807 * for a 'stealable' page... currently we are pretty conservative... if the page
5808 * meets these criteria and is physically contiguous to the previous page in the 'run'
5809 * we keep developing it. If we hit a page that doesn't fit, we reset our state
5810 * and start to develop a new run... if at this point we've already considered
5811 * at least MAX_CONSIDERED_BEFORE_YIELD pages, we'll drop the 2 locks we hold,
5812 * and mutex_pause (which will yield the processor), to keep the latency low w/r
5813 * to other threads trying to acquire free pages (or move pages from q to q),
5814 * and then continue from the spot we left off... we only make 1 pass through the
5815 * array. Once we have a 'run' that is long enough, we'll go into the loop
5816 * which steals the pages from the queues they're currently on... pages on the free
5817 * queue can be stolen directly... pages that are on any of the other queues
5818 * must be removed from the object they are tabled on... this requires taking the
5819 * object lock... we do this as a 'try' to prevent deadlocks... if the 'try' fails
5820 * or if the state of the page behind the vm_object lock is no longer viable, we'll
5821 * dump the pages we've currently stolen back to the free list, and pick up our
5822 * scan from the point where we aborted the 'current' run.
5823 *
5824 *
5825 * Requirements:
5826 * - neither vm_page_queue nor vm_free_list lock can be held on entry
5827 *
5828 * Returns a pointer to a list of gobbled/wired pages or VM_PAGE_NULL.
5829 *
5830 * Algorithm:
5831 */
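/*
 * In outline (an illustrative sketch only; the real loop below carries
 * considerably more state):
 *
 *	take the vm_page_queue and vm_page_free locks;
 *	for (page_idx = resume cursor; page_idx < vm_pages_count; page_idx++)
 *		if vm_pages[page_idx] is stealable and physically contiguous
 *		    with the current run, extend the run;
 *		else RESET_STATE_OF_RUN();
 *		if the run reaches contig_pages, steal it in two passes;
 *		drop the locks and mutex_pause() after every
 *		    MAX_CONSIDERED_BEFORE_YIELD pages considered;
 */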
5832
5833 #define MAX_CONSIDERED_BEFORE_YIELD 1000
5834
5835
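/*
 * Forget any partially-built run: invalidate the previous contiguous
 * physical address and starting page number, and clear the per-run
 * page / free / substitution counters.
 */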
5836 #define RESET_STATE_OF_RUN() \
5837 MACRO_BEGIN \
5838 prevcontaddr = -2; \
5839 start_pnum = -1; \
5840 free_considered = 0; \
5841 substitute_needed = 0; \
5842 npages = 0; \
5843 MACRO_END
5844
5845 /*
5846 * Can we steal in-use (i.e. not free) pages when searching for
5847 * physically-contiguous pages ?
5848 */
5849 #define VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL 1
5850
5851 static unsigned int vm_page_find_contiguous_last_idx = 0, vm_page_lomem_find_contiguous_last_idx = 0;
5852 #if DEBUG
5853 int vm_page_find_contig_debug = 0;
5854 #endif
5855
5856 static vm_page_t
5857 vm_page_find_contiguous(
5858 unsigned int contig_pages,
5859 ppnum_t max_pnum,
5860 ppnum_t pnum_mask,
5861 boolean_t wire,
5862 int flags)
5863 {
5864 vm_page_t m = NULL;
5865 ppnum_t prevcontaddr = 0;
5866 ppnum_t start_pnum = 0;
5867 unsigned int npages = 0, considered = 0, scanned = 0;
5868 unsigned int page_idx = 0, start_idx = 0, last_idx = 0, orig_last_idx = 0;
5869 unsigned int idx_last_contig_page_found = 0;
5870 int free_considered = 0, free_available = 0;
5871 int substitute_needed = 0;
5872 int zone_gc_called = 0;
5873 boolean_t wrapped;
5874 kern_return_t kr;
5875 #if DEBUG
5876 clock_sec_t tv_start_sec = 0, tv_end_sec = 0;
5877 clock_usec_t tv_start_usec = 0, tv_end_usec = 0;
5878 #endif
5879
5880 int yielded = 0;
5881 int dumped_run = 0;
5882 int stolen_pages = 0;
5883 int compressed_pages = 0;
5884
5885
5886 if (contig_pages == 0) {
5887 return VM_PAGE_NULL;
5888 }
5889
5890 full_scan_again:
5891
5892 #if MACH_ASSERT
5893 vm_page_verify_free_lists();
5894 #endif
5895 #if DEBUG
5896 clock_get_system_microtime(&tv_start_sec, &tv_start_usec);
5897 #endif
5898 PAGE_REPLACEMENT_ALLOWED(TRUE);
5899
5900 /*
5901 * If there are still delayed pages, try to free up some that match.
5902 */
5903 if (__improbable(vm_delayed_count != 0 && contig_pages != 0)) {
5904 vm_free_delayed_pages_contig(contig_pages, max_pnum, pnum_mask);
5905 }
5906
5907 vm_page_lock_queues();
5908 vm_free_page_lock();
5909
5910 RESET_STATE_OF_RUN();
5911
5912 scanned = 0;
5913 considered = 0;
5914 free_available = vm_page_free_count - vm_page_free_reserved;
5915
5916 wrapped = FALSE;
5917
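/*
 * Resume the scan where the last successful search left off.
 * KMA_LOMEM searches keep their own cursor since they are confined
 * to the low physical range.
 */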
5918 if (flags & KMA_LOMEM) {
5919 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx;
5920 } else {
5921 idx_last_contig_page_found = vm_page_find_contiguous_last_idx;
5922 }
5923
5924 orig_last_idx = idx_last_contig_page_found;
5925 last_idx = orig_last_idx;
5926
5927 for (page_idx = last_idx, start_idx = last_idx;
5928 npages < contig_pages && page_idx < vm_pages_count;
5929 page_idx++) {
5930 retry:
5931 if (wrapped &&
5932 npages == 0 &&
5933 page_idx >= orig_last_idx) {
5934 /*
5935 * We're back where we started and we haven't
5936 * found any suitable contiguous range. Let's
5937 * give up.
5938 */
5939 break;
5940 }
5941 scanned++;
5942 m = &vm_pages[page_idx];
5943
5944 assert(!m->vmp_fictitious);
5945 assert(!m->vmp_private);
5946
5947 if (max_pnum && VM_PAGE_GET_PHYS_PAGE(m) > max_pnum) {
5948 /* no more low pages... */
5949 break;
5950 }
5951 if (!npages & ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0)) {
5952 /*
5953 * not aligned
5954 */
5955 RESET_STATE_OF_RUN();
5956 } else if (VM_PAGE_WIRED(m) || m->vmp_gobbled ||
5957 m->vmp_laundry || m->vmp_wanted ||
5958 m->vmp_cleaning || m->vmp_overwriting || m->vmp_free_when_done) {
5959 /*
5960 * page is in a transient state
5961 * or a state we don't want to deal
5962 * with, so don't consider it which
5963 * means starting a new run
5964 */
5965 RESET_STATE_OF_RUN();
5966 } else if ((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
5967 (m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q) ||
5968 (m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q) ||
5969 (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
5970 /*
5971 * page needs to be on one of our queues (other than the pageout or special free queues)
5972 * or it needs to belong to the compressor pool (which is now indicated
5973 * by vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR and falls out
5974 * from the check for VM_PAGE_NOT_ON_Q)
5975 * in order for it to be stable behind the
5976 * locks we hold at this point...
5977 * if not, don't consider it which
5978 * means starting a new run
5979 */
5980 RESET_STATE_OF_RUN();
5981 } else if ((m->vmp_q_state != VM_PAGE_ON_FREE_Q) && (!m->vmp_tabled || m->vmp_busy)) {
5982 /*
5983 * pages on the free list are always 'busy'
5984 * so we couldn't test for 'busy' in the check
5985 * for the transient states... pages that are
5986 * 'free' are never 'tabled', so we also couldn't
5987 * test for 'tabled'. So we check here to make
5988 * sure that a non-free page is not busy and is
5989 * tabled on an object...
5990 * if not, don't consider it which
5991 * means starting a new run
5992 */
5993 RESET_STATE_OF_RUN();
5994 } else {
5995 if (VM_PAGE_GET_PHYS_PAGE(m) != prevcontaddr + 1) {
5996 if ((VM_PAGE_GET_PHYS_PAGE(m) & pnum_mask) != 0) {
5997 RESET_STATE_OF_RUN();
5998 goto did_consider;
5999 } else {
6000 npages = 1;
6001 start_idx = page_idx;
6002 start_pnum = VM_PAGE_GET_PHYS_PAGE(m);
6003 }
6004 } else {
6005 npages++;
6006 }
6007 prevcontaddr = VM_PAGE_GET_PHYS_PAGE(m);
6008
6009 VM_PAGE_CHECK(m);
6010 if (m->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6011 free_considered++;
6012 } else {
6013 /*
6014 * This page is not free.
6015 * If we can't steal used pages,
6016 * we have to give up this run
6017 * and keep looking.
6018 * Otherwise, we might need to
6019 * move the contents of this page
6020 * into a substitute page.
6021 */
6022 #if VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6023 if (m->vmp_pmapped || m->vmp_dirty || m->vmp_precious) {
6024 substitute_needed++;
6025 }
6026 #else
6027 RESET_STATE_OF_RUN();
6028 #endif
6029 }
6030
6031 if ((free_considered + substitute_needed) > free_available) {
6032 /*
6033 * if we let this run continue
6034 * we will end up dropping the vm_page_free_count
6035 * below the reserve limit... we need to abort
6036 * this run, but we can at least re-consider this
6037 * page... thus the jump back to 'retry'
6038 */
6039 RESET_STATE_OF_RUN();
6040
6041 if (free_available && considered <= MAX_CONSIDERED_BEFORE_YIELD) {
6042 considered++;
6043 goto retry;
6044 }
6045 /*
6046 * free_available == 0
6047 * so can't consider any free pages... if
6048 * we went to retry in this case, we'd
6049 * get stuck looking at the same page
6050 * w/o making any forward progress
6051 * we also want to take this path if we've already
6052 * reached our limit that controls the lock latency
6053 */
6054 }
6055 }
6056 did_consider:
6057 if (considered > MAX_CONSIDERED_BEFORE_YIELD && npages <= 1) {
6058 PAGE_REPLACEMENT_ALLOWED(FALSE);
6059
6060 vm_free_page_unlock();
6061 vm_page_unlock_queues();
6062
6063 mutex_pause(0);
6064
6065 PAGE_REPLACEMENT_ALLOWED(TRUE);
6066
6067 vm_page_lock_queues();
6068 vm_free_page_lock();
6069
6070 RESET_STATE_OF_RUN();
6071 /*
6072 * reset our free page limit since we
6073 * dropped the lock protecting the vm_page_free_queue
6074 */
6075 free_available = vm_page_free_count - vm_page_free_reserved;
6076 considered = 0;
6077
6078 yielded++;
6079
6080 goto retry;
6081 }
6082 considered++;
6083 }
6084 m = VM_PAGE_NULL;
6085
6086 if (npages != contig_pages) {
6087 if (!wrapped) {
6088 /*
6089 * We didn't find a contiguous range but we didn't
6090 * start from the very first page.
6091 * Start again from the very first page.
6092 */
6093 RESET_STATE_OF_RUN();
6094 if (flags & KMA_LOMEM) {
6095 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = 0;
6096 } else {
6097 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = 0;
6098 }
6099 last_idx = 0;
6100 page_idx = last_idx;
6101 wrapped = TRUE;
6102 goto retry;
6103 }
6104 vm_free_page_unlock();
6105 } else {
6106 vm_page_t m1;
6107 vm_page_t m2;
6108 unsigned int cur_idx;
6109 unsigned int tmp_start_idx;
6110 vm_object_t locked_object = VM_OBJECT_NULL;
6111 boolean_t abort_run = FALSE;
6112
6113 assert(page_idx - start_idx == contig_pages);
6114
6115 tmp_start_idx = start_idx;
6116
6117 /*
6118 * first pass through to pull the free pages
6119 * off of the free queue so that in case we
6120 * need substitute pages, we won't grab any
6121 * of the free pages in the run... we'll clear
6122 * the 'free' bit in the 2nd pass, and even in
6123 * an abort_run case, we'll collect all of the
6124 * free pages in this run and return them to the free list
6125 */
6126 while (start_idx < page_idx) {
6127 m1 = &vm_pages[start_idx++];
6128
6129 #if !VM_PAGE_FIND_CONTIGUOUS_CAN_STEAL
6130 assert(m1->vmp_q_state == VM_PAGE_ON_FREE_Q);
6131 #endif
6132
6133 if (m1->vmp_q_state == VM_PAGE_ON_FREE_Q) {
6134 unsigned int color;
6135
6136 color = VM_PAGE_GET_COLOR(m1);
6137 #if MACH_ASSERT
6138 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, m1, TRUE);
6139 #endif
6140 vm_page_queue_remove(&vm_page_queue_free[color].qhead, m1, vmp_pageq);
6141
6142 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6143 #if MACH_ASSERT
6144 vm_page_verify_free_list(&vm_page_queue_free[color].qhead, color, VM_PAGE_NULL, FALSE);
6145 #endif
6146 /*
6147 * Clear the "free" bit so that this page
6148 * does not get considered for another
6149 * concurrent physically-contiguous allocation.
6150 */
6151 m1->vmp_q_state = VM_PAGE_NOT_ON_Q;
6152 assert(m1->vmp_busy);
6153
6154 vm_page_free_count--;
6155 }
6156 }
6157 if (flags & KMA_LOMEM) {
6158 vm_page_lomem_find_contiguous_last_idx = page_idx;
6159 } else {
6160 vm_page_find_contiguous_last_idx = page_idx;
6161 }
6162
6163 /*
6164 * we can drop the free queue lock at this point since
6165 * we've pulled any 'free' candidates off of the list
6166 * we need it dropped so that we can do a vm_page_grab
6167 * when substituting for pmapped/dirty pages
6168 */
6169 vm_free_page_unlock();
6170
6171 start_idx = tmp_start_idx;
6172 cur_idx = page_idx - 1;
6173
6174 while (start_idx++ < page_idx) {
6175 /*
6176 * must go through the list from back to front
6177 * so that the page list is created in the
6178 * correct order - low -> high phys addresses
6179 */
6180 m1 = &vm_pages[cur_idx--];
6181
6182 if (m1->vmp_object == 0) {
6183 /*
6184 * page has already been removed from
6185 * the free list in the 1st pass
6186 */
6187 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6188 assert(m1->vmp_offset == (vm_object_offset_t) -1);
6189 assert(m1->vmp_busy);
6190 assert(!m1->vmp_wanted);
6191 assert(!m1->vmp_laundry);
6192 } else {
6193 vm_object_t object;
6194 int refmod;
6195 boolean_t disconnected, reusable;
6196
6197 if (abort_run == TRUE) {
6198 continue;
6199 }
6200
6201 assert(m1->vmp_q_state != VM_PAGE_NOT_ON_Q);
6202
6203 object = VM_PAGE_OBJECT(m1);
6204
6205 if (object != locked_object) {
6206 if (locked_object) {
6207 vm_object_unlock(locked_object);
6208 locked_object = VM_OBJECT_NULL;
6209 }
6210 if (vm_object_lock_try(object)) {
6211 locked_object = object;
6212 }
6213 }
6214 if (locked_object == VM_OBJECT_NULL ||
6215 (VM_PAGE_WIRED(m1) || m1->vmp_gobbled ||
6216 m1->vmp_laundry || m1->vmp_wanted ||
6217 m1->vmp_cleaning || m1->vmp_overwriting || m1->vmp_free_when_done || m1->vmp_busy) ||
6218 (m1->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)) {
6219 if (locked_object) {
6220 vm_object_unlock(locked_object);
6221 locked_object = VM_OBJECT_NULL;
6222 }
6223 tmp_start_idx = cur_idx;
6224 abort_run = TRUE;
6225 continue;
6226 }
6227
6228 disconnected = FALSE;
6229 reusable = FALSE;
6230
6231 if ((m1->vmp_reusable ||
6232 object->all_reusable) &&
6233 (m1->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q) &&
6234 !m1->vmp_dirty &&
6235 !m1->vmp_reference) {
6236 /* reusable page... */
6237 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6238 disconnected = TRUE;
6239 if (refmod == 0) {
6240 /*
6241 * ... not reused: can steal
6242 * without relocating contents.
6243 */
6244 reusable = TRUE;
6245 }
6246 }
6247
6248 if ((m1->vmp_pmapped &&
6249 !reusable) ||
6250 m1->vmp_dirty ||
6251 m1->vmp_precious) {
6252 vm_object_offset_t offset;
6253
6254 m2 = vm_page_grab_options(VM_PAGE_GRAB_Q_LOCK_HELD);
6255
6256 if (m2 == VM_PAGE_NULL) {
6257 if (locked_object) {
6258 vm_object_unlock(locked_object);
6259 locked_object = VM_OBJECT_NULL;
6260 }
6261 tmp_start_idx = cur_idx;
6262 abort_run = TRUE;
6263 continue;
6264 }
6265 if (!disconnected) {
6266 if (m1->vmp_pmapped) {
6267 refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m1));
6268 } else {
6269 refmod = 0;
6270 }
6271 }
6272
6273 /* copy the page's contents */
6274 pmap_copy_page(VM_PAGE_GET_PHYS_PAGE(m1), VM_PAGE_GET_PHYS_PAGE(m2));
6275 /* copy the page's state */
6276 assert(!VM_PAGE_WIRED(m1));
6277 assert(m1->vmp_q_state != VM_PAGE_ON_FREE_Q);
6278 assert(m1->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q);
6279 assert(!m1->vmp_laundry);
6280 m2->vmp_reference = m1->vmp_reference;
6281 assert(!m1->vmp_gobbled);
6282 assert(!m1->vmp_private);
6283 m2->vmp_no_cache = m1->vmp_no_cache;
6284 m2->vmp_xpmapped = 0;
6285 assert(!m1->vmp_busy);
6286 assert(!m1->vmp_wanted);
6287 assert(!m1->vmp_fictitious);
6288 m2->vmp_pmapped = m1->vmp_pmapped; /* should flush cache ? */
6289 m2->vmp_wpmapped = m1->vmp_wpmapped;
6290 assert(!m1->vmp_free_when_done);
6291 m2->vmp_absent = m1->vmp_absent;
6292 m2->vmp_error = VMP_ERROR_GET(m1);
6293 m2->vmp_dirty = m1->vmp_dirty;
6294 assert(!m1->vmp_cleaning);
6295 m2->vmp_precious = m1->vmp_precious;
6296 m2->vmp_clustered = m1->vmp_clustered;
6297 assert(!m1->vmp_overwriting);
6298 m2->vmp_restart = m1->vmp_restart;
6299 m2->vmp_unusual = m1->vmp_unusual;
6300 m2->vmp_cs_validated = m1->vmp_cs_validated;
6301 m2->vmp_cs_tainted = m1->vmp_cs_tainted;
6302 m2->vmp_cs_nx = m1->vmp_cs_nx;
6303
6304 m2->vmp_realtime = m1->vmp_realtime;
6305 m1->vmp_realtime = false;
6306
6307 /*
6308 * If m1 had really been reusable,
6309 * we would have just stolen it, so
6310 * let's not propagate its "reusable"
6311 * bit and assert that m2 is not
6312 * marked as "reusable".
6313 */
6314 // m2->vmp_reusable = m1->vmp_reusable;
6315 assert(!m2->vmp_reusable);
6316
6317 // assert(!m1->vmp_lopage);
6318
6319 if (m1->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6320 m2->vmp_q_state = VM_PAGE_USED_BY_COMPRESSOR;
6321 /*
6322 * We just grabbed m2 up above and so it isn't
6323 * going to be on any special Q as yet and so
6324 * we don't need to 'remove' it from the special
6325 * queues. Just resetting the state should be enough.
6326 */
6327 m2->vmp_on_specialq = VM_PAGE_SPECIAL_Q_EMPTY;
6328 }
6329
6330 /*
6331 * page may need to be flushed if
6332 * it is marshalled into a UPL
6333 * that is going to be used by a device
6334 * that doesn't support coherency
6335 */
6336 m2->vmp_written_by_kernel = TRUE;
6337
6338 /*
6339 * make sure we clear the ref/mod state
6340 * from the pmap layer... else we risk
6341 * inheriting state from the last time
6342 * this page was used...
6343 */
6344 pmap_clear_refmod(VM_PAGE_GET_PHYS_PAGE(m2), VM_MEM_MODIFIED | VM_MEM_REFERENCED);
6345
6346 if (refmod & VM_MEM_REFERENCED) {
6347 m2->vmp_reference = TRUE;
6348 }
6349 if (refmod & VM_MEM_MODIFIED) {
6350 SET_PAGE_DIRTY(m2, TRUE);
6351 }
6352 offset = m1->vmp_offset;
6353
6354 /*
6355 * completely cleans up the state
6356 * of the page so that it is ready
6357 * to be put onto the free list, or
6358 * for this purpose it looks like it
6359 * just came off of the free list
6360 */
6361 vm_page_free_prepare(m1);
6362
6363 /*
6364 * now put the substitute page
6365 * on the object
6366 */
6367 vm_page_insert_internal(m2, locked_object, offset, VM_KERN_MEMORY_NONE, TRUE, TRUE, FALSE, FALSE, NULL);
6368
6369 if (m2->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
6370 m2->vmp_pmapped = TRUE;
6371 m2->vmp_wpmapped = TRUE;
6372
6373 PMAP_ENTER(kernel_pmap, (vm_map_offset_t)m2->vmp_offset, m2,
6374 VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE, kr);
6375
6376 assert(kr == KERN_SUCCESS);
6377
6378 compressed_pages++;
6379 } else {
6380 if (m2->vmp_reference) {
6381 vm_page_activate(m2);
6382 } else {
6383 vm_page_deactivate(m2);
6384 }
6385 }
6386 PAGE_WAKEUP_DONE(m2);
6387 } else {
6388 assert(m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
6389
6390 /*
6391 * completely cleans up the state
6392 * of the page so that it is ready
6393 * to be put onto the free list, or
6394 * for this purpose it looks like it
6395 * just came off of the free list
6396 */
6397 vm_page_free_prepare(m1);
6398 }
6399
6400 stolen_pages++;
6401 }
6402 if (m1->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR) {
6403 /*
6404 * The Q state is preserved on m1 because vm_page_queues_remove doesn't
6405 * change it for pages marked as used-by-compressor.
6406 */
6407 vm_page_assign_special_state(m1, VM_PAGE_SPECIAL_Q_BG);
6408 }
6409 VM_PAGE_ZERO_PAGEQ_ENTRY(m1);
6410 m1->vmp_snext = m;
6411 m = m1;
6412 }
6413 if (locked_object) {
6414 vm_object_unlock(locked_object);
6415 locked_object = VM_OBJECT_NULL;
6416 }
6417
6418 if (abort_run == TRUE) {
6419 /*
6420 * want the index of the last
6421 * page in this run that was
6422 * successfully 'stolen', so back
6423 * it up 1 for the auto-decrement on use
6424 * and 1 more to bump back over this page
6425 */
6426 page_idx = tmp_start_idx + 2;
6427 if (page_idx >= vm_pages_count) {
6428 if (wrapped) {
6429 if (m != VM_PAGE_NULL) {
6430 vm_page_unlock_queues();
6431 vm_page_free_list(m, FALSE);
6432 vm_page_lock_queues();
6433 m = VM_PAGE_NULL;
6434 }
6435 dumped_run++;
6436 goto done_scanning;
6437 }
6438 page_idx = last_idx = 0;
6439 wrapped = TRUE;
6440 }
6441 abort_run = FALSE;
6442
6443 /*
6444 * The run was aborted: reset the run state and resume the
6445 * scan just past the aborted run (we already wrapped back to
6446 * the first page above if the index ran off the end).
6447 */
6448 RESET_STATE_OF_RUN();
6449
6450 if (flags & KMA_LOMEM) {
6451 idx_last_contig_page_found = vm_page_lomem_find_contiguous_last_idx = page_idx;
6452 } else {
6453 idx_last_contig_page_found = vm_page_find_contiguous_last_idx = page_idx;
6454 }
6455
6456 last_idx = page_idx;
6457
6458 if (m != VM_PAGE_NULL) {
6459 vm_page_unlock_queues();
6460 vm_page_free_list(m, FALSE);
6461 vm_page_lock_queues();
6462 m = VM_PAGE_NULL;
6463 }
6464 dumped_run++;
6465
6466 vm_free_page_lock();
6467 /*
6468 * reset our free page limit since we
6469 * dropped the lock protecting the vm_page_free_queue
6470 */
6471 free_available = vm_page_free_count - vm_page_free_reserved;
6472 goto retry;
6473 }
6474
6475 for (m1 = m; m1 != VM_PAGE_NULL; m1 = NEXT_PAGE(m1)) {
6476 assert(m1->vmp_q_state == VM_PAGE_NOT_ON_Q);
6477 assert(m1->vmp_wire_count == 0);
6478
6479 if (wire == TRUE) {
6480 m1->vmp_wire_count++;
6481 m1->vmp_q_state = VM_PAGE_IS_WIRED;
6482 } else {
6483 m1->vmp_gobbled = TRUE;
6484 }
6485 }
6486 if (wire == FALSE) {
6487 vm_page_gobble_count += npages;
6488 }
6489
6490 /*
6491 * gobbled pages are also counted as wired pages
6492 */
6493 vm_page_wire_count += npages;
6494
6495 assert(vm_page_verify_contiguous(m, npages));
6496 }
6497 done_scanning:
6498 PAGE_REPLACEMENT_ALLOWED(FALSE);
6499
6500 vm_page_unlock_queues();
6501
6502 #if DEBUG
6503 clock_get_system_microtime(&tv_end_sec, &tv_end_usec);
6504
6505 tv_end_sec -= tv_start_sec;
6506 if (tv_end_usec < tv_start_usec) {
6507 tv_end_sec--;
6508 tv_end_usec += 1000000;
6509 }
6510 tv_end_usec -= tv_start_usec;
6511 if (tv_end_usec >= 1000000) {
6512 tv_end_sec++;
6513 tv_end_usec -= 1000000;
6514 }
6515 if (vm_page_find_contig_debug) {
6516 printf("%s(num=%d,low=%d): found %d pages at 0x%llx in %ld.%06ds... started at %d... scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages\n",
6517 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6518 (long)tv_end_sec, tv_end_usec, orig_last_idx,
6519 scanned, yielded, dumped_run, stolen_pages, compressed_pages);
6520 }
6521
6522 #endif
6523 #if MACH_ASSERT
6524 vm_page_verify_free_lists();
6525 #endif
6526 if (m == NULL && zone_gc_called < 2) {
6527 printf("%s(num=%d,low=%d): found %d pages at 0x%llx...scanned %d pages... yielded %d times... dumped run %d times... stole %d pages... stole %d compressed pages... wired count is %d\n",
6528 __func__, contig_pages, max_pnum, npages, (vm_object_offset_t)start_pnum << PAGE_SHIFT,
6529 scanned, yielded, dumped_run, stolen_pages, compressed_pages, vm_page_wire_count);
6530
6531 if (consider_buffer_cache_collect != NULL) {
6532 (void)(*consider_buffer_cache_collect)(1);
6533 }
6534
6535 zone_gc(zone_gc_called ? ZONE_GC_DRAIN : ZONE_GC_TRIM);
6536
6537 zone_gc_called++;
6538
6539 printf("vm_page_find_contiguous: zone_gc called... wired count is %d\n", vm_page_wire_count);
6540 goto full_scan_again;
6541 }
6542
6543 return m;
6544 }
6545
6546 /*
6547 * Allocate a list of contiguous, wired pages.
6548 */
6549 kern_return_t
6550 cpm_allocate(
6551 vm_size_t size,
6552 vm_page_t *list,
6553 ppnum_t max_pnum,
6554 ppnum_t pnum_mask,
6555 boolean_t wire,
6556 int flags)
6557 {
6558 vm_page_t pages;
6559 unsigned int npages;
6560
6561 if (size % PAGE_SIZE != 0) {
6562 return KERN_INVALID_ARGUMENT;
6563 }
6564
6565 npages = (unsigned int) (size / PAGE_SIZE);
6566 if (npages != size / PAGE_SIZE) {
6567 /* 32-bit overflow */
6568 return KERN_INVALID_ARGUMENT;
6569 }
6570
6571 /*
6572 * Obtain a pointer to a subset of the free
6573 * list large enough to satisfy the request;
6574 * the region will be physically contiguous.
6575 */
6576 pages = vm_page_find_contiguous(npages, max_pnum, pnum_mask, wire, flags);
6577
6578 if (pages == VM_PAGE_NULL) {
6579 return KERN_NO_SPACE;
6580 }
6581 /*
6582 * determine need for wakeups
6583 */
6584 if (vm_page_free_count < vm_page_free_min) {
6585 vm_free_page_lock();
6586 if (vm_pageout_running == FALSE) {
6587 vm_free_page_unlock();
6588 thread_wakeup((event_t) &vm_page_free_wanted);
6589 } else {
6590 vm_free_page_unlock();
6591 }
6592 }
6593
6594 VM_CHECK_MEMORYSTATUS;
6595
6596 /*
6597 * The CPM pages should now be available and
6598 * ordered by ascending physical address.
6599 */
6600 assert(vm_page_verify_contiguous(pages, npages));
6601
6602 *list = pages;
6603 return KERN_SUCCESS;
6604 }
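/*
 * Illustrative use only (the caller and sizes are hypothetical):
 * allocate 16 physically contiguous, wired pages and walk the result.
 *
 *	vm_page_t pages;
 *	kern_return_t kr;
 *
 *	kr = cpm_allocate(16 * PAGE_SIZE, &pages, 0, 0, TRUE, 0);
 *	if (kr == KERN_SUCCESS) {
 *		for (vm_page_t p = pages; p != VM_PAGE_NULL; p = NEXT_PAGE(p)) {
 *			... use VM_PAGE_GET_PHYS_PAGE(p) ...
 *		}
 *	}
 */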
6605
6606
6607 unsigned int vm_max_delayed_work_limit = DEFAULT_DELAYED_WORK_LIMIT;
6608
6609 /*
6610 * when working on a 'run' of pages, it is necessary to hold
6611 * the vm_page_queue_lock (a hot global lock) for certain operations
6612 * on the page... however, the majority of the work can be done
6613 * while merely holding the object lock... in fact there are certain
6614 * collections of pages that don't require any work brokered by the
6615 * vm_page_queue_lock... to mitigate the time spent behind the global
6616 * lock, go to a 2 pass algorithm... collect pages up to DELAYED_WORK_LIMIT
6617 * while doing all of the work that doesn't require the vm_page_queue_lock...
6618 * then call vm_page_do_delayed_work to acquire the vm_page_queue_lock and do the
6619 * necessary work for each page... we will grab the busy bit on the page
6620 * if it's not already held so that vm_page_do_delayed_work can drop the object lock
6621 * if it can't immediately take the vm_page_queue_lock in order to compete
6622 * for the locks in the same order that vm_pageout_scan takes them.
6623 * the operation names are modeled after the names of the routines that
6624 * need to be called in order to make the changes very obvious in the
6625 * original loop
6626 */
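/*
 * A typical caller loop, roughly (illustrative sketch; the batch size
 * and the exact dw_mask bits depend on the caller, see the pageout and
 * fault paths for the real thing):
 *
 *	struct vm_page_delayed_work dw_array[DEFAULT_DELAYED_WORK_LIMIT];
 *	struct vm_page_delayed_work *dwp = &dw_array[0];
 *	int dw_count = 0;
 *
 *	for each interesting page m (object locked):
 *		dwp->dw_m = m;
 *		dwp->dw_mask = DW_vm_page_activate | DW_PAGE_WAKEUP;
 *		dwp++; dw_count++;
 *		if (dw_count >= DEFAULT_DELAYED_WORK_LIMIT) {
 *			vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 *			dwp = &dw_array[0]; dw_count = 0;
 *		}
 *	if (dw_count)
 *		vm_page_do_delayed_work(object, tag, &dw_array[0], dw_count);
 */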
6627
6628 void
6629 vm_page_do_delayed_work(
6630 vm_object_t object,
6631 vm_tag_t tag,
6632 struct vm_page_delayed_work *dwp,
6633 int dw_count)
6634 {
6635 int j;
6636 vm_page_t m;
6637 vm_page_t local_free_q = VM_PAGE_NULL;
6638
6639 /*
6640 * pageout_scan takes the vm_page_lock_queues first
6641 * then tries for the object lock... to avoid what
6642 * is effectively a lock inversion, we'll go to the
6643 * trouble of taking them in that same order... otherwise
6644 * if this object contains the majority of the pages resident
6645 * in the UBC (or a small set of large objects actively being
6646 * worked on contain the majority of the pages), we could
6647 * cause the pageout_scan thread to 'starve' in its attempt
6648 * to find pages to move to the free queue, since it has to
6649 * successfully acquire the object lock of any candidate page
6650 * before it can steal/clean it.
6651 */
6652 if (!vm_page_trylockspin_queues()) {
6653 vm_object_unlock(object);
6654
6655 /*
6656 * "Turnstile enabled vm_pageout_scan" can be runnable
6657 * for a very long time without getting on a core.
6658 * If this is a higher priority thread it could be
6659 * waiting here for a very long time respecting the fact
6660 * that pageout_scan would like its object after VPS does
6661 * a mutex_pause(0).
6662 * So we cap the number of yields in the vm_object_lock_avoid()
6663 * case to a single mutex_pause(0) which will give vm_pageout_scan
6664 * 10us to run and grab the object if needed.
6665 */
6666 vm_page_lockspin_queues();
6667
6668 for (j = 0;; j++) {
6669 if ((!vm_object_lock_avoid(object) ||
6670 (vps_dynamic_priority_enabled && (j > 0))) &&
6671 _vm_object_lock_try(object)) {
6672 break;
6673 }
6674 vm_page_unlock_queues();
6675 mutex_pause(j);
6676 vm_page_lockspin_queues();
6677 }
6678 }
6679 for (j = 0; j < dw_count; j++, dwp++) {
6680 m = dwp->dw_m;
6681
6682 if (dwp->dw_mask & DW_vm_pageout_throttle_up) {
6683 vm_pageout_throttle_up(m);
6684 }
6685 #if CONFIG_PHANTOM_CACHE
6686 if (dwp->dw_mask & DW_vm_phantom_cache_update) {
6687 vm_phantom_cache_update(m);
6688 }
6689 #endif
6690 if (dwp->dw_mask & DW_vm_page_wire) {
6691 vm_page_wire(m, tag, FALSE);
6692 } else if (dwp->dw_mask & DW_vm_page_unwire) {
6693 boolean_t queueit;
6694
6695 queueit = (dwp->dw_mask & (DW_vm_page_free | DW_vm_page_deactivate_internal)) ? FALSE : TRUE;
6696
6697 vm_page_unwire(m, queueit);
6698 }
6699 if (dwp->dw_mask & DW_vm_page_free) {
6700 vm_page_free_prepare_queues(m);
6701
6702 assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
6703 /*
6704 * Add this page to our list of reclaimed pages,
6705 * to be freed later.
6706 */
6707 m->vmp_snext = local_free_q;
6708 local_free_q = m;
6709 } else {
6710 if (dwp->dw_mask & DW_vm_page_deactivate_internal) {
6711 vm_page_deactivate_internal(m, FALSE);
6712 } else if (dwp->dw_mask & DW_vm_page_activate) {
6713 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6714 vm_page_activate(m);
6715 }
6716 } else if (dwp->dw_mask & DW_vm_page_speculate) {
6717 vm_page_speculate(m, TRUE);
6718 } else if (dwp->dw_mask & DW_enqueue_cleaned) {
6719 /*
6720 * if we didn't hold the object lock and did this,
6721 * we might disconnect the page, then someone might
6722 * soft fault it back in, then we would put it on the
6723 * cleaned queue, and so we would have a referenced (maybe even dirty)
6724 * page on that queue, which we don't want
6725 */
6726 int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
6727
6728 if ((refmod_state & VM_MEM_REFERENCED)) {
6729 /*
6730 * this page has been touched since it got cleaned; let's activate it
6731 * if it hasn't already been
6732 */
6733 VM_PAGEOUT_DEBUG(vm_pageout_enqueued_cleaned, 1);
6734 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
6735
6736 if (m->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) {
6737 vm_page_activate(m);
6738 }
6739 } else {
6740 m->vmp_reference = FALSE;
6741 vm_page_enqueue_cleaned(m);
6742 }
6743 } else if (dwp->dw_mask & DW_vm_page_lru) {
6744 vm_page_lru(m);
6745 } else if (dwp->dw_mask & DW_VM_PAGE_QUEUES_REMOVE) {
6746 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6747 vm_page_queues_remove(m, TRUE);
6748 }
6749 }
6750 if (dwp->dw_mask & DW_set_reference) {
6751 m->vmp_reference = TRUE;
6752 } else if (dwp->dw_mask & DW_clear_reference) {
6753 m->vmp_reference = FALSE;
6754 }
6755
6756 if (dwp->dw_mask & DW_move_page) {
6757 if (m->vmp_q_state != VM_PAGE_ON_PAGEOUT_Q) {
6758 vm_page_queues_remove(m, FALSE);
6759
6760 assert(VM_PAGE_OBJECT(m) != kernel_object);
6761
6762 vm_page_enqueue_inactive(m, FALSE);
6763 }
6764 }
6765 if (dwp->dw_mask & DW_clear_busy) {
6766 m->vmp_busy = FALSE;
6767 }
6768
6769 if (dwp->dw_mask & DW_PAGE_WAKEUP) {
6770 PAGE_WAKEUP(m);
6771 }
6772 }
6773 }
6774 vm_page_unlock_queues();
6775
6776 if (local_free_q) {
6777 vm_page_free_list(local_free_q, TRUE);
6778 }
6779
6780 VM_CHECK_MEMORYSTATUS;
6781 }
6782
6783 __abortlike
6784 static void
6785 __vm_page_alloc_list_failed_panic(
6786 vm_size_t page_count,
6787 kma_flags_t flags,
6788 kern_return_t kr)
6789 {
6790 panic("vm_page_alloc_list(%zd, 0x%x) failed unexpectedly with %d",
6791 (size_t)page_count, flags, kr);
6792 }
6793
6794 kern_return_t
6795 vm_page_alloc_list(
6796 vm_size_t page_count,
6797 kma_flags_t flags,
6798 vm_page_t *list)
6799 {
6800 vm_page_t page_list = VM_PAGE_NULL;
6801 vm_page_t mem;
6802 kern_return_t kr = KERN_SUCCESS;
6803 int page_grab_count = 0;
6804 #if DEVELOPMENT || DEBUG
6805 task_t task;
6806 #endif /* DEVELOPMENT || DEBUG */
6807
6808 for (vm_size_t i = 0; i < page_count; i++) {
6809 for (;;) {
6810 if (flags & KMA_LOMEM) {
6811 mem = vm_page_grablo();
6812 } else {
6813 mem = vm_page_grab();
6814 }
6815
6816 if (mem != VM_PAGE_NULL) {
6817 break;
6818 }
6819
6820 if (flags & KMA_NOPAGEWAIT) {
6821 kr = KERN_RESOURCE_SHORTAGE;
6822 goto out;
6823 }
6824 if ((flags & KMA_LOMEM) && (vm_lopage_needed == TRUE)) {
6825 kr = KERN_RESOURCE_SHORTAGE;
6826 goto out;
6827 }
6828
6829 /* VM privileged threads should have waited in vm_page_grab() and not get here. */
6830 assert(!(current_thread()->options & TH_OPT_VMPRIV));
6831
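/*
 * Unless the caller asked for KMA_NOFAIL, give up rather than wait
 * forever on a request that can never be satisfied: bail if wired
 * memory plus the free target already covers max_mem, or if the
 * request itself is larger than what could ever become free.
 */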
6832 if ((flags & KMA_NOFAIL) == 0) {
6833 uint64_t unavailable = ptoa_64(vm_page_wire_count + vm_page_free_target);
6834 if (unavailable > max_mem || ptoa_64(page_count) > (max_mem - unavailable)) {
6835 kr = KERN_RESOURCE_SHORTAGE;
6836 goto out;
6837 }
6838 }
6839 VM_PAGE_WAIT();
6840 }
6841
6842 page_grab_count++;
6843 mem->vmp_snext = page_list;
6844 page_list = mem;
6845 }
6846
6847 if ((KMA_ZERO | KMA_NOENCRYPT) & flags) {
6848 for (mem = page_list; mem; mem = mem->vmp_snext) {
6849 vm_page_zero_fill(mem);
6850 }
6851 }
6852
6853 out:
6854 #if DEBUG || DEVELOPMENT
6855 task = current_task_early();
6856 if (task != NULL) {
6857 ledger_credit(task->ledger, task_ledgers.pages_grabbed_kern, page_grab_count);
6858 }
6859 #endif
6860
6861 if (kr == KERN_SUCCESS) {
6862 *list = page_list;
6863 } else if (flags & KMA_NOFAIL) {
6864 __vm_page_alloc_list_failed_panic(page_count, flags, kr);
6865 } else {
6866 vm_page_free_list(page_list, FALSE);
6867 }
6868
6869 return kr;
6870 }
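/*
 * Illustrative use only: grab four zero-filled pages, consume them,
 * then hand them back.
 *
 *	vm_page_t list;
 *
 *	if (vm_page_alloc_list(4, KMA_ZERO, &list) == KERN_SUCCESS) {
 *		for (vm_page_t p = list; p != VM_PAGE_NULL; p = vm_page_get_next(p)) {
 *			... p is not yet inserted in any object ...
 *		}
 *		vm_page_free_list(list, FALSE);
 *	}
 */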
6871
6872 void
6873 vm_page_set_offset(vm_page_t page, vm_object_offset_t offset)
6874 {
6875 page->vmp_offset = offset;
6876 }
6877
6878 vm_page_t
6879 vm_page_get_next(vm_page_t page)
6880 {
6881 return page->vmp_snext;
6882 }
6883
6884 vm_object_offset_t
6885 vm_page_get_offset(vm_page_t page)
6886 {
6887 return page->vmp_offset;
6888 }
6889
6890 ppnum_t
6891 vm_page_get_phys_page(vm_page_t page)
6892 {
6893 return VM_PAGE_GET_PHYS_PAGE(page);
6894 }
6895
6896
6897 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
6898
6899 #if HIBERNATION
6900
6901 static vm_page_t hibernate_gobble_queue;
6902
6903 static int hibernate_drain_pageout_queue(struct vm_pageout_queue *);
6904 static int hibernate_flush_dirty_pages(int);
6905 static int hibernate_flush_queue(vm_page_queue_head_t *, int);
6906
6907 void hibernate_flush_wait(void);
6908 void hibernate_mark_in_progress(void);
6909 void hibernate_clear_in_progress(void);
6910
6911 void hibernate_free_range(int, int);
6912 void hibernate_hash_insert_page(vm_page_t);
6913 uint32_t hibernate_mark_as_unneeded(addr64_t, addr64_t, hibernate_page_list_t *, hibernate_page_list_t *);
6914 uint32_t hibernate_teardown_vm_structs(hibernate_page_list_t *, hibernate_page_list_t *);
6915 ppnum_t hibernate_lookup_paddr(unsigned int);
6916
6917 struct hibernate_statistics {
6918 int hibernate_considered;
6919 int hibernate_reentered_on_q;
6920 int hibernate_found_dirty;
6921 int hibernate_skipped_cleaning;
6922 int hibernate_skipped_transient;
6923 int hibernate_skipped_precious;
6924 int hibernate_skipped_external;
6925 int hibernate_queue_nolock;
6926 int hibernate_queue_paused;
6927 int hibernate_throttled;
6928 int hibernate_throttle_timeout;
6929 int hibernate_drained;
6930 int hibernate_drain_timeout;
6931 int cd_lock_failed;
6932 int cd_found_precious;
6933 int cd_found_wired;
6934 int cd_found_busy;
6935 int cd_found_unusual;
6936 int cd_found_cleaning;
6937 int cd_found_laundry;
6938 int cd_found_dirty;
6939 int cd_found_xpmapped;
6940 int cd_skipped_xpmapped;
6941 int cd_local_free;
6942 int cd_total_free;
6943 int cd_vm_page_wire_count;
6944 int cd_vm_struct_pages_unneeded;
6945 int cd_pages;
6946 int cd_discarded;
6947 int cd_count_wire;
6948 } hibernate_stats;
6949
6950
6951 /*
6952 * clamp the number of 'xpmapped' pages we'll sweep into the hibernation image
6953 * so that we don't overrun the estimated image size, which would
6954 * result in a hibernation failure.
6955 *
6956 * We use a size value instead of pages because we don't want to take up more space
6957 * on disk if the system has a 16K page size vs 4K. Also, we are not guaranteed
6958 * to have that additional space available.
6959 *
6960 * Since this was set at 40000 pages on X86 we are going to use 160MB as our
6961 * xpmapped size.
6962 */
6963 #define HIBERNATE_XPMAPPED_LIMIT ((160 * 1024 * 1024ULL) / PAGE_SIZE)
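/*
 * For reference: 160MB works out to 40960 pages with a 4K page size
 * (matching the old x86 limit of ~40000 pages) and 10240 pages with
 * a 16K page size.
 */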
6964
6965
6966 static int
6967 hibernate_drain_pageout_queue(struct vm_pageout_queue *q)
6968 {
6969 wait_result_t wait_result;
6970
6971 vm_page_lock_queues();
6972
6973 while (!vm_page_queue_empty(&q->pgo_pending)) {
6974 q->pgo_draining = TRUE;
6975
6976 assert_wait_timeout((event_t) (&q->pgo_laundry + 1), THREAD_INTERRUPTIBLE, 5000, 1000 * NSEC_PER_USEC);
6977
6978 vm_page_unlock_queues();
6979
6980 wait_result = thread_block(THREAD_CONTINUE_NULL);
6981
6982 if (wait_result == THREAD_TIMED_OUT && !vm_page_queue_empty(&q->pgo_pending)) {
6983 hibernate_stats.hibernate_drain_timeout++;
6984
6985 if (q == &vm_pageout_queue_external) {
6986 return 0;
6987 }
6988
6989 return 1;
6990 }
6991 vm_page_lock_queues();
6992
6993 hibernate_stats.hibernate_drained++;
6994 }
6995 vm_page_unlock_queues();
6996
6997 return 0;
6998 }
6999
7000
7001 boolean_t hibernate_skip_external = FALSE;
7002
7003 static int
7004 hibernate_flush_queue(vm_page_queue_head_t *q, int qcount)
7005 {
7006 vm_page_t m;
7007 vm_object_t l_object = NULL;
7008 vm_object_t m_object = NULL;
7009 int refmod_state = 0;
7010 int try_failed_count = 0;
7011 int retval = 0;
7012 int current_run = 0;
7013 struct vm_pageout_queue *iq;
7014 struct vm_pageout_queue *eq;
7015 struct vm_pageout_queue *tq;
7016
7017 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_START,
7018 VM_KERNEL_UNSLIDE_OR_PERM(q), qcount);
7019
7020 iq = &vm_pageout_queue_internal;
7021 eq = &vm_pageout_queue_external;
7022
7023 vm_page_lock_queues();
7024
7025 while (qcount && !vm_page_queue_empty(q)) {
7026 if (current_run++ == 1000) {
7027 if (hibernate_should_abort()) {
7028 retval = 1;
7029 break;
7030 }
7031 current_run = 0;
7032 }
7033
7034 m = (vm_page_t) vm_page_queue_first(q);
7035 m_object = VM_PAGE_OBJECT(m);
7036
7037 /*
7038 * check to see if we currently are working
7039 * with the same object... if so, we've
7040 * already got the lock
7041 */
7042 if (m_object != l_object) {
7043 /*
7044 * the object associated with candidate page is
7045 * different from the one we were just working
7046 * with... dump the lock if we still own it
7047 */
7048 if (l_object != NULL) {
7049 vm_object_unlock(l_object);
7050 l_object = NULL;
7051 }
7052 /*
7053 * Try to lock object; since we've already got the
7054 * page queues lock, we can only 'try' for this one.
7055 * if the 'try' fails, we need to do a mutex_pause
7056 * to allow the owner of the object lock a chance to
7057 * run...
7058 */
7059 if (!vm_object_lock_try_scan(m_object)) {
7060 if (try_failed_count > 20) {
7061 hibernate_stats.hibernate_queue_nolock++;
7062
7063 goto reenter_pg_on_q;
7064 }
7065
7066 vm_page_unlock_queues();
7067 mutex_pause(try_failed_count++);
7068 vm_page_lock_queues();
7069
7070 hibernate_stats.hibernate_queue_paused++;
7071 continue;
7072 } else {
7073 l_object = m_object;
7074 }
7075 }
7076 if (!m_object->alive || m->vmp_cleaning || m->vmp_laundry || m->vmp_busy || m->vmp_absent || VMP_ERROR_GET(m)) {
7077 /*
7078 * page is not to be cleaned
7079 * put it back on the head of its queue
7080 */
7081 if (m->vmp_cleaning) {
7082 hibernate_stats.hibernate_skipped_cleaning++;
7083 } else {
7084 hibernate_stats.hibernate_skipped_transient++;
7085 }
7086
7087 goto reenter_pg_on_q;
7088 }
7089 if (m_object->copy == VM_OBJECT_NULL) {
7090 if (m_object->purgable == VM_PURGABLE_VOLATILE || m_object->purgable == VM_PURGABLE_EMPTY) {
7091 /*
7092 * let the normal hibernate image path
7093 * deal with these
7094 */
7095 goto reenter_pg_on_q;
7096 }
7097 }
7098 if (!m->vmp_dirty && m->vmp_pmapped) {
7099 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7100
7101 if ((refmod_state & VM_MEM_MODIFIED)) {
7102 SET_PAGE_DIRTY(m, FALSE);
7103 }
7104 } else {
7105 refmod_state = 0;
7106 }
7107
7108 if (!m->vmp_dirty) {
7109 /*
7110 * page is not to be cleaned
7111 * put it back on the head of its queue
7112 */
7113 if (m->vmp_precious) {
7114 hibernate_stats.hibernate_skipped_precious++;
7115 }
7116
7117 goto reenter_pg_on_q;
7118 }
7119
7120 if (hibernate_skip_external == TRUE && !m_object->internal) {
7121 hibernate_stats.hibernate_skipped_external++;
7122
7123 goto reenter_pg_on_q;
7124 }
7125 tq = NULL;
7126
7127 if (m_object->internal) {
7128 if (VM_PAGE_Q_THROTTLED(iq)) {
7129 tq = iq;
7130 }
7131 } else if (VM_PAGE_Q_THROTTLED(eq)) {
7132 tq = eq;
7133 }
7134
7135 if (tq != NULL) {
7136 wait_result_t wait_result;
7137 int wait_count = 5;
7138
7139 if (l_object != NULL) {
7140 vm_object_unlock(l_object);
7141 l_object = NULL;
7142 }
7143
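/*
 * The target pageout queue is throttled: wait for the laundry to
 * drain in 1-second slices, up to wait_count of them. On repeated
 * timeouts either skip external pages for the rest of hibernation
 * or abort the flush.
 */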
7144 while (retval == 0) {
7145 tq->pgo_throttled = TRUE;
7146
7147 assert_wait_timeout((event_t) &tq->pgo_laundry, THREAD_INTERRUPTIBLE, 1000, 1000 * NSEC_PER_USEC);
7148
7149 vm_page_unlock_queues();
7150
7151 wait_result = thread_block(THREAD_CONTINUE_NULL);
7152
7153 vm_page_lock_queues();
7154
7155 if (wait_result != THREAD_TIMED_OUT) {
7156 break;
7157 }
7158 if (!VM_PAGE_Q_THROTTLED(tq)) {
7159 break;
7160 }
7161
7162 if (hibernate_should_abort()) {
7163 retval = 1;
7164 }
7165
7166 if (--wait_count == 0) {
7167 hibernate_stats.hibernate_throttle_timeout++;
7168
7169 if (tq == eq) {
7170 hibernate_skip_external = TRUE;
7171 break;
7172 }
7173 retval = 1;
7174 }
7175 }
7176 if (retval) {
7177 break;
7178 }
7179
7180 hibernate_stats.hibernate_throttled++;
7181
7182 continue;
7183 }
7184 /*
7185 * we've already factored out pages in the laundry which
7186 * means this page can't be on the pageout queue so it's
7187 * safe to do the vm_page_queues_remove
7188 */
7189 vm_page_queues_remove(m, TRUE);
7190
7191 if (m_object->internal == TRUE) {
7192 pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(m), PMAP_OPTIONS_COMPRESSOR, NULL);
7193 }
7194
7195 vm_pageout_cluster(m);
7196
7197 hibernate_stats.hibernate_found_dirty++;
7198
7199 goto next_pg;
7200
7201 reenter_pg_on_q:
7202 vm_page_queue_remove(q, m, vmp_pageq);
7203 vm_page_queue_enter(q, m, vmp_pageq);
7204
7205 hibernate_stats.hibernate_reentered_on_q++;
7206 next_pg:
7207 hibernate_stats.hibernate_considered++;
7208
7209 qcount--;
7210 try_failed_count = 0;
7211 }
7212 if (l_object != NULL) {
7213 vm_object_unlock(l_object);
7214 l_object = NULL;
7215 }
7216
7217 vm_page_unlock_queues();
7218
7219 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 4) | DBG_FUNC_END, hibernate_stats.hibernate_found_dirty, retval, 0, 0, 0);
7220
7221 return retval;
7222 }
7223
7224
7225 static int
7226 hibernate_flush_dirty_pages(int pass)
7227 {
7228 struct vm_speculative_age_q *aq;
7229 uint32_t i;
7230
7231 if (vm_page_local_q) {
7232 zpercpu_foreach_cpu(lid) {
7233 vm_page_reactivate_local(lid, TRUE, FALSE);
7234 }
7235 }
7236
7237 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7238 int qcount;
7239 vm_page_t m;
7240
7241 aq = &vm_page_queue_speculative[i];
7242
7243 if (vm_page_queue_empty(&aq->age_q)) {
7244 continue;
7245 }
7246 qcount = 0;
7247
7248 vm_page_lockspin_queues();
7249
7250 vm_page_queue_iterate(&aq->age_q, m, vmp_pageq) {
7251 qcount++;
7252 }
7253 vm_page_unlock_queues();
7254
7255 if (qcount) {
7256 if (hibernate_flush_queue(&aq->age_q, qcount)) {
7257 return 1;
7258 }
7259 }
7260 }
7261 if (hibernate_flush_queue(&vm_page_queue_inactive, vm_page_inactive_count - vm_page_anonymous_count - vm_page_cleaned_count)) {
7262 return 1;
7263 }
7264 /* XXX FBDP TODO: flush secluded queue */
7265 if (hibernate_flush_queue(&vm_page_queue_anonymous, vm_page_anonymous_count)) {
7266 return 1;
7267 }
7268 if (hibernate_flush_queue(&vm_page_queue_cleaned, vm_page_cleaned_count)) {
7269 return 1;
7270 }
7271 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7272 return 1;
7273 }
7274
7275 if (pass == 1) {
7276 vm_compressor_record_warmup_start();
7277 }
7278
7279 if (hibernate_flush_queue(&vm_page_queue_active, vm_page_active_count)) {
7280 if (pass == 1) {
7281 vm_compressor_record_warmup_end();
7282 }
7283 return 1;
7284 }
7285 if (hibernate_drain_pageout_queue(&vm_pageout_queue_internal)) {
7286 if (pass == 1) {
7287 vm_compressor_record_warmup_end();
7288 }
7289 return 1;
7290 }
7291 if (pass == 1) {
7292 vm_compressor_record_warmup_end();
7293 }
7294
7295 if (hibernate_skip_external == FALSE && hibernate_drain_pageout_queue(&vm_pageout_queue_external)) {
7296 return 1;
7297 }
7298
7299 return 0;
7300 }
7301
7302
7303 void
7304 hibernate_reset_stats()
7305 {
7306 bzero(&hibernate_stats, sizeof(struct hibernate_statistics));
7307 }
7308
7309
7310 int
7311 hibernate_flush_memory()
7312 {
7313 int retval;
7314
7315 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
7316
7317 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_START, vm_page_free_count, 0, 0, 0, 0);
7318
7319 hibernate_cleaning_in_progress = TRUE;
7320 hibernate_skip_external = FALSE;
7321
7322 if ((retval = hibernate_flush_dirty_pages(1)) == 0) {
7323 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_START, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7324
7325 vm_compressor_flush();
7326
7327 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 10) | DBG_FUNC_END, VM_PAGE_COMPRESSOR_COUNT, 0, 0, 0, 0);
7328
7329 if (consider_buffer_cache_collect != NULL) {
7330 unsigned int orig_wire_count;
7331
7332 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_START, 0, 0, 0, 0, 0);
7333 orig_wire_count = vm_page_wire_count;
7334
7335 (void)(*consider_buffer_cache_collect)(1);
7336 zone_gc(ZONE_GC_DRAIN);
7337
7338 HIBLOG("hibernate_flush_memory: buffer_cache_gc freed up %d wired pages\n", orig_wire_count - vm_page_wire_count);
7339
7340 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 7) | DBG_FUNC_END, orig_wire_count - vm_page_wire_count, 0, 0, 0, 0);
7341 }
7342 }
7343 hibernate_cleaning_in_progress = FALSE;
7344
7345 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 3) | DBG_FUNC_END, vm_page_free_count, hibernate_stats.hibernate_found_dirty, retval, 0, 0);
7346
7347 if (retval) {
7348 HIBLOG("hibernate_flush_memory() failed to finish - vm_page_compressor_count(%d)\n", VM_PAGE_COMPRESSOR_COUNT);
7349 }
7350
7351
7352 HIBPRINT("hibernate_flush_memory() considered(%d) reentered_on_q(%d) found_dirty(%d)\n",
7353 hibernate_stats.hibernate_considered,
7354 hibernate_stats.hibernate_reentered_on_q,
7355 hibernate_stats.hibernate_found_dirty);
7356 HIBPRINT(" skipped_cleaning(%d) skipped_transient(%d) skipped_precious(%d) skipped_external(%d) queue_nolock(%d)\n",
7357 hibernate_stats.hibernate_skipped_cleaning,
7358 hibernate_stats.hibernate_skipped_transient,
7359 hibernate_stats.hibernate_skipped_precious,
7360 hibernate_stats.hibernate_skipped_external,
7361 hibernate_stats.hibernate_queue_nolock);
7362 HIBPRINT(" queue_paused(%d) throttled(%d) throttle_timeout(%d) drained(%d) drain_timeout(%d)\n",
7363 hibernate_stats.hibernate_queue_paused,
7364 hibernate_stats.hibernate_throttled,
7365 hibernate_stats.hibernate_throttle_timeout,
7366 hibernate_stats.hibernate_drained,
7367 hibernate_stats.hibernate_drain_timeout);
7368
7369 return retval;
7370 }
7371
7372
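/*
 * Clear every bank bitmap in the list and pre-set the bits that lie past the
 * bank's last page, so pages outside the bank range are never treated as
 * needing to be saved.
 */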
7373 static void
7374 hibernate_page_list_zero(hibernate_page_list_t *list)
7375 {
7376 uint32_t bank;
7377 hibernate_bitmap_t * bitmap;
7378
7379 bitmap = &list->bank_bitmap[0];
7380 for (bank = 0; bank < list->bank_count; bank++) {
7381 uint32_t last_bit;
7382
7383 bzero((void *) &bitmap->bitmap[0], bitmap->bitmapwords << 2);
7384 // set out-of-bound bits at end of bitmap.
7385 last_bit = ((bitmap->last_page - bitmap->first_page + 1) & 31);
7386 if (last_bit) {
7387 bitmap->bitmap[bitmap->bitmapwords - 1] = (0xFFFFFFFF >> last_bit);
7388 }
7389
7390 bitmap = (hibernate_bitmap_t *) &bitmap->bitmap[bitmap->bitmapwords];
7391 }
7392 }
7393
7394 void
7395 hibernate_free_gobble_pages(void)
7396 {
7397 vm_page_t m, next;
7398 uint32_t count = 0;
7399
7400 m = (vm_page_t) hibernate_gobble_queue;
7401 while (m) {
7402 next = m->vmp_snext;
7403 vm_page_free(m);
7404 count++;
7405 m = next;
7406 }
7407 hibernate_gobble_queue = VM_PAGE_NULL;
7408
7409 if (count) {
7410 HIBLOG("Freed %d pages\n", count);
7411 }
7412 }
7413
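/*
 * Decide whether a page can simply be discarded across hibernation instead of
 * being written into the image.  Only pages that are not wired, busy, precious,
 * absent, unusual, being cleaned or in the laundry qualify, and only if they
 * are clean or belong to a volatile/empty purgeable object.  Clean xpmapped,
 * referenced pages of external objects are kept (up to HIBERNATE_XPMAPPED_LIMIT).
 * With 'preflight' set, the cd_* statistics are left untouched.
 */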
7414 static boolean_t
7415 hibernate_consider_discard(vm_page_t m, boolean_t preflight)
7416 {
7417 vm_object_t object = NULL;
7418 int refmod_state;
7419 boolean_t discard = FALSE;
7420
7421 do {
7422 if (m->vmp_private) {
7423 panic("hibernate_consider_discard: private");
7424 }
7425
7426 object = VM_PAGE_OBJECT(m);
7427
7428 if (!vm_object_lock_try(object)) {
7429 object = NULL;
7430 if (!preflight) {
7431 hibernate_stats.cd_lock_failed++;
7432 }
7433 break;
7434 }
7435 if (VM_PAGE_WIRED(m)) {
7436 if (!preflight) {
7437 hibernate_stats.cd_found_wired++;
7438 }
7439 break;
7440 }
7441 if (m->vmp_precious) {
7442 if (!preflight) {
7443 hibernate_stats.cd_found_precious++;
7444 }
7445 break;
7446 }
7447 if (m->vmp_busy || !object->alive) {
7448 /*
7449 * Somebody is playing with this page.
7450 */
7451 if (!preflight) {
7452 hibernate_stats.cd_found_busy++;
7453 }
7454 break;
7455 }
7456 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7457 /*
7458 * If it's unusual in any way, ignore it
7459 */
7460 if (!preflight) {
7461 hibernate_stats.cd_found_unusual++;
7462 }
7463 break;
7464 }
7465 if (m->vmp_cleaning) {
7466 if (!preflight) {
7467 hibernate_stats.cd_found_cleaning++;
7468 }
7469 break;
7470 }
7471 if (m->vmp_laundry) {
7472 if (!preflight) {
7473 hibernate_stats.cd_found_laundry++;
7474 }
7475 break;
7476 }
7477 if (!m->vmp_dirty) {
7478 refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(m));
7479
7480 if (refmod_state & VM_MEM_REFERENCED) {
7481 m->vmp_reference = TRUE;
7482 }
7483 if (refmod_state & VM_MEM_MODIFIED) {
7484 SET_PAGE_DIRTY(m, FALSE);
7485 }
7486 }
7487
7488 /*
7489 * If it's clean or purgeable we can discard the page on wakeup.
7490 */
7491 discard = (!m->vmp_dirty)
7492 || (VM_PURGABLE_VOLATILE == object->purgable)
7493 || (VM_PURGABLE_EMPTY == object->purgable);
7494
7495
7496 if (discard == FALSE) {
7497 if (!preflight) {
7498 hibernate_stats.cd_found_dirty++;
7499 }
7500 } else if (m->vmp_xpmapped && m->vmp_reference && !object->internal) {
7501 if (hibernate_stats.cd_found_xpmapped < HIBERNATE_XPMAPPED_LIMIT) {
7502 if (!preflight) {
7503 hibernate_stats.cd_found_xpmapped++;
7504 }
7505 discard = FALSE;
7506 } else {
7507 if (!preflight) {
7508 hibernate_stats.cd_skipped_xpmapped++;
7509 }
7510 }
7511 }
7512 } while (FALSE);
7513
7514 if (object) {
7515 vm_object_unlock(object);
7516 }
7517
7518 return discard;
7519 }
7520
7521
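/*
 * Throw away a page that hibernate_consider_discard() approved: disconnect its
 * pmap mappings and, if it belongs to a volatile purgeable object, mark that
 * object empty and adjust vm_page_purgeable_count before freeing the page.
 */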
7522 static void
7523 hibernate_discard_page(vm_page_t m)
7524 {
7525 vm_object_t m_object;
7526
7527 if (m->vmp_absent || m->vmp_unusual || VMP_ERROR_GET(m)) {
7528 /*
7529 * If it's unusual in any way, ignore
7530 */
7531 return;
7532 }
7533
7534 m_object = VM_PAGE_OBJECT(m);
7535
7536 #if MACH_ASSERT || DEBUG
7537 if (!vm_object_lock_try(m_object)) {
7538 panic("hibernate_discard_page(%p) !vm_object_lock_try", m);
7539 }
7540 #else
7541 /* No need to lock page queue for token delete, hibernate_vm_unlock()
7542 * makes sure these locks are uncontended before sleep */
7543 #endif /* MACH_ASSERT || DEBUG */
7544
7545 if (m->vmp_pmapped == TRUE) {
7546 __unused int refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
7547 }
7548
7549 if (m->vmp_laundry) {
7550 panic("hibernate_discard_page(%p) laundry", m);
7551 }
7552 if (m->vmp_private) {
7553 panic("hibernate_discard_page(%p) private", m);
7554 }
7555 if (m->vmp_fictitious) {
7556 panic("hibernate_discard_page(%p) fictitious", m);
7557 }
7558
7559 if (VM_PURGABLE_VOLATILE == m_object->purgable) {
7560 /* object should be on a queue */
7561 assert((m_object->objq.next != NULL) && (m_object->objq.prev != NULL));
7562 purgeable_q_t old_queue = vm_purgeable_object_remove(m_object);
7563 assert(old_queue);
7564 if (m_object->purgeable_when_ripe) {
7565 vm_purgeable_token_delete_first(old_queue);
7566 }
7567 vm_object_lock_assert_exclusive(m_object);
7568 m_object->purgable = VM_PURGABLE_EMPTY;
7569
7570 /*
7571 * Purgeable ledgers: pages of VOLATILE and EMPTY objects are
7572 * accounted in the "volatile" ledger, so no change here.
7573 * We have to update vm_page_purgeable_count, though, since we're
7574 * effectively purging this object.
7575 */
7576 unsigned int delta;
7577 assert(m_object->resident_page_count >= m_object->wired_page_count);
7578 delta = (m_object->resident_page_count - m_object->wired_page_count);
7579 assert(vm_page_purgeable_count >= delta);
7580 assert(delta > 0);
7581 OSAddAtomic(-delta, (SInt32 *)&vm_page_purgeable_count);
7582 }
7583
7584 vm_page_free(m);
7585
7586 #if MACH_ASSERT || DEBUG
7587 vm_object_unlock(m_object);
7588 #endif /* MACH_ASSERT || DEBUG */
7589 }
7590
7591 /*
7592 * Grab locks for hibernate_page_list_setall()
7593 */
7594 void
7595 hibernate_vm_lock_queues(void)
7596 {
7597 vm_object_lock(compressor_object);
7598 vm_page_lock_queues();
7599 vm_free_page_lock();
7600 lck_mtx_lock(&vm_purgeable_queue_lock);
7601
7602 if (vm_page_local_q) {
7603 zpercpu_foreach(lq, vm_page_local_q) {
7604 VPL_LOCK(&lq->vpl_lock);
7605 }
7606 }
7607 }
7608
7609 void
7610 hibernate_vm_unlock_queues(void)
7611 {
7612 if (vm_page_local_q) {
7613 zpercpu_foreach(lq, vm_page_local_q) {
7614 VPL_UNLOCK(&lq->vpl_lock);
7615 }
7616 }
7617 lck_mtx_unlock(&vm_purgeable_queue_lock);
7618 vm_free_page_unlock();
7619 vm_page_unlock_queues();
7620 vm_object_unlock(compressor_object);
7621 }
7622
7623 /*
7624 * Bits zero in the bitmaps => page needs to be saved. All pages default to be saved,
7625 * pages known to VM to not need saving are subtracted.
7626 * Wired pages to be saved are present in page_list_wired, pageable in page_list.
7627 */
7628
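/*
 * Illustrative sketch (not part of the original source): given the layout used
 * by hibernate_page_list_zero() above -- 32-bit bitmap words per bank, one bit
 * per physical page starting at first_page -- marking a page as "does not need
 * saving" roughly amounts to:
 *
 *     uint32_t offset = ppnum - bank->first_page;
 *     bank->bitmap[offset >> 5] |= (0x80000000 >> (offset & 31));
 *
 * hibernate_page_bitset() is assumed to perform this indexing; the exact bit
 * order within a word is an assumption here.
 */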
7629 void
7630 hibernate_page_list_setall(hibernate_page_list_t * page_list,
7631 hibernate_page_list_t * page_list_wired,
7632 hibernate_page_list_t * page_list_pal,
7633 boolean_t preflight,
7634 boolean_t will_discard,
7635 uint32_t * pagesOut)
7636 {
7637 uint64_t start, end, nsec;
7638 vm_page_t m;
7639 vm_page_t next;
7640 uint32_t pages = page_list->page_count;
7641 uint32_t count_anonymous = 0, count_throttled = 0, count_compressor = 0;
7642 uint32_t count_inactive = 0, count_active = 0, count_speculative = 0, count_cleaned = 0;
7643 uint32_t count_wire = pages;
7644 uint32_t count_discard_active = 0;
7645 uint32_t count_discard_inactive = 0;
7646 uint32_t count_retired = 0;
7647 uint32_t count_discard_cleaned = 0;
7648 uint32_t count_discard_purgeable = 0;
7649 uint32_t count_discard_speculative = 0;
7650 uint32_t count_discard_vm_struct_pages = 0;
7651 uint32_t i;
7652 uint32_t bank;
7653 hibernate_bitmap_t * bitmap;
7654 hibernate_bitmap_t * bitmap_wired;
7655 boolean_t discard_all;
7656 boolean_t discard = FALSE;
7657
7658 HIBLOG("hibernate_page_list_setall(preflight %d) start\n", preflight);
7659
7660 if (preflight) {
7661 page_list = NULL;
7662 page_list_wired = NULL;
7663 page_list_pal = NULL;
7664 discard_all = FALSE;
7665 } else {
7666 discard_all = will_discard;
7667 }
7668
7669 #if MACH_ASSERT || DEBUG
7670 if (!preflight) {
7671 assert(hibernate_vm_locks_are_safe());
7672 vm_page_lock_queues();
7673 if (vm_page_local_q) {
7674 zpercpu_foreach(lq, vm_page_local_q) {
7675 VPL_LOCK(&lq->vpl_lock);
7676 }
7677 }
7678 }
7679 #endif /* MACH_ASSERT || DEBUG */
7680
7681
7682 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_START, count_wire, 0, 0, 0, 0);
7683
7684 clock_get_uptime(&start);
7685
7686 if (!preflight) {
7687 hibernate_page_list_zero(page_list);
7688 hibernate_page_list_zero(page_list_wired);
7689 hibernate_page_list_zero(page_list_pal);
7690
7691 hibernate_stats.cd_vm_page_wire_count = vm_page_wire_count;
7692 hibernate_stats.cd_pages = pages;
7693 }
7694
7695 if (vm_page_local_q) {
7696 zpercpu_foreach_cpu(lid) {
7697 vm_page_reactivate_local(lid, TRUE, !preflight);
7698 }
7699 }
7700
7701 if (preflight) {
7702 vm_object_lock(compressor_object);
7703 vm_page_lock_queues();
7704 vm_free_page_lock();
7705 }
7706
7707 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
7708
7709 hibernation_vmqueues_inspection = TRUE;
7710
7711 m = (vm_page_t) hibernate_gobble_queue;
7712 while (m) {
7713 pages--;
7714 count_wire--;
7715 if (!preflight) {
7716 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7717 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7718 }
7719 m = m->vmp_snext;
7720 }
7721
7722 if (!preflight) {
7723 percpu_foreach(free_pages_head, free_pages) {
7724 for (m = *free_pages_head; m; m = m->vmp_snext) {
7725 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOCAL_Q);
7726
7727 pages--;
7728 count_wire--;
7729 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7730 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7731
7732 hibernate_stats.cd_local_free++;
7733 hibernate_stats.cd_total_free++;
7734 }
7735 }
7736 }
7737
7738 for (i = 0; i < vm_colors; i++) {
7739 vm_page_queue_iterate(&vm_page_queue_free[i].qhead, m, vmp_pageq) {
7740 assert(m->vmp_q_state == VM_PAGE_ON_FREE_Q);
7741
7742 pages--;
7743 count_wire--;
7744 if (!preflight) {
7745 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7746 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7747
7748 hibernate_stats.cd_total_free++;
7749 }
7750 }
7751 }
7752
7753 vm_page_queue_iterate(&vm_lopage_queue_free, m, vmp_pageq) {
7754 assert(m->vmp_q_state == VM_PAGE_ON_FREE_LOPAGE_Q);
7755
7756 pages--;
7757 count_wire--;
7758 if (!preflight) {
7759 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7760 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7761
7762 hibernate_stats.cd_total_free++;
7763 }
7764 }
7765
7766 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_throttled);
7767 while (m && !vm_page_queue_end(&vm_page_queue_throttled, (vm_page_queue_entry_t)m)) {
7768 assert(m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q);
7769
7770 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7771 discard = FALSE;
7772 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode)
7773 && hibernate_consider_discard(m, preflight)) {
7774 if (!preflight) {
7775 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7776 }
7777 count_discard_inactive++;
7778 discard = discard_all;
7779 } else {
7780 count_throttled++;
7781 }
7782 count_wire--;
7783 if (!preflight) {
7784 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7785 }
7786
7787 if (discard) {
7788 hibernate_discard_page(m);
7789 }
7790 m = next;
7791 }
7792
7793 m = (vm_page_t)vm_page_queue_first(&vm_page_queue_anonymous);
7794 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
7795 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
7796
7797 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7798 discard = FALSE;
7799 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7800 hibernate_consider_discard(m, preflight)) {
7801 if (!preflight) {
7802 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7803 }
7804 if (m->vmp_dirty) {
7805 count_discard_purgeable++;
7806 } else {
7807 count_discard_inactive++;
7808 }
7809 discard = discard_all;
7810 } else {
7811 count_anonymous++;
7812 }
7813 count_wire--;
7814 if (!preflight) {
7815 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7816 }
7817 if (discard) {
7818 hibernate_discard_page(m);
7819 }
7820 m = next;
7821 }
7822
7823 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
7824 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
7825 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
7826
7827 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7828 discard = FALSE;
7829 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7830 hibernate_consider_discard(m, preflight)) {
7831 if (!preflight) {
7832 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7833 }
7834 if (m->vmp_dirty) {
7835 count_discard_purgeable++;
7836 } else {
7837 count_discard_cleaned++;
7838 }
7839 discard = discard_all;
7840 } else {
7841 count_cleaned++;
7842 }
7843 count_wire--;
7844 if (!preflight) {
7845 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7846 }
7847 if (discard) {
7848 hibernate_discard_page(m);
7849 }
7850 m = next;
7851 }
7852
7853 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
7854 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
7855 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
7856
7857 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7858 discard = FALSE;
7859 if ((kIOHibernateModeDiscardCleanActive & gIOHibernateMode) &&
7860 hibernate_consider_discard(m, preflight)) {
7861 if (!preflight) {
7862 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7863 }
7864 if (m->vmp_dirty) {
7865 count_discard_purgeable++;
7866 } else {
7867 count_discard_active++;
7868 }
7869 discard = discard_all;
7870 } else {
7871 count_active++;
7872 }
7873 count_wire--;
7874 if (!preflight) {
7875 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7876 }
7877 if (discard) {
7878 hibernate_discard_page(m);
7879 }
7880 m = next;
7881 }
7882
7883 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
7884 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
7885 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
7886
7887 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7888 discard = FALSE;
7889 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7890 hibernate_consider_discard(m, preflight)) {
7891 if (!preflight) {
7892 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7893 }
7894 if (m->vmp_dirty) {
7895 count_discard_purgeable++;
7896 } else {
7897 count_discard_inactive++;
7898 }
7899 discard = discard_all;
7900 } else {
7901 count_inactive++;
7902 }
7903 count_wire--;
7904 if (!preflight) {
7905 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7906 }
7907 if (discard) {
7908 hibernate_discard_page(m);
7909 }
7910 m = next;
7911 }
7912 /* XXX FBDP TODO: secluded queue */
7913
7914 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
7915 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
7916 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
7917 assertf(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q,
7918 "Bad page: %p (0x%x:0x%x) on queue %d has state: %d (Discard: %d, Preflight: %d)",
7919 m, m->vmp_pageq.next, m->vmp_pageq.prev, i, m->vmp_q_state, discard, preflight);
7920
7921 next = (vm_page_t)VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
7922 discard = FALSE;
7923 if ((kIOHibernateModeDiscardCleanInactive & gIOHibernateMode) &&
7924 hibernate_consider_discard(m, preflight)) {
7925 if (!preflight) {
7926 hibernate_page_bitset(page_list, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7927 }
7928 count_discard_speculative++;
7929 discard = discard_all;
7930 } else {
7931 count_speculative++;
7932 }
7933 count_wire--;
7934 if (!preflight) {
7935 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7936 }
7937 if (discard) {
7938 hibernate_discard_page(m);
7939 }
7940 m = next;
7941 }
7942 }
7943
7944 vm_page_queue_iterate(&compressor_object->memq, m, vmp_listq) {
7945 assert(m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR);
7946
7947 count_compressor++;
7948 count_wire--;
7949 if (!preflight) {
7950 hibernate_page_bitset(page_list_wired, TRUE, VM_PAGE_GET_PHYS_PAGE(m));
7951 }
7952 }
7953
7954
7955 if (preflight == FALSE && discard_all == TRUE) {
7956 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_START);
7957
7958 HIBLOG("hibernate_teardown started\n");
7959 count_discard_vm_struct_pages = hibernate_teardown_vm_structs(page_list, page_list_wired);
7960 HIBLOG("hibernate_teardown completed - discarded %d\n", count_discard_vm_struct_pages);
7961
7962 pages -= count_discard_vm_struct_pages;
7963 count_wire -= count_discard_vm_struct_pages;
7964
7965 hibernate_stats.cd_vm_struct_pages_unneeded = count_discard_vm_struct_pages;
7966
7967 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 12) | DBG_FUNC_END);
7968 }
7969
7970 if (!preflight) {
7971 // pull wired from hibernate_bitmap
7972 bitmap = &page_list->bank_bitmap[0];
7973 bitmap_wired = &page_list_wired->bank_bitmap[0];
7974 for (bank = 0; bank < page_list->bank_count; bank++) {
7975 for (i = 0; i < bitmap->bitmapwords; i++) {
7976 bitmap->bitmap[i] = bitmap->bitmap[i] | ~bitmap_wired->bitmap[i];
7977 }
7978 bitmap = (hibernate_bitmap_t *)&bitmap->bitmap[bitmap->bitmapwords];
7979 bitmap_wired = (hibernate_bitmap_t *) &bitmap_wired->bitmap[bitmap_wired->bitmapwords];
7980 }
7981 }
7982
7983 // machine dependent adjustments
7984 hibernate_page_list_setall_machine(page_list, page_list_wired, preflight, &pages);
7985
7986 if (!preflight) {
7987 hibernate_stats.cd_count_wire = count_wire;
7988 hibernate_stats.cd_discarded = count_discard_active + count_discard_inactive + count_discard_purgeable +
7989 count_discard_speculative + count_discard_cleaned + count_discard_vm_struct_pages;
7990 }
7991
7992 clock_get_uptime(&end);
7993 absolutetime_to_nanoseconds(end - start, &nsec);
7994 HIBLOG("hibernate_page_list_setall time: %qd ms\n", nsec / 1000000ULL);
7995
7996 HIBLOG("pages %d, wire %d, act %d, inact %d, cleaned %d spec %d, zf %d, throt %d, compr %d, xpmapped %d\n %s discard act %d inact %d purgeable %d spec %d cleaned %d retired %d\n",
7997 pages, count_wire, count_active, count_inactive, count_cleaned, count_speculative, count_anonymous, count_throttled, count_compressor, hibernate_stats.cd_found_xpmapped,
7998 discard_all ? "did" : "could",
7999 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned, count_retired);
8000
8001 if (hibernate_stats.cd_skipped_xpmapped) {
8002 HIBLOG("WARNING: hibernate_page_list_setall skipped %d xpmapped pages\n", hibernate_stats.cd_skipped_xpmapped);
8003 }
8004
8005 *pagesOut = pages - count_discard_active - count_discard_inactive - count_discard_purgeable - count_discard_speculative - count_discard_cleaned - count_retired;
8006
8007 if (preflight && will_discard) {
8008 *pagesOut -= count_compressor + count_throttled + count_anonymous + count_inactive + count_cleaned + count_speculative + count_active;
8009 /*
8010 * We try to keep at most HIBERNATE_XPMAPPED_LIMIT pages around in the hibernation image
8011 * even if they are clean, so we need to size the hibernation image accordingly.
8012 *
8013 * NB: We have to assume all HIBERNATE_XPMAPPED_LIMIT pages might show up because 'dirty'
8014 * xpmapped pages aren't distinguishable from other 'dirty' pages in preflight. So we might
8015 * only see part of the xpmapped pages if we look at 'cd_found_xpmapped' which solely tracks
8016 * clean xpmapped pages.
8017 *
8018 * Since these pages are all cleaned by the time we are in the post-preflight phase, we might
8019 * see a much larger number in 'cd_found_xpmapped' now than we did in the preflight phase
8020 */
8021 *pagesOut += HIBERNATE_XPMAPPED_LIMIT;
8022 }
8023
8024 hibernation_vmqueues_inspection = FALSE;
8025
8026 #if MACH_ASSERT || DEBUG
8027 if (!preflight) {
8028 if (vm_page_local_q) {
8029 zpercpu_foreach(lq, vm_page_local_q) {
8030 VPL_UNLOCK(&lq->vpl_lock);
8031 }
8032 }
8033 vm_page_unlock_queues();
8034 }
8035 #endif /* MACH_ASSERT || DEBUG */
8036
8037 if (preflight) {
8038 vm_free_page_unlock();
8039 vm_page_unlock_queues();
8040 vm_object_unlock(compressor_object);
8041 }
8042
8043 KERNEL_DEBUG_CONSTANT(IOKDBG_CODE(DBG_HIBERNATE, 8) | DBG_FUNC_END, count_wire, *pagesOut, 0, 0, 0);
8044 }
8045
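/*
 * Free every page whose bit was set in 'page_list': walk the anonymous,
 * speculative, inactive, active and cleaned queues and hand matching pages to
 * hibernate_discard_page(), keeping per-queue discard counts for the log.
 */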
8046 void
8047 hibernate_page_list_discard(hibernate_page_list_t * page_list)
8048 {
8049 uint64_t start, end, nsec;
8050 vm_page_t m;
8051 vm_page_t next;
8052 uint32_t i;
8053 uint32_t count_discard_active = 0;
8054 uint32_t count_discard_inactive = 0;
8055 uint32_t count_discard_purgeable = 0;
8056 uint32_t count_discard_cleaned = 0;
8057 uint32_t count_discard_speculative = 0;
8058
8059
8060 #if MACH_ASSERT || DEBUG
8061 vm_page_lock_queues();
8062 if (vm_page_local_q) {
8063 zpercpu_foreach(lq, vm_page_local_q) {
8064 VPL_LOCK(&lq->vpl_lock);
8065 }
8066 }
8067 #endif /* MACH_ASSERT || DEBUG */
8068
8069 clock_get_uptime(&start);
8070
8071 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_anonymous);
8072 while (m && !vm_page_queue_end(&vm_page_queue_anonymous, (vm_page_queue_entry_t)m)) {
8073 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_INTERNAL_Q);
8074
8075 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8076 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8077 if (m->vmp_dirty) {
8078 count_discard_purgeable++;
8079 } else {
8080 count_discard_inactive++;
8081 }
8082 hibernate_discard_page(m);
8083 }
8084 m = next;
8085 }
8086
8087 for (i = 0; i <= VM_PAGE_MAX_SPECULATIVE_AGE_Q; i++) {
8088 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_speculative[i].age_q);
8089 while (m && !vm_page_queue_end(&vm_page_queue_speculative[i].age_q, (vm_page_queue_entry_t)m)) {
8090 assert(m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q);
8091
8092 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8093 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8094 count_discard_speculative++;
8095 hibernate_discard_page(m);
8096 }
8097 m = next;
8098 }
8099 }
8100
8101 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_inactive);
8102 while (m && !vm_page_queue_end(&vm_page_queue_inactive, (vm_page_queue_entry_t)m)) {
8103 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_EXTERNAL_Q);
8104
8105 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8106 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8107 if (m->vmp_dirty) {
8108 count_discard_purgeable++;
8109 } else {
8110 count_discard_inactive++;
8111 }
8112 hibernate_discard_page(m);
8113 }
8114 m = next;
8115 }
8116 /* XXX FBDP TODO: secluded queue */
8117
8118 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_active);
8119 while (m && !vm_page_queue_end(&vm_page_queue_active, (vm_page_queue_entry_t)m)) {
8120 assert(m->vmp_q_state == VM_PAGE_ON_ACTIVE_Q);
8121
8122 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8123 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8124 if (m->vmp_dirty) {
8125 count_discard_purgeable++;
8126 } else {
8127 count_discard_active++;
8128 }
8129 hibernate_discard_page(m);
8130 }
8131 m = next;
8132 }
8133
8134 m = (vm_page_t) vm_page_queue_first(&vm_page_queue_cleaned);
8135 while (m && !vm_page_queue_end(&vm_page_queue_cleaned, (vm_page_queue_entry_t)m)) {
8136 assert(m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q);
8137
8138 next = (vm_page_t) VM_PAGE_UNPACK_PTR(m->vmp_pageq.next);
8139 if (hibernate_page_bittst(page_list, VM_PAGE_GET_PHYS_PAGE(m))) {
8140 if (m->vmp_dirty) {
8141 count_discard_purgeable++;
8142 } else {
8143 count_discard_cleaned++;
8144 }
8145 hibernate_discard_page(m);
8146 }
8147 m = next;
8148 }
8149
8150 #if MACH_ASSERT || DEBUG
8151 if (vm_page_local_q) {
8152 zpercpu_foreach(lq, vm_page_local_q) {
8153 VPL_UNLOCK(&lq->vpl_lock);
8154 }
8155 }
8156 vm_page_unlock_queues();
8157 #endif /* MACH_ASSERT || DEBUG */
8158
8159 clock_get_uptime(&end);
8160 absolutetime_to_nanoseconds(end - start, &nsec);
8161 HIBLOG("hibernate_page_list_discard time: %qd ms, discarded act %d inact %d purgeable %d spec %d cleaned %d\n",
8162 nsec / 1000000ULL,
8163 count_discard_active, count_discard_inactive, count_discard_purgeable, count_discard_speculative, count_discard_cleaned);
8164 }
8165
8166 boolean_t hibernate_paddr_map_inited = FALSE;
8167 unsigned int hibernate_teardown_last_valid_compact_indx = -1;
8168 vm_page_t hibernate_rebuild_hash_list = NULL;
8169
8170 unsigned int hibernate_teardown_found_tabled_pages = 0;
8171 unsigned int hibernate_teardown_found_created_pages = 0;
8172 unsigned int hibernate_teardown_found_free_pages = 0;
8173 unsigned int hibernate_teardown_vm_page_free_count;
8174
8175
8176 struct ppnum_mapping {
8177 struct ppnum_mapping *ppnm_next;
8178 ppnum_t ppnm_base_paddr;
8179 unsigned int ppnm_sindx;
8180 unsigned int ppnm_eindx;
8181 };
8182
8183 struct ppnum_mapping *ppnm_head;
8184 struct ppnum_mapping *ppnm_last_found = NULL;
8185
8186
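/*
 * Build the ppnum_mapping list, one record per run of vm_pages[] entries whose
 * physical page numbers are consecutive, so hibernate_lookup_paddr() can
 * translate a vm_pages[] index into a physical page number without consulting
 * the (possibly relocated) vm_page_t itself.
 */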
8187 void
8188 hibernate_create_paddr_map(void)
8189 {
8190 unsigned int i;
8191 ppnum_t next_ppnum_in_run = 0;
8192 struct ppnum_mapping *ppnm = NULL;
8193
8194 if (hibernate_paddr_map_inited == FALSE) {
8195 for (i = 0; i < vm_pages_count; i++) {
8196 if (ppnm) {
8197 ppnm->ppnm_eindx = i;
8198 }
8199
8200 if (ppnm == NULL || VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) != next_ppnum_in_run) {
8201 ppnm = zalloc_permanent_type(struct ppnum_mapping);
8202
8203 ppnm->ppnm_next = ppnm_head;
8204 ppnm_head = ppnm;
8205
8206 ppnm->ppnm_sindx = i;
8207 ppnm->ppnm_base_paddr = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]);
8208 }
8209 next_ppnum_in_run = VM_PAGE_GET_PHYS_PAGE(&vm_pages[i]) + 1;
8210 }
8211 ppnm->ppnm_eindx = vm_pages_count;
8212
8213 hibernate_paddr_map_inited = TRUE;
8214 }
8215 }
8216
8217 ppnum_t
8218 hibernate_lookup_paddr(unsigned int indx)
8219 {
8220 struct ppnum_mapping *ppnm = NULL;
8221
8222 ppnm = ppnm_last_found;
8223
8224 if (ppnm) {
8225 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8226 goto done;
8227 }
8228 }
8229 for (ppnm = ppnm_head; ppnm; ppnm = ppnm->ppnm_next) {
8230 if (indx >= ppnm->ppnm_sindx && indx < ppnm->ppnm_eindx) {
8231 ppnm_last_found = ppnm;
8232 break;
8233 }
8234 }
8235 if (ppnm == NULL) {
8236 panic("hibernate_lookup_paddr of %d failed", indx);
8237 }
8238 done:
8239 return ppnm->ppnm_base_paddr + (indx - ppnm->ppnm_sindx);
8240 }
8241
8242
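/*
 * Mark every page fully contained in the kernel virtual range [saddr, eaddr)
 * as not needing to be preserved, in both the pageable and wired page lists.
 * Returns the number of pages marked.
 */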
8243 uint32_t
8244 hibernate_mark_as_unneeded(addr64_t saddr, addr64_t eaddr, hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8245 {
8246 addr64_t saddr_aligned;
8247 addr64_t eaddr_aligned;
8248 addr64_t addr;
8249 ppnum_t paddr;
8250 unsigned int mark_as_unneeded_pages = 0;
8251
8252 saddr_aligned = (saddr + PAGE_MASK_64) & ~PAGE_MASK_64;
8253 eaddr_aligned = eaddr & ~PAGE_MASK_64;
8254
8255 for (addr = saddr_aligned; addr < eaddr_aligned; addr += PAGE_SIZE_64) {
8256 paddr = pmap_find_phys(kernel_pmap, addr);
8257
8258 assert(paddr);
8259
8260 hibernate_page_bitset(page_list, TRUE, paddr);
8261 hibernate_page_bitset(page_list_wired, TRUE, paddr);
8262
8263 mark_as_unneeded_pages++;
8264 }
8265 return mark_as_unneeded_pages;
8266 }
8267
8268
8269 void
8270 hibernate_hash_insert_page(vm_page_t mem)
8271 {
8272 vm_page_bucket_t *bucket;
8273 int hash_id;
8274 vm_object_t m_object;
8275
8276 m_object = VM_PAGE_OBJECT(mem);
8277
8278 assert(mem->vmp_hashed);
8279 assert(m_object);
8280 assert(mem->vmp_offset != (vm_object_offset_t) -1);
8281
8282 /*
8283 * Insert it into the object/offset hash table
8284 */
8285 hash_id = vm_page_hash(m_object, mem->vmp_offset);
8286 bucket = &vm_page_buckets[hash_id];
8287
8288 mem->vmp_next_m = bucket->page_list;
8289 bucket->page_list = VM_PAGE_PACK_PTR(mem);
8290 }
8291
8292
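/*
 * Re-initialize vm_pages[sindx..eindx) as free pages and put them back on the
 * per-color free queues; used by hibernate_rebuild_vm_structs() to repopulate
 * the holes left by the teardown compaction.
 */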
8293 void
8294 hibernate_free_range(int sindx, int eindx)
8295 {
8296 vm_page_t mem;
8297 unsigned int color;
8298
8299 while (sindx < eindx) {
8300 mem = &vm_pages[sindx];
8301
8302 vm_page_init(mem, hibernate_lookup_paddr(sindx), FALSE);
8303
8304 mem->vmp_lopage = FALSE;
8305 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8306
8307 color = VM_PAGE_GET_COLOR(mem);
8308 #if defined(__x86_64__)
8309 vm_page_queue_enter_clump(&vm_page_queue_free[color].qhead, mem);
8310 #else
8311 vm_page_queue_enter(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8312 #endif
8313 vm_page_free_count++;
8314
8315 sindx++;
8316 }
8317 }
8318
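/*
 * Undo hibernate_teardown_vm_structs() on wake: move each compacted vm_page_t
 * back to its original vm_pages[] slot (recorded in vmp_next_m), rebuild the
 * vm_page hash buckets, and recreate the free list from the slots that held
 * free pages at teardown time.
 */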
8319 void
8320 hibernate_rebuild_vm_structs(void)
8321 {
8322 int i, cindx, sindx, eindx;
8323 vm_page_t mem, tmem, mem_next;
8324 AbsoluteTime startTime, endTime;
8325 uint64_t nsec;
8326
8327 if (hibernate_rebuild_needed == FALSE) {
8328 return;
8329 }
8330
8331 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_START);
8332 HIBLOG("hibernate_rebuild started\n");
8333
8334 clock_get_uptime(&startTime);
8335
8336 pal_hib_rebuild_pmap_structs();
8337
8338 bzero(&vm_page_buckets[0], vm_page_bucket_count * sizeof(vm_page_bucket_t));
8339 eindx = vm_pages_count;
8340
8341 /*
8342 * Mark all the vm_pages[] that have not been initialized yet as being
8343 * transient. This is needed to ensure that the buddy page search is correct.
8344 * Without it, random data in these vm_pages[] can trip up the buddy search.
8345 */
8346 for (i = hibernate_teardown_last_valid_compact_indx + 1; i < eindx; ++i) {
8347 vm_pages[i].vmp_q_state = VM_PAGE_NOT_ON_Q;
8348 }
8349
8350 for (cindx = hibernate_teardown_last_valid_compact_indx; cindx >= 0; cindx--) {
8351 mem = &vm_pages[cindx];
8352 assert(mem->vmp_q_state != VM_PAGE_ON_FREE_Q);
8353 /*
8354 * hibernate_teardown_vm_structs leaves the location where
8355 * this vm_page_t must be located in "next".
8356 */
8357 tmem = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8358 mem->vmp_next_m = VM_PAGE_PACK_PTR(NULL);
8359
8360 sindx = (int)(tmem - &vm_pages[0]);
8361
8362 if (mem != tmem) {
8363 /*
8364 * this vm_page_t was moved by hibernate_teardown_vm_structs,
8365 * so move it back to its real location
8366 */
8367 *tmem = *mem;
8368 mem = tmem;
8369 }
8370 if (mem->vmp_hashed) {
8371 hibernate_hash_insert_page(mem);
8372 }
8373 /*
8374 * the 'hole' between this vm_page_t and the previous
8375 * vm_page_t we moved needs to be initialized as
8376 * a range of free vm_page_t's
8377 */
8378 hibernate_free_range(sindx + 1, eindx);
8379
8380 eindx = sindx;
8381 }
8382 if (sindx) {
8383 hibernate_free_range(0, sindx);
8384 }
8385
8386 assert(vm_page_free_count == hibernate_teardown_vm_page_free_count);
8387
8388 /*
8389 * process the list of vm_page_t's that were entered in the hash,
8390 * but were not located in the vm_pages array... these are
8391 * vm_page_t's that were created on the fly (i.e. fictitious)
8392 */
8393 for (mem = hibernate_rebuild_hash_list; mem; mem = mem_next) {
8394 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8395
8396 mem->vmp_next_m = 0;
8397 hibernate_hash_insert_page(mem);
8398 }
8399 hibernate_rebuild_hash_list = NULL;
8400
8401 clock_get_uptime(&endTime);
8402 SUB_ABSOLUTETIME(&endTime, &startTime);
8403 absolutetime_to_nanoseconds(endTime, &nsec);
8404
8405 HIBLOG("hibernate_rebuild completed - took %qd msecs\n", nsec / 1000000ULL);
8406
8407 hibernate_rebuild_needed = FALSE;
8408
8409 KDBG(IOKDBG_CODE(DBG_HIBERNATE, 13) | DBG_FUNC_END);
8410 }
8411
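/*
 * Shrink the vm_page_t metadata that must be written into the image: pull free
 * pages off the free queues, compact the remaining vm_pages[] entries towards
 * the front of the array, and mark the freed tail (plus the page hash buckets
 * and unneeded pmap data) as not needing to be saved.  Reversed on wake by
 * hibernate_rebuild_vm_structs().
 */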
8412 uint32_t
8413 hibernate_teardown_vm_structs(hibernate_page_list_t *page_list, hibernate_page_list_t *page_list_wired)
8414 {
8415 unsigned int i;
8416 unsigned int compact_target_indx;
8417 vm_page_t mem, mem_next;
8418 vm_page_bucket_t *bucket;
8419 unsigned int mark_as_unneeded_pages = 0;
8420 unsigned int unneeded_vm_page_bucket_pages = 0;
8421 unsigned int unneeded_vm_pages_pages = 0;
8422 unsigned int unneeded_pmap_pages = 0;
8423 addr64_t start_of_unneeded = 0;
8424 addr64_t end_of_unneeded = 0;
8425
8426
8427 if (hibernate_should_abort()) {
8428 return 0;
8429 }
8430
8431 hibernate_rebuild_needed = TRUE;
8432
8433 HIBLOG("hibernate_teardown: wired_pages %d, free_pages %d, active_pages %d, inactive_pages %d, speculative_pages %d, cleaned_pages %d, compressor_pages %d\n",
8434 vm_page_wire_count, vm_page_free_count, vm_page_active_count, vm_page_inactive_count, vm_page_speculative_count,
8435 vm_page_cleaned_count, compressor_object->resident_page_count);
8436
8437 for (i = 0; i < vm_page_bucket_count; i++) {
8438 bucket = &vm_page_buckets[i];
8439
8440 for (mem = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list)); mem != VM_PAGE_NULL; mem = mem_next) {
8441 assert(mem->vmp_hashed);
8442
8443 mem_next = (vm_page_t)(VM_PAGE_UNPACK_PTR(mem->vmp_next_m));
8444
8445 if (mem < &vm_pages[0] || mem >= &vm_pages[vm_pages_count]) {
8446 mem->vmp_next_m = VM_PAGE_PACK_PTR(hibernate_rebuild_hash_list);
8447 hibernate_rebuild_hash_list = mem;
8448 }
8449 }
8450 }
8451 unneeded_vm_page_bucket_pages = hibernate_mark_as_unneeded((addr64_t)&vm_page_buckets[0], (addr64_t)&vm_page_buckets[vm_page_bucket_count], page_list, page_list_wired);
8452 mark_as_unneeded_pages += unneeded_vm_page_bucket_pages;
8453
8454 hibernate_teardown_vm_page_free_count = vm_page_free_count;
8455
8456 compact_target_indx = 0;
8457
8458 for (i = 0; i < vm_pages_count; i++) {
8459 mem = &vm_pages[i];
8460
8461 if (mem->vmp_q_state == VM_PAGE_ON_FREE_Q) {
8462 unsigned int color;
8463
8464 assert(mem->vmp_busy);
8465 assert(!mem->vmp_lopage);
8466
8467 color = VM_PAGE_GET_COLOR(mem);
8468
8469 vm_page_queue_remove(&vm_page_queue_free[color].qhead, mem, vmp_pageq);
8470
8471 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8472
8473 vm_page_free_count--;
8474
8475 hibernate_teardown_found_free_pages++;
8476
8477 if (vm_pages[compact_target_indx].vmp_q_state != VM_PAGE_ON_FREE_Q) {
8478 compact_target_indx = i;
8479 }
8480 } else {
8481 /*
8482 * record this vm_page_t's original location;
8483 * we need this even if it doesn't get moved,
8484 * as an indicator to the rebuild function that
8485 * it doesn't have to be moved back
8486 */
8487 mem->vmp_next_m = VM_PAGE_PACK_PTR(mem);
8488
8489 if (vm_pages[compact_target_indx].vmp_q_state == VM_PAGE_ON_FREE_Q) {
8490 /*
8491 * we've got a hole to fill, so
8492 * move this vm_page_t to its new home
8493 */
8494 vm_pages[compact_target_indx] = *mem;
8495 mem->vmp_q_state = VM_PAGE_ON_FREE_Q;
8496
8497 hibernate_teardown_last_valid_compact_indx = compact_target_indx;
8498 compact_target_indx++;
8499 } else {
8500 hibernate_teardown_last_valid_compact_indx = i;
8501 }
8502 }
8503 }
8504 unneeded_vm_pages_pages = hibernate_mark_as_unneeded((addr64_t)&vm_pages[hibernate_teardown_last_valid_compact_indx + 1],
8505 (addr64_t)&vm_pages[vm_pages_count - 1], page_list, page_list_wired);
8506 mark_as_unneeded_pages += unneeded_vm_pages_pages;
8507
8508 pal_hib_teardown_pmap_structs(&start_of_unneeded, &end_of_unneeded);
8509
8510 if (start_of_unneeded) {
8511 unneeded_pmap_pages = hibernate_mark_as_unneeded(start_of_unneeded, end_of_unneeded, page_list, page_list_wired);
8512 mark_as_unneeded_pages += unneeded_pmap_pages;
8513 }
8514 HIBLOG("hibernate_teardown: mark_as_unneeded_pages %d, %d, %d\n", unneeded_vm_page_bucket_pages, unneeded_vm_pages_pages, unneeded_pmap_pages);
8515
8516 return mark_as_unneeded_pages;
8517 }
8518
8519
8520 #endif /* HIBERNATION */
8521
8522 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8523
8524 #include <mach_vm_debug.h>
8525 #if MACH_VM_DEBUG
8526
8527 #include <mach_debug/hash_info.h>
8528 #include <vm/vm_debug.h>
8529
8530 /*
8531 * Routine: vm_page_info
8532 * Purpose:
8533 * Return information about the global VP table.
8534 * Fills the buffer with as much information as possible
8535 * and returns the desired size of the buffer.
8536 * Conditions:
8537 * Nothing locked. The caller should provide
8538 * possibly-pageable memory.
8539 */
8540
8541 unsigned int
8542 vm_page_info(
8543 hash_info_bucket_t *info,
8544 unsigned int count)
8545 {
8546 unsigned int i;
8547 lck_spin_t *bucket_lock;
8548
8549 if (vm_page_bucket_count < count) {
8550 count = vm_page_bucket_count;
8551 }
8552
8553 for (i = 0; i < count; i++) {
8554 vm_page_bucket_t *bucket = &vm_page_buckets[i];
8555 unsigned int bucket_count = 0;
8556 vm_page_t m;
8557
8558 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8559 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8560
8561 for (m = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8562 m != VM_PAGE_NULL;
8563 m = (vm_page_t)(VM_PAGE_UNPACK_PTR(m->vmp_next_m))) {
8564 bucket_count++;
8565 }
8566
8567 lck_spin_unlock(bucket_lock);
8568
8569 /* don't touch pageable memory while holding locks */
8570 info[i].hib_count = bucket_count;
8571 }
8572
8573 return vm_page_bucket_count;
8574 }
8575 #endif /* MACH_VM_DEBUG */
8576
8577 #if VM_PAGE_BUCKETS_CHECK
8578 void
8579 vm_page_buckets_check(void)
8580 {
8581 unsigned int i;
8582 vm_page_t p;
8583 unsigned int p_hash;
8584 vm_page_bucket_t *bucket;
8585 lck_spin_t *bucket_lock;
8586
8587 if (!vm_page_buckets_check_ready) {
8588 return;
8589 }
8590
8591 #if HIBERNATION
8592 if (hibernate_rebuild_needed ||
8593 hibernate_rebuild_hash_list) {
8594 panic("BUCKET_CHECK: hibernation in progress: "
8595 "rebuild_needed=%d rebuild_hash_list=%p\n",
8596 hibernate_rebuild_needed,
8597 hibernate_rebuild_hash_list);
8598 }
8599 #endif /* HIBERNATION */
8600
8601 #if VM_PAGE_FAKE_BUCKETS
8602 char *cp;
8603 for (cp = (char *) vm_page_fake_buckets_start;
8604 cp < (char *) vm_page_fake_buckets_end;
8605 cp++) {
8606 if (*cp != 0x5a) {
8607 panic("BUCKET_CHECK: corruption at %p in fake buckets "
8608 "[0x%llx:0x%llx]\n",
8609 cp,
8610 (uint64_t) vm_page_fake_buckets_start,
8611 (uint64_t) vm_page_fake_buckets_end);
8612 }
8613 }
8614 #endif /* VM_PAGE_FAKE_BUCKETS */
8615
8616 for (i = 0; i < vm_page_bucket_count; i++) {
8617 vm_object_t p_object;
8618
8619 bucket = &vm_page_buckets[i];
8620 if (!bucket->page_list) {
8621 continue;
8622 }
8623
8624 bucket_lock = &vm_page_bucket_locks[i / BUCKETS_PER_LOCK];
8625 lck_spin_lock_grp(bucket_lock, &vm_page_lck_grp_bucket);
8626 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(bucket->page_list));
8627
8628 while (p != VM_PAGE_NULL) {
8629 p_object = VM_PAGE_OBJECT(p);
8630
8631 p_hash = vm_page_hash(p_object, p->vmp_offset);
8632 if (!p->vmp_hashed) {
8633 panic("BUCKET_CHECK: page %p (%p,0x%llx) "
8634 "hash %d in bucket %d at %p "
8635 "is not hashed\n",
8636 p, p_object, p->vmp_offset,
8637 p_hash, i, bucket);
8638 }
8639 if (p_hash != i) {
8640 panic("BUCKET_CHECK: corruption in bucket %d "
8641 "at %p: page %p object %p offset 0x%llx "
8642 "hash %d\n",
8643 i, bucket, p, p_object, p->vmp_offset,
8644 p_hash);
8645 }
8646 p = (vm_page_t)(VM_PAGE_UNPACK_PTR(p->vmp_next_m));
8647 }
8648 lck_spin_unlock(bucket_lock);
8649 }
8650
8651 // printf("BUCKET_CHECK: checked buckets\n");
8652 }
8653 #endif /* VM_PAGE_BUCKETS_CHECK */
8654
8655 /*
8656 * 'vm_fault_enter' will place newly created pages (zero-fill and COW) onto the
8657 * local queues if they exist... it's the only spot in the system where we add pages
8658 * to those queues... once on those queues, those pages can only move to one of the
8659 * global page queues or the free queues... they NEVER move from local q to local q.
8660 * the 'local' state is stable when vm_page_queues_remove is called since we're behind
8661 * the global vm_page_queue_lock at this point... we still need to take the local lock
8662 * in case this operation is being run on a different CPU than the local queue's identity,
8663 * but we don't have to worry about the page moving to a global queue or becoming wired
8664 * while we're grabbing the local lock since those operations would require the global
8665 * vm_page_queue_lock to be held, and we already own it.
8666 *
8667 * this is why it's safe to utilize the wire_count field in the vm_page_t as the local_id...
8668 * 'wired' and local are ALWAYS mutually exclusive conditions.
8669 */
8670
8671 void
8672 vm_page_queues_remove(vm_page_t mem, boolean_t remove_from_specialq)
8673 {
8674 boolean_t was_pageable = TRUE;
8675 vm_object_t m_object;
8676
8677 m_object = VM_PAGE_OBJECT(mem);
8678
8679 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8680
8681 if (mem->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8682 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8683 if (remove_from_specialq == TRUE) {
8684 vm_page_remove_from_specialq(mem);
8685 }
8686 /*if (mem->vmp_on_specialq != VM_PAGE_SPECIAL_Q_EMPTY) {
8687 * assert(mem->vmp_specialq.next != 0);
8688 * assert(mem->vmp_specialq.prev != 0);
8689 * } else {*/
8690 if (mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY) {
8691 assert(mem->vmp_specialq.next == 0);
8692 assert(mem->vmp_specialq.prev == 0);
8693 }
8694 return;
8695 }
8696
8697 if (mem->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8698 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8699 assert(mem->vmp_specialq.next == 0 &&
8700 mem->vmp_specialq.prev == 0 &&
8701 mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8702 return;
8703 }
8704 if (mem->vmp_q_state == VM_PAGE_IS_WIRED) {
8705 /*
8706 * might put these guys on a list for debugging purposes
8707 * if we do, we'll need to remove this assert
8708 */
8709 assert(mem->vmp_pageq.next == 0 && mem->vmp_pageq.prev == 0);
8710 assert(mem->vmp_specialq.next == 0 &&
8711 mem->vmp_specialq.prev == 0);
8712 /*
8713 * Recall that vmp_on_specialq also means a request to put
8714 * it on the special Q. So we don't want to reset that bit
8715 * just because a wiring request came in. We might want to
8716 * put it on the special queue post-unwiring.
8717 *
8718 * &&
8719 * mem->vmp_on_specialq == VM_PAGE_SPECIAL_Q_EMPTY);
8720 */
8721 return;
8722 }
8723
8724 assert(m_object != compressor_object);
8725 assert(m_object != kernel_object);
8726 assert(!mem->vmp_fictitious);
8727
8728 switch (mem->vmp_q_state) {
8729 case VM_PAGE_ON_ACTIVE_LOCAL_Q:
8730 {
8731 struct vpl *lq;
8732
8733 lq = zpercpu_get_cpu(vm_page_local_q, mem->vmp_local_id);
8734 VPL_LOCK(&lq->vpl_lock);
8735 vm_page_queue_remove(&lq->vpl_queue, mem, vmp_pageq);
8736 mem->vmp_local_id = 0;
8737 lq->vpl_count--;
8738 if (m_object->internal) {
8739 lq->vpl_internal_count--;
8740 } else {
8741 lq->vpl_external_count--;
8742 }
8743 VPL_UNLOCK(&lq->vpl_lock);
8744 was_pageable = FALSE;
8745 break;
8746 }
8747 case VM_PAGE_ON_ACTIVE_Q:
8748 {
8749 vm_page_queue_remove(&vm_page_queue_active, mem, vmp_pageq);
8750 vm_page_active_count--;
8751 break;
8752 }
8753
8754 case VM_PAGE_ON_INACTIVE_INTERNAL_Q:
8755 {
8756 assert(m_object->internal == TRUE);
8757
8758 vm_page_inactive_count--;
8759 vm_page_queue_remove(&vm_page_queue_anonymous, mem, vmp_pageq);
8760 vm_page_anonymous_count--;
8761
8762 vm_purgeable_q_advance_all();
8763 vm_page_balance_inactive(3);
8764 break;
8765 }
8766
8767 case VM_PAGE_ON_INACTIVE_EXTERNAL_Q:
8768 {
8769 assert(m_object->internal == FALSE);
8770
8771 vm_page_inactive_count--;
8772 vm_page_queue_remove(&vm_page_queue_inactive, mem, vmp_pageq);
8773 vm_purgeable_q_advance_all();
8774 vm_page_balance_inactive(3);
8775 break;
8776 }
8777
8778 case VM_PAGE_ON_INACTIVE_CLEANED_Q:
8779 {
8780 assert(m_object->internal == FALSE);
8781
8782 vm_page_inactive_count--;
8783 vm_page_queue_remove(&vm_page_queue_cleaned, mem, vmp_pageq);
8784 vm_page_cleaned_count--;
8785 vm_page_balance_inactive(3);
8786 break;
8787 }
8788
8789 case VM_PAGE_ON_THROTTLED_Q:
8790 {
8791 assert(m_object->internal == TRUE);
8792
8793 vm_page_queue_remove(&vm_page_queue_throttled, mem, vmp_pageq);
8794 vm_page_throttled_count--;
8795 was_pageable = FALSE;
8796 break;
8797 }
8798
8799 case VM_PAGE_ON_SPECULATIVE_Q:
8800 {
8801 assert(m_object->internal == FALSE);
8802
8803 vm_page_remque(&mem->vmp_pageq);
8804 vm_page_speculative_count--;
8805 vm_page_balance_inactive(3);
8806 break;
8807 }
8808
8809 #if CONFIG_SECLUDED_MEMORY
8810 case VM_PAGE_ON_SECLUDED_Q:
8811 {
8812 vm_page_queue_remove(&vm_page_queue_secluded, mem, vmp_pageq);
8813 vm_page_secluded_count--;
8814 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
8815 if (m_object == VM_OBJECT_NULL) {
8816 vm_page_secluded_count_free--;
8817 was_pageable = FALSE;
8818 } else {
8819 assert(!m_object->internal);
8820 vm_page_secluded_count_inuse--;
8821 was_pageable = FALSE;
8822 // was_pageable = TRUE;
8823 }
8824 break;
8825 }
8826 #endif /* CONFIG_SECLUDED_MEMORY */
8827
8828 default:
8829 {
8830 /*
8831 * if (mem->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q)
8832 * NOTE: vm_page_queues_remove does not deal with removing pages from the pageout queue...
8833 * the caller is responsible for determining if the page is on that queue, and if so, must
8834 * either first remove it (it needs both the page queues lock and the object lock to do
8835 * this via vm_pageout_steal_laundry), or avoid the call to vm_page_queues_remove
8836 *
8837 * we also don't expect to encounter VM_PAGE_ON_FREE_Q, VM_PAGE_ON_FREE_LOCAL_Q, VM_PAGE_ON_FREE_LOPAGE_Q
8838 * or any of the undefined states
8839 */
8840 panic("vm_page_queues_remove - bad page q_state (%p, %d)", mem, mem->vmp_q_state);
8841 break;
8842 }
8843 }
8844 VM_PAGE_ZERO_PAGEQ_ENTRY(mem);
8845 mem->vmp_q_state = VM_PAGE_NOT_ON_Q;
8846
8847 if (remove_from_specialq == TRUE) {
8848 vm_page_remove_from_specialq(mem);
8849 }
8850 if (was_pageable) {
8851 if (m_object->internal) {
8852 vm_page_pageable_internal_count--;
8853 } else {
8854 vm_page_pageable_external_count--;
8855 }
8856 }
8857 }
8858
8859 void
8860 vm_page_remove_internal(vm_page_t page)
8861 {
8862 vm_object_t __object = VM_PAGE_OBJECT(page);
8863 if (page == __object->memq_hint) {
8864 vm_page_t __new_hint;
8865 vm_page_queue_entry_t __qe;
8866 __qe = (vm_page_queue_entry_t)vm_page_queue_next(&page->vmp_listq);
8867 if (vm_page_queue_end(&__object->memq, __qe)) {
8868 __qe = (vm_page_queue_entry_t)vm_page_queue_prev(&page->vmp_listq);
8869 if (vm_page_queue_end(&__object->memq, __qe)) {
8870 __qe = NULL;
8871 }
8872 }
8873 __new_hint = (vm_page_t)((uintptr_t) __qe);
8874 __object->memq_hint = __new_hint;
8875 }
8876 vm_page_queue_remove(&__object->memq, page, vmp_listq);
8877 #if CONFIG_SECLUDED_MEMORY
8878 if (__object->eligible_for_secluded) {
8879 vm_page_secluded.eligible_for_secluded--;
8880 }
8881 #endif /* CONFIG_SECLUDED_MEMORY */
8882 }
8883
8884 void
8885 vm_page_enqueue_inactive(vm_page_t mem, boolean_t first)
8886 {
8887 vm_object_t m_object;
8888
8889 m_object = VM_PAGE_OBJECT(mem);
8890
8891 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8892 assert(!mem->vmp_fictitious);
8893 assert(!mem->vmp_laundry);
8894 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8895 vm_page_check_pageable_safe(mem);
8896
8897 if (m_object->internal) {
8898 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_INTERNAL_Q;
8899
8900 if (first == TRUE) {
8901 vm_page_queue_enter_first(&vm_page_queue_anonymous, mem, vmp_pageq);
8902 } else {
8903 vm_page_queue_enter(&vm_page_queue_anonymous, mem, vmp_pageq);
8904 }
8905
8906 vm_page_anonymous_count++;
8907 vm_page_pageable_internal_count++;
8908 } else {
8909 mem->vmp_q_state = VM_PAGE_ON_INACTIVE_EXTERNAL_Q;
8910
8911 if (first == TRUE) {
8912 vm_page_queue_enter_first(&vm_page_queue_inactive, mem, vmp_pageq);
8913 } else {
8914 vm_page_queue_enter(&vm_page_queue_inactive, mem, vmp_pageq);
8915 }
8916
8917 vm_page_pageable_external_count++;
8918 }
8919 vm_page_inactive_count++;
8920 token_new_pagecount++;
8921
8922 vm_page_add_to_specialq(mem, FALSE);
8923 }
8924
8925 void
8926 vm_page_enqueue_active(vm_page_t mem, boolean_t first)
8927 {
8928 vm_object_t m_object;
8929
8930 m_object = VM_PAGE_OBJECT(mem);
8931
8932 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
8933 assert(!mem->vmp_fictitious);
8934 assert(!mem->vmp_laundry);
8935 assert(mem->vmp_q_state == VM_PAGE_NOT_ON_Q);
8936 vm_page_check_pageable_safe(mem);
8937
8938 mem->vmp_q_state = VM_PAGE_ON_ACTIVE_Q;
8939 if (first == TRUE) {
8940 vm_page_queue_enter_first(&vm_page_queue_active, mem, vmp_pageq);
8941 } else {
8942 vm_page_queue_enter(&vm_page_queue_active, mem, vmp_pageq);
8943 }
8944 vm_page_active_count++;
8945
8946 if (m_object->internal) {
8947 vm_page_pageable_internal_count++;
8948 } else {
8949 vm_page_pageable_external_count++;
8950 }
8951
8952 vm_page_add_to_specialq(mem, FALSE);
8953 vm_page_balance_inactive(3);
8954 }
8955
8956 /*
8957 * Pages from special kernel objects shouldn't
8958 * be placed on pageable queues.
8959 */
8960 void
8961 vm_page_check_pageable_safe(vm_page_t page)
8962 {
8963 vm_object_t page_object;
8964
8965 page_object = VM_PAGE_OBJECT(page);
8966
8967 if (page_object == kernel_object) {
8968 panic("vm_page_check_pageable_safe: trying to add page "
8969 "from kernel object (%p) to pageable queue", kernel_object);
8970 }
8971
8972 if (page_object == compressor_object) {
8973 panic("vm_page_check_pageable_safe: trying to add page "
8974 "from compressor object (%p) to pageable queue", compressor_object);
8975 }
8976 }
8977
8978 /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
8979 * wired page diagnose
8980 * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
8981
8982 #include <libkern/OSKextLibPrivate.h>
8983
8984 #define KA_SIZE(namelen, subtotalscount) \
8985 (sizeof(struct vm_allocation_site) + (namelen) + 1 + ((subtotalscount) * sizeof(struct vm_allocation_total)))
8986
8987 #define KA_NAME(alloc) \
8988 ((char *)(&(alloc)->subtotals[(alloc->subtotalscount)]))
8989
8990 #define KA_NAME_LEN(alloc) \
8991 (VM_TAG_NAME_LEN_MAX & (alloc->flags >> VM_TAG_NAME_LEN_SHIFT))
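/*
 * A named allocation site is laid out as one contiguous allocation:
 *
 *     struct vm_allocation_site   header (flags encode the name length)
 *     struct vm_allocation_total  subtotals[subtotalscount]
 *     char                        name[namelen + 1]
 *
 * KA_SIZE() computes that footprint; KA_NAME() and KA_NAME_LEN() recover the
 * embedded name and its length.
 */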
8992
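/*
 * Derive an allocation tag for the current allocation site: if the thread has
 * an explicit allocation_name, use (and if necessary allocate) its tag;
 * otherwise walk the kernel stack frames until a return address outside the
 * core kernel text (or inside a built-in kext) is found and ask OSKext for the
 * allocation site registered for that caller.
 */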
8993 vm_tag_t
8994 vm_tag_bt(void)
8995 {
8996 uintptr_t* frameptr;
8997 uintptr_t* frameptr_next;
8998 uintptr_t retaddr;
8999 uintptr_t kstackb, kstackt;
9000 const vm_allocation_site_t * site;
9001 thread_t cthread;
9002 kern_allocation_name_t name;
9003
9004 cthread = current_thread();
9005 if (__improbable(cthread == NULL)) {
9006 return VM_KERN_MEMORY_OSFMK;
9007 }
9008
9009 if ((name = thread_get_kernel_state(cthread)->allocation_name)) {
9010 if (!name->tag) {
9011 vm_tag_alloc(name);
9012 }
9013 return name->tag;
9014 }
9015
9016 kstackb = cthread->kernel_stack;
9017 kstackt = kstackb + kernel_stack_size;
9018
9019 /* Load stack frame pointer (EBP on x86) into frameptr */
9020 frameptr = __builtin_frame_address(0);
9021 site = NULL;
9022 while (frameptr != NULL) {
9023 /* Verify thread stack bounds */
9024 if (((uintptr_t)(frameptr + 2) > kstackt) || ((uintptr_t)frameptr < kstackb)) {
9025 break;
9026 }
9027
9028 /* Next frame pointer is pointed to by the previous one */
9029 frameptr_next = (uintptr_t*) *frameptr;
9030
9031 /* Pull return address from one spot above the frame pointer */
9032 retaddr = *(frameptr + 1);
9033
9034 #if defined(HAS_APPLE_PAC)
9035 retaddr = (uintptr_t) ptrauth_strip((void *)retaddr, ptrauth_key_return_address);
9036 #endif
9037
9038 if (((retaddr < vm_kernel_builtinkmod_text_end) && (retaddr >= vm_kernel_builtinkmod_text))
9039 || (retaddr < vm_kernel_stext) || (retaddr > vm_kernel_top)) {
9040 site = OSKextGetAllocationSiteForCaller(retaddr);
9041 break;
9042 }
9043 frameptr = frameptr_next;
9044 }
9045
9046 return site ? site->tag : VM_KERN_MEMORY_NONE;
9047 }
9048
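/*
 * One bit per tag value, set while the tag is free.  Tag t lives in word
 * (t >> 6) at bit position (63 - (t & 63)), which lets vm_tag_alloc_locked()
 * find the lowest free tag in a word with __builtin_clzll().
 */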
9049 static uint64_t free_tag_bits[VM_MAX_TAG_VALUE / 64];
9050
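/*
 * Assign a tag to 'site', taking one from free_tag_bits or, when none is left,
 * reclaiming a named site that still holds a tag but has no allocations and no
 * references beyond its own.  Falls back to VM_KERN_MEMORY_ANY when every slot
 * is in use.  Called with vm_allocation_sites_lock held.
 */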
9051 void
9052 vm_tag_alloc_locked(vm_allocation_site_t * site, vm_allocation_site_t ** releasesiteP)
9053 {
9054 vm_tag_t tag;
9055 uint64_t avail;
9056 uint32_t idx;
9057 vm_allocation_site_t * prev;
9058
9059 if (site->tag) {
9060 return;
9061 }
9062
9063 idx = 0;
9064 while (TRUE) {
9065 avail = free_tag_bits[idx];
9066 if (avail) {
9067 tag = (vm_tag_t)__builtin_clzll(avail);
9068 avail &= ~(1ULL << (63 - tag));
9069 free_tag_bits[idx] = avail;
9070 tag += (idx << 6);
9071 break;
9072 }
9073 idx++;
9074 if (idx >= ARRAY_COUNT(free_tag_bits)) {
9075 for (idx = 0; idx < ARRAY_COUNT(vm_allocation_sites); idx++) {
9076 prev = vm_allocation_sites[idx];
9077 if (!prev) {
9078 continue;
9079 }
9080 if (!KA_NAME_LEN(prev)) {
9081 continue;
9082 }
9083 if (!prev->tag) {
9084 continue;
9085 }
9086 if (prev->total) {
9087 continue;
9088 }
9089 if (1 != prev->refcount) {
9090 continue;
9091 }
9092
9093 assert(idx == prev->tag);
9094 tag = (vm_tag_t)idx;
9095 prev->tag = VM_KERN_MEMORY_NONE;
9096 *releasesiteP = prev;
9097 break;
9098 }
9099 if (idx >= ARRAY_COUNT(vm_allocation_sites)) {
9100 tag = VM_KERN_MEMORY_ANY;
9101 }
9102 break;
9103 }
9104 }
9105 site->tag = tag;
9106
9107 OSAddAtomic16(1, &site->refcount);
9108
9109 if (VM_KERN_MEMORY_ANY != tag) {
9110 vm_allocation_sites[tag] = site;
9111 }
9112
9113 if (tag > vm_allocation_tag_highest) {
9114 vm_allocation_tag_highest = tag;
9115 }
9116 }
9117
9118 static void
9119 vm_tag_free_locked(vm_tag_t tag)
9120 {
9121 uint64_t avail;
9122 uint32_t idx;
9123 uint64_t bit;
9124
9125 if (VM_KERN_MEMORY_ANY == tag) {
9126 return;
9127 }
9128
9129 idx = (tag >> 6);
9130 avail = free_tag_bits[idx];
9131 tag &= 63;
9132 bit = (1ULL << (63 - tag));
9133 assert(!(avail & bit));
9134 free_tag_bits[idx] = (avail | bit);
9135 }
9136
9137 static void
9138 vm_tag_init(void)
9139 {
9140 vm_tag_t tag;
9141 for (tag = VM_KERN_MEMORY_FIRST_DYNAMIC; tag < VM_KERN_MEMORY_ANY; tag++) {
9142 vm_tag_free_locked(tag);
9143 }
9144
9145 for (tag = VM_KERN_MEMORY_ANY + 1; tag < VM_MAX_TAG_VALUE; tag++) {
9146 vm_tag_free_locked(tag);
9147 }
9148 }
9149
9150 vm_tag_t
9151 vm_tag_alloc(vm_allocation_site_t * site)
9152 {
9153 vm_allocation_site_t * releasesite;
9154
9155 if (!site->tag) {
9156 releasesite = NULL;
9157 lck_ticket_lock(&vm_allocation_sites_lock, LCK_GRP_NULL);
9158 vm_tag_alloc_locked(site, &releasesite);
9159 lck_ticket_unlock(&vm_allocation_sites_lock);
9160 if (releasesite) {
9161 kern_allocation_name_release(releasesite);
9162 }
9163 }
9164
9165 return site->tag;
9166 }
9167
9168 void
9169 vm_tag_update_size(vm_tag_t tag, int64_t delta)
9170 {
9171 vm_allocation_site_t * allocation;
9172 uint64_t value;
9173
9174 assert(VM_KERN_MEMORY_NONE != tag);
9175 assert(tag < VM_MAX_TAG_VALUE);
9176
9177 allocation = vm_allocation_sites[tag];
9178 assert(allocation);
9179
9180 value = os_atomic_add(&allocation->total, delta, relaxed);
9181 if (delta < 0) {
9182 assertf(value + (uint64_t)-delta > value,
9183 "tag %d, site %p", tag, allocation);
9184 }
9185
9186 #if DEBUG || DEVELOPMENT
9187 if (value > allocation->peak) {
9188 os_atomic_max(&allocation->peak, value, relaxed);
9189 }
9190 #endif /* DEBUG || DEVELOPMENT */
9191
9192 if (tag < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9193 return;
9194 }
9195
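/* First bytes accounted against a dynamic site: lazily assign it a tag. */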
9196 if (value == (uint64_t)delta && !allocation->tag) {
9197 vm_tag_alloc(allocation);
9198 }
9199 }
9200
9201 uint64_t
9202 vm_tag_get_size(vm_tag_t tag)
9203 {
9204 vm_allocation_site_t * allocation;
9205
9206 assert(VM_KERN_MEMORY_NONE != tag);
9207 assert(tag < VM_MAX_TAG_VALUE);
9208
9209 allocation = vm_allocation_sites[tag];
9210 return allocation ? os_atomic_load(&allocation->total, relaxed) : 0;
9211 }
9212
9213 void
9214 kern_allocation_update_size(kern_allocation_name_t allocation, int64_t delta)
9215 {
9216 uint64_t prior;
9217
9218 if (delta < 0) {
9219 assertf(allocation->total >= ((uint64_t)-delta), "name %p", allocation);
9220 }
9221 prior = OSAddAtomic64(delta, &allocation->total);
9222
9223 #if DEBUG || DEVELOPMENT
9224
9225 uint64_t new, peak;
9226 new = prior + delta;
9227 do {
9228 peak = allocation->peak;
9229 if (new <= peak) {
9230 break;
9231 }
9232 } while (!OSCompareAndSwap64(peak, new, &allocation->peak));
9233
9234 #endif /* DEBUG || DEVELOPMENT */
9235
9236 if (!prior && !allocation->tag) {
9237 vm_tag_alloc(allocation);
9238 }
9239 }
9240
9241 #if VM_TAG_SIZECLASSES
9242
9243 void
9244 vm_allocation_zones_init(void)
9245 {
9246 vm_offset_t addr;
9247 vm_size_t size;
9248
9249 const vm_tag_t early_tags[] = {
9250 VM_KERN_MEMORY_DIAG,
9251 VM_KERN_MEMORY_KALLOC,
9252 VM_KERN_MEMORY_KALLOC_DATA,
9253 VM_KERN_MEMORY_KALLOC_TYPE,
9254 VM_KERN_MEMORY_LIBKERN,
9255 VM_KERN_MEMORY_OSFMK,
9256 VM_KERN_MEMORY_RECOUNT,
9257 };
9258
9259 size = VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *)
9260 + ARRAY_COUNT(early_tags) * VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9261
9262 kmem_alloc(kernel_map, &addr, round_page(size),
9263 KMA_NOFAIL | KMA_KOBJECT | KMA_ZERO | KMA_PERMANENT,
9264 VM_KERN_MEMORY_DIAG);
9265
9266 vm_allocation_zone_totals = (vm_allocation_zone_total_t **) addr;
9267 addr += VM_MAX_TAG_VALUE * sizeof(vm_allocation_zone_total_t * *);
9268
9269 // prepopulate early tag ranges so allocations
9270 // in vm_tag_update_zone_size() and early boot won't recurse
9271 for (size_t i = 0; i < ARRAY_COUNT(early_tags); i++) {
9272 vm_allocation_zone_totals[early_tags[i]] = (vm_allocation_zone_total_t *)addr;
9273 addr += VM_TAG_SIZECLASSES * sizeof(vm_allocation_zone_total_t);
9274 }
9275 }
9276
9277 __attribute__((noinline))
9278 static vm_tag_t
9279 vm_tag_zone_stats_alloc(vm_tag_t tag, zalloc_flags_t flags)
9280 {
9281 vm_allocation_zone_total_t *stats;
9282 vm_size_t size = sizeof(*stats) * VM_TAG_SIZECLASSES;
9283
9284 flags = Z_VM_TAG(Z_ZERO | flags, VM_KERN_MEMORY_DIAG);
9285 stats = kalloc_data(size, flags);
9286 if (!stats) {
9287 return VM_KERN_MEMORY_NONE;
9288 }
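/* Publish the new stats array; if another CPU won the race, keep theirs and free ours. */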
9289 if (!os_atomic_cmpxchg(&vm_allocation_zone_totals[tag], NULL, stats, release)) {
9290 kfree_data(stats, size);
9291 }
9292 return tag;
9293 }
9294
9295 vm_tag_t
9296 vm_tag_will_update_zone(vm_tag_t tag, uint32_t zidx, uint32_t zflags)
9297 {
9298 assert(VM_KERN_MEMORY_NONE != tag);
9299 assert(tag < VM_MAX_TAG_VALUE);
9300
9301 if (zidx >= VM_TAG_SIZECLASSES) {
9302 return VM_KERN_MEMORY_NONE;
9303 }
9304
9305 if (__probable(vm_allocation_zone_totals[tag])) {
9306 return tag;
9307 }
9308 return vm_tag_zone_stats_alloc(tag, zflags);
9309 }
9310
9311 void
9312 vm_tag_update_zone_size(vm_tag_t tag, uint32_t zidx, long delta)
9313 {
9314 vm_allocation_zone_total_t *stats;
9315 vm_size_t value;
9316
9317 assert(VM_KERN_MEMORY_NONE != tag);
9318 assert(tag < VM_MAX_TAG_VALUE);
9319
9320 if (zidx >= VM_TAG_SIZECLASSES) {
9321 return;
9322 }
9323
9324 stats = vm_allocation_zone_totals[tag];
9325 assert(stats);
9326 stats += zidx;
9327
9328 value = os_atomic_add(&stats->vazt_total, delta, relaxed);
9329 if (delta < 0) {
9330 assertf((long)value >= 0, "zidx %d, tag %d, %p", zidx, tag, stats);
9331 return;
9332 } else if (os_atomic_load(&stats->vazt_peak, relaxed) < value) {
9333 os_atomic_max(&stats->vazt_peak, value, relaxed);
9334 }
9335 }
9336
9337 #endif /* VM_TAG_SIZECLASSES */
9338
9339 void
9340 kern_allocation_update_subtotal(kern_allocation_name_t allocation, uint32_t subtag, int64_t delta)
9341 {
9342 kern_allocation_name_t other;
9343 struct vm_allocation_total * total;
9344 uint32_t subidx;
9345
9346 subidx = 0;
9347 assert(VM_KERN_MEMORY_NONE != subtag);
9348 lck_ticket_lock(&vm_allocation_sites_lock, LCK_GRP_NULL);
9349 for (; subidx < allocation->subtotalscount; subidx++) {
9350 if (VM_KERN_MEMORY_NONE == allocation->subtotals[subidx].tag) {
9351 allocation->subtotals[subidx].tag = (vm_tag_t)subtag;
9352 break;
9353 }
9354 if (subtag == allocation->subtotals[subidx].tag) {
9355 break;
9356 }
9357 }
9358 lck_ticket_unlock(&vm_allocation_sites_lock);
9359 assert(subidx < allocation->subtotalscount);
9360 if (subidx >= allocation->subtotalscount) {
9361 return;
9362 }
9363
9364 total = &allocation->subtotals[subidx];
9365 other = vm_allocation_sites[subtag];
9366 assert(other);
9367
9368 if (delta < 0) {
9369 assertf(total->total >= ((uint64_t)-delta), "name %p", allocation);
9370 assertf(other->mapped >= ((uint64_t)-delta), "other %p", other);
9371 }
9372 OSAddAtomic64(delta, &other->mapped);
9373 OSAddAtomic64(delta, &total->total);
9374 }
9375
9376 const char *
9377 kern_allocation_get_name(kern_allocation_name_t allocation)
9378 {
9379 return KA_NAME(allocation);
9380 }
9381
9382 kern_allocation_name_t
9383 kern_allocation_name_allocate(const char * name, uint16_t subtotalscount)
9384 {
9385 kern_allocation_name_t allocation;
9386 uint16_t namelen;
9387
9388 namelen = (uint16_t)strnlen(name, MACH_MEMORY_INFO_NAME_MAX_LEN - 1);
9389
9390 allocation = kalloc_data(KA_SIZE(namelen, subtotalscount), Z_WAITOK | Z_ZERO);
9391 allocation->refcount = 1;
9392 allocation->subtotalscount = subtotalscount;
9393 allocation->flags = (uint16_t)(namelen << VM_TAG_NAME_LEN_SHIFT);
9394 strlcpy(KA_NAME(allocation), name, namelen + 1);
9395
9396 vm_tag_alloc(allocation);
9397 return allocation;
9398 }
9399
9400 void
9401 kern_allocation_name_release(kern_allocation_name_t allocation)
9402 {
9403 assert(allocation->refcount > 0);
9404 if (1 == OSAddAtomic16(-1, &allocation->refcount)) {
9405 kfree_data(allocation,
9406 KA_SIZE(KA_NAME_LEN(allocation), allocation->subtotalscount));
9407 }
9408 }
9409
9410 vm_tag_t
9411 kern_allocation_name_get_vm_tag(kern_allocation_name_t allocation)
9412 {
9413 return vm_tag_alloc(allocation);
9414 }
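
/*
 * Usage sketch (illustrative only, not compiled here): a subsystem that
 * wants its wired memory reported under its own name would typically use
 * the interfaces defined above along these lines. The name string and the
 * sizes are made up for the example.
 *
 *	kern_allocation_name_t name;
 *	vm_tag_t               tag;
 *
 *	name = kern_allocation_name_allocate("com.example.subsystem", 0);
 *	tag  = kern_allocation_name_get_vm_tag(name);             // assigns a dynamic tag
 *	kern_allocation_update_size(name, (int64_t)PAGE_SIZE);    // bytes now wired
 *	...
 *	kern_allocation_update_size(name, -(int64_t)PAGE_SIZE);   // bytes released
 *	kern_allocation_name_release(name);                       // drop the caller's reference
 */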
9415
9416 #if !VM_TAG_ACTIVE_UPDATE
9417 static void
9418 vm_page_count_object(mach_memory_info_t * info, unsigned int __unused num_info, vm_object_t object)
9419 {
9420 if (!object->wired_page_count) {
9421 return;
9422 }
9423 if (object != kernel_object) {
9424 assert(object->wire_tag < num_info);
9425 info[object->wire_tag].size += ptoa_64(object->wired_page_count);
9426 }
9427 }
9428
9429 typedef void (*vm_page_iterate_proc)(mach_memory_info_t * info,
9430 unsigned int num_info, vm_object_t object);
9431
9432 static void
9433 vm_page_iterate_purgeable_objects(mach_memory_info_t * info, unsigned int num_info,
9434 vm_page_iterate_proc proc, purgeable_q_t queue,
9435 int group)
9436 {
9437 vm_object_t object;
9438
9439 for (object = (vm_object_t) queue_first(&queue->objq[group]);
9440 !queue_end(&queue->objq[group], (queue_entry_t) object);
9441 object = (vm_object_t) queue_next(&object->objq)) {
9442 proc(info, num_info, object);
9443 }
9444 }
9445
9446 static void
9447 vm_page_iterate_objects(mach_memory_info_t * info, unsigned int num_info,
9448 vm_page_iterate_proc proc)
9449 {
9450 vm_object_t object;
9451
9452 lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket);
9453 queue_iterate(&vm_objects_wired,
9454 object,
9455 vm_object_t,
9456 wired_objq)
9457 {
9458 proc(info, num_info, object);
9459 }
9460 lck_spin_unlock(&vm_objects_wired_lock);
9461 }
9462 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9463
9464 static uint64_t
9465 process_account(mach_memory_info_t * info, unsigned int num_info,
9466 uint64_t zones_collectable_bytes, boolean_t iterated)
9467 {
9468 size_t namelen;
9469 unsigned int idx, count, nextinfo;
9470 vm_allocation_site_t * site;
9471 lck_ticket_lock(&vm_allocation_sites_lock, LCK_GRP_NULL);
9472
9473 for (idx = 0; idx <= vm_allocation_tag_highest; idx++) {
9474 site = vm_allocation_sites[idx];
9475 if (!site) {
9476 continue;
9477 }
9478 info[idx].mapped = site->mapped;
9479 info[idx].tag = site->tag;
9480 if (!iterated) {
9481 info[idx].size = site->total;
9482 #if DEBUG || DEVELOPMENT
9483 info[idx].peak = site->peak;
9484 #endif /* DEBUG || DEVELOPMENT */
9485 } else {
9486 if (!site->subtotalscount && (site->total != info[idx].size)) {
9487 printf("tag mismatch[%d] 0x%qx, iter 0x%qx\n", idx, site->total, info[idx].size);
9488 info[idx].size = site->total;
9489 }
9490 }
9491 info[idx].flags |= VM_KERN_SITE_WIRED;
9492 if (idx < VM_KERN_MEMORY_FIRST_DYNAMIC) {
9493 info[idx].site = idx;
9494 info[idx].flags |= VM_KERN_SITE_TAG;
9495 if (VM_KERN_MEMORY_ZONE == idx) {
9496 info[idx].flags |= VM_KERN_SITE_HIDE;
9497 info[idx].flags &= ~VM_KERN_SITE_WIRED;
9498 info[idx].collectable_bytes = zones_collectable_bytes;
9499 }
9500 } else if ((namelen = (VM_TAG_NAME_LEN_MAX & (site->flags >> VM_TAG_NAME_LEN_SHIFT)))) {
9501 info[idx].site = 0;
9502 info[idx].flags |= VM_KERN_SITE_NAMED;
9503 if (namelen > sizeof(info[idx].name)) {
9504 namelen = sizeof(info[idx].name);
9505 }
9506 strncpy(&info[idx].name[0], KA_NAME(site), namelen);
9507 } else if (VM_TAG_KMOD & site->flags) {
9508 info[idx].site = OSKextGetKmodIDForSite(site, NULL, 0);
9509 info[idx].flags |= VM_KERN_SITE_KMOD;
9510 } else {
9511 info[idx].site = VM_KERNEL_UNSLIDE(site);
9512 info[idx].flags |= VM_KERN_SITE_KERNEL;
9513 }
9514 }
9515
9516 nextinfo = (vm_allocation_tag_highest + 1);
9517 count = nextinfo;
9518 if (count >= num_info) {
9519 count = num_info;
9520 }
9521
9522 for (idx = 0; idx < count; idx++) {
9523 site = vm_allocation_sites[idx];
9524 if (!site) {
9525 continue;
9526 }
9527 #if VM_TAG_SIZECLASSES
9528 vm_allocation_zone_total_t * zone;
9529 unsigned int zidx;
9530
9531 if (vm_allocation_zone_totals
9532 && (zone = vm_allocation_zone_totals[idx])
9533 && (nextinfo < num_info)) {
9534 for (zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9535 if (!zone[zidx].vazt_peak) {
9536 continue;
9537 }
9538 info[nextinfo] = info[idx];
9539 info[nextinfo].zone = (uint16_t)zone_index_from_tag_index(zidx);
9540 info[nextinfo].flags &= ~VM_KERN_SITE_WIRED;
9541 info[nextinfo].flags |= VM_KERN_SITE_ZONE;
9542 info[nextinfo].flags |= VM_KERN_SITE_KALLOC;
9543 info[nextinfo].size = zone[zidx].vazt_total;
9544 info[nextinfo].peak = zone[zidx].vazt_peak;
9545 info[nextinfo].mapped = 0;
9546 nextinfo++;
9547 }
9548 }
9549 #endif /* VM_TAG_SIZECLASSES */
9550 if (site->subtotalscount) {
9551 uint64_t mapped, mapcost, take;
9552 uint32_t sub;
9553 vm_tag_t alloctag;
9554
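/*
 * A site with subtotals re-attributes bytes that were accounted under its
 * component tags: each "take" is subtracted from the component's entry and
 * accumulated here so the same memory is not reported twice.
 */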
9555 info[idx].size = site->total;
9556 mapped = info[idx].size;
9557 info[idx].mapped = mapped;
9558 mapcost = 0;
9559 for (sub = 0; sub < site->subtotalscount; sub++) {
9560 alloctag = site->subtotals[sub].tag;
9561 assert(alloctag < num_info);
9562 if (info[alloctag].name[0]) {
9563 continue;
9564 }
9565 take = site->subtotals[sub].total;
9566 if (take > info[alloctag].size) {
9567 take = info[alloctag].size;
9568 }
9569 if (take > mapped) {
9570 take = mapped;
9571 }
9572 info[alloctag].mapped -= take;
9573 info[alloctag].size -= take;
9574 mapped -= take;
9575 mapcost += take;
9576 }
9577 info[idx].size = mapcost;
9578 }
9579 }
9580 lck_ticket_unlock(&vm_allocation_sites_lock);
9581
9582 return 0;
9583 }
9584
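/*
 * Returns an upper-bound count of entries that vm_page_diagnose() may fill
 * in, so a caller can size the mach_memory_info_t buffer before taking the
 * snapshot; a little slop is included for tags created in between.
 */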
9585 uint32_t
9586 vm_page_diagnose_estimate(void)
9587 {
9588 vm_allocation_site_t * site;
9589 uint32_t count = zone_view_count;
9590 uint32_t idx;
9591
9592 lck_ticket_lock(&vm_allocation_sites_lock, LCK_GRP_NULL);
9593 for (idx = 0; idx < VM_MAX_TAG_VALUE; idx++) {
9594 site = vm_allocation_sites[idx];
9595 if (!site) {
9596 continue;
9597 }
9598 count++;
9599 #if VM_TAG_SIZECLASSES
9600 if (vm_allocation_zone_totals) {
9601 vm_allocation_zone_total_t * zone;
9602 zone = vm_allocation_zone_totals[idx];
9603 if (!zone) {
9604 continue;
9605 }
9606 for (uint32_t zidx = 0; zidx < VM_TAG_SIZECLASSES; zidx++) {
9607 count += (zone[zidx].vazt_peak != 0);
9608 }
9609 }
9610 #endif
9611 }
9612 lck_ticket_unlock(&vm_allocation_sites_lock);
9613
9614 /* some slop for new tags created */
9615 count += 8;
9616 count += VM_KERN_COUNTER_COUNT;
9617
9618 return count;
9619 }
9620
9621 static void
9622 vm_page_diagnose_zone_stats(mach_memory_info_t *info, zone_stats_t zstats,
9623 bool percpu)
9624 {
9625 zpercpu_foreach(zs, zstats) {
9626 info->size += zs->zs_mem_allocated - zs->zs_mem_freed;
9627 }
9628 if (percpu) {
9629 info->size *= zpercpu_count();
9630 }
9631 info->flags |= VM_KERN_SITE_NAMED | VM_KERN_SITE_ZONE_VIEW;
9632 }
9633
9634 static void
9635 vm_page_diagnose_zone(mach_memory_info_t *info, zone_t z)
9636 {
9637 vm_page_diagnose_zone_stats(info, z->z_stats, z->z_percpu);
9638 snprintf(info->name, sizeof(info->name),
9639 "%s%s[raw]", zone_heap_name(z), z->z_name);
9640 }
9641
9642 static int
9643 vm_page_diagnose_heap(mach_memory_info_t *info, kalloc_heap_t kheap)
9644 {
9645 struct kalloc_heap *kh = kheap->kh_views;
9646 int i = 0;
9647
9648 for (; i < KHEAP_NUM_ZONES; i++) {
9649 vm_page_diagnose_zone(info + i, zone_by_id(kheap->kh_zstart + i));
9650 }
9651
9652 while (kh) {
9653 vm_page_diagnose_zone_stats(info + i, kh->kh_stats, false);
9654 snprintf(info[i].name, sizeof(info[i].name),
9655 "%skalloc[%s]", kheap->kh_name, kh->kh_name);
9656 kh = kh->kh_views;
9657 i++;
9658 }
9659
9660 return i;
9661 }
9662
9663 static int
9664 vm_page_diagnose_kt_heaps(mach_memory_info_t *info)
9665 {
9666 uint32_t idx = 0;
9667 vm_page_diagnose_zone_stats(info + idx, KHEAP_KT_VAR->kh_stats, false);
9668 snprintf(info[idx].name, sizeof(info[idx].name),
9669 "%s[raw]", KHEAP_KT_VAR->kh_name);
9670 idx++;
9671
9672 for (uint32_t i = 0; i < KT_VAR_MAX_HEAPS; i++) {
9673 struct kheap_info heap = kalloc_type_heap_array[i];
9674
9675 for (kalloc_type_var_view_t ktv = heap.kt_views; ktv;
9676 ktv = (kalloc_type_var_view_t) ktv->kt_next) {
9677 if (ktv->kt_stats && ktv->kt_stats != KHEAP_KT_VAR->kh_stats) {
9678 vm_page_diagnose_zone_stats(info + idx, ktv->kt_stats, false);
9679 snprintf(info[idx].name, sizeof(info[idx].name),
9680 "%s[%s]", KHEAP_KT_VAR->kh_name, ktv->kt_name);
9681 idx++;
9682 }
9683 }
9684 }
9685
9686 return idx;
9687 }
9688
9689 kern_return_t
9690 vm_page_diagnose(mach_memory_info_t * info, unsigned int num_info, uint64_t zones_collectable_bytes)
9691 {
9692 uint64_t wired_size;
9693 uint64_t wired_managed_size;
9694 uint64_t wired_reserved_size;
9695 boolean_t iterate;
9696 mach_memory_info_t * counts;
9697 uint32_t i;
9698
9699 bzero(info, num_info * sizeof(mach_memory_info_t));
9700
9701 if (!vm_page_wire_count_initial) {
9702 return KERN_ABORTED;
9703 }
9704
9705 #if !XNU_TARGET_OS_OSX
9706 wired_size = ptoa_64(vm_page_wire_count);
9707 wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count);
9708 #else /* !XNU_TARGET_OS_OSX */
9709 wired_size = ptoa_64(vm_page_wire_count + vm_lopage_free_count + vm_page_throttled_count);
9710 wired_reserved_size = ptoa_64(vm_page_wire_count_initial - vm_page_stolen_count + vm_page_throttled_count);
9711 #endif /* !XNU_TARGET_OS_OSX */
9712 wired_managed_size = ptoa_64(vm_page_wire_count - vm_page_wire_count_initial);
9713
9714 wired_size += booter_size;
9715
9716 assert(num_info >= VM_KERN_COUNTER_COUNT);
9717 num_info -= VM_KERN_COUNTER_COUNT;
9718 counts = &info[num_info];
9719
9720 #define SET_COUNT(xcount, xsize, xflags) \
9721 counts[xcount].tag = VM_MAX_TAG_VALUE + xcount; \
9722 counts[xcount].site = (xcount); \
9723 counts[xcount].size = (xsize); \
9724 counts[xcount].mapped = (xsize); \
9725 counts[xcount].flags = VM_KERN_SITE_COUNTER | xflags;
9726
9727 SET_COUNT(VM_KERN_COUNT_MANAGED, ptoa_64(vm_page_pages), 0);
9728 SET_COUNT(VM_KERN_COUNT_WIRED, wired_size, 0);
9729 SET_COUNT(VM_KERN_COUNT_WIRED_MANAGED, wired_managed_size, 0);
9730 SET_COUNT(VM_KERN_COUNT_RESERVED, wired_reserved_size, VM_KERN_SITE_WIRED);
9731 SET_COUNT(VM_KERN_COUNT_STOLEN, ptoa_64(vm_page_stolen_count), VM_KERN_SITE_WIRED);
9732 SET_COUNT(VM_KERN_COUNT_LOPAGE, ptoa_64(vm_lopage_free_count), VM_KERN_SITE_WIRED);
9733 SET_COUNT(VM_KERN_COUNT_WIRED_BOOT, ptoa_64(vm_page_wire_count_on_boot), 0);
9734 SET_COUNT(VM_KERN_COUNT_BOOT_STOLEN, booter_size, VM_KERN_SITE_WIRED);
9735 SET_COUNT(VM_KERN_COUNT_WIRED_STATIC_KERNELCACHE, ptoa_64(vm_page_kernelcache_count), 0);
9736
9737 #define SET_MAP(xcount, xsize, xfree, xlargest) \
9738 counts[xcount].site = (xcount); \
9739 counts[xcount].size = (xsize); \
9740 counts[xcount].mapped = (xsize); \
9741 counts[xcount].free = (xfree); \
9742 counts[xcount].largest = (xlargest); \
9743 counts[xcount].flags = VM_KERN_SITE_COUNTER;
9744
9745 vm_map_size_t map_size, map_free, map_largest;
9746
9747 vm_map_sizes(kernel_map, &map_size, &map_free, &map_largest);
9748 SET_MAP(VM_KERN_COUNT_MAP_KERNEL, map_size, map_free, map_largest);
9749
9750 zone_map_sizes(&map_size, &map_free, &map_largest);
9751 SET_MAP(VM_KERN_COUNT_MAP_ZONE, map_size, map_free, map_largest);
9752
9753 assert(num_info >= zone_view_count);
9754 num_info -= zone_view_count;
9755 counts = &info[num_info];
9756 i = 0;
9757
9758 i += vm_page_diagnose_heap(counts + i, KHEAP_DEFAULT);
9759 if (KHEAP_DATA_BUFFERS->kh_heap_id == KHEAP_ID_DATA_BUFFERS) {
9760 i += vm_page_diagnose_heap(counts + i, KHEAP_DATA_BUFFERS);
9761 }
9762 if (KHEAP_KT_VAR->kh_heap_id == KHEAP_ID_KT_VAR) {
9763 i += vm_page_diagnose_kt_heaps(counts + i);
9764 }
9765 assert(i <= zone_view_count);
9766
9767 zone_index_foreach(zidx) {
9768 zone_t z = &zone_array[zidx];
9769 zone_security_flags_t zsflags = zone_security_array[zidx];
9770 zone_view_t zv = z->z_views;
9771
9772 if (zv == NULL) {
9773 continue;
9774 }
9775
9776 zone_stats_t zv_stats_head = z->z_stats;
9777 bool has_raw_view = false;
9778
9779 for (; zv; zv = zv->zv_next) {
9780 /*
9781 * kalloc_types that allocate from the same zone are linked
9782 * as views. Only print the ones that have their own stats.
9783 */
9784 if (zv->zv_stats == zv_stats_head) {
9785 continue;
9786 }
9787 has_raw_view = true;
9788 vm_page_diagnose_zone_stats(counts + i, zv->zv_stats,
9789 z->z_percpu);
9790 snprintf(counts[i].name, sizeof(counts[i].name), "%s%s[%s]",
9791 zone_heap_name(z), z->z_name, zv->zv_name);
9792 i++;
9793 assert(i <= zone_view_count);
9794 }
9795
9796 /*
9797 * Print raw views for non-kalloc or kalloc_type zones
9798 */
9799 bool kalloc_type = zsflags.z_kalloc_type;
9800 if ((zsflags.z_kheap_id == KHEAP_ID_NONE && !kalloc_type) ||
9801 (kalloc_type && has_raw_view)) {
9802 vm_page_diagnose_zone(counts + i, z);
9803 i++;
9804 assert(i <= zone_view_count);
9805 }
9806 }
9807
9808 iterate = !VM_TAG_ACTIVE_UPDATE;
9809 if (iterate) {
9810 enum { kMaxKernelDepth = 1 };
9811 vm_map_t maps[kMaxKernelDepth];
9812 vm_map_entry_t entries[kMaxKernelDepth];
9813 vm_map_t map;
9814 vm_map_entry_t entry;
9815 vm_object_offset_t offset;
9816 vm_page_t page;
9817 int stackIdx, count;
9818
9819 #if !VM_TAG_ACTIVE_UPDATE
9820 vm_page_iterate_objects(info, num_info, &vm_page_count_object);
9821 #endif /* ! VM_TAG_ACTIVE_UPDATE */
9822
9823 map = kernel_map;
9824 stackIdx = 0;
9825 while (map) {
9826 vm_map_lock(map);
9827 for (entry = map->hdr.links.next; map; entry = entry->links.next) {
9828 if (entry->is_sub_map) {
9829 assert(stackIdx < kMaxKernelDepth);
9830 maps[stackIdx] = map;
9831 entries[stackIdx] = entry;
9832 stackIdx++;
9833 map = VME_SUBMAP(entry);
9834 entry = NULL;
9835 break;
9836 }
9837 if (VME_OBJECT(entry) == kernel_object) {
9838 count = 0;
9839 vm_object_lock(VME_OBJECT(entry));
9840 for (offset = entry->links.start; offset < entry->links.end; offset += page_size) {
9841 page = vm_page_lookup(VME_OBJECT(entry), offset);
9842 if (page && VM_PAGE_WIRED(page)) {
9843 count++;
9844 }
9845 }
9846 vm_object_unlock(VME_OBJECT(entry));
9847
9848 if (count) {
9849 assert(VME_ALIAS(entry) != VM_KERN_MEMORY_NONE);
9850 assert(VME_ALIAS(entry) < num_info);
9851 info[VME_ALIAS(entry)].size += ptoa_64(count);
9852 }
9853 }
9854 while (map && (entry == vm_map_last_entry(map))) {
9855 vm_map_unlock(map);
9856 if (!stackIdx) {
9857 map = NULL;
9858 } else {
9859 --stackIdx;
9860 map = maps[stackIdx];
9861 entry = entries[stackIdx];
9862 }
9863 }
9864 }
9865 }
9866 }
9867
9868 process_account(info, num_info, zones_collectable_bytes, iterate);
9869
9870 return KERN_SUCCESS;
9871 }
9872
9873 #if DEBUG || DEVELOPMENT
9874
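/*
 * Debug/development helper: given a kernel address, report the size of the
 * allocation containing it and the vm_tag_t it was charged to. Zone
 * elements are answered from zone metadata (zone_size is set); otherwise
 * the kernel_map (and one level of submap) is searched for an entry that
 * starts at the address.
 */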
9875 kern_return_t
9876 vm_kern_allocation_info(uintptr_t addr, vm_size_t * size, vm_tag_t * tag, vm_size_t * zone_size)
9877 {
9878 kern_return_t ret;
9879 vm_size_t zsize;
9880 vm_map_t map;
9881 vm_map_entry_t entry;
9882
9883 zsize = zone_element_info((void *) addr, tag);
9884 if (zsize) {
9885 *zone_size = *size = zsize;
9886 return KERN_SUCCESS;
9887 }
9888
9889 *zone_size = 0;
9890 ret = KERN_INVALID_ADDRESS;
9891 for (map = kernel_map; map;) {
9892 vm_map_lock(map);
9893 if (!vm_map_lookup_entry_allow_pgz(map, addr, &entry)) {
9894 break;
9895 }
9896 if (entry->is_sub_map) {
9897 if (map != kernel_map) {
9898 break;
9899 }
9900 map = VME_SUBMAP(entry);
9901 continue;
9902 }
9903 if (entry->vme_start != addr) {
9904 break;
9905 }
9906 *tag = (vm_tag_t)VME_ALIAS(entry);
9907 *size = (entry->vme_end - addr);
9908 ret = KERN_SUCCESS;
9909 break;
9910 }
9911 if (map != kernel_map) {
9912 vm_map_unlock(map);
9913 }
9914 vm_map_unlock(kernel_map);
9915
9916 return ret;
9917 }
9918
9919 #endif /* DEBUG || DEVELOPMENT */
9920
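/*
 * If the given tag belongs to a kext allocation site, return its kmod id
 * and optionally copy the kext name into the caller's buffer; returns 0
 * for non-kext tags.
 */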
9921 uint32_t
9922 vm_tag_get_kext(vm_tag_t tag, char * name, vm_size_t namelen)
9923 {
9924 vm_allocation_site_t * site;
9925 uint32_t kmodId;
9926
9927 kmodId = 0;
9928 lck_ticket_lock(&vm_allocation_sites_lock, LCK_GRP_NULL);
9929 if ((site = vm_allocation_sites[tag])) {
9930 if (VM_TAG_KMOD & site->flags) {
9931 kmodId = OSKextGetKmodIDForSite(site, name, namelen);
9932 }
9933 }
9934 lck_ticket_unlock(&vm_allocation_sites_lock);
9935
9936 return kmodId;
9937 }
9938
9939
9940 #if CONFIG_SECLUDED_MEMORY
9941 /*
9942 * Note that there's no locking around other accesses to vm_page_secluded_target.
9943 * That should be OK, since these functions are the only places where it can be changed after
9944 * initialization. Other users (like vm_pageout) may see the wrong value briefly,
9945 * but will eventually get the correct value. This brief mismatch is OK as pageout
9946 * and page freeing will auto-adjust the vm_page_secluded_count to match the target
9947 * over time.
9948 */
9949 unsigned int vm_page_secluded_suppress_cnt = 0;
9950 unsigned int vm_page_secluded_save_target;
9951
9952 LCK_GRP_DECLARE(secluded_suppress_slock_grp, "secluded_suppress_slock");
9953 LCK_SPIN_DECLARE(secluded_suppress_slock, &secluded_suppress_slock_grp);
9954
9955 void
9956 start_secluded_suppression(task_t task)
9957 {
9958 if (task->task_suppressed_secluded) {
9959 return;
9960 }
9961 lck_spin_lock(&secluded_suppress_slock);
9962 if (!task->task_suppressed_secluded && vm_page_secluded_suppress_cnt++ == 0) {
9963 task->task_suppressed_secluded = TRUE;
9964 vm_page_secluded_save_target = vm_page_secluded_target;
9965 vm_page_secluded_target = 0;
9966 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9967 }
9968 lck_spin_unlock(&secluded_suppress_slock);
9969 }
9970
9971 void
9972 stop_secluded_suppression(task_t task)
9973 {
9974 lck_spin_lock(&secluded_suppress_slock);
9975 if (task->task_suppressed_secluded && --vm_page_secluded_suppress_cnt == 0) {
9976 task->task_suppressed_secluded = FALSE;
9977 vm_page_secluded_target = vm_page_secluded_save_target;
9978 VM_PAGE_SECLUDED_COUNT_OVER_TARGET_UPDATE();
9979 }
9980 lck_spin_unlock(&secluded_suppress_slock);
9981 }
9982
9983 #endif /* CONFIG_SECLUDED_MEMORY */
9984
9985 /*
9986 * Move the list of retired pages from the vm_page_queue_retired to
9987 * their final resting place on retired_pages_object.
9988 */
9989 void
9990 vm_retire_boot_pages(void)
9991 {
9992 }
9993
9994 /*
9995 * This holds the reported physical address if an ECC error leads to a panic.
9996 * SMC will store it in PMU SRAM under the 'sECC' key.
9997 */
9998 uint64_t ecc_panic_physical_address = 0;
9999
10000