/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef  KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef  MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()       ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t  freezer_ctx_task;               /* Task being frozen. */

	void    *freezer_ctx_chead;             /* The chead used to track c_segs allocated */
	                                        /* to freeze the task.*/

	uint64_t freezer_ctx_swapped_bytes;     /* Tracks # of compressed bytes.*/

	int     freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */

	char    *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */

#define VM_DYNAMIC_PAGING_ENABLED()     (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08



extern int      vm_debug_events;

/* kdebug event codes emitted via VM_DEBUG_EVENT / VM_DEBUG_CONSTANT_EVENT */
#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125
#endif

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129


#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142

/* Emit a DBG_MACH_VM tracepoint only when vm_debug_events is enabled. */
#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	if (__improbable(vm_debug_events)) {                            \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                               \
	MACRO_END

/* Unconditional variant of VM_DEBUG_EVENT. */
#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END

extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t   upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t   upl);

extern upl_size_t upl_get_size(
	upl_t   upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t                tag);

extern void vm_page_free_list(
	vm_page_t               mem,
	boolean_t               prepare_object);

extern kern_return_t vm_page_alloc_list(
	int                     page_count,
	kma_flags_t             flags,
	vm_page_t               *list);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef  MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int     vm_pageout_scan_event_counter;
extern unsigned int     vm_page_anonymous_count;
extern thread_t         vm_pageout_scan_thread;
extern thread_t         vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;       /* laundry pages to be processed by pager's iothread */
	uint64_t        pgo_tid;                /* thread ID of I/O thread that services this queue */
	unsigned int    pgo_laundry;            /* current count of laundry pages on queue or in flight */
	unsigned int    pgo_maxlaundry;

	uint32_t
	    pgo_idle:1,         /* iothread is blocked waiting for work to do */
	    pgo_busy:1,         /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,    /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1,  /* iothread is set to use low priority I/O */
	    pgo_draining:1,     /* NOTE(review): undocumented here; semantics defined in vm_pageout.c — confirm */
	    pgo_inited:1,       /* NOTE(review): undocumented here; presumably "queue initialized" — confirm */
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct   vm_pageout_queue        vm_pageout_queue_internal;
extern struct   vm_pageout_queue        vm_pageout_queue_external;


/*
 *	Routines exported to Mach.
 */
extern void             vm_pageout(void);

__startup_func extern void vm_config_init(void);

extern kern_return_t    vm_pageout_internal_start(void);

extern void             vm_pageout_object_terminate(
	vm_object_t     object);

extern void             vm_pageout_cluster(
	vm_page_t       m);

extern void             vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)           lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)                lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)              lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)            lck_mtx_try_lock(&(object)->Lock)

#define MAX_VECTOR_UPL_ELEMENTS 8

/* Per-sub-UPL I/O progress state for a vector UPL. */
struct _vector_upl_iostates {
	upl_offset_t    offset;
	upl_size_t      size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

/* A UPL composed of up to MAX_VECTOR_UPL_ELEMENTS sub-UPLs. */
struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                _reserved;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_t                   upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t   pagelist;
	vector_upl_iostates_t   upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES  16
#define UPL_DEBUG_COMMIT_RECORDS 4

/* Records one commit/abort against a UPL range, with backtrace, for debugging. */
struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	void *          c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif

/* Completion callout invoked by upl_callout_iodone(). */
struct upl_io_completion {
	void    *io_context;
	void    (*io_done)(void *, int);

	int     io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;         /* size in bytes of the address space */
	upl_size_t      u_mapped_size;  /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;          /* secondary mapping in kernel */
	vm_object_t     map_object;
	ppnum_t         highest_page;
	void*           vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	void            *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif  /* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int    vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t (*func)(int));

/* Counters for the "reusable pages" accounting (vm_behavior reuse hints). */
struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);


struct vm_config {
	boolean_t       compressor_is_present;  /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       compressor_is_active;   /* pager can actively compress pages...  'compressor_is_present' must be set */
	boolean_t       swap_is_present;        /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       swap_is_active;         /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t       freezer_swap_is_active; /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED                 0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                        0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only i.e. frozen data remain in-core compressed.*/
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP  0x20    /* Active in-core compressor + Freezer backed by in-core compressor with swap support too.*/

#define VM_PAGER_MAX_MODES      6       /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)

#endif  /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

/* Tunables and state shared by the pageout daemon's threads. */
struct vm_pageout_state {
	boolean_t       vm_pressure_thread_running;
	boolean_t       vm_pressure_changed;
	boolean_t       vm_restricted_to_single_processor;
	int             vm_compressor_thread_count;

	unsigned int    vm_page_speculative_q_age_ms;
	unsigned int    vm_page_speculative_percentage;
	unsigned int    vm_page_speculative_target;

	unsigned int    vm_pageout_swap_wait;
	unsigned int    vm_pageout_idle_wait;           /* milliseconds */
	unsigned int    vm_pageout_empty_wait;          /* milliseconds */
	unsigned int    vm_pageout_burst_wait;          /* milliseconds */
	unsigned int    vm_pageout_deadlock_wait;       /* milliseconds */
	unsigned int    vm_pageout_deadlock_relief;
	unsigned int    vm_pageout_burst_inactive_throttle;

	unsigned int    vm_pageout_inactive;
	unsigned int    vm_pageout_inactive_used;       /* debugging */
	unsigned int    vm_pageout_inactive_clean;      /* debugging */

	uint32_t        vm_page_filecache_min;
	uint32_t        vm_page_filecache_min_divisor;
	uint32_t        vm_page_xpmapped_min;
	uint32_t        vm_page_xpmapped_min_divisor;
	uint64_t        vm_pageout_considered_page_last;

	int             vm_page_free_count_init;

	unsigned int    vm_memory_pressure;

	int             memorystatus_purge_on_critical;
	int             memorystatus_purge_on_warning;
	int             memorystatus_purge_on_urgent;

	thread_t        vm_pageout_external_iothread;
	thread_t        vm_pageout_internal_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long   vm_pageout_considered_page;
	unsigned long   vm_pageout_considered_bq_internal;
	unsigned long   vm_pageout_considered_bq_external;
	unsigned long   vm_pageout_skipped_external;
	unsigned long   vm_pageout_skipped_internal;

	unsigned long   vm_pageout_pages_evicted;
	unsigned long   vm_pageout_pages_purged;
	unsigned long   vm_pageout_freed_cleaned;
	unsigned long   vm_pageout_freed_speculative;
	unsigned long   vm_pageout_freed_external;
	unsigned long   vm_pageout_freed_internal;
	unsigned long   vm_pageout_inactive_dirty_internal;
	unsigned long   vm_pageout_inactive_dirty_external;
	unsigned long   vm_pageout_inactive_referenced;
	unsigned long   vm_pageout_reactivation_limit_exceeded;
	unsigned long   vm_pageout_inactive_force_reclaim;
	unsigned long   vm_pageout_inactive_nolock;
	unsigned long   vm_pageout_filecache_min_reactivated;
	unsigned long   vm_pageout_scan_inactive_throttled_internal;
	unsigned long   vm_pageout_scan_inactive_throttled_external;

	uint64_t        vm_pageout_compressions;
	uint64_t        vm_compressor_pages_grabbed;
	unsigned long   vm_compressor_failed;

	unsigned long   vm_page_pages_freed;

	unsigned long   vm_phantom_cache_found_ghost;
	unsigned long   vm_phantom_cache_added_ghost;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;


#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t        vm_pageout_balanced;
	uint32_t        vm_pageout_scan_event_counter;
	uint32_t        vm_pageout_speculative_dirty;

	uint32_t        vm_pageout_inactive_busy;
	uint32_t        vm_pageout_inactive_absent;
	uint32_t        vm_pageout_inactive_notalive;
	uint32_t        vm_pageout_inactive_error;
	uint32_t        vm_pageout_inactive_deactivated;

	uint32_t        vm_pageout_enqueued_cleaned;

	uint32_t        vm_pageout_cleaned_busy;
	uint32_t        vm_pageout_cleaned_nolock;
	uint32_t        vm_pageout_cleaned_reference_reactivated;
	uint32_t        vm_pageout_cleaned_volatile_reactivated;
	uint32_t        vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t        vm_pageout_cleaned_fault_reactivated;

	uint32_t        vm_pageout_dirty_no_pager;
	uint32_t        vm_pageout_purged_objects;

	uint32_t        vm_pageout_scan_throttle;
	uint32_t        vm_pageout_scan_reclaimed_throttled;
	uint32_t        vm_pageout_scan_burst_throttle;
	uint32_t        vm_pageout_scan_empty_throttle;
	uint32_t        vm_pageout_scan_swap_throttle;
	uint32_t        vm_pageout_scan_deadlock_detected;
	uint32_t        vm_pageout_scan_inactive_throttle_success;
	uint32_t        vm_pageout_scan_throttle_deferred;

	uint32_t        vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t        vm_grab_anon_overrides;
	uint32_t        vm_grab_anon_nops;

	uint32_t        vm_pageout_no_victim;
	unsigned long   vm_pageout_throttle_up_count;
	uint32_t        vm_page_steal_pageout_page;

	uint32_t        vm_cs_validated_resets;
	uint32_t        vm_object_iopl_request_sleep_for_cleaning;
	uint32_t        vm_page_slide_counter;
	uint32_t        vm_page_slide_errors;
	uint32_t        vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long   upl_cow;
	unsigned long   upl_cow_again;
	unsigned long   upl_cow_pages;
	unsigned long   upl_cow_again_pages;
	unsigned long   iopl_cow;
	unsigned long   iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	vm_pageout_debug.member += value;               \
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif

#define MAX_COMPRESSOR_THREAD_COUNT      8

/* Compressor swapper statistics (always compiled in). */
struct vm_compressor_swapper_stats {
	uint64_t        unripe_under_30s;
	uint64_t        unripe_under_60s;
	uint64_t        unripe_under_300s;
	uint64_t        reclaim_swapins;
	uint64_t        defrag_swapins;
	uint64_t        compressor_swap_threshold_exceeded;
	uint64_t        external_q_throttled;
	uint64_t        free_count_below_reserve;
	uint64_t        thrashing_detected;
	uint64_t        fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
/* Per-compressor-thread statistics, DEVELOPMENT/DEBUG builds only. */
typedef struct vmct_stats_s {
	uint64_t        vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t        vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t        vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t        vmct_cthreads_total;
	int32_t         vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t         vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif
#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_PAGEOUT_H_ */