/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef  KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef  MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()       ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t  freezer_ctx_task;               /* Task being frozen. */

	void    *freezer_ctx_chead;             /* The chead used to track c_segs allocated */
	                                        /* to freeze the task. */

	uint64_t freezer_ctx_swapped_bytes;     /* Tracks # of compressed bytes. */

	int     freezer_ctx_uncompressed_pages; /* Tracks # of uncompressed pages frozen. */

	char    *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */
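
/*
 * Illustrative sketch (an assumption, not code from this interface):
 * a freeze pass would fill in one freezer_context for the task it is
 * compressing, roughly as below; the real loop lives in the
 * memorystatus/compressor code.
 *
 *	struct freezer_context ctx = { };
 *
 *	ctx.freezer_ctx_task = task;
 *	// compress the task's pages, accumulating
 *	// ctx.freezer_ctx_swapped_bytes and
 *	// ctx.freezer_ctx_uncompressed_pages as pages are frozen
 */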

#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08



extern int vm_debug_events;

#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113
#define VM_INFO10                       0x114

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125
#endif

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129


#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	if (__improbable(vm_debug_events)) {                            \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                               \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)  \
	MACRO_BEGIN                                                             \
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
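
/*
 * Illustrative sketch: a typical call site brackets a pageout pass
 * with one of the event codes above plus the kdebug function
 * qualifiers from <sys/kdebug.h>. Only VM_DEBUG_EVENT is gated on
 * the vm_debug_events flag; the _CONSTANT_ variant always fires.
 * (The "vm_pageout_scan" name and the free-count argument are just
 * examples.)
 *
 *	VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN,
 *	    DBG_FUNC_START, vm_page_free_count, 0, 0, 0);
 *	// ... scan the inactive queue ...
 *	VM_DEBUG_CONSTANT_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN,
 *	    DBG_FUNC_END, vm_page_free_count, 0, 0, 0);
 */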

extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t           upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t           upl);

extern upl_size_t upl_get_size(
	upl_t           upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t           upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t           upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t           upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t           upl_ptr,
	vm_tag_t        tag);

extern void vm_page_free_list(
	vm_page_t       mem,
	boolean_t       prepare_object);

extern kern_return_t vm_page_alloc_list(
	vm_size_t       page_count,
	kma_flags_t     flags,
	vm_page_t       *list);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */
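
/*
 * Illustrative sketch (XNU_KERNEL_PRIVATE callers only): a page list
 * from vm_page_alloc_list() can be walked with the opaque accessors
 * above and returned in a single call. KMA_NONE and the NULL list
 * terminator are assumptions here, not guarantees of this header.
 *
 *	vm_page_t page_list, p;
 *
 *	if (vm_page_alloc_list(npages, KMA_NONE, &page_list) == KERN_SUCCESS) {
 *	        for (p = page_list; p != NULL; p = vm_page_get_next(p)) {
 *	                ppnum_t pnum = vm_page_get_phys_page(p);
 *	                // ... hand pnum to the device ...
 *	        }
 *	        vm_page_free_list(page_list, FALSE);
 *	}
 */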

#ifdef  MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int     vm_pageout_scan_event_counter;
extern unsigned int     vm_page_anonymous_count;
extern thread_t         vm_pageout_scan_thread;
extern thread_t         vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t    pgo_pending;    /* laundry pages to be processed by pager's iothread */
	uint64_t                pgo_tid;        /* thread ID of I/O thread that services this queue */
	unsigned int            pgo_laundry;    /* current count of laundry pages on queue or in flight */
	unsigned int            pgo_maxlaundry;

	uint32_t
	    pgo_idle:1,         /* iothread is blocked waiting for work to do */
	    pgo_busy:1,         /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,    /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1,  /* iothread is set to use low priority I/O */
	    pgo_draining:1,
	    pgo_inited:1,
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
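
/*
 * Illustrative sketch: since the queue fields are protected by the
 * page-queues lock (see the comment above), a throttle check is
 * bracketed by vm_page_lock_queues()/vm_page_unlock_queues() from
 * vm_page.h:
 *
 *	vm_page_lock_queues();
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal)) {
 *	        // laundry is backed up; hold off queuing more pageouts
 *	}
 *	vm_page_unlock_queues();
 */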

/*
 * Routines exported to Mach.
 */
extern void             vm_pageout(void);

__startup_func extern void vm_config_init(void);

extern kern_return_t    vm_pageout_internal_start(void);

extern void             vm_pageout_object_terminate(
	vm_object_t     object);

extern void             vm_pageout_cluster(
	vm_page_t       m);

extern void             vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)           lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)        lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)                lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)              lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)            lck_mtx_try_lock(&(object)->Lock)
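
/*
 * Illustrative sketch: these macros wrap the mutex embedded in
 * struct upl, so serialized access follows the usual pattern:
 *
 *	upl_lock(upl);
 *	// ... examine or update the UPL's state ...
 *	upl_unlock(upl);
 */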

#define MAX_VECTOR_UPL_ELEMENTS 8

struct _vector_upl_iostates {
	upl_offset_t    offset;
	upl_size_t      size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                _reserved;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_t                   upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t   pagelist;
	vector_upl_iostates_t   upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES  16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	void            *c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif

struct upl_io_completion {
	void    *io_context;
	void    (*io_done)(void *, int);

	int     io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;         /* size in bytes of the address space */
	upl_size_t      u_mapped_size;  /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;          /* secondary mapping in kernel */
	vm_object_t     map_object;
	ppnum_t         highest_page;
	void*           vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	void            *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif  /* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);
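
/*
 * Illustrative sketch (assumes an already-created "upl"): the enter
 * and remove calls above are paired, just as vm_map_enter() and
 * vm_map_remove() would be:
 *
 *	vm_map_offset_t dst_addr;
 *
 *	if (vm_map_enter_upl(kernel_map, upl, &dst_addr) == KERN_SUCCESS) {
 *	        // ... access the UPL's pages through dst_addr ...
 *	        (void) vm_map_remove_upl(kernel_map, upl);
 *	}
 */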

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif  /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));

struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
extern int vm_toggle_task_selfdonate_pages(task_t);
extern void vm_task_set_selfdonate_pages(task_t, bool);

struct vm_config {
	boolean_t       compressor_is_present;  /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       compressor_is_active;   /* pager can actively compress pages...  'compressor_is_present' must be set */
	boolean_t       swap_is_present;        /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t       swap_is_active;         /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t       freezer_swap_is_active; /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED                 0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                        0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP  0x20 /* Active in-core compressor + Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES      6       /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)
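
/*
 * Illustrative sketch: the rest of the VM keys off these convenience
 * macros rather than vm_compressor_mode directly; e.g. an in-core,
 * compressor-only configuration (VM_PAGER_COMPRESSOR_NO_SWAP) reads
 * as:
 *
 *	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE && !VM_CONFIG_SWAP_IS_ACTIVE) {
 *	        // compressed pages stay resident; nothing goes to disk
 *	}
 */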

#endif  /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t       vm_pressure_thread_running;
	boolean_t       vm_pressure_changed;
	boolean_t       vm_restricted_to_single_processor;
	int             vm_compressor_thread_count;

	unsigned int    vm_page_speculative_q_age_ms;
	unsigned int    vm_page_speculative_percentage;
	unsigned int    vm_page_speculative_target;

	unsigned int    vm_pageout_swap_wait;
	unsigned int    vm_pageout_idle_wait;           /* milliseconds */
	unsigned int    vm_pageout_empty_wait;          /* milliseconds */
	unsigned int    vm_pageout_burst_wait;          /* milliseconds */
	unsigned int    vm_pageout_deadlock_wait;       /* milliseconds */
	unsigned int    vm_pageout_deadlock_relief;
	unsigned int    vm_pageout_burst_inactive_throttle;

	unsigned int    vm_pageout_inactive;
	unsigned int    vm_pageout_inactive_used;       /* debugging */
	unsigned int    vm_pageout_inactive_clean;      /* debugging */

	uint32_t        vm_page_filecache_min;
	uint32_t        vm_page_filecache_min_divisor;
	uint32_t        vm_page_xpmapped_min;
	uint32_t        vm_page_xpmapped_min_divisor;
	uint64_t        vm_pageout_considered_page_last;

	int             vm_page_free_count_init;

	unsigned int    vm_memory_pressure;

	int             memorystatus_purge_on_critical;
	int             memorystatus_purge_on_warning;
	int             memorystatus_purge_on_urgent;

	thread_t        vm_pageout_external_iothread;
	thread_t        vm_pageout_internal_iothread;
	thread_t        vm_pageout_early_swapout_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long   vm_pageout_considered_page;
	unsigned long   vm_pageout_considered_bq_internal;
	unsigned long   vm_pageout_considered_bq_external;
	unsigned long   vm_pageout_skipped_external;
	unsigned long   vm_pageout_skipped_internal;

	unsigned long   vm_pageout_pages_evicted;
	unsigned long   vm_pageout_pages_purged;
	unsigned long   vm_pageout_freed_cleaned;
	unsigned long   vm_pageout_freed_speculative;
	unsigned long   vm_pageout_freed_external;
	unsigned long   vm_pageout_freed_internal;
	unsigned long   vm_pageout_inactive_dirty_internal;
	unsigned long   vm_pageout_inactive_dirty_external;
	unsigned long   vm_pageout_inactive_referenced;
	unsigned long   vm_pageout_reactivation_limit_exceeded;
	unsigned long   vm_pageout_inactive_force_reclaim;
	unsigned long   vm_pageout_inactive_nolock;
	unsigned long   vm_pageout_filecache_min_reactivated;
	unsigned long   vm_pageout_scan_inactive_throttled_internal;
	unsigned long   vm_pageout_scan_inactive_throttled_external;

	uint64_t        vm_pageout_compressions;
	uint64_t        vm_compressor_pages_grabbed;
	unsigned long   vm_compressor_failed;

	unsigned long   vm_page_pages_freed;

	unsigned long   vm_phantom_cache_found_ghost;
	unsigned long   vm_phantom_cache_added_ghost;

	unsigned long   vm_pageout_protected_sharedcache;
	unsigned long   vm_pageout_forcereclaimed_sharedcache;
	unsigned long   vm_pageout_protected_realtime;
	unsigned long   vm_pageout_forcereclaimed_realtime;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;

extern void vm_swapout_thread(void);

#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t        vm_pageout_balanced;
	uint32_t        vm_pageout_scan_event_counter;
	uint32_t        vm_pageout_speculative_dirty;

	uint32_t        vm_pageout_inactive_busy;
	uint32_t        vm_pageout_inactive_absent;
	uint32_t        vm_pageout_inactive_notalive;
	uint32_t        vm_pageout_inactive_error;
	uint32_t        vm_pageout_inactive_deactivated;

	uint32_t        vm_pageout_enqueued_cleaned;

	uint32_t        vm_pageout_cleaned_busy;
	uint32_t        vm_pageout_cleaned_nolock;
	uint32_t        vm_pageout_cleaned_reference_reactivated;
	uint32_t        vm_pageout_cleaned_volatile_reactivated;
	uint32_t        vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t        vm_pageout_cleaned_fault_reactivated;

	uint32_t        vm_pageout_dirty_no_pager;
	uint32_t        vm_pageout_purged_objects;

	uint32_t        vm_pageout_scan_throttle;
	uint32_t        vm_pageout_scan_reclaimed_throttled;
	uint32_t        vm_pageout_scan_burst_throttle;
	uint32_t        vm_pageout_scan_empty_throttle;
	uint32_t        vm_pageout_scan_swap_throttle;
	uint32_t        vm_pageout_scan_deadlock_detected;
	uint32_t        vm_pageout_scan_inactive_throttle_success;
	uint32_t        vm_pageout_scan_throttle_deferred;

	uint32_t        vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t        vm_grab_anon_overrides;
	uint32_t        vm_grab_anon_nops;

	uint32_t        vm_pageout_no_victim;
	uint32_t        vm_pageout_yield_for_free_pages;
	unsigned long   vm_pageout_throttle_up_count;
	uint32_t        vm_page_steal_pageout_page;

	uint32_t        vm_cs_validated_resets;
	uint32_t        vm_object_iopl_request_sleep_for_cleaning;
	uint32_t        vm_page_slide_counter;
	uint32_t        vm_page_slide_errors;
	uint32_t        vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long   upl_cow;
	unsigned long   upl_cow_again;
	unsigned long   upl_cow_pages;
	unsigned long   upl_cow_again_pages;
	unsigned long   iopl_cow;
	unsigned long   iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	        vm_pageout_debug.member += value;       \
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif
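
/*
 * Illustrative sketch: call sites bump the counters unconditionally
 * and rely on the macro compiling away on RELEASE kernels:
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */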

#define MAX_COMPRESSOR_THREAD_COUNT     8

/*
 * Forward declarations for internal routines.
 */
struct cq {
	struct vm_pageout_queue *q;
	void                    *current_early_swapout_chead;
	void                    *current_regular_swapout_chead;
	void                    *current_late_swapout_chead;
	char                    *scratch_buf;
	int                     id;
#if DEVELOPMENT || DEBUG
	struct vm_pageout_queue *benchmark_q;
#endif /* DEVELOPMENT || DEBUG */
};

extern struct cq ciq[MAX_COMPRESSOR_THREAD_COUNT];

struct vm_compressor_swapper_stats {
	uint64_t        unripe_under_30s;
	uint64_t        unripe_under_60s;
	uint64_t        unripe_under_300s;
	uint64_t        reclaim_swapins;
	uint64_t        defrag_swapins;
	uint64_t        compressor_swap_threshold_exceeded;
	uint64_t        external_q_throttled;
	uint64_t        free_count_below_reserve;
	uint64_t        thrashing_detected;
	uint64_t        fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t        vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t        vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t        vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	// total mach absolute time that the compressor threads have been running
	uint64_t        vmct_cthreads_total;
	int32_t         vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t         vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif  /* _VM_VM_PAGEOUT_H_ */