/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/bits.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()	((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t		freezer_ctx_task;		/* Task being frozen. */

	void		*freezer_ctx_chead;		/* The chead used to track c_segs allocated */
							/* to freeze the task. */

	uint64_t	freezer_ctx_swapped_bytes;	/* Tracks # of compressed bytes. */

	int		freezer_ctx_uncompressed_pages;	/* Tracks # of uncompressed pages frozen. */

	char		*freezer_ctx_compressor_scratch_buf;	/* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */
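/*
 * Illustrative sketch only (not part of this interface): a freezer pass
 * would plausibly zero a freezer_context, point it at the task being
 * frozen, and accumulate statistics into it as pages are compressed.
 * The code below is hypothetical and simplified.
 *
 *	struct freezer_context ctx = { 0 };
 *	ctx.freezer_ctx_task = task;
 *	ctx.freezer_ctx_compressor_scratch_buf = scratch_buf;
 *	... compress the task's pages, bumping
 *	    ctx.freezer_ctx_swapped_bytes and
 *	    ctx.freezer_ctx_uncompressed_pages along the way ...
 */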
#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS		0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS	0x01
#define VM_REAL_FAULT_ADDR_INTERNAL		0x02
#define VM_REAL_FAULT_ADDR_PURGABLE		0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL		0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE		0x05
#define VM_REAL_FAULT_FAST			0x06
#define VM_REAL_FAULT_SLOW			0x07
#define VM_MAP_LOOKUP_OBJECT			0x08


extern int	vm_debug_events;

#define VMF_CHECK_ZFDELAY		0x100
#define VMF_COWDELAY			0x101
#define VMF_ZFDELAY			0x102
#define VMF_COMPRESSORDELAY		0x103

#define VM_PAGEOUT_SCAN			0x104
#define VM_PAGEOUT_BALANCE		0x105
#define VM_PAGEOUT_FREELIST		0x106
#define VM_PAGEOUT_PURGEONE		0x107
#define VM_PAGEOUT_CACHE_EVICT		0x108
#define VM_PAGEOUT_THREAD_BLOCK		0x109
#define VM_PAGEOUT_JETSAM		0x10A
#define VM_INFO1			0x10B
#define VM_INFO2			0x10C
#define VM_INFO3			0x10D
#define VM_INFO4			0x10E
#define VM_INFO5			0x10F
#define VM_INFO6			0x110
#define VM_INFO7			0x111
#define VM_INFO8			0x112
#define VM_INFO9			0x113
#define VM_INFO10			0x114

#define VM_UPL_PAGE_WAIT		0x120
#define VM_IOPL_PAGE_WAIT		0x121
#define VM_PAGE_WAIT_BLOCK		0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP			0x123
#define VM_PAGE_EXPEDITE		0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY	0x125
#endif

#define VM_PAGE_GRAB			0x126
#define VM_PAGE_RELEASE			0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP	0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS	0x129


#define VM_PRESSURE_EVENT		0x130
#define VM_EXECVE			0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER	0x132
#define VM_UPL_REQUEST			0x133
#define VM_IOPL_REQUEST			0x134
#define VM_KERN_REQUEST			0x135

#define VM_DATA_WRITE			0x140

#define VM_PRESSURE_LEVEL_CHANGE	0x141

#define VM_PHYS_WRITE_ACCT		0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN							\
	if (__improbable(vm_debug_events)) {				\
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}								\
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)	\
	MACRO_BEGIN								\
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
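/*
 * Illustrative usage sketch (hedged): call sites typically bracket a region
 * of interest with start/end events and pass counters of interest as the
 * arguments; the argument values shown here are made up.
 *
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
 *	    vm_page_free_count, 0, 0, 0);
 *	... scan work ...
 *	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
 *	    vm_page_free_count, 0, 0, 0);
 */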
extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t	upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t	upl);

extern upl_size_t upl_get_size(
	upl_t	upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t		map,
	vm_map_address_t	offset,
	upl_size_t		*upl_size,
	upl_t			*upl,
	upl_page_info_array_t	page_list,
	unsigned int		*count,
	upl_control_flags_t	*flags,
	vm_tag_t		tag);

extern void iopl_valid_data(
	upl_t	upl_ptr,
	vm_tag_t tag);

extern void vm_page_free_list(
	vm_page_t	mem,
	boolean_t	prepare_object);

extern kern_return_t vm_page_alloc_list(
	vm_size_t	page_count,
	kma_flags_t	flags,
	vm_page_t	*list);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int	vm_pageout_scan_event_counter;
extern unsigned int	vm_page_anonymous_count;
extern thread_t		vm_pageout_scan_thread;
extern thread_t		vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT	((void *)0)
#define VM_PAGEOUT_GC_COLLECT	((void *)1)
#define VM_PAGEOUT_GC_EVENT	((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t	pgo_pending;	/* laundry pages to be processed by pager's iothread */
	unsigned int		pgo_laundry;	/* current count of laundry pages on queue or in flight */
	unsigned int		pgo_maxlaundry;

	uint32_t
	    pgo_busy:1,		/* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,	/* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1,	/* iothread is set to use low priority I/O */
	    pgo_draining:1,
	    pgo_inited:1,
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)	\
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue	vm_pageout_queue_internal;
extern struct vm_pageout_queue	vm_pageout_queue_external;
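/*
 * Illustrative sketch (hedged): pageout-scan-style code checks the throttle
 * with the page queues lock held before pushing more laundry, roughly like
 * this (simplified; the actual wait/wakeup protocol is omitted):
 *
 *	struct vm_pageout_queue *q = &vm_pageout_queue_internal;
 *
 *	if (VM_PAGE_Q_THROTTLED(q)) {
 *		q->pgo_throttled = TRUE;
 *		... block until the iothread drains pgo_laundry ...
 *	}
 */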
/*
 *	Routines exported to Mach.
 */
extern void		vm_pageout(void);

__startup_func extern void vm_config_init(void);

extern kern_return_t	vm_pageout_internal_start(void);

extern void		vm_pageout_object_terminate(
	vm_object_t	object);

extern void		vm_pageout_cluster(
	vm_page_t	m);

extern void		vm_pageout_initialize_page(
	vm_page_t	m);

/* UPL exported routines and structures */

#define upl_lock_init(object)		lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)	lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)		lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)		lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)		lck_mtx_try_lock(&(object)->Lock)

struct _vector_upl_iostates {
	upl_offset_t	offset;
	upl_size_t	size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t		size;
	uint32_t		num_upls;
	uint32_t		invalid_upls;
	uint32_t		max_upls;
	vm_map_t		submap;
	vm_offset_t		submap_dst_addr;
	vm_object_offset_t	offset;
	upl_page_info_array_t	pagelist;
	struct {
		upl_t			elem;
		vector_upl_iostates_t	iostate;
	} upls[];
};

typedef struct _vector_upl* vector_upl_t;

uint32_t vector_upl_max_upls(const upl_t upl);

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t	c_beg;
	upl_offset_t	c_end;
	int		c_aborted;
	uint32_t	c_btref; /* btref_t */
};
#endif

struct upl_io_completion {
	void	*io_context;
	void	(*io_done)(void *, int);

	int	io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);	/* Synchronization */
	int		ref_count;
	int		ext_ref_count;
	int		flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t	u_size;		/* size in bytes of the address space */
	upl_size_t	u_mapped_size;	/* size in bytes of the UPL that is mapped */
	vm_offset_t	kaddr;		/* secondary mapping in kernel */
	vm_object_t	map_object;
	vector_upl_t	vector_upl;
	upl_t		associated_upl;
	struct upl_io_completion *upl_iodone;
	ppnum_t		highest_page;
#if CONFIG_IOSCHED
	int		upl_priority;
	uint64_t	*upl_reprio_info;
	void		*decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t	upl_creator;
	queue_chain_t	uplq;		/* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t	ubc_alias1;
	uintptr_t	ubc_alias2;

	uint32_t	upl_state;
	uint32_t	upl_commit_index;
	uint32_t	upl_create_btref; /* btref_t */

	struct ucd	upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */

	bitmap_t	*lite_list;
	struct upl_page_info page_list[];
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED	0x1
#define UPL_KERNEL_MAPPED	0x2
#define UPL_CLEAR_DIRTY		0x4
#define UPL_COMPOSITE_LIST	0x8
#define UPL_INTERNAL		0x10
#define UPL_PAGE_SYNC_DONE	0x20
#define UPL_DEVICE_MEMORY	0x40
#define UPL_PAGEOUT		0x80
#define UPL_LITE		0x100
#define UPL_IO_WIRE		0x200
#define UPL_ACCESS_BLOCKED	0x400
#define UPL_SHADOWED		0x1000
#define UPL_KERNEL_OBJECT	0x2000
#define UPL_VECTOR		0x4000
#define UPL_SET_DIRTY		0x8000
#define UPL_HAS_BUSY		0x10000
#define UPL_TRACKED_BY_OBJECT	0x20000
#define UPL_EXPEDITE_SUPPORTED	0x40000
#define UPL_DECMP_REQ		0x80000
#define UPL_DECMP_REAL_IO	0x100000
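/*
 * Illustrative sketch (hedged): one plausible pattern is to test these bits
 * on upl->flags, under the UPL lock defined above, to decide how the UPL's
 * pages should be handled:
 *
 *	upl_lock(upl);
 *	if (upl->flags & UPL_DEVICE_MEMORY) {
 *		... no per-page state to commit or abort ...
 *	} else if (upl->flags & UPL_INTERNAL) {
 *		... the upl_page_info array was allocated with the UPL ...
 *	}
 *	upl_unlock(upl);
 */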
/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL	0
#define UPL_CREATE_INTERNAL	0x1
#define UPL_CREATE_LITE		0x2
#define UPL_CREATE_IO_TRACKING	0x4
#define UPL_CREATE_EXPEDITE_SUP	0x8

extern upl_t vector_upl_create(vm_offset_t, uint32_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t		object,
	upl_page_info_array_t	user_page_list,
	unsigned int		num_pages,
	boolean_t		batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_t			*upl_ptr,
	upl_page_info_array_t	user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t		object,
	vm_object_offset_t	offset,
	upl_size_t		size,
	upl_size_t		super_cluster,
	upl_t			*upl,
	upl_page_info_t		*user_page_list,
	unsigned int		*page_list_count,
	upl_control_flags_t	cntrl_flags,
	vm_tag_t		tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t		map,
	upl_t			upl,
	vm_map_offset_t		*dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t		map,
	upl_t			upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t		map,
	upl_t			upl,
	vm_object_offset_t	offset,
	upl_size_t		size,
	vm_prot_t		prot,
	vm_map_offset_t		*dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t		map,
	upl_t			upl,
	vm_object_offset_t	offset,
	upl_size_t		size);

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t		page,
	vm_object_t		object,
	vm_object_offset_t	offset,
	vm_prot_t		protection,
	boolean_t		can_unlock_object,
	vm_map_size_t		*size,		/* IN/OUT */
	vm_map_offset_t		*address,	/* OUT */
	boolean_t		*need_unmap);	/* OUT */
extern void vm_paging_unmap_object(
	vm_object_t	object,
	vm_map_offset_t	start,
	vm_map_offset_t	end);
decl_simple_lock_data(extern, vm_paging_lock);
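/*
 * Illustrative sketch (hedged): the map/unmap pair above is typically used
 * to obtain a temporary kernel mapping of a single page; need_unmap tells
 * the caller whether a fresh mapping was created and must be torn down.
 *
 *	vm_map_size_t	size = PAGE_SIZE;
 *	vm_map_offset_t	kernel_addr;
 *	boolean_t	need_unmap;
 *	kern_return_t	kr;
 *
 *	kr = vm_paging_map_object(page, object, offset, VM_PROT_READ,
 *	        FALSE, &size, &kernel_addr, &need_unmap);
 *	if (kr == KERN_SUCCESS) {
 *		... access the page through kernel_addr ...
 *		if (need_unmap) {
 *			vm_paging_unmap_object(object, kernel_addr,
 *			    kernel_addr + size);
 *		}
 *	}
 */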
/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int	vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t	suspend);

extern kern_return_t upl_transpose(
	upl_t	upl1,
	upl_t	upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t	wait_for_pressure,
	unsigned int	nsecs_monitored,
	unsigned int	*pages_reclaimed_p,
	unsigned int	*pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t	(*func)(int));

struct vm_page_stats_reusable {
	SInt32		reusable_count;
	uint64_t	reusable;
	uint64_t	reused;
	uint64_t	reused_wire;
	uint64_t	reused_remove;
	uint64_t	all_reusable_calls;
	uint64_t	partial_reusable_calls;
	uint64_t	all_reuse_calls;
	uint64_t	partial_reuse_calls;
	uint64_t	reusable_pages_success;
	uint64_t	reusable_pages_failure;
	uint64_t	reusable_pages_shared;
	uint64_t	reuse_pages_success;
	uint64_t	reuse_pages_failure;
	uint64_t	can_reuse_success;
	uint64_t	can_reuse_failure;
	uint64_t	reusable_reclaimed;
	uint64_t	reusable_nonwritable;
	uint64_t	reusable_shared;
	uint64_t	free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
extern int vm_toggle_task_selfdonate_pages(task_t);
extern void vm_task_set_selfdonate_pages(task_t, bool);

struct vm_config {
	boolean_t	compressor_is_present;	/* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	compressor_is_active;	/* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t	swap_is_present;	/* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t	swap_is_active;		/* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t	freezer_swap_is_active;	/* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED				0x0	/* no compressor or swap configured */
#define VM_PAGER_DEFAULT				0x1	/* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP			0x2	/* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP			0x4	/* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT			0x8	/* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP		0x10	/* Freezer backed by in-core compressor only, i.e. frozen data remains in-core and compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP	0x20	/* Active in-core compressor + freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES	6	/* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT		(vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE		(vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT		(vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE		(vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE	(vm_config.freezer_swap_is_active == TRUE)
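/*
 * Illustrative sketch (hedged): consumers normally test the configuration
 * through the convenience macros above rather than reading vm_config
 * directly, e.g.:
 *
 *	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
 *		... anonymous pages go to the compressor ...
 *		if (VM_CONFIG_SWAP_IS_ACTIVE) {
 *			... full compressed segments may be swapped out ...
 *		}
 *	}
 */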
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t	vm_pressure_thread_running;
	boolean_t	vm_pressure_changed;
	boolean_t	vm_restricted_to_single_processor;
	int		vm_compressor_thread_count;

	unsigned int	vm_page_speculative_q_age_ms;
	unsigned int	vm_page_speculative_percentage;
	unsigned int	vm_page_speculative_target;

	unsigned int	vm_pageout_swap_wait;
	unsigned int	vm_pageout_idle_wait;		/* milliseconds */
	unsigned int	vm_pageout_empty_wait;		/* milliseconds */
	unsigned int	vm_pageout_burst_wait;		/* milliseconds */
	unsigned int	vm_pageout_deadlock_wait;	/* milliseconds */
	unsigned int	vm_pageout_deadlock_relief;
	unsigned int	vm_pageout_burst_inactive_throttle;

	unsigned int	vm_pageout_inactive;
	unsigned int	vm_pageout_inactive_used;	/* debugging */
	unsigned int	vm_pageout_inactive_clean;	/* debugging */

	uint32_t	vm_page_filecache_min;
	uint32_t	vm_page_filecache_min_divisor;
	uint32_t	vm_page_xpmapped_min;
	uint32_t	vm_page_xpmapped_min_divisor;
	uint64_t	vm_pageout_considered_page_last;

	int		vm_page_free_count_init;

	unsigned int	vm_memory_pressure;

	int		memorystatus_purge_on_critical;
	int		memorystatus_purge_on_warning;
	int		memorystatus_purge_on_urgent;

	thread_t	vm_pageout_early_swapout_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long	vm_pageout_considered_page;
	unsigned long	vm_pageout_considered_bq_internal;
	unsigned long	vm_pageout_considered_bq_external;
	unsigned long	vm_pageout_skipped_external;
	unsigned long	vm_pageout_skipped_internal;

	unsigned long	vm_pageout_pages_evicted;
	unsigned long	vm_pageout_pages_purged;
	unsigned long	vm_pageout_freed_cleaned;
	unsigned long	vm_pageout_freed_speculative;
	unsigned long	vm_pageout_freed_external;
	unsigned long	vm_pageout_freed_internal;
	unsigned long	vm_pageout_inactive_dirty_internal;
	unsigned long	vm_pageout_inactive_dirty_external;
	unsigned long	vm_pageout_inactive_referenced;
	unsigned long	vm_pageout_reactivation_limit_exceeded;
	unsigned long	vm_pageout_inactive_force_reclaim;
	unsigned long	vm_pageout_inactive_nolock;
	unsigned long	vm_pageout_filecache_min_reactivated;
	unsigned long	vm_pageout_scan_inactive_throttled_internal;
	unsigned long	vm_pageout_scan_inactive_throttled_external;

	uint64_t	vm_pageout_compressions;
	uint64_t	vm_compressor_pages_grabbed;
	unsigned long	vm_compressor_failed;

	unsigned long	vm_page_pages_freed;

	unsigned long	vm_phantom_cache_found_ghost;
	unsigned long	vm_phantom_cache_added_ghost;

	unsigned long	vm_pageout_protected_sharedcache;
	unsigned long	vm_pageout_forcereclaimed_sharedcache;
	unsigned long	vm_pageout_protected_realtime;
	unsigned long	vm_pageout_forcereclaimed_realtime;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;

extern void vm_swapout_thread(void);
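/*
 * Illustrative sketch (hedged): these are plain counters bumped directly by
 * the pageout code as it runs, e.g.:
 *
 *	vm_pageout_vminfo.vm_pageout_considered_page++;
 */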
#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t	vm_pageout_balanced;
	uint32_t	vm_pageout_scan_event_counter;
	uint32_t	vm_pageout_speculative_dirty;

	uint32_t	vm_pageout_inactive_busy;
	uint32_t	vm_pageout_inactive_absent;
	uint32_t	vm_pageout_inactive_notalive;
	uint32_t	vm_pageout_inactive_error;
	uint32_t	vm_pageout_inactive_deactivated;

	uint32_t	vm_pageout_enqueued_cleaned;

	uint32_t	vm_pageout_cleaned_busy;
	uint32_t	vm_pageout_cleaned_nolock;
	uint32_t	vm_pageout_cleaned_reference_reactivated;
	uint32_t	vm_pageout_cleaned_volatile_reactivated;
	uint32_t	vm_pageout_cleaned_reactivated;	/* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t	vm_pageout_cleaned_fault_reactivated;

	uint32_t	vm_pageout_dirty_no_pager;
	uint32_t	vm_pageout_purged_objects;

	uint32_t	vm_pageout_scan_throttle;
	uint32_t	vm_pageout_scan_reclaimed_throttled;
	uint32_t	vm_pageout_scan_burst_throttle;
	uint32_t	vm_pageout_scan_empty_throttle;
	uint32_t	vm_pageout_scan_swap_throttle;
	uint32_t	vm_pageout_scan_deadlock_detected;
	uint32_t	vm_pageout_scan_inactive_throttle_success;
	uint32_t	vm_pageout_scan_throttle_deferred;

	uint32_t	vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t	vm_grab_anon_overrides;
	uint32_t	vm_grab_anon_nops;

	uint32_t	vm_pageout_no_victim;
	uint32_t	vm_pageout_yield_for_free_pages;
	unsigned long	vm_pageout_throttle_up_count;
	uint32_t	vm_page_steal_pageout_page;

	uint32_t	vm_cs_validated_resets;
	uint32_t	vm_object_iopl_request_sleep_for_cleaning;
	uint32_t	vm_page_slide_counter;
	uint32_t	vm_page_slide_errors;
	uint32_t	vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long	upl_cow;
	unsigned long	upl_cow_again;
	unsigned long	upl_cow_pages;
	unsigned long	upl_cow_again_pages;
	unsigned long	iopl_cow;
	unsigned long	iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)		\
	MACRO_BEGIN				\
	vm_pageout_debug.member += value;	\
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif
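/*
 * Illustrative sketch (hedged): call sites bump the debug counters through
 * the macro so that the stores compile away entirely on release kernels,
 * e.g.:
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */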
#define MAX_COMPRESSOR_THREAD_COUNT	8

/*
 * Forward declarations for internal routines.
 */

/*
 * Contains relevant state for pageout iothreads. Some state is unused by
 * the external (file-backed) thread.
 */
struct pgo_iothread_state {
	struct vm_pageout_queue	*q;
	// cheads unused by the external thread
	void			*current_early_swapout_chead;
	void			*current_regular_swapout_chead;
	void			*current_late_swapout_chead;
	char			*scratch_buf;
	int			id;
	thread_t		pgo_iothread;	// holds a +1 ref
	sched_cond_atomic_t	pgo_wakeup;
#if DEVELOPMENT || DEBUG
	// for perf_compressor benchmark
	struct vm_pageout_queue	*benchmark_q;
#endif /* DEVELOPMENT || DEBUG */
};

extern struct pgo_iothread_state pgo_iothread_internal_state[MAX_COMPRESSOR_THREAD_COUNT];

extern struct pgo_iothread_state pgo_iothread_external_state;

struct vm_compressor_swapper_stats {
	uint64_t	unripe_under_30s;
	uint64_t	unripe_under_60s;
	uint64_t	unripe_under_300s;
	uint64_t	reclaim_swapins;
	uint64_t	defrag_swapins;
	uint64_t	compressor_swap_threshold_exceeded;
	uint64_t	external_q_throttled;
	uint64_t	free_count_below_reserve;
	uint64_t	thrashing_detected;
	uint64_t	fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t	vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t	vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t	vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	// total mach absolute time that compressor threads have been running
	uint64_t	vmct_cthreads_total;
	int32_t		vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t		vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _VM_VM_PAGEOUT_H_ */