/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/vm_pageout.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1986
 *
 *	Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>
#include <kern/sched_prim.h>
#include <kern/bits.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()    ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time. Should that
	 * change, we'll need to add support for multiple freezer contexts.
	 */

	task_t   freezer_ctx_task;                   /* Task being frozen. */

	void    *freezer_ctx_chead;                  /* The chead used to track c_segs allocated */
	                                             /* to freeze the task. */

	uint64_t freezer_ctx_swapped_bytes;          /* Tracks # of compressed bytes. */

	int      freezer_ctx_uncompressed_pages;     /* Tracks # of uncompressed pages frozen. */

	char    *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */
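/*
 * Illustrative sketch (not part of the interface): since only one task is
 * frozen at a time, a freeze operation would carry a single freezer_context
 * describing that task, roughly as below. The exact field usage here is an
 * assumption for illustration only; the authoritative users live in the
 * freezer/compressor code.
 *
 *	struct freezer_context ctx = { };
 *	ctx.freezer_ctx_task = task;            // task currently being frozen
 *	ctx.freezer_ctx_chead = NULL;           // c_seg head filled in as segments are allocated
 *	ctx.freezer_ctx_swapped_bytes = 0;      // accumulated compressed bytes
 *	ctx.freezer_ctx_uncompressed_pages = 0; // pages frozen so far
 */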
#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS    0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS   0x01
#define VM_REAL_FAULT_ADDR_INTERNAL        0x02
#define VM_REAL_FAULT_ADDR_PURGABLE        0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL        0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE     0x05
#define VM_REAL_FAULT_FAST                 0x06
#define VM_REAL_FAULT_SLOW                 0x07
#define VM_MAP_LOOKUP_OBJECT               0x08



extern int vm_debug_events;

#define VMF_CHECK_ZFDELAY       0x100
#define VMF_COWDELAY            0x101
#define VMF_ZFDELAY             0x102
#define VMF_COMPRESSORDELAY     0x103

#define VM_PAGEOUT_SCAN         0x104
#define VM_PAGEOUT_BALANCE      0x105
#define VM_PAGEOUT_FREELIST     0x106
#define VM_PAGEOUT_PURGEONE     0x107
#define VM_PAGEOUT_CACHE_EVICT  0x108
#define VM_PAGEOUT_THREAD_BLOCK 0x109
#define VM_PAGEOUT_JETSAM       0x10A
#define VM_INFO1                0x10B
#define VM_INFO2                0x10C
#define VM_INFO3                0x10D
#define VM_INFO4                0x10E
#define VM_INFO5                0x10F
#define VM_INFO6                0x110
#define VM_INFO7                0x111
#define VM_INFO8                0x112
#define VM_INFO9                0x113
#define VM_INFO10               0x114

#define VM_UPL_PAGE_WAIT        0x120
#define VM_IOPL_PAGE_WAIT       0x121
#define VM_PAGE_WAIT_BLOCK      0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                0x123
#define VM_PAGE_EXPEDITE             0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY   0x125
#endif

#define VM_PAGE_GRAB                          0x126
#define VM_PAGE_RELEASE                       0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP        0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS  0x129


#define VM_PRESSURE_EVENT             0x130
#define VM_EXECVE                     0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER   0x132
#define VM_UPL_REQUEST                0x133
#define VM_IOPL_REQUEST               0x134
#define VM_KERN_REQUEST               0x135

#define VM_DATA_WRITE                 0x140

#define VM_PRESSURE_LEVEL_CHANGE      0x141

#define VM_PHYS_WRITE_ACCT            0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	if (__improbable(vm_debug_events)) {                            \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                               \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)  \
	MACRO_BEGIN                                                             \
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
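/*
 * Example (illustrative only): emitting a paired VM debug event using one of
 * the codes defined above. VM_DEBUG_EVENT only logs when vm_debug_events is
 * enabled, whereas VM_DEBUG_CONSTANT_EVENT always logs. The payload words
 * (start_count/end_count) are free-form and hypothetical here.
 *
 *	VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST,
 *	               DBG_FUNC_START, start_count, 0, 0, 0);
 *	... do the work being traced ...
 *	VM_DEBUG_EVENT(vm_pageout_freelist, VM_PAGEOUT_FREELIST,
 *	               DBG_FUNC_END, end_count, 0, 0, 0);
 */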
extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t            upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t            upl);

extern upl_size_t upl_get_size(
	upl_t            upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t                tag);

extern void vm_page_free_list(
	vm_page_t               mem,
	boolean_t               prepare_object);

extern kern_return_t vm_page_alloc_list(
	vm_size_t               page_count,
	kma_flags_t             flags,
	vm_page_t               *list);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);
#if KERNEL_PRIVATE
extern kern_return_t mach_vm_wire_level_monitor(int64_t requested_pages);
#endif /* KERNEL_PRIVATE */

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int vm_pageout_scan_event_counter;
extern unsigned int vm_page_anonymous_count;
extern thread_t     vm_pageout_scan_thread;
extern thread_t     vm_pageout_gc_thread;

#define VM_PAGEOUT_GC_INIT      ((void *)0)
#define VM_PAGEOUT_GC_COLLECT   ((void *)1)
#define VM_PAGEOUT_GC_EVENT     ((event_t)&vm_pageout_garbage_collect)
extern void vm_pageout_garbage_collect(void *, wait_result_t);


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;    /* laundry pages to be processed by pager's iothread */
	unsigned int         pgo_laundry;    /* current count of laundry pages on queue or in flight */
	unsigned int         pgo_maxlaundry;

	uint32_t
	    pgo_busy:1,         /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,    /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1,  /* iothread is set to use low priority I/O */
	    pgo_draining:1,
	    pgo_inited:1,
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
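/*
 * Example (illustrative only): the pageout queues are protected by the page
 * queues lock, so a throttle check like the one below is made with that lock
 * held; VM_PAGE_Q_THROTTLED simply compares the in-flight laundry count
 * against the queue's maximum.
 *
 *	vm_page_lockspin_queues();
 *	if (VM_PAGE_Q_THROTTLED(&vm_pageout_queue_internal)) {
 *	        // too many pages already in flight to the compressor; back off
 *	}
 *	vm_page_unlock_queues();
 */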
/*
 * Routines exported to Mach.
 */
extern void vm_pageout(void);

__startup_func extern void vm_config_init(void);

extern kern_return_t vm_pageout_internal_start(void);

extern void vm_pageout_object_terminate(
	vm_object_t     object);

extern void vm_pageout_cluster(
	vm_page_t       m);

extern void vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)     lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object)  lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)          lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)        lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)      lck_mtx_try_lock(&(object)->Lock)

struct _vector_upl_iostates {
	upl_offset_t offset;
	upl_size_t   size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                max_upls;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_page_info_array_t   pagelist;
	struct {
		upl_t                   elem;
		vector_upl_iostates_t   iostate;
	} upls[];
};

typedef struct _vector_upl* vector_upl_t;

uint32_t vector_upl_max_upls(const upl_t upl);

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t c_beg;
	upl_offset_t c_end;
	int          c_aborted;
	uint32_t     c_btref; /* btref_t */
};
#endif

struct upl_io_completion {
	void *io_context;
	void (*io_done)(void *, int);

	int io_error;
};
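/*
 * Illustrative sketch: upl_set_iodone() (declared earlier in this header)
 * attaches a struct upl_io_completion to a UPL so that upl_callout_iodone()
 * can invoke the io_done callback with io_context and io_error once the I/O
 * completes. The callback and context names below are hypothetical.
 *
 *	static void my_iodone(void *ctx, int error) { ... }
 *
 *	struct upl_io_completion ioc = {
 *	        .io_context = my_ctx,
 *	        .io_done    = my_iodone,
 *	        .io_error   = 0,
 *	};
 *	upl_set_iodone(upl, &ioc);
 *	// on failure, record the error before the callout:
 *	// upl_set_iodone_error(upl, error);
 */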
struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;         /* size in bytes of the address space */
	upl_size_t      u_mapped_size;  /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;          /* secondary mapping in kernel */
	vm_object_t     map_object;
	vector_upl_t    vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
	ppnum_t         highest_page;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	uint32_t        upl_create_btref; /* btref_t */

	struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */

	bitmap_t        *lite_list;
	struct upl_page_info page_list[];
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t, uint32_t);
extern upl_size_t vector_upl_get_size(const upl_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);
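/*
 * Illustrative sketch: a vector UPL (UPL_VECTOR flag set) aggregates several
 * sub-UPLs, and the accessors above allow walking them by index, roughly as
 * follows. 'vupl' is a hypothetical vector UPL handle.
 *
 *	uint32_t i, n = vector_upl_max_upls(vupl);
 *	for (i = 0; i < n; i++) {
 *	        upl_t sub = vector_upl_subupl_byindex(vupl, i);
 *	        if (sub == NULL) {
 *	                continue;
 *	        }
 *	        // inspect or commit 'sub'
 *	}
 */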
extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));

struct vm_page_stats_reusable {
	SInt32   reusable_count;
	uint64_t reusable;
	uint64_t reused;
	uint64_t reused_wire;
	uint64_t reused_remove;
	uint64_t all_reusable_calls;
	uint64_t partial_reusable_calls;
	uint64_t all_reuse_calls;
	uint64_t partial_reuse_calls;
	uint64_t reusable_pages_success;
	uint64_t reusable_pages_failure;
	uint64_t reusable_pages_shared;
	uint64_t reuse_pages_success;
	uint64_t reuse_pages_failure;
	uint64_t can_reuse_success;
	uint64_t can_reuse_failure;
	uint64_t reusable_reclaimed;
	uint64_t reusable_nonwritable;
	uint64_t reusable_shared;
	uint64_t free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);
extern int vm_toggle_task_selfdonate_pages(task_t);
extern void vm_task_set_selfdonate_pages(task_t, bool);

struct vm_config {
	boolean_t compressor_is_present;  /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t compressor_is_active;   /* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t swap_is_present;        /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t swap_is_active;         /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t freezer_swap_is_active; /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED               0x0   /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                      0x1   /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP           0x2   /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP         0x4   /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT              0x8   /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP   0x10  /* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP 0x20 /* Active in-core compressor + Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES      6  /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT    (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE     (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT          (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE           (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE   (vm_config.freezer_swap_is_active == TRUE)
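/*
 * Example (illustrative only): code that needs to know whether compressed
 * pages can actually be swapped out typically tests the VM_CONFIG_*
 * convenience macros rather than reading vm_config directly, e.g.:
 *
 *	if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
 *	        // pages may be compressed in core
 *	        if (VM_CONFIG_SWAP_IS_ACTIVE) {
 *	                // compressed segments may additionally be swapped out
 *	        }
 *	}
 *
 * VM_DYNAMIC_PAGING_ENABLED(), defined earlier in this header, is simply
 * VM_CONFIG_COMPRESSOR_IS_ACTIVE.
 */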
#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int       vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;      /* milliseconds */
	unsigned int vm_pageout_empty_wait;     /* milliseconds */
	unsigned int vm_pageout_burst_wait;     /* milliseconds */
	unsigned int vm_pageout_deadlock_wait;  /* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;  /* debugging */
	unsigned int vm_pageout_inactive_clean; /* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_early_swapout_iothread;
};

extern struct vm_pageout_state vm_pageout_state;
/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;
	unsigned long vm_pageout_skipped_internal;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t      vm_pageout_compressions;
	uint64_t      vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;

	unsigned long vm_pageout_protected_sharedcache;
	unsigned long vm_pageout_forcereclaimed_sharedcache;
	unsigned long vm_pageout_protected_realtime;
	unsigned long vm_pageout_forcereclaimed_realtime;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;

extern void vm_swapout_thread(void);

#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	uint32_t vm_pageout_yield_for_free_pages;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	vm_pageout_debug.member += value;               \
	MACRO_END
#else /* DEVELOPMENT || DEBUG */
#define VM_PAGEOUT_DEBUG(member, value)
#endif /* DEVELOPMENT || DEBUG */
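/*
 * Example (illustrative only): VM_PAGEOUT_DEBUG() bumps a member of
 * vm_pageout_debug on DEVELOPMENT/DEBUG kernels and compiles away to nothing
 * on RELEASE kernels, so callers can sprinkle it freely:
 *
 *	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
 */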
#define MAX_COMPRESSOR_THREAD_COUNT      8

/*
 * Forward declarations for internal routines.
 */

/*
 * Contains relevant state for pageout iothreads. Some state is unused by
 * the external (file-backed) thread.
 */
struct pgo_iothread_state {
	struct vm_pageout_queue *q;
	// cheads unused by the external thread
	void                    *current_early_swapout_chead;
	void                    *current_regular_swapout_chead;
	void                    *current_late_swapout_chead;
	char                    *scratch_buf;
	int                     id;
	thread_t                pgo_iothread; // holds a +1 ref
	sched_cond_atomic_t     pgo_wakeup;
#if DEVELOPMENT || DEBUG
	// for the perf_compressor benchmark
	struct vm_pageout_queue *benchmark_q;
#endif /* DEVELOPMENT || DEBUG */
};

extern struct pgo_iothread_state pgo_iothread_internal_state[MAX_COMPRESSOR_THREAD_COUNT];

extern struct pgo_iothread_state pgo_iothread_external_state;

struct vm_compressor_swapper_stats {
	uint64_t unripe_under_30s;
	uint64_t unripe_under_60s;
	uint64_t unripe_under_300s;
	uint64_t reclaim_swapins;
	uint64_t defrag_swapins;
	uint64_t compressor_swap_threshold_exceeded;
	uint64_t external_q_throttled;
	uint64_t free_count_below_reserve;
	uint64_t thrashing_detected;
	uint64_t fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	// total mach absolute time that compressor threads have been running
	uint64_t vmct_cthreads_total;
	int32_t  vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t  vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _VM_VM_PAGEOUT_H_ */