/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *  File:    vm/vm_pageout.h
 *  Author:  Avadis Tevanian, Jr.
 *  Date:    1986
 *
 *  Declarations for the pageout daemon interface.
 */

#ifndef _VM_VM_PAGEOUT_H_
#define _VM_VM_PAGEOUT_H_

#ifdef KERNEL_PRIVATE

#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/machine/vm_types.h>
#include <mach/memory_object_types.h>

#include <kern/kern_types.h>
#include <kern/locks.h>

#include <libkern/OSAtomic.h>


#include <vm/vm_options.h>

#ifdef MACH_KERNEL_PRIVATE
#include <vm/vm_page.h>
#endif

#include <sys/kdebug.h>

#define VM_PAGE_AVAILABLE_COUNT()       ((unsigned int)(vm_page_cleaned_count))

/* externally manipulated counters */
extern unsigned int vm_pageout_cleaned_fault_reactivated;

#if CONFIG_FREEZE
extern boolean_t memorystatus_freeze_enabled;

struct freezer_context {
	/*
	 * All these counters & variables track the task
	 * being frozen.
	 * Currently we only freeze one task at a time.
	 * Should that change, we'll need to add support for
	 * multiple freezer contexts.
	 */

	task_t   freezer_ctx_task;                   /* Task being frozen. */

	void     *freezer_ctx_chead;                 /* The chead used to track c_segs allocated */
	                                             /* to freeze the task. */

	uint64_t freezer_ctx_swapped_bytes;          /* Tracks # of compressed bytes. */

	int      freezer_ctx_uncompressed_pages;     /* Tracks # of uncompressed pages frozen. */

	char     *freezer_ctx_compressor_scratch_buf; /* Scratch buffer for the compressor algorithm. */
};

#endif /* CONFIG_FREEZE */

#define VM_DYNAMIC_PAGING_ENABLED() (VM_CONFIG_COMPRESSOR_IS_ACTIVE)

#if VM_PRESSURE_EVENTS
extern boolean_t vm_pressure_events_enabled;
#endif /* VM_PRESSURE_EVENTS */


/*
 * the following codes are used in the DBG_MACH_WORKINGSET subclass
 * of the DBG_MACH class
 */
#define VM_DISCONNECT_ALL_PAGE_MAPPINGS         0x00
#define VM_DISCONNECT_TASK_PAGE_MAPPINGS        0x01
#define VM_REAL_FAULT_ADDR_INTERNAL             0x02
#define VM_REAL_FAULT_ADDR_PURGABLE             0x03
#define VM_REAL_FAULT_ADDR_EXTERNAL             0x04
#define VM_REAL_FAULT_ADDR_SHAREDCACHE          0x05
#define VM_REAL_FAULT_FAST                      0x06
#define VM_REAL_FAULT_SLOW                      0x07
#define VM_MAP_LOOKUP_OBJECT                    0x08



extern int vm_debug_events;

#define VMF_CHECK_ZFDELAY               0x100
#define VMF_COWDELAY                    0x101
#define VMF_ZFDELAY                     0x102
#define VMF_COMPRESSORDELAY             0x103

#define VM_PAGEOUT_SCAN                 0x104
#define VM_PAGEOUT_BALANCE              0x105
#define VM_PAGEOUT_FREELIST             0x106
#define VM_PAGEOUT_PURGEONE             0x107
#define VM_PAGEOUT_CACHE_EVICT          0x108
#define VM_PAGEOUT_THREAD_BLOCK         0x109
#define VM_PAGEOUT_JETSAM               0x10A
#define VM_INFO1                        0x10B
#define VM_INFO2                        0x10C
#define VM_INFO3                        0x10D
#define VM_INFO4                        0x10E
#define VM_INFO5                        0x10F
#define VM_INFO6                        0x110
#define VM_INFO7                        0x111
#define VM_INFO8                        0x112
#define VM_INFO9                        0x113

#define VM_UPL_PAGE_WAIT                0x120
#define VM_IOPL_PAGE_WAIT               0x121
#define VM_PAGE_WAIT_BLOCK              0x122

#if CONFIG_IOSCHED
#define VM_PAGE_SLEEP                   0x123
#define VM_PAGE_EXPEDITE                0x124
#define VM_PAGE_EXPEDITE_NO_MEMORY      0x125
#endif

#define VM_PAGE_GRAB                    0x126
#define VM_PAGE_RELEASE                 0x127
#define VM_COMPRESSOR_COMPACT_AND_SWAP  0x128
#define VM_COMPRESSOR_DO_DELAYED_COMPACTIONS 0x129


#define VM_PRESSURE_EVENT               0x130
#define VM_EXECVE                       0x131
#define VM_WAKEUP_COMPACTOR_SWAPPER     0x132
#define VM_UPL_REQUEST                  0x133
#define VM_IOPL_REQUEST                 0x134
#define VM_KERN_REQUEST                 0x135

#define VM_DATA_WRITE                   0x140

#define VM_PRESSURE_LEVEL_CHANGE        0x141

#define VM_PHYS_WRITE_ACCT              0x142

#define VM_DEBUG_EVENT(name, event, control, arg1, arg2, arg3, arg4)   \
	MACRO_BEGIN                                                     \
	if (__improbable(vm_debug_events)) {                            \
	        KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	}                                                               \
	MACRO_END

#define VM_DEBUG_CONSTANT_EVENT(name, event, control, arg1, arg2, arg3, arg4)  \
	MACRO_BEGIN                                                             \
	KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, event)) | control, arg1, arg2, arg3, arg4, 0); \
	MACRO_END
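/*
 * Illustrative only -- a hedged sketch (not part of this interface) of how
 * the tracing macros above are typically invoked: one of the event codes
 * defined above is paired with a DBG_FUNC_START/DBG_FUNC_END control from
 * <sys/kdebug.h>.  The function name and the "pages_considered"/"pages_freed"
 * arguments are made-up placeholders.  VM_DEBUG_EVENT only emits when the
 * vm_debug_events global is set; VM_DEBUG_CONSTANT_EVENT traces unconditionally.
 */
#if 0   /* example usage, not compiled */
static void
vm_pageout_scan_trace_example(unsigned int pages_considered, unsigned int pages_freed)
{
	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_START,
	    pages_considered, 0, 0, 0);

	/* ... scan work would happen here ... */

	VM_DEBUG_EVENT(vm_pageout_scan, VM_PAGEOUT_SCAN, DBG_FUNC_END,
	    pages_considered, pages_freed, 0, 0);
}
#endif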
extern void memoryshot(unsigned int event, unsigned int control);

extern void update_vm_info(void);

#if CONFIG_IOSCHED
extern int upl_get_cached_tier(
	upl_t            upl);
#endif

extern void upl_set_iodone(upl_t, void *);
extern void upl_set_iodone_error(upl_t, int);
extern void upl_callout_iodone(upl_t);

extern ppnum_t upl_get_highest_page(
	upl_t            upl);

extern upl_size_t upl_get_size(
	upl_t            upl);

extern upl_t upl_associated_upl(upl_t upl);
extern void upl_set_associated_upl(upl_t upl, upl_t associated_upl);

#ifndef MACH_KERNEL_PRIVATE
typedef struct vm_page  *vm_page_t;
#endif
#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_kern.h>

extern upl_size_t upl_adjusted_size(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_adjusted_offset(
	upl_t upl,
	vm_map_offset_t page_mask);
extern vm_object_offset_t upl_get_data_offset(
	upl_t upl);

extern kern_return_t vm_map_create_upl(
	vm_map_t                map,
	vm_map_address_t        offset,
	upl_size_t              *upl_size,
	upl_t                   *upl,
	upl_page_info_array_t   page_list,
	unsigned int            *count,
	upl_control_flags_t     *flags,
	vm_tag_t                tag);

extern void iopl_valid_data(
	upl_t                   upl_ptr,
	vm_tag_t                tag);

extern void vm_page_free_list(
	vm_page_t               mem,
	boolean_t               prepare_object);

extern kern_return_t vm_page_alloc_list(
	int                     page_count,
	kma_flags_t             flags,
	vm_page_t               *list);

#endif /* XNU_KERNEL_PRIVATE */

extern struct vnode * upl_lookup_vnode(upl_t upl);

extern void vm_page_set_offset(vm_page_t page, vm_object_offset_t offset);
extern vm_object_offset_t vm_page_get_offset(vm_page_t page);
extern ppnum_t vm_page_get_phys_page(vm_page_t page);
extern vm_page_t vm_page_get_next(vm_page_t page);

extern kern_return_t mach_vm_pressure_level_monitor(boolean_t wait_for_pressure, unsigned int *pressure_level);

#if XNU_TARGET_OS_OSX
extern kern_return_t vm_pageout_wait(uint64_t deadline);
#endif /* XNU_TARGET_OS_OSX */

#ifdef MACH_KERNEL_PRIVATE

#include <vm/vm_page.h>

extern unsigned int vm_pageout_scan_event_counter;
extern unsigned int vm_page_anonymous_count;
extern thread_t     vm_pageout_scan_thread;


/*
 * must hold the page queues lock to
 * manipulate this structure
 */
struct vm_pageout_queue {
	vm_page_queue_head_t pgo_pending;    /* laundry pages to be processed by pager's iothread */
	uint64_t             pgo_tid;        /* thread ID of I/O thread that services this queue */
	unsigned int         pgo_laundry;    /* current count of laundry pages on queue or in flight */
	unsigned int         pgo_maxlaundry;

	uint32_t
	    pgo_idle:1,         /* iothread is blocked waiting for work to do */
	    pgo_busy:1,         /* iothread is currently processing request from pgo_pending */
	    pgo_throttled:1,    /* vm_pageout_scan thread needs a wakeup when pgo_laundry drops */
	    pgo_lowpriority:1,  /* iothread is set to use low priority I/O */
	    pgo_draining:1,
	    pgo_inited:1,
	    pgo_unused_bits:26;
};

#define VM_PAGE_Q_THROTTLED(q)          \
	((q)->pgo_laundry >= (q)->pgo_maxlaundry)

extern struct vm_pageout_queue vm_pageout_queue_internal;
extern struct vm_pageout_queue vm_pageout_queue_external;
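/*
 * Illustrative only -- a hedged sketch (hypothetical helper, not part of this
 * header) of how VM_PAGE_Q_THROTTLED() is meant to be consulted: with the page
 * queues lock held, compare the queue's laundry count against its maximum
 * before handing more work to the pageout iothread.
 */
#if 0   /* example usage, not compiled */
static boolean_t
vm_pageout_queue_has_room_example(struct vm_pageout_queue *q)
{
	boolean_t has_room;

	vm_page_lock_queues();          /* the structure may only be inspected under this lock */
	has_room = !VM_PAGE_Q_THROTTLED(q);
	vm_page_unlock_queues();

	return has_room;
}
#endif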

/*
 *  Routines exported to Mach.
 */
extern void vm_pageout(void);

__startup_func extern void vm_config_init(void);

extern kern_return_t vm_pageout_internal_start(void);

extern void vm_pageout_object_terminate(
	vm_object_t     object);

extern void vm_pageout_cluster(
	vm_page_t       m);

extern void vm_pageout_initialize_page(
	vm_page_t       m);

/* UPL exported routines and structures */

#define upl_lock_init(object)    lck_mtx_init(&(object)->Lock, &vm_object_lck_grp, &vm_object_lck_attr)
#define upl_lock_destroy(object) lck_mtx_destroy(&(object)->Lock, &vm_object_lck_grp)
#define upl_lock(object)         lck_mtx_lock(&(object)->Lock)
#define upl_unlock(object)       lck_mtx_unlock(&(object)->Lock)
#define upl_try_lock(object)     lck_mtx_try_lock(&(object)->Lock)
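/*
 * Illustrative only -- a hedged sketch (hypothetical snippet, not part of this
 * header) of how the upl_lock()/upl_unlock() wrappers above are meant to guard
 * access to a UPL's fields; struct upl itself is defined further down in this
 * file.
 */
#if 0   /* example usage, not compiled */
static void
upl_locked_update_example(upl_t upl)
{
	upl_lock(upl);          /* takes the lck_mtx embedded in struct upl (defined below) */
	/* ... inspect or update the upl's fields here ... */
	upl_unlock(upl);
}
#endif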

#define MAX_VECTOR_UPL_ELEMENTS 8

struct _vector_upl_iostates {
	upl_offset_t offset;
	upl_size_t   size;
};

typedef struct _vector_upl_iostates vector_upl_iostates_t;

struct _vector_upl {
	upl_size_t              size;
	uint32_t                num_upls;
	uint32_t                invalid_upls;
	uint32_t                _reserved;
	vm_map_t                submap;
	vm_offset_t             submap_dst_addr;
	vm_object_offset_t      offset;
	upl_t                   upl_elems[MAX_VECTOR_UPL_ELEMENTS];
	upl_page_info_array_t   pagelist;
	vector_upl_iostates_t   upl_iostates[MAX_VECTOR_UPL_ELEMENTS];
};

typedef struct _vector_upl* vector_upl_t;

/* universal page list structure */

#if UPL_DEBUG
#define UPL_DEBUG_STACK_FRAMES   16
#define UPL_DEBUG_COMMIT_RECORDS 4

struct ucd {
	upl_offset_t    c_beg;
	upl_offset_t    c_end;
	int             c_aborted;
	void            *c_retaddr[UPL_DEBUG_STACK_FRAMES];
};
#endif

struct upl_io_completion {
	void    *io_context;
	void    (*io_done)(void *, int);

	int     io_error;
};


struct upl {
	decl_lck_mtx_data(, Lock);      /* Synchronization */
	int             ref_count;
	int             ext_ref_count;
	int             flags;
	/*
	 * XXX CAUTION: to accommodate devices with "mixed page sizes",
	 * u_offset and u_size are now byte-aligned and no longer
	 * page-aligned, on all devices.
	 */
	vm_object_offset_t u_offset;
	upl_size_t      u_size;         /* size in bytes of the address space */
	upl_size_t      u_mapped_size;  /* size in bytes of the UPL that is mapped */
	vm_offset_t     kaddr;          /* secondary mapping in kernel */
	vm_object_t     map_object;
	ppnum_t         highest_page;
	void            *vector_upl;
	upl_t           associated_upl;
	struct upl_io_completion *upl_iodone;
#if CONFIG_IOSCHED
	int             upl_priority;
	uint64_t        *upl_reprio_info;
	void            *decmp_io_upl;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	thread_t        upl_creator;
	queue_chain_t   uplq;           /* List of outstanding upls on an obj */
#endif
#if UPL_DEBUG
	uintptr_t       ubc_alias1;
	uintptr_t       ubc_alias2;

	uint32_t        upl_state;
	uint32_t        upl_commit_index;
	void            *upl_create_retaddr[UPL_DEBUG_STACK_FRAMES];

	struct ucd      upl_commit_records[UPL_DEBUG_COMMIT_RECORDS];
#endif /* UPL_DEBUG */
};

/* upl struct flags */
#define UPL_PAGE_LIST_MAPPED    0x1
#define UPL_KERNEL_MAPPED       0x2
#define UPL_CLEAR_DIRTY         0x4
#define UPL_COMPOSITE_LIST      0x8
#define UPL_INTERNAL            0x10
#define UPL_PAGE_SYNC_DONE      0x20
#define UPL_DEVICE_MEMORY       0x40
#define UPL_PAGEOUT             0x80
#define UPL_LITE                0x100
#define UPL_IO_WIRE             0x200
#define UPL_ACCESS_BLOCKED      0x400
#define UPL_SHADOWED            0x1000
#define UPL_KERNEL_OBJECT       0x2000
#define UPL_VECTOR              0x4000
#define UPL_SET_DIRTY           0x8000
#define UPL_HAS_BUSY            0x10000
#define UPL_TRACKED_BY_OBJECT   0x20000
#define UPL_EXPEDITE_SUPPORTED  0x40000
#define UPL_DECMP_REQ           0x80000
#define UPL_DECMP_REAL_IO       0x100000

/* flags for upl_create flags parameter */
#define UPL_CREATE_EXTERNAL     0
#define UPL_CREATE_INTERNAL     0x1
#define UPL_CREATE_LITE         0x2
#define UPL_CREATE_IO_TRACKING  0x4
#define UPL_CREATE_EXPEDITE_SUP 0x8

extern upl_t vector_upl_create(vm_offset_t);
extern void vector_upl_deallocate(upl_t);
extern boolean_t vector_upl_is_valid(upl_t);
extern boolean_t vector_upl_set_subupl(upl_t, upl_t, u_int32_t);
extern void vector_upl_set_pagelist(upl_t);
extern void vector_upl_set_submap(upl_t, vm_map_t, vm_offset_t);
extern void vector_upl_get_submap(upl_t, vm_map_t*, vm_offset_t*);
extern void vector_upl_set_iostate(upl_t, upl_t, upl_offset_t, upl_size_t);
extern void vector_upl_get_iostate(upl_t, upl_t, upl_offset_t*, upl_size_t*);
extern void vector_upl_get_iostate_byindex(upl_t, uint32_t, upl_offset_t*, upl_size_t*);
extern upl_t vector_upl_subupl_byindex(upl_t, uint32_t);
extern upl_t vector_upl_subupl_byoffset(upl_t, upl_offset_t*, upl_size_t*);

extern void vm_object_set_pmap_cache_attr(
	vm_object_t             object,
	upl_page_info_array_t   user_page_list,
	unsigned int            num_pages,
	boolean_t               batch_pmap_op);

extern kern_return_t vm_object_iopl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl_ptr,
	upl_page_info_array_t   user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

extern kern_return_t vm_object_super_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_size_t              super_cluster,
	upl_t                   *upl,
	upl_page_info_t         *user_page_list,
	unsigned int            *page_list_count,
	upl_control_flags_t     cntrl_flags,
	vm_tag_t                tag);

/* should be just a regular vm_map_enter() */
extern kern_return_t vm_map_enter_upl(
	vm_map_t                map,
	upl_t                   upl,
	vm_map_offset_t         *dst_addr);

/* should be just a regular vm_map_remove() */
extern kern_return_t vm_map_remove_upl(
	vm_map_t                map,
	upl_t                   upl);

extern kern_return_t vm_map_enter_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size,
	vm_prot_t               prot,
	vm_map_offset_t         *dst_addr);

extern kern_return_t vm_map_remove_upl_range(
	vm_map_t                map,
	upl_t                   upl,
	vm_object_offset_t      offset,
	upl_size_t              size);
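/*
 * Illustrative only -- a hedged sketch (hypothetical snippet, not part of this
 * header) of the expected pairing of vm_map_enter_upl() and vm_map_remove_upl():
 * map a UPL into a map to obtain an address, use it, then remove the mapping.
 * "kernel_map" is assumed here to be the usual xnu kernel map.
 */
#if 0   /* example usage, not compiled */
static kern_return_t
upl_map_use_unmap_example(upl_t upl)
{
	vm_map_offset_t dst_addr;
	kern_return_t   kr;

	kr = vm_map_enter_upl(kernel_map, upl, &dst_addr);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... access the UPL's pages through dst_addr ... */

	(void) vm_map_remove_upl(kernel_map, upl);
	return KERN_SUCCESS;
}
#endif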

/* wired page list structure */
typedef uint32_t *wpl_array_t;

extern struct vm_page_delayed_work*
vm_page_delayed_work_get_ctx(void);

extern void
vm_page_delayed_work_finish_ctx(struct vm_page_delayed_work* dwp);

extern void vm_page_free_reserve(int pages);

extern void vm_pageout_throttle_down(vm_page_t page);
extern void vm_pageout_throttle_up(vm_page_t page);

extern kern_return_t vm_paging_map_object(
	vm_page_t               page,
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_prot_t               protection,
	boolean_t               can_unlock_object,
	vm_map_size_t           *size,          /* IN/OUT */
	vm_map_offset_t         *address,       /* OUT */
	boolean_t               *need_unmap);   /* OUT */
extern void vm_paging_unmap_object(
	vm_object_t             object,
	vm_map_offset_t         start,
	vm_map_offset_t         end);
decl_simple_lock_data(extern, vm_paging_lock);

/*
 * Backing store throttle when BS is exhausted
 */
extern unsigned int vm_backing_store_low;

extern void vm_pageout_steal_laundry(
	vm_page_t page,
	boolean_t queues_locked);

#endif /* MACH_KERNEL_PRIVATE */

#if UPL_DEBUG
extern kern_return_t upl_ubc_alias_set(
	upl_t upl,
	uintptr_t alias1,
	uintptr_t alias2);
extern int upl_ubc_alias_get(
	upl_t upl,
	uintptr_t * al,
	uintptr_t * al2);
#endif /* UPL_DEBUG */

extern void vm_countdirtypages(void);

extern void vm_backing_store_disable(
	boolean_t       suspend);

extern kern_return_t upl_transpose(
	upl_t   upl1,
	upl_t   upl2);

extern kern_return_t mach_vm_pressure_monitor(
	boolean_t       wait_for_pressure,
	unsigned int    nsecs_monitored,
	unsigned int    *pages_reclaimed_p,
	unsigned int    *pages_wanted_p);

extern kern_return_t
vm_set_buffer_cleanup_callout(
	boolean_t       (*func)(int));

struct vm_page_stats_reusable {
	SInt32          reusable_count;
	uint64_t        reusable;
	uint64_t        reused;
	uint64_t        reused_wire;
	uint64_t        reused_remove;
	uint64_t        all_reusable_calls;
	uint64_t        partial_reusable_calls;
	uint64_t        all_reuse_calls;
	uint64_t        partial_reuse_calls;
	uint64_t        reusable_pages_success;
	uint64_t        reusable_pages_failure;
	uint64_t        reusable_pages_shared;
	uint64_t        reuse_pages_success;
	uint64_t        reuse_pages_failure;
	uint64_t        can_reuse_success;
	uint64_t        can_reuse_failure;
	uint64_t        reusable_reclaimed;
	uint64_t        reusable_nonwritable;
	uint64_t        reusable_shared;
	uint64_t        free_shared;
};
extern struct vm_page_stats_reusable vm_page_stats_reusable;

extern int hibernate_flush_memory(void);
extern void hibernate_reset_stats(void);
extern void hibernate_create_paddr_map(void);

extern void vm_set_restrictions(unsigned int num_cpus);

extern int vm_compressor_mode;
extern kern_return_t vm_pageout_compress_page(void **, char *, vm_page_t);
extern void vm_pageout_anonymous_pages(void);
extern void vm_pageout_disconnect_all_pages(void);


struct vm_config {
	boolean_t compressor_is_present;   /* compressor is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t compressor_is_active;    /* pager can actively compress pages... 'compressor_is_present' must be set */
	boolean_t swap_is_present;         /* swap is initialized and can be used by the freezer, the sweep or the pager */
	boolean_t swap_is_active;          /* pager can actively swap out compressed segments... 'swap_is_present' must be set */
	boolean_t freezer_swap_is_active;  /* freezer can swap out frozen tasks... "compressor_is_present + swap_is_present" must be set */
};

extern struct vm_config vm_config;


#define VM_PAGER_NOT_CONFIGURED                 0x0     /* no compressor or swap configured */
#define VM_PAGER_DEFAULT                        0x1     /* Use default pager... DEPRECATED */
#define VM_PAGER_COMPRESSOR_NO_SWAP             0x2     /* Active in-core compressor only. */
#define VM_PAGER_COMPRESSOR_WITH_SWAP           0x4     /* Active in-core compressor + swap backend. */
#define VM_PAGER_FREEZER_DEFAULT                0x8     /* Freezer backed by default pager... DEPRECATED */
#define VM_PAGER_FREEZER_COMPRESSOR_NO_SWAP     0x10    /* Freezer backed by in-core compressor only, i.e. frozen data remain in-core compressed. */
#define VM_PAGER_COMPRESSOR_NO_SWAP_PLUS_FREEZER_COMPRESSOR_WITH_SWAP  0x20    /* Active in-core compressor + Freezer backed by in-core compressor with swap support too. */

#define VM_PAGER_MAX_MODES      6       /* Total number of vm compressor modes supported */


#define VM_CONFIG_COMPRESSOR_IS_PRESENT         (vm_config.compressor_is_present == TRUE)
#define VM_CONFIG_COMPRESSOR_IS_ACTIVE          (vm_config.compressor_is_active == TRUE)
#define VM_CONFIG_SWAP_IS_PRESENT               (vm_config.swap_is_present == TRUE)
#define VM_CONFIG_SWAP_IS_ACTIVE                (vm_config.swap_is_active == TRUE)
#define VM_CONFIG_FREEZER_SWAP_IS_ACTIVE        (vm_config.freezer_swap_is_active == TRUE)
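/*
 * Illustrative only -- a hedged sketch (hypothetical helper, not part of this
 * header) of how the VM_CONFIG_* predicates above are meant to be consulted
 * before relying on the compressor or swap, and how VM_DYNAMIC_PAGING_ENABLED()
 * (defined earlier in this file) reduces to the compressor-is-active check.
 */
#if 0   /* example usage, not compiled */
static boolean_t
can_swap_out_compressed_segments_example(void)
{
	if (!VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
		/* equivalent to !VM_DYNAMIC_PAGING_ENABLED() */
		return FALSE;
	}
	/* swap_is_active implies swap_is_present, per the field comments above */
	return VM_CONFIG_SWAP_IS_ACTIVE;
}
#endif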

#endif /* KERNEL_PRIVATE */

#ifdef XNU_KERNEL_PRIVATE

struct vm_pageout_state {
	boolean_t vm_pressure_thread_running;
	boolean_t vm_pressure_changed;
	boolean_t vm_restricted_to_single_processor;
	int vm_compressor_thread_count;

	unsigned int vm_page_speculative_q_age_ms;
	unsigned int vm_page_speculative_percentage;
	unsigned int vm_page_speculative_target;

	unsigned int vm_pageout_swap_wait;
	unsigned int vm_pageout_idle_wait;      /* milliseconds */
	unsigned int vm_pageout_empty_wait;     /* milliseconds */
	unsigned int vm_pageout_burst_wait;     /* milliseconds */
	unsigned int vm_pageout_deadlock_wait;  /* milliseconds */
	unsigned int vm_pageout_deadlock_relief;
	unsigned int vm_pageout_burst_inactive_throttle;

	unsigned int vm_pageout_inactive;
	unsigned int vm_pageout_inactive_used;  /* debugging */
	unsigned int vm_pageout_inactive_clean; /* debugging */

	uint32_t vm_page_filecache_min;
	uint32_t vm_page_filecache_min_divisor;
	uint32_t vm_page_xpmapped_min;
	uint32_t vm_page_xpmapped_min_divisor;
	uint64_t vm_pageout_considered_page_last;

	int vm_page_free_count_init;

	unsigned int vm_memory_pressure;

	int memorystatus_purge_on_critical;
	int memorystatus_purge_on_warning;
	int memorystatus_purge_on_urgent;

	thread_t vm_pageout_external_iothread;
	thread_t vm_pageout_internal_iothread;
};

extern struct vm_pageout_state vm_pageout_state;

/*
 * This structure is used to track the VM_INFO instrumentation
 */
struct vm_pageout_vminfo {
	unsigned long vm_pageout_considered_page;
	unsigned long vm_pageout_considered_bq_internal;
	unsigned long vm_pageout_considered_bq_external;
	unsigned long vm_pageout_skipped_external;
	unsigned long vm_pageout_skipped_internal;

	unsigned long vm_pageout_pages_evicted;
	unsigned long vm_pageout_pages_purged;
	unsigned long vm_pageout_freed_cleaned;
	unsigned long vm_pageout_freed_speculative;
	unsigned long vm_pageout_freed_external;
	unsigned long vm_pageout_freed_internal;
	unsigned long vm_pageout_inactive_dirty_internal;
	unsigned long vm_pageout_inactive_dirty_external;
	unsigned long vm_pageout_inactive_referenced;
	unsigned long vm_pageout_reactivation_limit_exceeded;
	unsigned long vm_pageout_inactive_force_reclaim;
	unsigned long vm_pageout_inactive_nolock;
	unsigned long vm_pageout_filecache_min_reactivated;
	unsigned long vm_pageout_scan_inactive_throttled_internal;
	unsigned long vm_pageout_scan_inactive_throttled_external;

	uint64_t      vm_pageout_compressions;
	uint64_t      vm_compressor_pages_grabbed;
	unsigned long vm_compressor_failed;

	unsigned long vm_page_pages_freed;

	unsigned long vm_phantom_cache_found_ghost;
	unsigned long vm_phantom_cache_added_ghost;
};

extern struct vm_pageout_vminfo vm_pageout_vminfo;


#if DEVELOPMENT || DEBUG

/*
 * This structure records the pageout daemon's actions:
 * how many pages it looks at and what happens to those pages.
 * No locking needed because only one thread modifies the fields.
 */
struct vm_pageout_debug {
	uint32_t vm_pageout_balanced;
	uint32_t vm_pageout_scan_event_counter;
	uint32_t vm_pageout_speculative_dirty;

	uint32_t vm_pageout_inactive_busy;
	uint32_t vm_pageout_inactive_absent;
	uint32_t vm_pageout_inactive_notalive;
	uint32_t vm_pageout_inactive_error;
	uint32_t vm_pageout_inactive_deactivated;

	uint32_t vm_pageout_enqueued_cleaned;

	uint32_t vm_pageout_cleaned_busy;
	uint32_t vm_pageout_cleaned_nolock;
	uint32_t vm_pageout_cleaned_reference_reactivated;
	uint32_t vm_pageout_cleaned_volatile_reactivated;
	uint32_t vm_pageout_cleaned_reactivated;  /* debugging; how many cleaned pages are found to be referenced on pageout (and are therefore reactivated) */
	uint32_t vm_pageout_cleaned_fault_reactivated;

	uint32_t vm_pageout_dirty_no_pager;
	uint32_t vm_pageout_purged_objects;

	uint32_t vm_pageout_scan_throttle;
	uint32_t vm_pageout_scan_reclaimed_throttled;
	uint32_t vm_pageout_scan_burst_throttle;
	uint32_t vm_pageout_scan_empty_throttle;
	uint32_t vm_pageout_scan_swap_throttle;
	uint32_t vm_pageout_scan_deadlock_detected;
	uint32_t vm_pageout_scan_inactive_throttle_success;
	uint32_t vm_pageout_scan_throttle_deferred;

	uint32_t vm_pageout_inactive_external_forced_jetsam_count;

	uint32_t vm_grab_anon_overrides;
	uint32_t vm_grab_anon_nops;

	uint32_t vm_pageout_no_victim;
	unsigned long vm_pageout_throttle_up_count;
	uint32_t vm_page_steal_pageout_page;

	uint32_t vm_cs_validated_resets;
	uint32_t vm_object_iopl_request_sleep_for_cleaning;
	uint32_t vm_page_slide_counter;
	uint32_t vm_page_slide_errors;
	uint32_t vm_page_throttle_count;
	/*
	 * Statistics about UPL enforcement of copy-on-write obligations.
	 */
	unsigned long upl_cow;
	unsigned long upl_cow_again;
	unsigned long upl_cow_pages;
	unsigned long upl_cow_again_pages;
	unsigned long iopl_cow;
	unsigned long iopl_cow_pages;
};

extern struct vm_pageout_debug vm_pageout_debug;

#define VM_PAGEOUT_DEBUG(member, value)                 \
	MACRO_BEGIN                                     \
	vm_pageout_debug.member += value;               \
	MACRO_END
#else
#define VM_PAGEOUT_DEBUG(member, value)
#endif
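/*
 * Illustrative only -- a hedged sketch (hypothetical call site, not part of
 * this header) of how VM_PAGEOUT_DEBUG() is intended to be used: on
 * DEVELOPMENT/DEBUG kernels it bumps the named vm_pageout_debug counter, and
 * on other kernels it compiles away to nothing.
 */
#if 0   /* example usage, not compiled */
static void
note_busy_inactive_page_example(void)
{
	VM_PAGEOUT_DEBUG(vm_pageout_inactive_busy, 1);
}
#endif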

#define MAX_COMPRESSOR_THREAD_COUNT      8

struct vm_compressor_swapper_stats {
	uint64_t unripe_under_30s;
	uint64_t unripe_under_60s;
	uint64_t unripe_under_300s;
	uint64_t reclaim_swapins;
	uint64_t defrag_swapins;
	uint64_t compressor_swap_threshold_exceeded;
	uint64_t external_q_throttled;
	uint64_t free_count_below_reserve;
	uint64_t thrashing_detected;
	uint64_t fragmentation_detected;
};
extern struct vm_compressor_swapper_stats vmcs_stats;

#if DEVELOPMENT || DEBUG
typedef struct vmct_stats_s {
	uint64_t vmct_runtimes[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_pages[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_iterations[MAX_COMPRESSOR_THREAD_COUNT];
	uint64_t vmct_cthreads_total;
	int32_t vmct_minpages[MAX_COMPRESSOR_THREAD_COUNT];
	int32_t vmct_maxpages[MAX_COMPRESSOR_THREAD_COUNT];
} vmct_stats_t;
#endif /* DEVELOPMENT || DEBUG */
#endif /* XNU_KERNEL_PRIVATE */
#endif /* _VM_VM_PAGEOUT_H_ */