1 /* 2 * Copyright (c) 2004-2020 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 #ifdef XNU_KERNEL_PRIVATE 30 31 #ifndef _VM_VM_PROTOS_H_ 32 #define _VM_VM_PROTOS_H_ 33 34 #include <mach/mach_types.h> 35 #include <kern/kern_types.h> 36 37 #ifdef __cplusplus 38 extern "C" { 39 #endif 40 41 /* 42 * This file contains various type definitions and routine prototypes 43 * that are needed to avoid compilation warnings for VM code (in osfmk, 44 * default_pager and bsd). 45 * Most of these should eventually go into more appropriate header files. 
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
/*
 * Upcall from the VM's device pager into IOKit to service activity on a
 * device-backed memory region identified by device_handle.
 */
extern kern_return_t device_data_action(
	uintptr_t device_handle,
	ipc_port_t device_pager,
	vm_prot_t protection,
	vm_object_offset_t offset,
	vm_size_t size);

/* Release the IOKit side of a device-pager association. */
extern kern_return_t device_close(
	uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
#ifndef _IPC_IPC_PORT_H_
/* Minimal IPC prototypes for when the real IPC headers are not included. */
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t sright,
	ipc_space_t space);
extern mach_port_name_t ipc_port_copyout_send_pinned(
	ipc_port_t sright,
	ipc_space_t space);
#endif /* _IPC_IPC_PORT_H_ */

#ifndef _KERN_IPC_TT_H_

/* In this header's view, port_name_to_task() resolves to the kernel variant. */
#define port_name_to_task(name) port_name_to_task_kernel(name)

extern task_t port_name_to_task_kernel(
	mach_port_name_t name);
extern task_t port_name_to_task_read(
	mach_port_name_t name);
extern task_t port_name_to_task_name(
	mach_port_name_t name);
extern void ipc_port_release_send(
	ipc_port_t port);
#endif /* _KERN_IPC_TT_H_ */

extern ipc_space_t get_task_ipcspace(
	task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern const vm_size_t msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
/* Accessors for vm_map attributes (map bounds, size, page geometry). */
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_task_page_size(task_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t vm_map_purgable_control(
	vm_map_t map,
	vm_map_offset_t address,
	vm_purgable_t control,
	int *state);

#if MACH_ASSERT
/* Debug-only: cross-check a pmap's accounting against its ledger. */
extern void vm_map_pmap_check_ledgers(
	pmap_t pmap,
	ledger_t ledger,
	int pid,
	char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t *vnodeaddr,
	uint32_t *vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t target_task,
	upl_t upl,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t target_task,
	upl_t upl
);

extern kern_return_t vm_upl_map_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size,
	vm_prot_t prot,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size
);

extern kern_return_t vm_region_object_create
(
	vm_map_t target_task,
	vm_size_t size,
	ipc_port_t *object_handle
);

extern mach_vm_offset_t mach_get_vm_start(vm_map_t);
extern mach_vm_offset_t mach_get_vm_end(vm_map_t);

#if CONFIG_CODE_DECRYPTION
/* Extra debug instrumentation for the "apple protect" pager when MACH_ASSERT. */
#define VM_MAP_DEBUG_APPLE_PROTECT MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
/* Set up decryption of the [start, end) range of "map" via the given crypt info. */
extern kern_return_t vm_map_apple_protected(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	uint32_t cryptid);
extern memory_object_t apple_protect_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);
#if __has_feature(ptrauth_calls)
/* Pointer-authentication (arm64e) support: per-shared-region JOP keys. */
extern memory_object_t shared_region_pager_match(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);
extern void shared_region_key_alloc(
	char *shared_region_id,
	bool inherit,
	uint64_t inherited_key);
extern void shared_region_key_dealloc(
	char *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */
extern bool vm_shared_region_is_reslide(struct task *task);

struct vnode;
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

/* 16K page geometry, available on arm64 and recent 32-bit ARM watch hardware. */
#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE 0x4000
#define SIXTEENK_PAGE_MASK 0x3FFF
#define SIXTEENK_PAGE_SHIFT 14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

/* 4K page geometry, always defined. */
#define FOURK_PAGE_SIZE 0x1000
#define FOURK_PAGE_MASK 0xFFF
#define FOURK_PAGE_SHIFT 12

#if __arm64__

/* Page shift used for 32-bit (user32) processes on arm64. */
extern unsigned int page_shift_user32;

/* Extra debug instrumentation for the 4K-page pager when MACH_ASSERT. */
#define VM_MAP_DEBUG_FOURK MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
	memory_object_t mem_obj,
	boolean_t overwrite,
	int index,
	vm_object_t new_backing_object,
	vm_object_offset_t new_backing_offset,
	vm_object_t *old_backing_object,
	vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;
extern void *upl_get_internal_page_list(
	upl_t upl);

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
	struct vnode *,
	vm_object_offset_t,
	vm_object_offset_t);

/* Return-code type shared by the vnode pagein/pageout paths. */
typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
/*
 * Copy the vnode's path/name into caller-supplied buffers;
 * *truncated_path_p reports whether the path was truncated to fit.
 */
extern kern_return_t vnode_pager_get_name(
	struct vnode *vp,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
	struct vnode *vp,
	struct timespec *mtime,
	struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode *vp,
	void **blobs);

#if CONFIG_IOSCHED
/* Reprioritize in-flight I/O for a block range on the backing device. */
void vnode_pager_issue_reprioritize_io(
	struct vnode *devvp,
	uint64_t blkno,
	uint32_t len,
	int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* optype values for the vnode_pager_cs_check_validation_bitmap routine */
#define CS_BITMAP_SET 1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * memory_object_pager_ops-style entry points implemented by the vnode pager.
 */
extern kern_return_t vnode_pager_data_unlock(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
	memory_object_t,
	uintptr_t *);
#endif

extern void vnode_pager_dirtied(
	memory_object_t,
	vm_object_offset_t,
	vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype);
#endif /* CHECK_CS_VALIDATION_BITMAP */

extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode *vp,
	memory_object_offset_t offset,
	int optype);

extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t mem_obj);
extern kern_return_t vnode_pager_synchronize(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t length,
	vm_sync_t sync_flags);
extern kern_return_t vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
	memory_object_t);

extern int ubc_map(
	struct vnode *vp,
	int flags);
extern void ubc_unmap(
	struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

/*
 * memory_object_pager_ops-style entry points implemented by the device pager.
 */
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_data_unlock(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_prot_t);
extern kern_return_t device_pager_synchronize(memory_object_t,
    memory_object_offset_t,
    memory_object_size_t,
    vm_sync_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t device,
	memory_object_offset_t offset,
	ppnum_t page_num,
	vm_size_t size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t object,
	memory_object_offset_t offset,
	addr64_t base_vaddr,
	vm_size_t size);

extern kern_return_t memory_object_create_named(
	memory_object_t pager,
	memory_object_offset_t size,
	memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

/* Report swap totals/availability, page size and whether swap is encrypted. */
extern int macx_swapinfo(
	memory_object_size_t *total_p,
	memory_object_size_t *avail_p,
	vm_size_t *pagesize_p,
	boolean_t *encrypted_p);
extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
	vm_map_t map,
	vm_map_offset_t start_unnest,
	vm_map_offset_t end_unnest,
	boolean_t is_nested_map,
	vm_map_offset_t lowest_unnestable_addr);

/*
 * Code-signing validation hooks (implemented in bsd).
 */
struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

/* Bits reported through the "result"/"tainted"/"nx" out-parameters below. */
#define CS_VALIDATE_TAINTED 0x00000001
#define CS_VALIDATE_NX 0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
extern void cs_validate_page(
	struct vnode *vp,
	memory_object_t pager,
	memory_object_offset_t offset,
	const void *data,
	int *validated_p,
	int *tainted_p,
	int *nx_p);

/*
 * Named memory-entry operations, keyed by the entry's IPC port.
 */
extern kern_return_t memory_entry_purgeable_control_internal(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t memory_entry_access_tracking_internal(
	ipc_port_t entry_port,
	int *access_tracking,
	uint32_t *access_tracking_reads,
	uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t entry_port,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
	ipc_port_t entry_port,
	vm_object_offset_t *offset_p);

extern kern_return_t mach_memory_entry_map_size(
	ipc_port_t entry_port,
	vm_map_t map,
	memory_object_offset_t offset,
	memory_object_offset_t size,
	mach_vm_size_t *map_size);

extern kern_return_t vm_map_range_physical_size(
	vm_map_t map,
	vm_map_address_t start,
	mach_vm_size_t size,
	mach_vm_size_t *phys_size);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern vm_named_entry_t mach_memory_entry_from_port(ipc_port_t port);
extern struct vm_named_entry *mach_memory_entry_allocate(ipc_port_t *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
	vm_named_entry_t named_entry);
extern void vm_named_entry_associate_vm_object(
	vm_named_entry_t named_entry,
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_prot_t prot);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

extern unsigned int vmtc_total;   /* total # of text page corruptions detected */

extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);

/* Operations for vm_toggle_entry_reuse(). */
#define VM_TOGGLE_CLEAR 0
#define VM_TOGGLE_SET 1
#define VM_TOGGLE_GETVALUE 999
int vm_toggle_entry_reuse(int, int*);

#define SWAP_WRITE 0x00000000   /* Write buffer (pseudo flag). */
#define SWAP_READ 0x00000001    /* Read buffer. */
#define SWAP_ASYNC 0x00000002   /* Start I/O, do not wait. */

extern kern_return_t compressor_memory_object_create(
	memory_object_size_t,
	memory_object_t *);

extern boolean_t vm_compressor_low_on_space(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
void do_fastwake_warmup_all(void);

#if defined(__arm64__)
extern void vm_panic_hibernate_write_image_failed(int err);
#endif /* __arm64__ */

#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);


/* Singly-linked list node describing one extent to TRIM (offset/length in bytes). */
struct trim_list {
	uint64_t tl_offset;
	uint64_t tl_length;
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);

#define MAX_SWAPFILENAME_LEN 1024
#define SWAPFILENAME_INDEX_LEN 2 /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

/* Event counters for object collapse / UPL creation paths. */
struct vm_counters {
	unsigned int do_collapse_compressor;
	unsigned int do_collapse_compressor_pages;
	unsigned int do_collapse_terminate;
	unsigned int do_collapse_terminate_failure;
	unsigned int should_cow_but_wired;
	unsigned int create_upl_extra_cow;
	unsigned int create_upl_extra_cow_pages;
	unsigned int create_upl_lookup_failure_write;
	unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
/* Per-outcome tallies for secluded-pool page grabs. */
struct vm_page_secluded_data {
	int eligible_for_secluded;
	int grab_success_free;
	int grab_success_other;
	int grab_failure_locked;
	int grab_failure_state;
	int grab_failure_dirty;
	int grab_for_iokit;
	int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */
extern int secluded_for_apps;
extern int secluded_for_iokit;
extern int secluded_for_filecache;
#if 11 /* NOTE(review): "#if 11" is always true — presumably a quick-toggle left enabled; confirm intent */
extern int secluded_for_fbdp;
#endif

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
	memory_object_control_t control,
	boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

#define MAX_PAGE_RANGE_QUERY (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
	vm_map_t target_map,
	memory_object_size_t *size,
	memory_object_offset_t offset,
	vm_prot_t permission,
	vm_named_entry_kernel_flags_t vmne_kflags,
	ipc_port_t *object_handle,
	ipc_port_t parent_handle);

extern kern_return_t memory_entry_check_for_adjustment(
	vm_map_t src_map,
	ipc_port_t port,
	vm_map_offset_t *overmap_start,
	vm_map_offset_t *overmap_end);

/*
 * Round x up to the next multiple of y.
 * NOTE: evaluates both arguments more than once — do not pass expressions
 * with side effects; behavior for y == 0 is undefined (division by zero).
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE 0
#define VM_SWAP_FLAGS_FORCE_DEFRAG 1
#define VM_SWAP_FLAGS_FORCE_RECLAIM 2

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE (3)

#endif /* __arm64__ */

#if MACH_ASSERT
struct proc;
extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
struct thread;
extern uint64_t thread_tid(struct thread *);
/* Filters controlling which DEBUG4K categories / processes get logged. */
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];

/*
 * Emit one DEBUG4K log line if "category" is enabled, either globally
 * (bit set in debug4k_filter) or for the current process by name
 * (bit set in debug4k_proc_filter and debug4k_proc_name matches).
 * "category" is evaluated exactly once (captured in __category).
 */
#define __DEBUG4K(category, fmt, ...)                                   \
	MACRO_BEGIN                                                     \
	int __category = (category);                                    \
	struct thread *__t = NULL;                                      \
	struct proc *__p = NULL;                                        \
	const char *__pname = "?";                                      \
	boolean_t __do_log = FALSE;                                     \
                                                                        \
	if ((1 << __category) & debug4k_filter) {                       \
	        __do_log = TRUE;                                        \
	} else if (((1 << __category) & debug4k_proc_filter) &&         \
	    debug4k_proc_name[0] != '\0') {                             \
	        __p = current_proc();                                   \
	        if (__p != NULL) {                                      \
	                __pname = proc_best_name(__p);                  \
	        }                                                       \
	        if (!strcmp(debug4k_proc_name, __pname)) {              \
	                __do_log = TRUE;                                \
	        }                                                       \
	}                                                               \
	if (__do_log) {                                                 \
	        if (__p == NULL) {                                      \
	                __p = current_proc();                           \
	                if (__p != NULL) {                              \
	                        __pname = proc_best_name(__p);          \
	                }                                               \
	        }                                                       \
	        __t = current_thread();                                 \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
	            debug4k_category_name[__category],                  \
	            __p ? proc_pid(__p) : 0,                            \
	            __pname,                                            \
	            __t,                                                \
	            thread_tid(__t),                                    \
	            __FUNCTION__,                                       \
	            __LINE__,                                           \
	            ##__VA_ARGS__);                                     \
	}                                                               \
	MACRO_END

/* DEBUG4K categories: each is a bit index into debug4k_filter / debug4k_proc_filter. */
#define __DEBUG4K_ERROR 0
#define __DEBUG4K_LIFE 1
#define __DEBUG4K_LOAD 2
#define __DEBUG4K_FAULT 3
#define __DEBUG4K_COPY 4
#define __DEBUG4K_SHARE 5
#define __DEBUG4K_ADJUST 6
#define __DEBUG4K_PMAP 7
#define __DEBUG4K_MEMENTRY 8
#define __DEBUG4K_IOKIT 9
#define __DEBUG4K_UPL 10
#define __DEBUG4K_EXC 11
#define __DEBUG4K_VFS 12

#define DEBUG4K_ERROR(...) __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...) __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...) __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...) __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...) __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...) __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...) __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...) __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...) __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...) __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...) __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...) __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...) __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)

#else /* MACH_ASSERT */

/* Non-debug builds: DEBUG4K logging compiles away to nothing. */
#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)
856 857 #endif /* MACH_ASSERT */ 858 859 860 #endif /* _VM_VM_PROTOS_H_ */ 861 862 #endif /* XNU_KERNEL_PRIVATE */ 863