1 /* 2 * Copyright (c) 2004-2020 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 29 #ifdef XNU_KERNEL_PRIVATE 30 31 #ifndef _VM_VM_PROTOS_H_ 32 #define _VM_VM_PROTOS_H_ 33 34 #include <mach/mach_types.h> 35 #include <kern/kern_types.h> 36 37 #ifdef __cplusplus 38 extern "C" { 39 #endif 40 41 /* 42 * This file contains various type definitions and routine prototypes 43 * that are needed to avoid compilation warnings for VM code (in osfmk, 44 * default_pager and bsd). 45 * Most of these should eventually go into more appropriate header files. 
 *
 * Include it after all other header files since it doesn't include any
 * type definitions and it works around some conflicts with other header
 * files.
 */

/*
 * iokit
 */
/* Pager callback into IOKit to perform a data action on a device mapping. */
extern kern_return_t device_data_action(
	uintptr_t device_handle,
	ipc_port_t device_pager,
	vm_prot_t protection,
	vm_object_offset_t offset,
	vm_size_t size);

extern kern_return_t device_close(
	uintptr_t device_handle);

extern boolean_t vm_swap_files_pinned(void);

/*
 * osfmk
 */
/* Fallback prototypes when the real IPC headers have not been included. */
#ifndef _IPC_IPC_PORT_H_
extern mach_port_name_t ipc_port_copyout_send(
	ipc_port_t sright,
	ipc_space_t space);
extern mach_port_name_t ipc_port_copyout_send_pinned(
	ipc_port_t sright,
	ipc_space_t space);
#endif /* _IPC_IPC_PORT_H_ */

#ifndef _KERN_IPC_TT_H_

/* Map the generic name onto the kernel-space task lookup. */
#define port_name_to_task(name) port_name_to_task_kernel(name)

extern task_t port_name_to_task_kernel(
	mach_port_name_t name);
extern task_t port_name_to_task_read(
	mach_port_name_t name);
extern task_t port_name_to_task_name(
	mach_port_name_t name);
extern void ipc_port_release_send(
	ipc_port_t port);
#endif /* _KERN_IPC_TT_H_ */

extern ipc_space_t get_task_ipcspace(
	task_t t);

#if CONFIG_MEMORYSTATUS
extern int max_task_footprint_mb;       /* Per-task limit on physical memory consumption in megabytes */
#endif /* CONFIG_MEMORYSTATUS */

/* Some loose-ends VM stuff */

extern const vm_size_t msg_ool_size_small;

extern kern_return_t vm_tests(void);
extern void consider_machine_adjust(void);
/* Accessors for vm_map_t attributes (min/max offsets, size, entry counts). */
extern vm_map_offset_t get_map_min(vm_map_t);
extern vm_map_offset_t get_map_max(vm_map_t);
extern vm_map_size_t get_vmmap_size(vm_map_t);
extern int get_task_page_size(task_t);
#if CONFIG_COREDUMP
extern int get_vmmap_entries(vm_map_t);
#endif
extern int get_map_nentries(vm_map_t);

extern vm_map_offset_t vm_map_page_mask(vm_map_t);

extern kern_return_t
vm_map_purgable_control(
	vm_map_t map,
	vm_map_offset_t address,
	vm_purgable_t control,
	int *state);

#if MACH_ASSERT
/* Debug-only: tag a map/pmap with its owning process for ledger checks. */
extern void vm_map_pmap_set_process(
	vm_map_t map,
	int pid,
	char *procname);
extern void vm_map_pmap_check_ledgers(
	pmap_t pmap,
	ledger_t ledger,
	int pid,
	char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t * vnodeaddr,
	uint32_t * vid);

#if CONFIG_COREDUMP
extern boolean_t coredumpok(vm_map_t map, mach_vm_offset_t va);
#endif

/*
 * VM routines that used to be published to
 * user space, and are now restricted to the kernel.
 *
 * They should eventually go away entirely -
 * to be replaced with standard vm_map() and
 * vm_deallocate() calls.
 */

extern kern_return_t vm_upl_map
(
	vm_map_t target_task,
	upl_t upl,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap
(
	vm_map_t target_task,
	upl_t upl
);

extern kern_return_t vm_upl_map_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size,
	vm_prot_t prot,
	vm_address_t *address
);

extern kern_return_t vm_upl_unmap_range
(
	vm_map_t target_task,
	upl_t upl,
	vm_offset_t offset,
	vm_size_t size
);

extern kern_return_t vm_region_object_create
(
	vm_map_t target_task,
	vm_size_t size,
	ipc_port_t *object_handle
);

#if CONFIG_CODE_DECRYPTION
#define VM_MAP_DEBUG_APPLE_PROTECT      MACH_ASSERT
#if VM_MAP_DEBUG_APPLE_PROTECT
extern int vm_map_debug_apple_protect;
#endif /* VM_MAP_DEBUG_APPLE_PROTECT */
struct pager_crypt_info;
/* Overlay an "apple protect" (encrypted text) pager on [start, end) of a map. */
extern kern_return_t vm_map_apple_protected(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	uint32_t cryptid);
extern memory_object_t apple_protect_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	vm_object_offset_t crypto_backing_offset,
	struct pager_crypt_info *crypt_info,
	vm_object_offset_t crypto_start,
	vm_object_offset_t crypto_end,
	boolean_t cache_pager);
#endif /* CONFIG_CODE_DECRYPTION */

struct vm_shared_region_slide_info;
extern kern_return_t vm_map_shared_region(
	vm_map_t map,
	vm_map_offset_t start,
	vm_map_offset_t end,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info);

extern memory_object_t shared_region_pager_setup(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);

/* Each returns the number of pages purged from the respective pager type. */
extern uint64_t apple_protect_pager_purge_all(void);
extern uint64_t shared_region_pager_purge_all(void);
extern uint64_t dyld_pager_purge_all(void);

#if __has_feature(ptrauth_calls)
/* Pointer-authentication (arm64e) support for shared region JOP keys. */
extern memory_object_t shared_region_pager_match(
	vm_object_t backing_object,
	vm_object_offset_t backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t jop_key);
extern void shared_region_key_alloc(
	char *shared_region_id,
	bool inherit,
	uint64_t inherited_key);
extern void shared_region_key_dealloc(
	char *shared_region_id);
extern uint64_t generate_jop_key(void);
extern void shared_region_pager_match_task_key(memory_object_t memobj, task_t task);
#endif /* __has_feature(ptrauth_calls) */
extern bool vm_shared_region_is_reslide(struct task *task);

struct vnode;
extern memory_object_t swapfile_pager_setup(struct vnode *vp);
extern memory_object_control_t swapfile_pager_control(memory_object_t mem_obj);

/* 16KB page geometry constants (0x4000 = 16K). */
#if __arm64__ || (__ARM_ARCH_7K__ >= 2)
#define SIXTEENK_PAGE_SIZE      0x4000
#define SIXTEENK_PAGE_MASK      0x3FFF
#define SIXTEENK_PAGE_SHIFT     14
#endif /* __arm64__ || (__ARM_ARCH_7K__ >= 2) */

/* 4KB page geometry constants (0x1000 = 4K). */
#define FOURK_PAGE_SIZE         0x1000
#define FOURK_PAGE_MASK         0xFFF
#define FOURK_PAGE_SHIFT        12

#if __arm64__

extern unsigned int page_shift_user32;

#define VM_MAP_DEBUG_FOURK      MACH_ASSERT
#if VM_MAP_DEBUG_FOURK
extern int vm_map_debug_fourk;
#endif /* VM_MAP_DEBUG_FOURK */
/* "fourk" pager: presents 4K sub-pages backed by distinct objects. */
extern memory_object_t fourk_pager_create(void);
extern vm_object_t fourk_pager_to_vm_object(memory_object_t mem_obj);
extern kern_return_t fourk_pager_populate(
	memory_object_t mem_obj,
	boolean_t overwrite,
	int index,
	vm_object_t new_backing_object,
	vm_object_offset_t new_backing_offset,
	vm_object_t *old_backing_object,
	vm_object_offset_t *old_backing_offset);
#endif /* __arm64__ */

/*
 * bsd
 */
struct vnode;

extern void vnode_setswapmount(struct vnode *);
extern int64_t vnode_getswappin_avail(struct vnode *);

extern void vnode_pager_was_dirtied(
	struct vnode *,
	vm_object_offset_t,
	vm_object_offset_t);

typedef int pager_return_t;
extern pager_return_t vnode_pagein(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern pager_return_t vnode_pageout(
	struct vnode *, upl_t,
	upl_offset_t, vm_object_offset_t,
	upl_size_t, int, int *);
extern uint32_t vnode_trim(struct vnode *, int64_t offset, unsigned long len);
extern memory_object_t vnode_pager_setup(
	struct vnode *, memory_object_t);
extern vm_object_offset_t vnode_pager_get_filesize(
	struct vnode *);
extern uint32_t vnode_pager_isinuse(
	struct vnode *);
extern boolean_t vnode_pager_isSSD(
	struct vnode *);
extern void vnode_pager_throttle(
	void);
extern uint32_t vnode_pager_return_throttle_io_limit(
	struct vnode *,
	uint32_t *);
extern kern_return_t vnode_pager_get_name(
	struct vnode *vp,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
struct timespec;
extern kern_return_t vnode_pager_get_mtime(
	struct vnode *vp,
	struct timespec *mtime,
	struct timespec *cs_mtime);
extern kern_return_t vnode_pager_get_cs_blobs(
	struct vnode *vp,
	void **blobs);

#if CONFIG_IOSCHED
void vnode_pager_issue_reprioritize_io(
	struct vnode *devvp,
	uint64_t blkno,
	uint32_t len,
	int priority);
#endif

#if CHECK_CS_VALIDATION_BITMAP
/* used by the vnode_pager_cs_validation_bitmap routine*/
#define CS_BITMAP_SET   1
#define CS_BITMAP_CLEAR 2
#define CS_BITMAP_CHECK 3

#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * vnode pager: memory_object interface (memory_object_t-keyed variants of
 * the struct vnode * routines above).
 */
extern kern_return_t
vnode_pager_data_unlock(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_size_t size,
	vm_prot_t desired_access);
extern kern_return_t vnode_pager_init(
	memory_object_t,
	memory_object_control_t,
	memory_object_cluster_size_t);
extern kern_return_t vnode_pager_get_object_size(
	memory_object_t,
	memory_object_offset_t *);

#if CONFIG_IOSCHED
extern kern_return_t vnode_pager_get_object_devvp(
	memory_object_t,
	uintptr_t *);
#endif

extern void vnode_pager_dirtied(
	memory_object_t,
	vm_object_offset_t,
	vm_object_offset_t);
extern kern_return_t vnode_pager_get_isinuse(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_isSSD(
	memory_object_t,
	boolean_t *);
extern kern_return_t vnode_pager_get_throttle_io_limit(
	memory_object_t,
	uint32_t *);
extern kern_return_t vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p);
extern kern_return_t vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime);

#if CHECK_CS_VALIDATION_BITMAP
extern kern_return_t vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype);
#endif /*CHECK_CS_VALIDATION_BITMAP*/

extern kern_return_t ubc_cs_check_validation_bitmap(
	struct vnode *vp,
	memory_object_offset_t offset,
	int optype);

extern kern_return_t vnode_pager_data_request(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	vm_prot_t,
	memory_object_fault_info_t);
extern kern_return_t vnode_pager_data_return(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t,
	memory_object_offset_t *,
	int *,
	boolean_t,
	boolean_t,
	int);
extern kern_return_t vnode_pager_data_initialize(
	memory_object_t,
	memory_object_offset_t,
	memory_object_cluster_size_t);
extern void vnode_pager_reference(
	memory_object_t mem_obj);
extern kern_return_t vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot);
extern kern_return_t vnode_pager_last_unmap(
	memory_object_t mem_obj);
extern void vnode_pager_deallocate(
	memory_object_t);
extern kern_return_t vnode_pager_terminate(
	memory_object_t);
extern void vnode_pager_vrele(
	struct vnode *vp);
extern struct vnode *vnode_pager_lookup_vnode(
	memory_object_t);

extern int ubc_map(
	struct vnode *vp,
	int flags);
extern void ubc_unmap(
	struct vnode *vp);

struct vm_map_entry;
extern struct vm_object *find_vnode_object(struct vm_map_entry *entry);

/* device pager: memory_object interface for device-backed memory. */
extern void device_pager_reference(memory_object_t);
extern void device_pager_deallocate(memory_object_t);
extern kern_return_t device_pager_init(memory_object_t,
    memory_object_control_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_terminate(memory_object_t);
extern kern_return_t device_pager_data_request(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    vm_prot_t,
    memory_object_fault_info_t);
extern kern_return_t device_pager_data_return(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t,
    memory_object_offset_t *,
    int *,
    boolean_t,
    boolean_t,
    int);
extern kern_return_t device_pager_data_initialize(memory_object_t,
    memory_object_offset_t,
    memory_object_cluster_size_t);
extern kern_return_t device_pager_map(memory_object_t, vm_prot_t);
extern kern_return_t device_pager_last_unmap(memory_object_t);
extern kern_return_t device_pager_populate_object(
	memory_object_t device,
	memory_object_offset_t offset,
	ppnum_t page_num,
	vm_size_t size);
extern memory_object_t device_pager_setup(
	memory_object_t,
	uintptr_t,
	vm_size_t,
	int);

extern boolean_t is_device_pager_ops(const struct memory_object_pager_ops *pager_ops);

extern kern_return_t pager_map_to_phys_contiguous(
	memory_object_control_t object,
	memory_object_offset_t offset,
	addr64_t base_vaddr,
	vm_size_t size);

extern kern_return_t memory_object_create_named(
	memory_object_t pager,
	memory_object_offset_t size,
	memory_object_control_t *control);

struct macx_triggers_args;
extern int mach_macx_triggers(
	struct macx_triggers_args *args);

extern int macx_swapinfo(
	memory_object_size_t *total_p,
	memory_object_size_t *avail_p,
	vm_size_t *pagesize_p,
	boolean_t *encrypted_p);

extern void log_stack_execution_failure(addr64_t vaddr, vm_prot_t prot);
extern void log_unnest_badness(
	vm_map_t map,
	vm_map_offset_t start_unnest,
	vm_map_offset_t end_unnest,
	boolean_t is_nested_map,
	vm_map_offset_t lowest_unnestable_addr);

struct proc;
struct proc *current_proc(void);
extern int cs_allow_invalid(struct proc *p);
extern int cs_invalid_page(addr64_t vaddr, boolean_t *cs_killed);

/* Result bits reported through cs_validate_range()'s "result" out-param. */
#define CS_VALIDATE_TAINTED     0x00000001
#define CS_VALIDATE_NX          0x00000002
extern boolean_t cs_validate_range(struct vnode *vp,
    memory_object_t pager,
    memory_object_offset_t offset,
    const void *data,
    vm_size_t size,
    unsigned *result);
extern void cs_validate_page(
	struct vnode *vp,
	memory_object_t pager,
	memory_object_offset_t offset,
	const void *data,
	int *validated_p,
	int *tainted_p,
	int *nx_p);

/* Named memory entry operations (entry identified by its IPC port). */
extern kern_return_t memory_entry_purgeable_control_internal(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t memory_entry_access_tracking_internal(
	ipc_port_t entry_port,
	int *access_tracking,
	uint32_t *access_tracking_reads,
	uint32_t *access_tracking_writes);

extern kern_return_t mach_memory_object_memory_entry_64(
	host_t host,
	boolean_t internal,
	vm_object_offset_t size,
	vm_prot_t permission,
	memory_object_t pager,
	ipc_port_t *entry_handle);

extern kern_return_t mach_memory_entry_purgable_control(
	ipc_port_t entry_port,
	vm_purgable_t control,
	int *state);

extern kern_return_t mach_memory_entry_get_page_counts(
	ipc_port_t entry_port,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

extern kern_return_t mach_memory_entry_phys_page_offset(
	ipc_port_t entry_port,
	vm_object_offset_t *offset_p);

extern kern_return_t mach_memory_entry_map_size(
	ipc_port_t entry_port,
	vm_map_t map,
	memory_object_offset_t offset,
	memory_object_offset_t size,
	mach_vm_size_t *map_size);

extern kern_return_t vm_map_range_physical_size(
	vm_map_t map,
	vm_map_address_t start,
	mach_vm_size_t size,
	mach_vm_size_t * phys_size);

extern kern_return_t mach_memory_entry_page_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

extern kern_return_t mach_memory_entry_range_op(
	ipc_port_t entry_port,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	int *range);

extern void mach_memory_entry_port_release(ipc_port_t port);
extern vm_named_entry_t mach_memory_entry_from_port(ipc_port_t port);
extern struct vm_named_entry *mach_memory_entry_allocate(ipc_port_t *user_handle_p);
extern vm_object_t vm_named_entry_to_vm_object(
	vm_named_entry_t named_entry);
extern void vm_named_entry_associate_vm_object(
	vm_named_entry_t named_entry,
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_prot_t prot);

extern int macx_backing_store_compaction(int flags);
extern unsigned int mach_vm_ctl_page_free_wanted(void);

extern int no_paging_space_action(void);

extern unsigned int vmtc_total; /* total # of text page corruptions detected */

extern kern_return_t revalidate_text_page(task_t, vm_map_offset_t);

/* Toggle values for vm_toggle_entry_reuse(); GETVALUE queries without changing. */
#define VM_TOGGLE_CLEAR         0
#define VM_TOGGLE_SET           1
#define VM_TOGGLE_GETVALUE      999
int vm_toggle_entry_reuse(int, int*);

#define SWAP_WRITE      0x00000000      /* Write buffer (pseudo flag). */
#define SWAP_READ       0x00000001      /* Read buffer. */
#define SWAP_ASYNC      0x00000002      /* Start I/O, do not wait. */

extern kern_return_t compressor_memory_object_create(
	memory_object_size_t,
	memory_object_t *);

/* Compressor / swap space pressure queries. */
extern boolean_t vm_compressor_low_on_space(void);
extern bool vm_compressor_compressed_pages_nearing_limit(void);
extern boolean_t vm_compressor_out_of_space(void);
extern int vm_swap_low_on_space(void);
extern int vm_swap_out_of_space(void);
void do_fastwake_warmup_all(void);

#if defined(__arm64__)
extern void vm_panic_hibernate_write_image_failed(int err);
#endif /* __arm64__ */

#if CONFIG_JETSAM
extern int proc_get_memstat_priority(struct proc*, boolean_t);
#endif /* CONFIG_JETSAM */

/* the object purger. purges the next eligible object from memory. */
/* returns TRUE if an object was purged, otherwise FALSE. */
boolean_t vm_purgeable_object_purge_one_unlocked(int force_purge_below_group);
void vm_purgeable_nonvolatile_owner_update(task_t owner,
    int delta);
void vm_purgeable_volatile_owner_update(task_t owner,
    int delta);
void vm_owned_objects_disown(task_t task);


/* Singly-linked list node describing one extent to TRIM on a vnode. */
struct trim_list {
	uint64_t tl_offset;   /* extent start (bytes) */
	uint64_t tl_length;   /* extent length (bytes) */
	struct trim_list *tl_next;
};

u_int32_t vnode_trim_list(struct vnode *vp, struct trim_list *tl, boolean_t route_only);

#define MAX_SWAPFILENAME_LEN    1024
#define SWAPFILENAME_INDEX_LEN  2       /* Doesn't include the terminating NULL character */

extern char swapfilename[MAX_SWAPFILENAME_LEN + 1];

/* Global event counters for object collapse / UPL creation paths. */
struct vm_counters {
	unsigned int do_collapse_compressor;
	unsigned int do_collapse_compressor_pages;
	unsigned int do_collapse_terminate;
	unsigned int do_collapse_terminate_failure;
	unsigned int should_cow_but_wired;
	unsigned int create_upl_extra_cow;
	unsigned int create_upl_extra_cow_pages;
	unsigned int create_upl_lookup_failure_write;
	unsigned int create_upl_lookup_failure_copy;
};
extern struct vm_counters vm_counters;

#if CONFIG_SECLUDED_MEMORY
/* Statistics on page grabs from the secluded pool. */
struct vm_page_secluded_data {
	int eligible_for_secluded;
	int grab_success_free;
	int grab_success_other;
	int grab_failure_locked;
	int grab_failure_state;
	int grab_failure_realtime;
	int grab_failure_dirty;
	int grab_for_iokit;
	int grab_for_iokit_success;
};
extern struct vm_page_secluded_data vm_page_secluded;

extern int num_tasks_can_use_secluded_mem;

/* boot-args */

__enum_decl(secluded_filecache_mode_t, uint8_t, {
	/*
	 * SECLUDED_FILECACHE_NONE:
	 * + no file contents in secluded pool
	 */
	SECLUDED_FILECACHE_NONE = 0,
	/*
	 * SECLUDED_FILECACHE_APPS
	 * + no files from /
	 * + files from /Applications/ are OK
	 * + files from /Applications/Camera are not OK
	 * + no files that are open for write
	 */
	SECLUDED_FILECACHE_APPS = 1,
	/*
	 * SECLUDED_FILECACHE_RDONLY
	 * + all read-only files OK, except:
	 * + dyld_shared_cache_arm64*
	 * + Camera
	 * + mediaserverd
	 */
	SECLUDED_FILECACHE_RDONLY = 2,
});

extern secluded_filecache_mode_t secluded_for_filecache;
extern bool secluded_for_apps;
extern bool secluded_for_iokit;

extern uint64_t vm_page_secluded_drain(void);
extern void memory_object_mark_eligible_for_secluded(
	memory_object_control_t control,
	boolean_t eligible_for_secluded);

#endif /* CONFIG_SECLUDED_MEMORY */

extern void memory_object_mark_for_realtime(
	memory_object_control_t control,
	bool for_realtime);

#if MACH_ASSERT
extern void memory_object_mark_for_fbdp(
	memory_object_control_t control);
#endif /* MACH_ASSERT */

#define MAX_PAGE_RANGE_QUERY    (1ULL * 1024 * 1024 * 1024) /* 1 GB */

extern kern_return_t mach_make_memory_entry_internal(
	vm_map_t target_map,
	memory_object_size_t *size,
	memory_object_offset_t offset,
	vm_prot_t permission,
	vm_named_entry_kernel_flags_t vmne_kflags,
	ipc_port_t *object_handle,
	ipc_port_t parent_handle);

extern kern_return_t
memory_entry_check_for_adjustment(
	vm_map_t src_map,
	ipc_port_t port,
	vm_map_offset_t *overmap_start,
	vm_map_offset_t *overmap_end);

extern uint64_t vm_purge_filebacked_pagers(void);

/*
 * Round x up to the next multiple of y.
 * NOTE(review): evaluates both arguments multiple times — do not pass
 * expressions with side effects.
 */
#define roundup(x, y)   ((((x) % (y)) == 0) ? \
	                (x) : ((x) + ((y) - ((x) % (y)))))

#ifdef __cplusplus
}
#endif

/*
 * Flags for the VM swapper/reclaimer.
 * Used by vm_swap_consider_defragment()
 * to force defrag/reclaim by the swap
 * GC thread.
 */
#define VM_SWAP_FLAGS_NONE              0
#define VM_SWAP_FLAGS_FORCE_DEFRAG      1
#define VM_SWAP_FLAGS_FORCE_RECLAIM     2

#if __arm64__
/*
 * Flags to control the behavior of
 * the legacy footprint entitlement.
 */
#define LEGACY_FOOTPRINT_ENTITLEMENT_IGNORE             (1)
#define LEGACY_FOOTPRINT_ENTITLEMENT_IOS11_ACCT         (2)
#define LEGACY_FOOTPRINT_ENTITLEMENT_LIMIT_INCREASE     (3)

#endif /* __arm64__ */

#if MACH_ASSERT
struct proc;
extern struct proc *current_proc(void);
extern int proc_pid(struct proc *);
extern char *proc_best_name(struct proc *);
struct thread;
extern uint64_t thread_tid(struct thread *);
extern int debug4k_filter;
extern int debug4k_proc_filter;
extern char debug4k_proc_name[];
extern const char *debug4k_category_name[];

/*
 * Debug-only 4K-page logging. A message is emitted when either:
 *  - the category's bit is set in debug4k_filter, or
 *  - the category's bit is set in debug4k_proc_filter AND the current
 *    process name matches debug4k_proc_name.
 * The log line includes category name, pid, proc name, thread, tid,
 * and the call site (__FUNCTION__:__LINE__).
 */
#define __DEBUG4K(category, fmt, ...)                                   \
	MACRO_BEGIN                                                     \
	int __category = (category);                                    \
	struct thread *__t = NULL;                                      \
	struct proc *__p = NULL;                                        \
	const char *__pname = "?";                                      \
	boolean_t __do_log = FALSE;                                     \
	                                                                \
	if ((1 << __category) & debug4k_filter) {                       \
	        __do_log = TRUE;                                        \
	} else if (((1 << __category) & debug4k_proc_filter) &&         \
	    debug4k_proc_name[0] != '\0') {                             \
	        __p = current_proc();                                   \
	        if (__p != NULL) {                                      \
	                __pname = proc_best_name(__p);                  \
	        }                                                       \
	        if (!strcmp(debug4k_proc_name, __pname)) {              \
	                __do_log = TRUE;                                \
	        }                                                       \
	}                                                               \
	if (__do_log) {                                                 \
	        if (__p == NULL) {                                      \
	                __p = current_proc();                           \
	                if (__p != NULL) {                              \
	                        __pname = proc_best_name(__p);          \
	                }                                               \
	        }                                                       \
	        __t = current_thread();                                 \
	        printf("DEBUG4K(%s) %d[%s] %p(0x%llx) %s:%d: " fmt,     \
	            debug4k_category_name[__category],                  \
	            __p ? proc_pid(__p) : 0,                            \
	            __pname,                                            \
	            __t,                                                \
	            thread_tid(__t),                                    \
	            __FUNCTION__,                                       \
	            __LINE__,                                           \
	            ##__VA_ARGS__);                                     \
	}                                                               \
	MACRO_END

/* Category indices — each selects a bit in debug4k_filter/debug4k_proc_filter. */
#define __DEBUG4K_ERROR         0
#define __DEBUG4K_LIFE          1
#define __DEBUG4K_LOAD          2
#define __DEBUG4K_FAULT         3
#define __DEBUG4K_COPY          4
#define __DEBUG4K_SHARE         5
#define __DEBUG4K_ADJUST        6
#define __DEBUG4K_PMAP          7
#define __DEBUG4K_MEMENTRY      8
#define __DEBUG4K_IOKIT         9
#define __DEBUG4K_UPL           10
#define __DEBUG4K_EXC           11
#define __DEBUG4K_VFS           12

#define DEBUG4K_ERROR(...)      __DEBUG4K(__DEBUG4K_ERROR, ##__VA_ARGS__)
#define DEBUG4K_LIFE(...)       __DEBUG4K(__DEBUG4K_LIFE, ##__VA_ARGS__)
#define DEBUG4K_LOAD(...)       __DEBUG4K(__DEBUG4K_LOAD, ##__VA_ARGS__)
#define DEBUG4K_FAULT(...)      __DEBUG4K(__DEBUG4K_FAULT, ##__VA_ARGS__)
#define DEBUG4K_COPY(...)       __DEBUG4K(__DEBUG4K_COPY, ##__VA_ARGS__)
#define DEBUG4K_SHARE(...)      __DEBUG4K(__DEBUG4K_SHARE, ##__VA_ARGS__)
#define DEBUG4K_ADJUST(...)     __DEBUG4K(__DEBUG4K_ADJUST, ##__VA_ARGS__)
#define DEBUG4K_PMAP(...)       __DEBUG4K(__DEBUG4K_PMAP, ##__VA_ARGS__)
#define DEBUG4K_MEMENTRY(...)   __DEBUG4K(__DEBUG4K_MEMENTRY, ##__VA_ARGS__)
#define DEBUG4K_IOKIT(...)      __DEBUG4K(__DEBUG4K_IOKIT, ##__VA_ARGS__)
#define DEBUG4K_UPL(...)        __DEBUG4K(__DEBUG4K_UPL, ##__VA_ARGS__)
#define DEBUG4K_EXC(...)        __DEBUG4K(__DEBUG4K_EXC, ##__VA_ARGS__)
#define DEBUG4K_VFS(...)        __DEBUG4K(__DEBUG4K_VFS, ##__VA_ARGS__)

#else /* MACH_ASSERT */

/* Release builds: DEBUG4K logging compiles away to nothing. */
#define DEBUG4K_ERROR(...)
#define DEBUG4K_LIFE(...)
#define DEBUG4K_LOAD(...)
#define DEBUG4K_FAULT(...)
#define DEBUG4K_COPY(...)
#define DEBUG4K_SHARE(...)
#define DEBUG4K_ADJUST(...)
#define DEBUG4K_PMAP(...)
#define DEBUG4K_MEMENTRY(...)
#define DEBUG4K_IOKIT(...)
#define DEBUG4K_UPL(...)
#define DEBUG4K_EXC(...)
#define DEBUG4K_VFS(...)

#endif /* MACH_ASSERT */


#endif /* _VM_VM_PROTOS_H_ */

#endif /* XNU_KERNEL_PRIVATE */