1 /* 2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved. 3 * 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ 5 * 6 * This file contains Original Code and/or Modifications of Original Code 7 * as defined in and that are subject to the Apple Public Source License 8 * Version 2.0 (the 'License'). You may not use this file except in 9 * compliance with the License. The rights granted to you under the License 10 * may not be used to create, or enable the creation or redistribution of, 11 * unlawful or unlicensed copies of an Apple operating system, or to 12 * circumvent, violate, or enable the circumvention or violation of, any 13 * terms of an Apple operating system software license agreement. 14 * 15 * Please obtain a copy of the License at 16 * http://www.opensource.apple.com/apsl/ and read it before using this file. 17 * 18 * The Original Code and all software distributed under the License are 19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER 20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, 21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, 22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. 23 * Please see the License for the specific language governing rights and 24 * limitations under the License. 25 * 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ 27 */ 28 /* 29 * @OSF_COPYRIGHT@ 30 */ 31 /* 32 * Mach Operating System 33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University 34 * All Rights Reserved. 35 * 36 * Permission to use, copy, modify and distribute this software and its 37 * documentation is hereby granted, provided that both the copyright 38 * notice and this permission notice appear in all copies of the 39 * software, derivative works or modified versions, and any portions 40 * thereof, and that both notices appear in supporting documentation. 41 * 42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS" 43 * CONDITION. 
CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR 44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE. 45 * 46 * Carnegie Mellon requests users of this software to return to 47 * 48 * Software Distribution Coordinator or [email protected] 49 * School of Computer Science 50 * Carnegie Mellon University 51 * Pittsburgh PA 15213-3890 52 * 53 * any improvements or extensions that they make and grant Carnegie Mellon 54 * the rights to redistribute these changes. 55 */ 56 /* 57 */ 58 /* 59 * File: vm_object_xnu.h 60 * Author: Avadis Tevanian, Jr., Michael Wayne Young 61 * Date: 1985 62 * 63 * Virtual memory object module definitions. 64 */ 65 66 #ifndef _VM_VM_OBJECT_XNU_H_ 67 #define _VM_VM_OBJECT_XNU_H_ 68 69 #ifdef XNU_KERNEL_PRIVATE 70 71 #include <kern/queue.h> 72 73 #ifdef MACH_KERNEL_PRIVATE 74 75 #include <debug.h> 76 #include <mach_assert.h> 77 78 #include <mach/kern_return.h> 79 #include <mach/boolean.h> 80 #include <mach/memory_object_types.h> 81 #include <mach/port.h> 82 #include <mach/vm_prot.h> 83 #include <mach/vm_param.h> 84 #include <mach/machine/vm_types.h> 85 #include <kern/locks.h> 86 #include <kern/assert.h> 87 #include <kern/misc_protos.h> 88 #include <vm/pmap.h> 89 #include <vm/vm_external.h> 90 #include <vm/vm_options.h> 91 #include <kern/macro_help.h> 92 #include <ipc/ipc_types.h> 93 #include <vm/vm_page.h> 94 95 96 struct vm_page; 97 98 /* 99 * Types defined: 100 * 101 * vm_object_t Virtual memory object. 102 * vm_object_fault_info_t Used to determine cluster size. 
 */

/*
 * Per-fault state handed down the fault-handling path; also used to
 * determine cluster size for read-ahead (see comment block above).
 */
struct vm_object_fault_info {
	int                     interruptible;
	uint32_t                user_tag;
	vm_size_t               cluster_size;
	vm_behavior_t           behavior;
	vm_object_offset_t      lo_offset;
	vm_object_offset_t      hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ csm_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	/* boolean_t */ fi_xnu_user_debug:1,
	/* boolean_t */ fi_used_for_tpro:1,
	/* boolean_t */ fi_change_wiring:1,
	/* boolean_t */ fi_no_sleep:1,
	__vm_object_fault_info_unused_bits:19;
	int             pmap_options;
};

/* Convenience accessors for the anonymous-role unions inside struct vm_object */
#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object... (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_object's
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_object's in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t vou_size;      /* Object size (only valid if internal) */
		int     vou_cache_pages_to_scan; /* pages yet to be visited in an
		                                  * external object in cache
		                                  */
	} vo_un1;

	struct vm_page  *memq_hint;     /* NOTE(review): appears to cache a
	                                 * recently-looked-up page in memq to
	                                 * speed subsequent lookups — confirm
	                                 * at use sites */
	os_ref_atomic_t ref_count;      /* Number of references */
	unsigned int    resident_page_count;
	/* number of resident pages */
	unsigned int    wired_page_count; /* number of wired pages
	                                   * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int    reusable_page_count;

	struct vm_object *vo_copy;      /* Object that should receive
	                                 * a copy of my changed pages,
	                                 * for copy_delay, or just the
	                                 * temporary object that
	                                 * shadows this object, for
	                                 * copy_call.
	                                 */
	uint64_t        vo_copy_version; /* NOTE(review): presumably bumped
	                                  * when vo_copy changes — verify */
	struct vm_object *shadow;       /* My shadow */
	memory_object_t pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset; /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	uint16_t        paging_in_progress;
	uint16_t        vo_size_delta;
	uint32_t        activity_in_progress;

	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:7,     /* Bit array of "want to be
	                                         * awakened" notations. See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state. See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * becomes ripe.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	                                        /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,
	                                        /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention, by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction; UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	                                        /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly. This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,
	                                        /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous. However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,
	                                        /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ for_realtime:1,
	                                        /* Might be needed for realtime code path */
	/* vm_object_destroy_reason_t */ no_pager_reason:3,
	                                        /* differentiate known and unknown causes */
#if FBDP_DEBUG_OBJECT_NO_PAGER
	/* boolean_t */ fbdp_tracked:1;
#else /* FBDP_DEBUG_OBJECT_NO_PAGER */
	__object1_unused_bits:1;
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in */
	                                        /* shadow but not in object */
	int32_t                 sequential;     /* sequential access size */

	/* NOTE(review): fault counters; exact semantics not visible in this
	 * header — confirm in vm_fault.c */
	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits */
	    code_signed:1,              /* pages are signed and should be
	                                 * validated; the signatures are stored
	                                 * with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,            /* suppresses wire-tag accounting
	                                 * updates for this object (checked by
	                                 * VM_OBJECT_WIRED_PAGE_UPDATE_END) */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t         scan_collisions;
#if COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1
	/* This value is used for selecting a chead in the compressor for internal objects.
	 * see rdar://140849693 for a possible way to implement the chead_hint functionality
	 * in a way that doesn't require these bits */
	uint8_t         vo_chead_hint:COMPRESSOR_PAGEOUT_CHEADS_BITS;
#endif /* COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1 */
	uint8_t         __object4_unused_bits:8 - COMPRESSOR_PAGEOUT_CHEADS_BITS;
	vm_tag_t        wire_tag;       /* kernel memory tag charged for this
	                                 * object's wired pages; maintained by
	                                 * VM_OBJECT_WIRED / VM_OBJECT_UNWIRED */

#if CONFIG_PHANTOM_CACHE
	uint32_t        phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t    uplq;           /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES 25 /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS     10 /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

	queue_chain_t   objq;           /* object queue - currently used for purgable queues */
	queue_chain_t   task_objq;      /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t   wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void            *purgeable_owner_bt[16];
	task_t          vo_purgeable_volatilizer; /* who made it volatile? */
	void            *purgeable_volatilizer_bt[16];
#endif /* DEBUG */

	/*
	 * If this object is backed by anonymous memory, this represents the ID of
	 * the vm_map that the memory originated from (i.e. this points backwards in
	 * shadow chains). Note that an originator is present even if the object
	 * hasn't been faulted into the backing pmap yet.
	 */
	vm_map_serial_t vmo_provenance;
	uint32_t        vmo_pl_req_in_progress; /* page list request in progress */
	uint32_t        vo_inherit_copy_none:1, /* NOTE(review): semantics not
	                                         * visible here — verify at use
	                                         * sites */
	    __vo_unused_padding:31;
};

/*
 * True when a fault on a volatile/empty purgeable object should fail
 * rather than supply a zero-filled page.
 */
#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))

extern const vm_object_t kernel_object_default; /* the default kernel object */

extern const vm_object_t compressor_object;     /* the single compressor object, allocates pages for compressed
                                                 * buffers (not the segments) */

extern const vm_object_t retired_pages_object;  /* pages retired due to ECC, should never be used */

#if HAS_MTE
extern const vm_object_t mte_tags_object;       /* pages that are wired, holding MTE tags */
extern const vm_object_t kernel_object_tagged;  /* kernel object for MTE tagged pages */

#define is_kernel_object(object) ((object) == kernel_object_default || (object) == kernel_object_tagged)
#define vm_object_is_mte_mappable(object) (((object)->wimg_bits & VM_WIMG_MASK) == VM_WIMG_MTE)
#define vm_object_is_mte_mappable_with_page(object, page) ( \
	vm_object_is_mte_mappable(object) && !vm_page_is_fictitious(page) \
)
#define vm_object_mte_set(object) ((object)->wimg_bits = VM_WIMG_MTE)
/* Assert that the page's MTE-usage flag agrees with its object's mappability */
#define assert_mte_vmo_matches_vmp(vmo, vmp) ({ \
	__assert_only vm_page_t __vmp = (vmp); \
	assert3u(vm_object_is_mte_mappable_with_page(vmo, __vmp), ==, \
	    (__vmp)->vmp_using_mte); \
})

#else /* !HAS_MTE */

#define is_kernel_object(object) ((object) == kernel_object_default)

#endif /* HAS_MTE */

extern const vm_object_t exclaves_object;       /* holds VM pages owned by exclaves */
#if HAS_MTE
extern const vm_object_t exclaves_object_tagged; /* holds MTE tagged VM pages owned by exclaves */
#endif /* HAS_MTE */

# define VM_MSYNC_INITIALIZED   0
# define VM_MSYNC_SYNCHRONIZING 1
# define VM_MSYNC_DONE          2


extern lck_grp_t  vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;

/** os_refgrp_t for vm_objects */
os_refgrp_decl_extern(vm_object_refgrp);

/* VM_TAG_ACTIVE_UPDATE must be defined (to 0 or 1) by this point */
#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
/* With active tag updates the wired-object queue is unused; trap misuse. */
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
/* Add the object to the global wired-objects queue (must not already be queued) */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	    vm_object_t, wired_objq);                                   \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
/* Remove the object from the global wired-objects queue, if queued */
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	            vm_object_t, wired_objq);                           \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

/* Record the kernel memory tag under which this object's pages are wired */
#define VM_OBJECT_WIRED(object, tag)                                    \
	MACRO_BEGIN                                                     \
	assert(VM_KERN_MEMORY_NONE != (tag));                           \
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);              \
	(object)->wire_tag = (tag);                                     \
	if (!VM_TAG_ACTIVE_UPDATE) {                                    \
	        VM_OBJECT_WIRED_ENQUEUE((object));                      \
	}                                                               \
	MACRO_END

/* Undo VM_OBJECT_WIRED: credit the tag's size back and clear the tag */
#define VM_OBJECT_UNWIRED(object)                                       \
	MACRO_BEGIN                                                     \
	if (!VM_TAG_ACTIVE_UPDATE) {                                    \
	        VM_OBJECT_WIRED_DEQUEUE((object));                      \
	}                                                               \
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                \
	        vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count), (object)); \
	        (object)->wire_tag = VM_KERN_MEMORY_NONE;               \
	}                                                               \
	MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                       \
	MACRO_BEGIN                                                     \
	{                                                               \
	        int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                    \
	        if (__wireddelta) {                                     \
	                boolean_t __overflow __assert_only =            \
	                    os_add_overflow((object)->wired_page_count, __wireddelta, \
	                    &(object)->wired_page_count);               \
	                assert(!__overflow);                            \
	                if (!(object)->internal &&                      \
	                    (object)->vo_ledger_tag &&                  \
	                    VM_OBJECT_OWNER((object)) != NULL) {        \
	                        vm_object_wired_page_update_ledgers(object, __wireddelta); \
	                }                                               \
	                if (!(object)->pageout && !(object)->no_tag_update) { \
	                        if (__wireddelta > 0) {                 \
	                                assert (VM_KERN_MEMORY_NONE != (tag)); \
	                                if (VM_KERN_MEMORY_NONE == __waswired) { \
	                                        VM_OBJECT_WIRED((object), (tag)); \
	                                }                               \
	                                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	                        } else if (VM_KERN_MEMORY_NONE != __waswired) { \
	                                assert (VM_KERN_MEMORY_NONE != (object)->wire_tag); \
	                                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	                                if (!(object)->wired_page_count) { \
	                                        VM_OBJECT_UNWIRED((object)); \
	                                }                               \
	                        }                                       \
	                }                                               \
	        }                                                       \
	}                                                               \
	MACRO_END

/* Only valid between VM_OBJECT_WIRED_PAGE_UPDATE_START/_END */
#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)                       \
	__wireddelta += delta;                                          \

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                             \
	if (vm_page_is_canonical(m)) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                          \
	if (vm_page_is_canonical(m)) __wireddelta--;

#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

extern void kdp_vm_object_sleep_find_owner(
	event64_t          wait_event,
	block_hint_t       wait_type,
	thread_waitinfo_t *waitinfo);

#endif /* MACH_KERNEL_PRIVATE */

#if CONFIG_IOSCHED
/* A request to change the I/O priority of an in-flight block-device I/O */
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	struct mpsc_queue_chain iorr_elm;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

extern void page_worker_init(void);

/* Policy for selecting a compressor chead (per-PID vs. per-coalition) */
__enum_closed_decl(vm_chead_select_t, uint32_t, {
	CSEL_MIN = 1,
	CSEL_BY_PID = 1,
	CSEL_BY_COALITION = 2,
	CSEL_MAX = 2
});

#endif /* XNU_KERNEL_PRIVATE */

#endif /* _VM_VM_OBJECT_XNU_H_ */