/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <kern/trustcache.h>

#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
#include <CoreEntitlements/CoreEntitlements.h>
#endif

#ifdef KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);

/* bcopy_phys and bzero_phys flags. */
#define cppvPsnk        0x000000001     /* Destination is a physical address */
#define cppvPsnkb       31
#define cppvPsrc        0x000000002     /* Source is a physical address */
#define cppvPsrcb       30
#define cppvFsnk        0x000000004     /* Destination requires flushing (only on non-coherent I/O) */
#define cppvFsnkb       29
#define cppvFsrc        0x000000008     /* Source requires flushing (only on non-coherent I/O) */
#define cppvFsrcb       28
#define cppvNoModSnk    0x000000010     /* Ignored in bcopy_phys() */
#define cppvNoModSnkb   27
#define cppvNoRefSrc    0x000000020     /* Ignored in bcopy_phys() */
#define cppvNoRefSrcb   26
#define cppvKmap        0x000000040     /* Use the kernel's vm_map */
#define cppvKmapb       25
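/*
 * Illustrative sketch (not part of the interface): copying the contents of
 * one physical page to another with copypv().  Both addresses are physical,
 * so both the source and sink flags are set; "src_pn" and "dst_pn" are
 * hypothetical page numbers assumed to be held by the caller.
 *
 *	kern_return_t kr = copypv(ptoa_64(src_pn), ptoa_64(dst_pn),
 *	    PAGE_SIZE, cppvPsrc | cppvPsnk);
 *	if (kr != KERN_SUCCESS) {
 *		// handle the failed copy
 *	}
 */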
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);


#if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
#include <mach/mach_types.h>
#include <vm/memory_types.h>

/*
 *	Routines used during BSD process creation.
 */

extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	ledger_t ledger,
	vm_map_size_t size,
	unsigned int flags);
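/*
 * Illustrative sketch (not part of the interface): creating a 64-bit user
 * pmap charged against a task ledger.  "task_ledger" is a hypothetical
 * ledger_t owned by the caller; PMAP_NULL is returned on failure.
 *
 *	pmap_t new_pmap = pmap_create_options(task_ledger, 0, PMAP_CREATE_64BIT);
 *	if (new_pmap == PMAP_NULL) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */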
#if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
/**
 * Informs the pmap layer that a process will be running with user JOP disabled,
 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
 *
 * @note This function cannot be used once the target process has started
 * executing code. It is intended for cases where user JOP is disabled based on
 * the code signature (e.g., special "keys-off" entitlements), which is too late
 * to change the flags passed to pmap_create_options.
 *
 * @param pmap The pmap belonging to the target process
 */
extern void pmap_disable_user_jop(
	pmap_t pmap);
#endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */
#endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

#if CONFIG_SPTM
#include <arm64/sptm/sptm.h>
#endif

/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size, vm_size_t alignment);    /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);                /* Early memory allocation */

extern uint_t pmap_free_pages(void);            /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern ppnum_t pmap_first_pnum;                 /* the first valid physical page on the system == atop(gDramBase) */
extern uint_t pmap_free_pages_span(void);       /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp);       /* allocate vm_page structs */

extern void pmap_init(void);                    /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);               /* Adjust free mapping count */

extern void mapping_free_prime(void);           /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

/*
 *	Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif
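/*
 * Illustrative sketch (not part of the interface): how bootstrap code might
 * drain these routines, handing each remaining unused physical page to a
 * hypothetical consumer until pmap_next_page() reports none remain.
 *
 *	ppnum_t pnum;
 *	while (pmap_next_page(&pnum)) {
 *		consume_page(pnum);     // hypothetical consumer
 *	}
 */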
#if CONFIG_SPTM
__enum_decl(pmap_mapping_type_t, uint8_t, {
	PMAP_MAPPING_TYPE_INFER = SPTM_UNTYPED,
	PMAP_MAPPING_TYPE_DEFAULT = XNU_DEFAULT,
	PMAP_MAPPING_TYPE_ROZONE = XNU_ROZONE,
	PMAP_MAPPING_TYPE_RESTRICTED = XNU_KERNEL_RESTRICTED
});

#define PMAP_PAGE_IS_USER_EXECUTABLE(m)                                         \
	({                                                                      \
	        const sptm_paddr_t __paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(m));    \
	        const sptm_frame_type_t __frame_type = sptm_get_frame_type(__paddr); \
	        sptm_type_is_user_executable(__frame_type);                     \
	})

extern bool pmap_will_retype(pmap_t pmap, vm_map_address_t vaddr, ppnum_t pn,
    vm_prot_t prot, unsigned int options, pmap_mapping_type_t mapping_type);

#else
__enum_decl(pmap_mapping_type_t, uint8_t, {
	PMAP_MAPPING_TYPE_INFER = 0,
	PMAP_MAPPING_TYPE_DEFAULT,
	PMAP_MAPPING_TYPE_ROZONE,
	PMAP_MAPPING_TYPE_RESTRICTED
});
#endif

/*
 *	Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t *virtual_start,
	vm_offset_t *virtual_end);
#endif  /* MACHINE_PAGES */

/*
 *	Routines to manage the physical map data structure.
 */
extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);          /* Release a reference. */
extern void pmap_switch(pmap_t pmap, thread_t thread);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t pmap_enter(        /* Enter a mapping */
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	pmap_mapping_type_t mapping_type);
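/*
 * Illustrative sketch (not part of the interface): entering a single wired,
 * read/write kernel mapping for page "pn" at virtual address "va" (both
 * hypothetical), with no fault type, no flags, and the mapping type left
 * for the pmap layer to infer.
 *
 *	kern_return_t kr = pmap_enter(kernel_pmap, va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, TRUE,
 *	    PMAP_MAPPING_TYPE_INFER);
 */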
extern kern_return_t pmap_enter_options(
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg,
	pmap_mapping_type_t mapping_type);
extern kern_return_t pmap_enter_options_addr(
	pmap_t pmap,
	vm_map_offset_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg,
	pmap_mapping_type_t mapping_type);

extern void pmap_remove_some_phys(
	pmap_t pmap,
	ppnum_t pn);

extern void pmap_lock_phys_page(
	ppnum_t pn);

extern void pmap_unlock_phys_page(
	ppnum_t pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void pmap_page_protect(          /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void(pmap_zero_page)(
	ppnum_t pn);

extern void(pmap_zero_page_with_options)(
	ppnum_t pn,
	int options);

extern void(pmap_zero_part_page)(
	ppnum_t pn,
	vm_offset_t offset,
	vm_size_t len);

extern void(pmap_copy_page)(
	ppnum_t src,
	ppnum_t dest,
	int options);

extern void(pmap_copy_part_page)(
	ppnum_t src,
	vm_offset_t src_offset,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t src,
	ppnum_t dst,
	vm_offset_t dst_offset,
	vm_size_t len);

extern void(pmap_copy_part_rpage)(
	ppnum_t src,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t phys,
	unsigned int options,
	void *arg);

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate
                                                         * cache based on
                                                         * page number sent */
	ppnum_t pn,
	vm_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

extern void *pmap_map_compressor_page(
	ppnum_t);

extern void pmap_unmap_compressor_page(
	ppnum_t,
	void*);

/**
 * The following declarations are meant to provide a uniform interface by which the VM layer can
 * pass batches of pages to the pmap layer directly, in the various page list formats natively
 * used by the VM.  If a new type of list is to be added, the various structures and iterator
 * functions below should be updated to understand it, and then it should "just work" with the
 * pmap layer.
 */

/* The various supported page list types. */
__enum_decl(unified_page_list_type_t, uint8_t, {
	/* Universal page list array, essentially an array of ppnum_t. */
	UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY,
	/**
	 * Singly-linked list of vm_page_t, using vmp_snext field.
	 * This is typically used to construct local lists of pages to be freed.
	 */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST,
	/* Doubly-linked queue of vm_page_t's associated with a VM object, using vmp_listq field. */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q,
	/* Doubly-linked queue of vm_page_t's in a FIFO queue or global free list, using vmp_pageq field. */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q,
});

/* Uniform data structure encompassing the various page list types handled by the VM layer. */
typedef struct {
	union {
		/* Base address and size (in pages) of UPL array for type UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */
		struct {
			upl_page_info_array_t upl_info;
			unsigned int upl_size;
		} upl;
		/* Head of singly-linked vm_page_t list for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST */
		vm_page_t page_slist;
		/* Head of queue for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q and UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q */
		void *pageq; /* vm_page_queue_head_t* */
	};
	unified_page_list_type_t type;
} unified_page_list_t;

/* Uniform data structure representing an iterator position within a unified_page_list_t object. */
typedef struct {
	/* Pointer to list structure from which this iterator was created. */
	const unified_page_list_t *list;
	union {
		/* Position within UPL array, for UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */
		unsigned int upl_index;
		/* Position within page list or page queue, for all other types */
		vm_page_t pageq_pos;
	};
} unified_page_list_iterator_t;

extern void unified_page_list_iterator_init(
	const unified_page_list_t *page_list,
	unified_page_list_iterator_t *iter);

extern void unified_page_list_iterator_next(unified_page_list_iterator_t *iter);

extern bool unified_page_list_iterator_end(const unified_page_list_iterator_t *iter);

extern ppnum_t unified_page_list_iterator_page(
	const unified_page_list_iterator_t *iter,
	bool *is_fictitious);

extern vm_page_t unified_page_list_iterator_vm_page(
	const unified_page_list_iterator_t *iter);
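/*
 * Illustrative sketch (not part of the interface): walking every page in a
 * unified page list with the iterator, independent of the underlying list
 * format ("page_list" is a hypothetical, fully initialized list).
 *
 *	unified_page_list_iterator_t iter;
 *	for (unified_page_list_iterator_init(page_list, &iter);
 *	    !unified_page_list_iterator_end(&iter);
 *	    unified_page_list_iterator_next(&iter)) {
 *		bool is_fictitious = false;
 *		const ppnum_t pn = unified_page_list_iterator_page(&iter, &is_fictitious);
 *		if (!is_fictitious) {
 *			// operate on pn
 *		}
 *	}
 */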
extern void pmap_batch_set_cache_attributes(
	const unified_page_list_t *,
	unsigned int);
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);


/**
 * pmap entry point for performing platform-specific integrity checks and cleanup when
 * the VM is about to free a page. This function will typically at least validate
 * that the page has no outstanding mappings or other references, and depending
 * upon the platform may also take additional steps to reset page state.
 *
 * @param pn The page that is about to be freed by the VM.
 */
extern void pmap_recycle_page(ppnum_t pn);

/*
 * debug/assertions.  pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern bool pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif


/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage
                                         * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(                 /* Copy range of mappings,
                                         * if desired. */
	pmap_t dest,
	pmap_t source,
	vm_map_offset_t dest_va,
	vm_map_size_t size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory
                                         * attributes */
	pmap_t pmap,
	vm_map_offset_t va,
	vm_map_size_t size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

/*
 *	Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t pmap;                                    \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t pmap;                                    \
                                                        \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	if (!batch_pmap_op) {                                                   \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	        (object)->set_cache_attr = TRUE;                                \
	}                                                                       \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	    cache_attr, num_pages, batch_pmap_op)                       \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        const unified_page_list_t __pmap_batch_list = {         \
	                .upl = {.upl_info = (user_page_list),           \
	                        .upl_size = (num_pages),},              \
	                .type = UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY,       \
	        };                                                      \
	        pmap_batch_set_cache_attributes(                        \
	                &__pmap_batch_list,                             \
	                (cache_attr));                                  \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long pfc_cpus;
	long pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int mask,
	unsigned int options);
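/*
 * Illustrative sketch (not part of the interface): because range support is
 * platform-dependent, callers pair the range call with a page-by-page
 * fallback (the loop is shown as pseudocode).
 *
 *	if (!pmap_clear_refmod_range_options(pmap, start, end,
 *	    VM_MEM_MODIFIED, options)) {
 *		// for each page pn backing [start, end):
 *		pmap_clear_refmod_options(pn, VM_MEM_MODIFIED, options, NULL);
 *	}
 */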
extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t prot,
	unsigned int options,
	void *arg);

extern void(pmap_pageable)(
	pmap_t pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

extern void pmap_set_shared_region(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

extern kern_return_t pmap_fork_nest(
	pmap_t old_pmap,
	pmap_t new_pmap);

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern const pmap_t kernel_pmap;        /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/*
 * N.B.  These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_TEST        0x4     /* pmap will be used for testing purposes only */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT | PMAP_CREATE_TEST)

#define PMAP_CREATE_NESTED      0       /* this flag is a nop on x86 */

#else

#define PMAP_CREATE_STAGE2      0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP 0x4
#else
#define PMAP_CREATE_DISABLE_JOP 0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#define PMAP_CREATE_X86_64      0
#if CONFIG_ROSETTA
#define PMAP_CREATE_ROSETTA     0x20
#else
#define PMAP_CREATE_ROSETTA     0
#endif /* CONFIG_ROSETTA */

#define PMAP_CREATE_TEST        0x40    /* pmap will be used for testing purposes only */

#define PMAP_CREATE_NESTED      0x80    /* pmap will not try to allocate a subpage root table to save space */

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS \
	(PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | \
	PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA | \
	PMAP_CREATE_TEST | PMAP_CREATE_NESTED)

#endif /* __x86_64__ */
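/*
 * Illustrative sketch (not part of the interface): creation flags are OR-ed
 * together and must stay within PMAP_CREATE_KNOWN_FLAGS.  For example, a
 * hypothetical 64-bit nested (shared-region style) pmap with no ledger:
 *
 *	unsigned int flags = PMAP_CREATE_64BIT | PMAP_CREATE_NESTED;
 *	assert((flags & ~PMAP_CREATE_KNOWN_FLAGS) == 0);
 *	pmap_t nested_pmap = pmap_create_options(NULL, 0, flags);
 */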
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
                                                 * KERN_RESOURCE_SHORTAGE
                                                 * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
                                                 * but don't enter mapping
                                                 */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
                                                 * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE       0x200   /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE     0x400   /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE  0x1000  /* allow protections to be
                                                 * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE        0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif
#define PMAP_OPTIONS_XNU_USER_DEBUG     0x20000

/* Indicates that pmap_enter() or pmap_remove() is being called with preemption already disabled. */
#define PMAP_OPTIONS_NOPREEMPT  0x80000

#if CONFIG_SPTM
/* Requests pmap_disconnect() to reset the page frame type (only meaningful for SPTM systems) */
#define PMAP_OPTIONS_RETYPE     0x100000
#endif /* CONFIG_SPTM */

#define PMAP_OPTIONS_MAP_TPRO   0x40000

#define PMAP_OPTIONS_RESERVED_MASK 0xFF000000   /* encoding space reserved for internal pmap use */
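/*
 * Illustrative sketch (not part of the interface): passing options to
 * pmap_enter_options().  With PMAP_OPTIONS_NOWAIT the pmap layer may not
 * block, so the caller must be prepared for KERN_RESOURCE_SHORTAGE.
 *
 *	kr = pmap_enter_options(pmap, va, pn, VM_PROT_READ, VM_PROT_NONE,
 *	    0, FALSE, PMAP_OPTIONS_NOWAIT, NULL, PMAP_MAPPING_TYPE_INFER);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// back off and retry from a context that may block
 *	}
 */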
#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void pmap_change_wiring( /* Specify pageability */
	pmap_t pmap,
	vm_map_offset_t va,
	boolean_t wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int options);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/* Inform the pmap layer that the XO register is repurposed for this map */
extern void pmap_set_tpro(pmap_t pmap);

/* Ask the pmap layer if there is a TPRO entry in this map. */
extern bool pmap_get_tpro(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

extern bool pmap_is_nested(pmap_t pmap);

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);

/* Asks the pmap layer for number of bits used for VA address. */
extern uint32_t pmap_user_va_bits(pmap_t pmap);
extern uint32_t pmap_kernel_va_bits(void);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t pmap,
	vm_map_offset_t va,
	int *disp);
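/*
 * Illustrative sketch (not part of the interface): querying the disposition
 * of a hypothetical user address "va" in "user_pmap" and testing the
 * returned bits.
 *
 *	int disp = 0;
 *	kern_return_t kr = pmap_query_page_info(user_pmap, va, &disp);
 *	if (kr == KERN_SUCCESS && (disp & PMAP_QUERY_PAGE_COMPRESSED)) {
 *		// the page currently lives in the compressor
 *	}
 */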
extern bool pmap_in_ppl(void);

extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Indicates whether the device supports register-level MMIO access control.
 *
 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register
 * writability at page granularity, this mechanism allows specific registers
 * on a read-mostly page to be written using a dedicated guarded mode trap
 * without requiring a full PPL driver extension.
 *
 * @return True if the device supports register-level MMIO access control.
 */
extern bool pmap_has_iofilter_protected_write(void);

/**
 * Performs a write to the I/O register specified by addr on supported devices.
 *
 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this
 * function goes over the sorted I/O filter entry table. If there is a hit, the
 * write is performed from Guarded Mode. Otherwise, the write is performed from
 * Normal Mode (kernel mode). Note that you can still hit an exception if the
 * register is owned by PPL but not allowed by an io-filter-entry in the device tree.
 *
 * @note On unsupported devices, this function will panic.
 *
 * @param addr The address of the register.
 * @param value The value to be written.
 * @param width The width of the I/O register, supported values are 1, 2, 4 and 8.
 */
extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width);
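/*
 * Illustrative sketch (not part of the interface): a guarded 32-bit register
 * write on devices with I/O filter support, with a plain MMIO store as the
 * fallback ("reg_addr" and "val" are hypothetical).
 *
 *	if (pmap_has_iofilter_protected_write()) {
 *		pmap_iofilter_protected_write(reg_addr, val, sizeof(uint32_t));
 *	} else {
 *		*(volatile uint32_t *)reg_addr = val;
 *	}
 */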
extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_verify_size(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);

extern bool pmap_is_page_restricted(ppnum_t pn);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */


/*
 * Returns a subset of pmap_cs non-default configuration,
 * e.g. loosening up of some restrictions through pmap_cs or amfi
 * boot-args. The return value is a bit field with possible bits
 * described below. If default, the function will return 0. Note that
 * this does not work the other way: 0 does not imply that pmap_cs
 * runs in default configuration, and only a small configuration
 * subset is returned by this function.
 *
 * Never assume the system is "secure" if this returns 0.
 */
extern int pmap_cs_configuration(void);

#if XNU_KERNEL_PRIVATE

typedef enum {
	PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)

extern bool pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);

#endif
#if defined(__arm64__)

/**
 * Check if a particular pmap is used for stage2 translations or not.
 */
extern bool
pmap_performs_stage2_translations(const pmap_t pmap);

#endif /* defined(__arm64__) */

extern ppnum_t kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#endif /* XNU_KERNEL_PRIVATE */

#if CONFIG_SPTM
/*
 * The TrustedExecutionMonitor address space data structure is kept within the
 * pmap structure in order to provide a coherent API to the rest of the kernel
 * for working with code signing monitors.
 *
 * However, many parts of the kernel don't have visibility into the pmap data
 * structure, as it is opaque unless you're in the Mach portion of the kernel.
 * To alleviate this, we provide pmap APIs to the rest of the kernel.
 */
#include <TrustedExecutionMonitor/API.h>

/*
 * All pages allocated by TXM are also kept within the TXM VM object, which allows
 * tracking them for accounting and debugging purposes.
 */
extern vm_object_t txm_vm_object;

/**
 * Acquire the pointer of the kernel pmap being used for the system.
 */
extern pmap_t
pmap_txm_kernel_pmap(void);

/**
 * Acquire the TXM address space object stored within the pmap.
 */
extern TXMAddressSpace_t*
pmap_txm_addr_space(const pmap_t pmap);

/**
 * Set the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_addr_space(
	pmap_t pmap,
	TXMAddressSpace_t *txm_addr_space);

/**
 * Set the trust level of the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_trust_level(
	pmap_t pmap,
	CSTrust_t trust_level);

/**
 * Get the trust level of the TXM address space object within the pmap.
 */
extern kern_return_t
pmap_txm_get_trust_level_kdp(
	pmap_t pmap,
	CSTrust_t *trust_level);

/**
 * Get the address range of the JIT region within the pmap, if any.
 */
kern_return_t
pmap_txm_get_jit_address_range_kdp(
	pmap_t pmap,
	uintptr_t *jit_region_start,
	uintptr_t *jit_region_end);

/**
 * Take a shared lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_shared_lock(pmap_t pmap);

/**
 * Release the shared lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_shared_lock(pmap_t pmap);

/**
 * Take an exclusive lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_exclusive_lock(pmap_t pmap);

/**
 * Release the exclusive lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_exclusive_lock(pmap_t pmap);
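/*
 * Illustrative sketch (not part of the interface): the intended bracketing
 * pattern for a read-only operation on a pmap's TXM address space under the
 * shared lock.
 *
 *	pmap_txm_acquire_shared_lock(pmap);
 *	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
 *	// ... inspect txm_addr_space ...
 *	pmap_txm_release_shared_lock(pmap);
 */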
/**
 * Transfer a page to the TXM_DEFAULT type after resolving its mapping from its
 * virtual to physical address.
 */
extern void
pmap_txm_transfer_page(const vm_address_t addr);

/**
 * Grab an available page from the VM free list, add it to the TXM VM object and
 * then transfer it to be owned by TXM.
 *
 * Returns the physical address of the page allocated.
 */
extern vm_map_address_t
pmap_txm_allocate_page(void);

#endif /* CONFIG_SPTM */


#endif /* KERNEL_PRIVATE */

#endif /* _VM_PMAP_H_ */