/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <kern/trustcache.h>

#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
#include <CoreEntitlements/CoreEntitlements.h>
#endif

#ifdef KERNEL_PRIVATE

/*
 * The following is a description of the interface to the
 * machine-dependent "physical map" data structure.  The module
 * must provide a "pmap_t" data type that represents the
 * set of valid virtual-to-physical addresses for one user
 * address space.  [The kernel address space is represented
 * by a distinguished "pmap_t".]  The routines described manage
 * this type, install and update virtual-to-physical mappings,
 * and perform operations on physical addresses common to
 * many address spaces.
 */

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t copypv(
	addr64_t     source,
	addr64_t     sink,
	unsigned int size,
	int          which);
#define cppvPsnk       1
#define cppvPsnkb     31
#define cppvPsrc       2
#define cppvPsrcb     30
#define cppvFsnk       4
#define cppvFsnkb     29
#define cppvFsrc       8
#define cppvFsrcb     28
#define cppvNoModSnk  16
#define cppvNoModSnkb 27
#define cppvNoRefSrc  32
#define cppvNoRefSrcb 26
#define cppvKmap      64        /* Use the kernel's vm_map */
#define cppvKmapb     25
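/*
 * Illustrative sketch (an assumption, not code from this interface): a
 * caller copying one page from a physical source page into a kernel-virtual
 * sink might combine the flags above as follows; "src_paddr" and "dst_kva"
 * are hypothetical names.
 *
 *	kern_return_t kr;
 *
 *	kr = copypv((addr64_t)src_paddr,   // physical source
 *	    (addr64_t)dst_kva,             // virtual sink
 *	    PAGE_SIZE,                     // bytes to copy
 *	    cppvPsrc | cppvKmap);          // source is physical; sink is in the kernel's vm_map
 */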

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
#include <mach/mach_types.h>
#include <vm/memory_types.h>

/*
 * Routines used during BSD process creation.
 */

extern pmap_t pmap_create_options(      /* Create a pmap_t. */
	ledger_t      ledger,
	vm_map_size_t size,
	unsigned int  flags);

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
/**
 * Informs the pmap layer that a process will be running with user JOP disabled,
 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
 *
 * @note This function cannot be used once the target process has started
 * executing code. It is intended for cases where user JOP is disabled based on
 * the code signature (e.g., special "keys-off" entitlements), which is too late
 * to change the flags passed to pmap_create_options.
 *
 * @param pmap The pmap belonging to the target process
 */
extern void pmap_disable_user_jop(
	pmap_t pmap);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
#endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */

#ifdef MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>

/*
 * Routines used for initialization.
 * There is traditionally also a pmap_bootstrap,
 * used very early by machine-dependent code,
 * but it is not part of the interface.
 *
 * LP64todo -
 * These interfaces are tied to the size of the
 * kernel pmap - and therefore use the "local"
 * vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size, vm_size_t alignment);    /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size);                /* Early memory allocation */

extern unsigned int pmap_free_pages(void);      /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern unsigned int pmap_free_pages_span(void); /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp);       /* allocate vm_page structs */

extern void pmap_init(void);    /* Initialization, once we have kernel virtual memory. */

extern void mapping_adjust(void);       /* Adjust free mapping count */

extern void mapping_free_prime(void);   /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 * If machine/pmap.h defines MACHINE_PAGES, it must implement
 * the above functions.  The pmap module has complete control.
 * Otherwise, it must implement the following functions:
 *	pmap_free_pages
 *	pmap_virtual_space
 *	pmap_next_page
 *	pmap_init
 * and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 * using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 * and pmap_enter.  pmap_free_pages may over-estimate the number
 * of unused physical pages, and pmap_next_page may return FALSE
 * to indicate that there are no more unused pages to return.
 * However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif
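/*
 * Illustrative sketch (an assumption that mirrors the contract described
 * above, not code from this file): a startup-time consumer would typically
 * drain the unused physical pages one at a time until pmap_next_page()
 * reports exhaustion by returning FALSE.
 *
 *	ppnum_t pn;
 *
 *	while (pmap_next_page(&pn)) {
 *		// hand physical page "pn" to the VM's free list
 *	}
 */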

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t *virtual_start,
	vm_offset_t *virtual_end);
#endif  /* MACHINE_PAGES */

/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void pmap_reference(pmap_t pmap);        /* Gain a reference. */
extern void pmap_destroy(pmap_t pmap);          /* Release a reference. */
extern void pmap_switch(pmap_t);
extern void pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t pmap_enter(        /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

extern kern_return_t pmap_enter_options(
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);
extern kern_return_t pmap_enter_options_addr(
	pmap_t          pmap,
	vm_map_offset_t v,
	pmap_paddr_t    pa,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired,
	unsigned int    options,
	void            *arg);
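/*
 * Illustrative sketch (an assumption, not code from this file): entering a
 * single wired, read/write kernel mapping for physical page "pn" at virtual
 * address "va" might look like this.
 *
 *	kern_return_t kr;
 *
 *	kr = pmap_enter(kernel_pmap, va, pn,
 *	    VM_PROT_READ | VM_PROT_WRITE,  // protection
 *	    VM_PROT_NONE,                  // fault type
 *	    0,                             // flags
 *	    TRUE);                         // wired
 */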

extern void pmap_remove_some_phys(
	pmap_t  pmap,
	ppnum_t pn);

extern void pmap_lock_phys_page(
	ppnum_t pn);

extern void pmap_unlock_phys_page(
	ppnum_t pn);


/*
 * Routines that operate on physical addresses.
 */

extern void pmap_page_protect(  /* Restrict access to page. */
	ppnum_t   phys,
	vm_prot_t prot);

extern void pmap_page_protect_options(  /* Restrict access to page. */
	ppnum_t      phys,
	vm_prot_t    prot,
	unsigned int options,
	void         *arg);

extern void(pmap_zero_page)(
	ppnum_t pn);

extern void(pmap_zero_part_page)(
	ppnum_t     pn,
	vm_offset_t offset,
	vm_size_t   len);

extern void(pmap_copy_page)(
	ppnum_t src,
	ppnum_t dest);

extern void(pmap_copy_part_page)(
	ppnum_t     src,
	vm_offset_t src_offset,
	ppnum_t     dst,
	vm_offset_t dst_offset,
	vm_size_t   len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t src,
	ppnum_t     dst,
	vm_offset_t dst_offset,
	vm_size_t   len);

extern void(pmap_copy_part_rpage)(
	ppnum_t     src,
	vm_offset_t src_offset,
	vm_offset_t dst,
	vm_size_t   len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t      phys,
	unsigned int options,
	void         *arg);
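/*
 * Illustrative sketch (an assumption, not code from this file): when all
 * mappings of a physical page are torn down, the value returned by
 * pmap_disconnect() can be tested against the VM_MEM_MODIFIED /
 * VM_MEM_REFERENCED masks defined later in this header to decide whether
 * the page content must be preserved.
 *
 *	unsigned int refmod;
 *
 *	refmod = pmap_disconnect(pn);
 *	if (refmod & VM_MEM_MODIFIED) {
 *		// page was dirtied through some mapping; clean it
 *		// before discarding
 *	}
 */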

extern kern_return_t(pmap_attribute_cache_sync)(        /* Flush appropriate cache based on page number sent */
	ppnum_t                     pn,
	vm_size_t                   size,
	vm_machine_attribute_t      attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern void pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

extern void *pmap_map_compressor_page(
	ppnum_t);

extern void pmap_unmap_compressor_page(
	ppnum_t,
	void*);

#if defined(__arm__) || defined(__arm64__)
extern bool pmap_batch_set_cache_attributes(
	upl_page_info_array_t,
	unsigned int,
	unsigned int);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions.  pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern bool pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif


/*
 * Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void pmap_collect(pmap_t pmap);  /* Perform garbage collection, if any */
#endif
/*
 * Optional routines
 */
extern void(pmap_copy)(         /* Copy range of mappings, if desired. */
	pmap_t          dest,
	pmap_t          source,
	vm_map_offset_t dest_va,
	vm_map_size_t   size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(   /* Get/Set special memory attributes */
	pmap_t                      pmap,
	vm_map_offset_t             va,
	vm_map_size_t               size,
	vm_machine_attribute_t      attribute,
	vm_machine_attribute_val_t* value);

/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t pmap;                                    \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if (pmap != pmap_kernel())                      \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));      \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                \
	pmap_t pmap;                                    \
	                                                \
	pmap = (thr)->map->pmap;                        \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 * Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	           flags, wired, result)                                \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __options = 0;                                  \
	vm_object_t     __obj;                                          \
	                                                                \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __options |= PMAP_OPTIONS_INTERNAL;                     \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __options |= PMAP_OPTIONS_REUSABLE;                     \
	}                                                               \
	result = pmap_enter_options(__pmap,                             \
	    (virtual_address),                                          \
	    VM_PAGE_GET_PHYS_PAGE(__page),                              \
	    (protection),                                               \
	    (fault_type),                                               \
	    (flags),                                                    \
	    (wired),                                                    \
	    __options,                                                  \
	    NULL);                                                      \
	MACRO_END
#endif  /* !PMAP_ENTER */
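/*
 * Illustrative sketch (an assumption, not code from this file): VM-layer
 * callers use PMAP_ENTER with a vm_page_t rather than calling pmap_enter()
 * directly, so the internal/reusable options are derived from the page and
 * its object automatically.
 *
 *	kern_return_t kr;
 *
 *	PMAP_ENTER(map->pmap, vaddr, m, VM_PROT_READ, VM_PROT_NONE,
 *	    0, FALSE, kr);
 *	if (kr != KERN_SUCCESS) {
 *		// retry or back off, e.g. after waiting for memory
 *	}
 */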

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,    \
	                   page, protection,                            \
	                   fault_type, flags, wired, options, result)   \
	MACRO_BEGIN                                                     \
	pmap_t          __pmap = (pmap);                                \
	vm_page_t       __page = (page);                                \
	int             __extra_options = 0;                            \
	vm_object_t     __obj;                                          \
	                                                                \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;               \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;               \
	}                                                               \
	result = pmap_enter_options_addr(__pmap,                        \
	    (virtual_address),                                          \
	    (((pmap_paddr_t)                                            \
	    VM_PAGE_GET_PHYS_PAGE(__page)                               \
	    << PAGE_SHIFT)                                              \
	    + fault_phys_offset),                                       \
	    (protection),                                               \
	    (fault_type),                                               \
	    (flags),                                                    \
	    (wired),                                                    \
	    (options) | __extra_options,                                \
	    NULL);                                                      \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	if (!batch_pmap_op) {                                                   \
	        pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	        object->set_cache_attr = TRUE;                                  \
	}                                                                       \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	                          cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        (void)pmap_batch_set_cache_attributes(                  \
	                (user_page_list),                               \
	                (num_pages),                                    \
	                (cache_attr));                                  \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,               \
	                          cache_attr, num_pages, batch_pmap_op) \
	MACRO_BEGIN                                                     \
	if ((batch_pmap_op)) {                                          \
	        unsigned int __page_idx = 0;                            \
	        while (__page_idx < (num_pages)) {                      \
	                pmap_set_cache_attributes(                      \
	                        user_page_list[__page_idx].phys_addr,   \
	                        (cache_attr));                          \
	                __page_idx++;                                   \
	        }                                                       \
	        (object)->set_cache_attr = TRUE;                        \
	}                                                               \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)                            \
{                                                               \
	if (VMP_ERROR_GET(page)) {                              \
	        panic("VM page %p should not have an error\n",  \
	            (page));                                    \
	}                                                       \
}

/*
 * Routines to manage reference/modify bits based on
 * physical addresses, simulating them if not provided
 * by the hardware.
 */
struct pfc {
	long pfc_cpus;
	long pfc_invalid_global;
};

typedef struct pfc pmap_flush_context;

/* Clear reference bit */
extern void pmap_clear_reference(ppnum_t pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t pn);
/* Set modify bit */
extern void pmap_set_modify(ppnum_t pn);
/* Clear modify bit */
extern void pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded.  If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t           pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int     mask,
	unsigned int     options);


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
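/*
 * Illustrative sketch (an assumption modeled on how batched refmod updates
 * can use this interface, not code from this file): TLB flushes for a batch
 * of updates can be deferred by passing PMAP_OPTIONS_NOFLUSH together with a
 * pmap_flush_context, then issued once at the end.
 *
 *	pmap_flush_context flush_context;
 *
 *	pmap_flush_context_init(&flush_context);
 *	// for each page "pn" in the batch:
 *	pmap_clear_refmod_options(pn, VM_MEM_REFERENCED,
 *	    PMAP_OPTIONS_NOFLUSH, (void *)&flush_context);
 *	// one deferred TLB flush for the whole batch:
 *	pmap_flush(&flush_context);
 */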

/*
 * Routines that operate on ranges of virtual addresses.
 */
extern void pmap_protect(       /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot);

extern void pmap_protect_options(       /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_pageable)(
	pmap_t          pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

#if __arm64__
#define PMAP_FORK_NEST 1
extern kern_return_t pmap_fork_nest(
	pmap_t old_pmap,
	pmap_t new_pmap,
	vm_map_offset_t *nesting_start,
	vm_map_offset_t *nesting_end);
#endif /* __arm64__ */

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t pmap_is_noencrypt(ppnum_t);
extern void pmap_set_noencrypt(ppnum_t pn);
extern void pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern const pmap_t kernel_pmap;        /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100   /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT       0x1

#if __x86_64__

#define PMAP_CREATE_EPT         0x2
#define PMAP_CREATE_TEST        0x4     /* pmap will be used for testing purposes only */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT | PMAP_CREATE_TEST)

#else

#define PMAP_CREATE_STAGE2      0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP 0x4
#else
#define PMAP_CREATE_DISABLE_JOP 0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#define PMAP_CREATE_X86_64      0
#if CONFIG_ROSETTA
#define PMAP_CREATE_ROSETTA     0x20
#else
#define PMAP_CREATE_ROSETTA     0
#endif /* CONFIG_ROSETTA */

#define PMAP_CREATE_TEST        0x40    /* pmap will be used for testing purposes only */

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS \
	(PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | \
	PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA | PMAP_CREATE_TEST)

#endif /* __x86_64__ */
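/*
 * Illustrative sketch (an assumption, not code from this file): creating a
 * 64-bit user pmap with an explicit flag combination; "task_ledger" is a
 * hypothetical ledger.
 *
 *	pmap_t new_pmap;
 *	unsigned int flags = PMAP_CREATE_64BIT;
 *
 *	assert((flags & ~PMAP_CREATE_KNOWN_FLAGS) == 0);
 *	new_pmap = pmap_create_options(task_ledger, 0, flags);
 *	if (new_pmap == PMAP_NULL) {
 *		// creation failed, e.g. resource shortage
 *	}
 */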

#define PMAP_OPTIONS_NOWAIT             0x1     /* don't block, return
                                                 * KERN_RESOURCE_SHORTAGE
                                                 * instead */
#define PMAP_OPTIONS_NOENTER            0x2     /* expand pmap if needed
                                                 * but don't enter mapping */
#define PMAP_OPTIONS_COMPRESSOR         0x4     /* credit the compressor for
                                                 * this operation */
#define PMAP_OPTIONS_INTERNAL           0x8     /* page from internal object */
#define PMAP_OPTIONS_REUSABLE           0x10    /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH            0x20    /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD           0x40    /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT           0x80    /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE             0x100   /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE       0x200   /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE     0x400   /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
                                                    * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE  0x1000  /* allow protections to be
                                                 * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE        0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED          0x8000
#define PMAP_OPTIONS_FF_WIRED           0x10000
#endif
#define PMAP_OPTIONS_XNU_USER_DEBUG     0x20000

#define PMAP_OPTIONS_MAP_TPRO           0x40000

#define PMAP_OPTIONS_RESERVED_MASK      0xFF000000 /* encoding space reserved for internal pmap use */
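/*
 * Illustrative sketch (an assumption, not code from this file): a caller
 * that must not block can pass PMAP_OPTIONS_NOWAIT and handle the resource
 * shortage itself.
 *
 *	kern_return_t kr;
 *
 *	kr = pmap_enter_options(pmap, va, pn, VM_PROT_READ, VM_PROT_NONE,
 *	    0, FALSE, PMAP_OPTIONS_NOWAIT, NULL);
 *	if (kr == KERN_RESOURCE_SHORTAGE) {
 *		// pmap could not expand without blocking; retry later
 *	}
 */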

#if !defined(__LP64__)
extern vm_offset_t pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void pmap_change_wiring( /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void pmap_remove(        /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void pmap_remove_options(        /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);

extern void fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/* Inform the pmap layer that the XO register is repurposed for this map. */
extern void pmap_set_tpro(pmap_t pmap);

/* Ask the pmap layer if there is a TPRO entry in this map. */
extern bool pmap_get_tpro(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */
extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);

/* Asks the pmap layer for the number of bits used for VA addressing. */
extern uint32_t pmap_user_va_bits(pmap_t pmap);
extern uint32_t pmap_kernel_va_bits(void);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);
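/*
 * Illustrative sketch (an assumption, not code from this file): the
 * disposition returned by pmap_query_page_info() is a bit field built from
 * the PMAP_QUERY_PAGE_* values above.
 *
 *	int disp;
 *
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS) {
 *		if (disp & PMAP_QUERY_PAGE_PRESENT) {
 *			// a mapping exists at "va"
 *		} else if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
 *			// page content is held by the compressor
 *		}
 *	}
 */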

extern bool pmap_in_ppl(void);

extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Indicates whether the device supports register-level MMIO access control.
 *
 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register
 * writability at page granularity, this mechanism allows specific registers
 * on a read-mostly page to be written using a dedicated guarded mode trap
 * without requiring a full PPL driver extension.
 *
 * @return True if the device supports register-level MMIO access control.
 */
extern bool pmap_has_iofilter_protected_write(void);

/**
 * Performs a write to the I/O register specified by addr on supported devices.
 *
 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this
 * function goes over the sorted I/O filter entry table. If there is a hit, the
 * write is performed from Guarded Mode. Otherwise, the write is performed from
 * Normal Mode (kernel mode). Note that you can still hit an exception if the
 * register is owned by PPL but not allowed by an io-filter-entry in the device tree.
 *
 * @note On unsupported devices, this function will panic.
 *
 * @param addr The address of the register.
 * @param value The value to be written.
 * @param width The width of the I/O register; supported values are 1, 2, 4 and 8.
 */
extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width);

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_verify_size(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */


/*
 * Returns a subset of the pmap_cs non-default configuration, e.g. the
 * loosening of some restrictions through pmap_cs or amfi boot-args.  The
 * return value is a bit field of the possible bits described below.  If the
 * configuration is default, the function returns 0.  Note that this does not
 * work the other way: 0 does not imply that pmap_cs runs in its default
 * configuration, since only a small subset of the configuration is returned
 * by this function.
 *
 * Never assume the system is "secure" if this returns 0.
 */
extern int pmap_cs_configuration(void);

#if XNU_KERNEL_PRIVATE

#if defined(__arm64__)

/**
 * Check if a particular pmap is used for stage2 translations or not.
 */
extern bool
pmap_performs_stage2_translations(const pmap_t pmap);

#endif /* defined(__arm64__) */
#endif /* XNU_KERNEL_PRIVATE */


#endif /* KERNEL_PRIVATE */

#endif /* _VM_PMAP_H_ */