/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm/pmap.h
 *	Author:	Avadis Tevanian, Jr.
 *	Date:	1985
 *
 *	Machine address mapping definitions -- machine-independent
 *	section.  [For machine-dependent section, see "machine/pmap.h".]
 */

#ifndef _VM_PMAP_H_
#define _VM_PMAP_H_

#include <mach/kern_return.h>
#include <mach/vm_param.h>
#include <mach/vm_types.h>
#include <mach/vm_attributes.h>
#include <mach/boolean.h>
#include <mach/vm_prot.h>
#include <kern/trustcache.h>

#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
#include <CoreEntitlements/CoreEntitlements.h>
#endif

#ifdef  KERNEL_PRIVATE

/*
 *	The following is a description of the interface to the
 *	machine-dependent "physical map" data structure.  The module
 *	must provide a "pmap_t" data type that represents the
 *	set of valid virtual-to-physical addresses for one user
 *	address space.  [The kernel address space is represented
 *	by a distinguished "pmap_t".]  The routines described manage
 *	this type, install and update virtual-to-physical mappings,
 *	and perform operations on physical addresses common to
 *	many address spaces.
 */

/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t    copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
#define cppvPsnk        1
#define cppvPsnkb      31
#define cppvPsrc        2
#define cppvPsrcb      30
#define cppvFsnk        4
#define cppvFsnkb      29
#define cppvFsrc        8
#define cppvFsrcb      28
#define cppvNoModSnk   16
#define cppvNoModSnkb  27
#define cppvNoRefSrc   32
#define cppvNoRefSrcb  26
#define cppvKmap       64       /* Use the kernel's vm_map */
#define cppvKmapb      25
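
/*
 * Illustrative sketch (not part of the original header): copy one page
 * from a physical source page to a kernel virtual sink.  "which" is a
 * bitmask of the cppv* flags above; here the source address is physical
 * (cppvPsrc) and the sink is a virtual address resolved in the kernel's
 * vm_map (cppvKmap).  "src_paddr" and "dst_kva" are hypothetical values
 * owned by the caller.
 *
 *	kern_return_t kr = copypv((addr64_t)src_paddr, (addr64_t)dst_kva,
 *	    PAGE_SIZE, cppvPsrc | cppvKmap);
 *	if (kr != KERN_SUCCESS) {
 *	        // handle the failed copy
 *	}
 */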

extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);

#if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
#include <mach/mach_types.h>
#include <vm/memory_types.h>

/*
 * Routines used during BSD process creation.
 */

extern pmap_t           pmap_create_options(    /* Create a pmap_t. */
	ledger_t        ledger,
	vm_map_size_t   size,
	unsigned int    flags);
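
/*
 * Illustrative sketch (not part of the original header): create a 64-bit
 * user pmap whose physical footprint is charged to "task_ledger", a
 * hypothetical ledger_t owned by the caller.  PMAP_CREATE_64BIT is defined
 * later in this header; the size argument is 0 for a regular user pmap.
 *
 *	pmap_t new_pmap = pmap_create_options(task_ledger, 0,
 *	    PMAP_CREATE_64BIT);
 *	if (new_pmap == NULL) {
 *	        // creation failed (e.g., resource shortage)
 *	}
 */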

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
/**
 * Informs the pmap layer that a process will be running with user JOP disabled,
 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
 *
 * @note This function cannot be used once the target process has started
 * executing code.  It is intended for cases where user JOP is disabled based on
 * the code signature (e.g., special "keys-off" entitlements), which is too late
 * to change the flags passed to pmap_create_options.
 *
 * @param pmap	The pmap belonging to the target process
 */
extern void             pmap_disable_user_jop(
	pmap_t          pmap);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
#endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */

#ifdef  MACH_KERNEL_PRIVATE

#include <mach_assert.h>

#include <machine/pmap.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size); /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */

extern uint_t pmap_free_pages(void); /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern uint_t pmap_free_pages_span(void); /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */

extern void pmap_init(void); /* Initialization, once we have kernel virtual memory.  */

extern void mapping_adjust(void); /* Adjust free mapping count */

extern void mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif
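
/*
 * Illustrative sketch (not part of the original header): how an early-boot
 * consumer such as pmap_startup() might drain the pool of unused physical
 * pages.  pmap_next_page() stores the next unused page number in "pn" and
 * returns FALSE once no unused pages remain.
 *
 *	ppnum_t pn;
 *	while (pmap_next_page(&pn)) {
 *	        // hand the page to the VM layer, e.g. to initialize
 *	        // its vm_page structure
 *	}
 */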

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */

/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void             pmap_reference(pmap_t pmap);    /* Gain a reference. */
extern void             pmap_destroy(pmap_t pmap); /* Release a reference. */
extern void             pmap_switch(pmap_t);
extern void             pmap_require(pmap_t pmap);

#if MACH_ASSERT
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

extern kern_return_t    pmap_enter(     /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

extern kern_return_t    pmap_enter_options(
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);
extern kern_return_t    pmap_enter_options_addr(
	pmap_t pmap,
	vm_map_offset_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);

extern void             pmap_remove_some_phys(
	pmap_t          pmap,
	ppnum_t         pn);

extern void             pmap_lock_phys_page(
	ppnum_t         pn);

extern void             pmap_unlock_phys_page(
	ppnum_t         pn);


/*
 *	Routines that operate on physical addresses.
 */

extern void             pmap_page_protect(      /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t       prot);

extern void             pmap_page_protect_options(      /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_zero_page)(
	ppnum_t         pn);

extern void(pmap_zero_part_page)(
	ppnum_t         pn,
	vm_offset_t     offset,
	vm_size_t       len);

extern void(pmap_copy_page)(
	ppnum_t         src,
	ppnum_t         dest);

extern void(pmap_copy_part_page)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_lpage)(
	vm_offset_t     src,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

extern void(pmap_copy_part_rpage)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	vm_offset_t     dst,
	vm_size_t       len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys,
	unsigned int    options,
	void            *arg);

extern kern_return_t(pmap_attribute_cache_sync)(      /* Flush appropriate
                                                       * cache based on
                                                       * page number sent */
	ppnum_t         pn,
	vm_size_t       size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

extern unsigned int(pmap_cache_attributes)(
	ppnum_t         pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern  void            pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

extern void            *pmap_map_compressor_page(
	ppnum_t);

extern void             pmap_unmap_compressor_page(
	ppnum_t,
	void*);

#if defined(__arm__) || defined(__arm64__)
/* ARM64_TODO */
extern  bool       pmap_batch_set_cache_attributes(
	upl_page_info_array_t,
	unsigned int,
	unsigned int);
#endif
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern bool pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif


/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void             pmap_collect(pmap_t pmap);/* Perform garbage
                                                   * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(                         /* Copy range of mappings,
                                                 * if desired. */
	pmap_t          dest,
	pmap_t          source,
	vm_map_offset_t dest_va,
	vm_map_size_t   size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(           /* Get/Set special memory
                                                 * attributes */
	pmap_t          pmap,
	vm_map_offset_t va,
	vm_map_size_t   size,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);

/*
 * Routines defined as macros.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_USER(thr, cpu) {                          \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if (pmap != pmap_kernel())                              \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));              \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                        \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if ((pmap) != pmap_kernel())                            \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));            \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	        PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	        PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */

#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                       \
	MACRO_BEGIN                                                     \
	pmap_t		__pmap = (pmap);                                \
	vm_page_t	__page = (page);                                \
	int		__options = 0;                                  \
	vm_object_t	__obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __options |= PMAP_OPTIONS_INTERNAL;                     \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __options |= PMAP_OPTIONS_REUSABLE;                     \
	}                                                               \
	result = pmap_enter_options(__pmap,                             \
	                            (virtual_address),                  \
	                            VM_PAGE_GET_PHYS_PAGE(__page),      \
	                            (protection),                       \
	                            (fault_type),                       \
	                            (flags),                            \
	                            (wired),                            \
	                            __options,                          \
	                            NULL);                              \
	MACRO_END
#endif  /* !PMAP_ENTER */

#ifndef PMAP_ENTER_OPTIONS
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,   \
	    page, protection,                                           \
	    fault_type, flags, wired, options, result)                  \
	MACRO_BEGIN                                                     \
	pmap_t		__pmap = (pmap);                                \
	vm_page_t	__page = (page);                                \
	int		__extra_options = 0;                            \
	vm_object_t	__obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;               \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;               \
	}                                                               \
	result = pmap_enter_options_addr(__pmap,                        \
	                            (virtual_address),                  \
	                            (((pmap_paddr_t)                    \
	                              VM_PAGE_GET_PHYS_PAGE(__page)     \
	                              << PAGE_SHIFT)                    \
	                             + fault_phys_offset),              \
	                            (protection),                       \
	                            (fault_type),                       \
	                            (flags),                            \
	                            (wired),                            \
	                            (options) | __extra_options,        \
	                            NULL);                              \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */

#ifndef PMAP_SET_CACHE_ATTR
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	        if (!batch_pmap_op) {                                           \
	                pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	                object->set_cache_attr = TRUE;                          \
	        }                                                               \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */

#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if     defined(__arm__) || defined(__arm64__)
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                   \
	    cache_attr, num_pages, batch_pmap_op)                               \
	MACRO_BEGIN                                                             \
	        if ((batch_pmap_op)) {                                          \
	                (void)pmap_batch_set_cache_attributes(                  \
	                                (user_page_list),                       \
	                                (num_pages),                            \
	                                (cache_attr));                          \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#else
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                   \
	    cache_attr, num_pages, batch_pmap_op)                               \
	MACRO_BEGIN                                                             \
	        if ((batch_pmap_op)) {                                          \
	                unsigned int __page_idx = 0;                            \
	                while (__page_idx < (num_pages)) {                      \
	                        pmap_set_cache_attributes(                      \
	                                user_page_list[__page_idx].phys_addr,   \
	                                (cache_attr));                          \
	                        __page_idx++;                                   \
	                }                                                       \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */

#define PMAP_ENTER_CHECK(pmap, page)                                    \
{                                                                       \
	if (VMP_ERROR_GET(page)) {                                      \
	        panic("VM page %p should not have an error\n",          \
	                (page));                                        \
	}                                                               \
}

/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
struct pfc {
	long    pfc_cpus;
	long    pfc_invalid_global;
};

typedef struct pfc      pmap_flush_context;

/* Clear reference bit */
extern void             pmap_clear_reference(ppnum_t     pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t     pn);
/* Set modify bit */
extern void             pmap_set_modify(ppnum_t  pn);
/* Clear modify bit */
extern void             pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t        pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits */
extern void                     pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
extern void                     pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int mask,
	unsigned int options);
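
/*
 * Illustrative sketch (not part of the original header): clear the
 * referenced bit across a range, falling back to a per-page loop on
 * platforms where the batched primitive is unsupported.  "start", "end",
 * and the page-number lookup pn_for_va() are hypothetical.
 *
 *	if (!pmap_clear_refmod_range_options(pmap, start, end,
 *	    VM_MEM_REFERENCED, 0)) {
 *	        // unsupported here; nothing was modified, so clear the
 *	        // pages one at a time instead
 *	        for (vm_map_address_t va = start; va < end; va += PAGE_SIZE) {
 *	                pmap_clear_refmod(pn_for_va(va), VM_MEM_REFERENCED);
 *	        }
 *	}
 */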


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
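
/*
 * Illustrative sketch (not part of the original header): batch TLB
 * invalidations across several pages.  Passing PMAP_OPTIONS_NOFLUSH
 * (defined later in this header) with the flush context as "arg" defers
 * the flush until the final pmap_flush().  "page_list" and "npages" are
 * hypothetical.
 *
 *	pmap_flush_context pfc;
 *	pmap_flush_context_init(&pfc);
 *	for (unsigned int i = 0; i < npages; i++) {
 *	        pmap_clear_refmod_options(page_list[i], VM_MEM_MODIFIED,
 *	            PMAP_OPTIONS_NOFLUSH, (void *)&pfc);
 *	}
 *	pmap_flush(&pfc);
 */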

/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void             pmap_protect(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot);

extern void             pmap_protect_options(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot,
	unsigned int    options,
	void            *arg);

extern void(pmap_pageable)(
	pmap_t          pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       pageable);

extern uint64_t pmap_shared_region_size_min(pmap_t map);

extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

#if __arm64__
#define PMAP_FORK_NEST 1
extern kern_return_t pmap_fork_nest(
	pmap_t old_pmap,
	pmap_t new_pmap,
	vm_map_offset_t *nesting_start,
	vm_map_offset_t *nesting_end);
#endif /* __arm64__ */

extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void             pmap_advise_pagezero_range(pmap_t, uint64_t);
#endif  /* MACH_KERNEL_PRIVATE */

extern boolean_t        pmap_is_noencrypt(ppnum_t);
extern void             pmap_set_noencrypt(ppnum_t pn);
extern void             pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

extern const pmap_t     kernel_pmap;            /* The kernel's map */
#define pmap_kernel()   (kernel_pmap)

#define VM_MEM_SUPERPAGE        0x100           /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200

/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT          0x1

#if __x86_64__

#define PMAP_CREATE_EPT            0x2
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else

#define PMAP_CREATE_STAGE2         0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP    0x4
#else
#define PMAP_CREATE_DISABLE_JOP    0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#define PMAP_CREATE_X86_64         0
#if CONFIG_ROSETTA
#define PMAP_CREATE_ROSETTA        0x20
#else
#define PMAP_CREATE_ROSETTA        0
#endif /* CONFIG_ROSETTA */

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA)

#endif /* __x86_64__ */

#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
	                                         * KERN_RESOURCE_SHORTAGE
	                                         * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
	                                         * but don't enter mapping
	                                         */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
	                                         * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200       /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400       /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
	                                            * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to
	                                         * be upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif

#define PMAP_OPTIONS_MAP_TPRO 0x40000

#if     !defined(__LP64__)
extern vm_offset_t      pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void             pmap_change_wiring(     /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void             pmap_remove(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void             pmap_remove_options(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);

extern void             fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/* Inform the pmap layer that the XO register is repurposed for this map */
extern void pmap_set_tpro(pmap_t pmap);

/* Ask the pmap layer if there is a TPRO entry in this map. */
extern bool pmap_get_tpro(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);
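
/*
 * Illustrative sketch (not part of the original header): nest a shared
 * sub-pmap into a task's pmap and then trim the nested region down to the
 * range the VM will actually use.  "task_pmap", "shared_pmap", and the
 * address/size values are hypothetical placeholders.
 *
 *	kern_return_t kr = pmap_nest(task_pmap, shared_pmap,
 *	    (addr64_t)nest_base, (uint64_t)nest_size);
 *	if (kr == KERN_SUCCESS) {
 *	        pmap_trim(task_pmap, shared_pmap,
 *	            (addr64_t)used_base, (uint64_t)used_size);
 *	}
 */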

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if there is insufficient space, or KERN_NOT_SUPPORTED if unsupported in the current
 * configuration.  This is expected to be called only from kernel debugger context,
 * so synchronization is not required.
 */

extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);
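
/*
 * Illustrative sketch (not part of the original header): dump a pmap's
 * page tables into a fixed scratch buffer from debugger context.  "buf" is
 * a hypothetical byte array; a level_mask of ~0 requests every table level.
 *
 *	uint8_t buf[16384];
 *	size_t bytes_copied = 0;
 *	kern_return_t kr = pmap_dump_page_tables(pmap, buf,
 *	    buf + sizeof(buf), ~0u, &bytes_copied);
 *	if (kr == KERN_INSUFFICIENT_BUFFER_SIZE) {
 *	        // buffer too small; the dump is truncated
 *	}
 */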

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);

#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);
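
/*
 * Illustrative sketch (not part of the original header): query the
 * disposition of the page at "va" and decode the PMAP_QUERY_PAGE_* bits.
 * "pmap" and "va" are hypothetical values owned by the caller.
 *
 *	int disp = 0;
 *	if (pmap_query_page_info(pmap, va, &disp) == KERN_SUCCESS) {
 *	        if (disp & PMAP_QUERY_PAGE_PRESENT) {
 *	                // a translation exists for va
 *	        } else if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
 *	                // the contents live in the compressor
 *	        }
 *	}
 */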

extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

extern void pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);

extern bool pmap_in_ppl(void);
extern bool pmap_has_ppl(void);

/**
 * Indicates whether the device supports register-level MMIO access control.
 *
 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register
 *       writability at page granularity, this mechanism allows specific registers
 *       on a read-mostly page to be written using a dedicated guarded mode trap
 *       without requiring a full PPL driver extension.
 *
 * @return True if the device supports register-level MMIO access control.
 */
extern bool pmap_has_iofilter_protected_write(void);

/**
 * Performs a write to the I/O register specified by addr on supported devices.
 *
 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this
 *       function walks the sorted I/O filter entry table. If there is a hit, the
 *       write is performed from Guarded Mode. Otherwise, the write is performed from
 *       Normal Mode (kernel mode). Note that you can still take an exception if the
 *       register is owned by the PPL but not allowed by an io-filter-entry in the
 *       device tree.
 *
 * @note On unsupported devices, this function will panic.
 *
 * @param addr The address of the register.
 * @param value The value to be written.
 * @param width The width of the I/O register; supported values are 1, 2, 4 and 8.
 */
extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width);
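
/*
 * Illustrative sketch (not part of the original header): perform a 4-byte
 * register write through the I/O filter when supported, falling back to a
 * direct MMIO store otherwise.  "reg_va" is a hypothetical mapped register
 * address.
 *
 *	if (pmap_has_iofilter_protected_write()) {
 *	        pmap_iofilter_protected_write(reg_va, value, 4);
 *	} else {
 *	        *(volatile uint32_t *)reg_va = (uint32_t)value;
 *	}
 */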

extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

extern void pmap_ledger_verify_size(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

extern bool pmap_is_bad_ram(ppnum_t ppn);
extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */

extern bool pmap_cs_enabled(void);


/*
 * Returns a subset of the non-default pmap_cs configuration,
 * e.g. the loosening of some restrictions through pmap_cs or amfi
 * boot-args. The return value is a bit field with possible bits
 * described below. In the default configuration, the function returns 0.
 * Note that this does not work the other way: 0 does not imply that
 * pmap_cs runs in the default configuration, and only a small
 * configuration subset is returned by this function.
 *
 * Never assume the system is "secure" if this returns 0.
 */

extern int pmap_cs_configuration(void);

extern kern_return_t pmap_cs_fork_prepare(
	pmap_t old_pmap,
	pmap_t new_pmap
	);

/*
 * The PMAP layer is responsible for holding on to the local signing key so that
 * we can re-use the code for multiple different layers. By keeping our local
 * signing public key here, we can safeguard it with PMAP_CS, and also use it
 * within PMAP_CS for validation.
 *
 * Moreover, we present an API which can be used by AMFI to query the key when
 * it needs to.
 */
#define PMAP_ECC_P384_PUBLIC_KEY_SIZE 97
extern void pmap_set_local_signing_public_key(
	const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE]
	);

extern uint8_t *pmap_get_local_signing_public_key(void);
/*
 * We require that AMFI call into the PMAP layer to unrestrict a particular
 * CDHash for local signing. This only needs to happen on arm devices, since
 * x86 devices don't have PMAP_CS.
 *
 * For now, we make the configuration available for x86 devices as well. When
 * AMFI stops calling into this API, we'll remove it.
 */
#define PMAP_SUPPORTS_RESTRICTED_LOCAL_SIGNING 1
extern void pmap_unrestrict_local_signing(
	const uint8_t cdhash[CS_CDHASH_LEN]
	);

#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
/*
 * The PMAP layer provides an API to query entitlements through the CoreEntitlements
 * layer.
 */
extern bool pmap_query_entitlements(
	pmap_t pmap,
	CEQuery_t query,
	size_t queryLength,
	CEQueryContext_t finalContext
	);
#endif

#endif  /* KERNEL_PRIVATE */

#endif  /* _VM_PMAP_H_ */