xref: /xnu-8020.140.41/osfmk/vm/pmap.h (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/pmap.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1985
62  *
63  *	Machine address mapping definitions -- machine-independent
64  *	section.  [For machine-dependent section, see "machine/pmap.h".]
65  */
66 
67 #ifndef _VM_PMAP_H_
68 #define _VM_PMAP_H_
69 
70 #include <mach/kern_return.h>
71 #include <mach/vm_param.h>
72 #include <mach/vm_types.h>
73 #include <mach/vm_attributes.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <kern/trustcache.h>
77 
78 #if __has_include(<CoreEntitlements/CoreEntitlements.h>)
79 #include <CoreEntitlements/CoreEntitlements.h>
80 #endif
81 
82 #ifdef  KERNEL_PRIVATE
83 
84 /*
85  *	The following is a description of the interface to the
86  *	machine-dependent "physical map" data structure.  The module
87  *	must provide a "pmap_t" data type that represents the
88  *	set of valid virtual-to-physical addresses for one user
89  *	address space.  [The kernel address space is represented
90  *	by a distinguished "pmap_t".]  The routines described manage
91  *	this type, install and update virtual-to-physical mappings,
92  *	and perform operations on physical addresses common to
93  *	many address spaces.
94  */
95 
/* Copy between a physical page and a virtual address */
/* LP64todo - switch to vm_map_offset_t when it grows */
extern kern_return_t    copypv(
	addr64_t source,
	addr64_t sink,
	unsigned int size,
	int which);
/*
 * Bit flags for copypv()'s "which" argument.  Each flag comes in two
 * forms: the mask used in "which", and a "...b" variant giving the same
 * bit as a big-endian bit number (mask == 1 << (31 - b); a PowerPC-era
 * numbering kept for source compatibility).
 * NOTE(review): from the names, "P..." appears to mean the corresponding
 * address is physical, "F..." a flush, and "NoMod"/"NoRef" skip the
 * modified/referenced bit updates -- confirm against the copypv()
 * implementation.
 */
#define cppvPsnk        1
#define cppvPsnkb      31
#define cppvPsrc        2
#define cppvPsrcb      30
#define cppvFsnk        4
#define cppvFsnkb      29
#define cppvFsrc        8
#define cppvFsrcb      28
#define cppvNoModSnk   16
#define cppvNoModSnkb  27
#define cppvNoRefSrc   32
#define cppvNoRefSrcb  26
#define cppvKmap       64       /* Use the kernel's vm_map */
#define cppvKmapb      25

/* Reports whether [first, last] contains any VM-managed physical page. */
extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);
119 
120 #if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
121 #include <mach/mach_types.h>
122 #include <vm/memory_types.h>
123 
/*
 * Routines used during BSD process creation.
 */

/*
 * Create a pmap_t for a new address space.
 * "size" is an address-space size hint and "flags" is a mask of the
 * PMAP_CREATE_* options defined later in this header (see
 * PMAP_CREATE_KNOWN_FLAGS).  The pmap's memory usage is presumably
 * charged against "ledger" -- confirm in the machine-dependent
 * implementation.
 */
extern pmap_t           pmap_create_options(    /* Create a pmap_t. */
	ledger_t        ledger,
	vm_map_size_t   size,
	unsigned int    flags);

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
/**
 * Informs the pmap layer that a process will be running with user JOP disabled,
 * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
 *
 * @note This function cannot be used once the target process has started
 * executing code.  It is intended for cases where user JOP is disabled based on
 * the code signature (e.g., special "keys-off" entitlements), which is too late
 * to change the flags passed to pmap_create_options.
 *
 * @param pmap	The pmap belonging to the target process
 */
extern void             pmap_disable_user_jop(
	pmap_t          pmap);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
148 #endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */
149 
150 #ifdef  MACH_KERNEL_PRIVATE
151 
152 #include <mach_assert.h>
153 
154 #include <machine/pmap.h>
/*
 *	Routines used for initialization.
 *	There is traditionally also a pmap_bootstrap,
 *	used very early by machine-dependent code,
 *	but it is not part of the interface.
 *
 *	LP64todo -
 *	These interfaces are tied to the size of the
 *	kernel pmap - and therefore use the "local"
 *	vm_offset_t, etc... types.
 */

extern void *pmap_steal_memory(vm_size_t size); /* Early memory allocation */
extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */

extern uint_t pmap_free_pages(void); /* report remaining unused physical pages */
#if defined(__arm__) || defined(__arm64__)
extern uint_t pmap_free_pages_span(void); /* report phys address range of unused physical pages */
#endif /* defined(__arm__) || defined(__arm64__) */

extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */

extern void pmap_init(void); /* Initialization, once we have kernel virtual memory.  */

extern void mapping_adjust(void); /* Adjust free mapping count */

extern void mapping_free_prime(void); /* Primes the mapping block release list */

#ifndef MACHINE_PAGES
/*
 *	If machine/pmap.h defines MACHINE_PAGES, it must implement
 *	the above functions.  The pmap module has complete control.
 *	Otherwise, it must implement the following functions:
 *		pmap_free_pages
 *		pmap_virtual_space
 *		pmap_next_page
 *		pmap_init
 *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
 *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
 *	and pmap_enter.  pmap_free_pages may over-estimate the number
 *	of unused physical pages, and pmap_next_page may return FALSE
 *	to indicate that there are no more unused pages to return.
 *	However, for best performance pmap_free_pages should be accurate.
 */

/*
 * Routines to return the next unused physical page.
 * pmap_next_page_hi additionally takes "might_free" -- by name, a hint
 * that the caller may later release the page (confirm in MD code).
 */
extern boolean_t pmap_next_page(ppnum_t *pnum);
extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
#ifdef __x86_64__
/* x86_64-only variants -- presumably for large-page startup allocation. */
extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
extern void pmap_hi_pages_done(void);
#endif

/*
 * Report virtual space available for the kernel.
 */
extern void pmap_virtual_space(
	vm_offset_t     *virtual_start,
	vm_offset_t     *virtual_end);
#endif  /* MACHINE_PAGES */
217 
/*
 * Routines to manage the physical map data structure.
 */
extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
extern void             pmap_reference(pmap_t pmap);    /* Gain a reference. */
extern void             pmap_destroy(pmap_t pmap); /* Release a reference. */
extern void             pmap_switch(pmap_t);    /* Switch to this pmap -- semantics in MD code */
extern void             pmap_require(pmap_t pmap); /* Validity check; by name, panics on a bad pmap -- confirm */

#if MACH_ASSERT
/* Debug builds: record the owning process (pid/name) on the pmap. */
extern void pmap_set_process(pmap_t pmap,
    int pid,
    char *procname);
#endif /* MACH_ASSERT */

/*
 * Establish a virtual-to-physical mapping for page "pn" at address "v".
 * "flags" carries per-mapping attributes (e.g. VM_MEM_* values defined
 * later in this header).
 */
extern kern_return_t    pmap_enter(     /* Enter a mapping */
	pmap_t          pmap,
	vm_map_offset_t v,
	ppnum_t         pn,
	vm_prot_t       prot,
	vm_prot_t       fault_type,
	unsigned int    flags,
	boolean_t       wired);

/* As pmap_enter, plus a PMAP_OPTIONS_* mask and an opaque argument. */
extern kern_return_t    pmap_enter_options(
	pmap_t pmap,
	vm_map_offset_t v,
	ppnum_t pn,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);
/* As pmap_enter_options, but takes a physical address, not a page number. */
extern kern_return_t    pmap_enter_options_addr(
	pmap_t pmap,
	vm_map_offset_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	void *arg);

/* Remove any mapping of physical page "pn" from this pmap only. */
extern void             pmap_remove_some_phys(
	pmap_t          pmap,
	ppnum_t         pn);

/* Lock/unlock the pmap-layer lock associated with a physical page. */
extern void             pmap_lock_phys_page(
	ppnum_t         pn);

extern void             pmap_unlock_phys_page(
	ppnum_t         pn);
272 
273 
/*
 *	Routines that operate on physical addresses.
 */

extern void             pmap_page_protect(      /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t       prot);

extern void             pmap_page_protect_options(      /* Restrict access to page. */
	ppnum_t phys,
	vm_prot_t       prot,
	unsigned int    options,        /* PMAP_OPTIONS_* mask */
	void            *arg);

/* Zero a full physical page. */
extern void(pmap_zero_page)(
	ppnum_t         pn);

/* Zero "len" bytes starting at "offset" within a physical page. */
extern void(pmap_zero_part_page)(
	ppnum_t         pn,
	vm_offset_t     offset,
	vm_size_t       len);

/* Copy one full physical page to another. */
extern void(pmap_copy_page)(
	ppnum_t         src,
	ppnum_t         dest);

/* Copy "len" bytes between offsets within two physical pages. */
extern void(pmap_copy_part_page)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

/* Copy from a virtual source into part of a physical page. */
extern void(pmap_copy_part_lpage)(
	vm_offset_t     src,
	ppnum_t         dst,
	vm_offset_t     dst_offset,
	vm_size_t       len);

/* Copy from part of a physical page to a virtual destination. */
extern void(pmap_copy_part_rpage)(
	ppnum_t         src,
	vm_offset_t     src_offset,
	vm_offset_t     dst,
	vm_size_t       len);

extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys);

extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
	ppnum_t         phys,
	unsigned int    options,        /* PMAP_OPTIONS_* mask */
	void            *arg);

extern kern_return_t(pmap_attribute_cache_sync)(      /* Flush appropriate
                                                       * cache based on
                                                       * page number sent */
	ppnum_t         pn,
	vm_size_t       size,
	vm_machine_attribute_t attribute,
	vm_machine_attribute_val_t* value);

/* Return the cache attributes currently set for a physical page. */
extern unsigned int(pmap_cache_attributes)(
	ppnum_t         pn);

/*
 * Set (override) cache attributes for the specified physical page
 */
extern  void            pmap_set_cache_attributes(
	ppnum_t,
	unsigned int);

/* Map/unmap a physical page for the compressor's use; the void* returned
 * by the map call is handed back to the unmap call. */
extern void            *pmap_map_compressor_page(
	ppnum_t);

extern void             pmap_unmap_compressor_page(
	ppnum_t,
	void*);

#if defined(__arm__) || defined(__arm64__)
/* ARM64_TODO */
/*
 * Batched form of pmap_set_cache_attributes; see the
 * PMAP_BATCH_SET_CACHE_ATTR macro below for the two-pass calling
 * pattern (a pass with doit=FALSE, then a pass with doit=TRUE).
 */
extern  boolean_t       pmap_batch_set_cache_attributes(
	ppnum_t,
	unsigned int,
	unsigned int,
	unsigned int,
	boolean_t,
	unsigned int*);
#endif
/* By name, synchronize caches for a page's data/attributes -- see MD code. */
extern void pmap_sync_page_data_phys(ppnum_t pa);
extern void pmap_sync_page_attributes_phys(ppnum_t pa);

/*
 * debug/assertions. pmap_verify_free returns true iff
 * the given physical page is mapped into no pmap.
 * pmap_assert_free() will panic() if pn is not free.
 */
extern bool pmap_verify_free(ppnum_t pn);
#if MACH_ASSERT
extern void pmap_assert_free(ppnum_t pn);
#endif


/*
 *	Sundry required (internal) routines
 */
#ifdef CURRENTLY_UNUSED_AND_UNTESTED
extern void             pmap_collect(pmap_t pmap);/* Perform garbage
                                                   * collection, if any */
#endif
/*
 *	Optional routines
 */
extern void(pmap_copy)(                         /* Copy range of mappings,
                                                 * if desired. */
	pmap_t          dest,
	pmap_t          source,
	vm_map_offset_t dest_va,
	vm_map_size_t   size,
	vm_map_offset_t source_va);

extern kern_return_t(pmap_attribute)(           /* Get/Set special memory
                                                 * attributes */
	pmap_t          pmap,
	vm_map_offset_t va,
	vm_map_size_t   size,
	vm_machine_attribute_t  attribute,
	vm_machine_attribute_val_t* value);
401 
/*
 * Routines defined as macros.
 *
 * The machine-dependent layer may define PMAP_ACTIVATE / PMAP_DEACTIVATE
 * hooks; the USER/KERNEL wrappers below expand to calls of those hooks,
 * or to nothing when the hooks are absent.  The MD layer may also
 * override any wrapper wholesale by defining it before this point.
 */
#ifndef PMAP_ACTIVATE_USER
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_USER(thr, cpu)
#else   /* PMAP_ACTIVATE */
/* Activate the thread's user pmap on "cpu"; the kernel pmap needs no switch. */
#define PMAP_ACTIVATE_USER(thr, cpu) {                  \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if (pmap != pmap_kernel())                              \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));              \
}
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_USER */

#ifndef PMAP_DEACTIVATE_USER
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_USER(thr, cpu)
#else   /* PMAP_DEACTIVATE */
/* Deactivate the thread's user pmap on "cpu". */
#define PMAP_DEACTIVATE_USER(thr, cpu) {                        \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if ((pmap) != pmap_kernel())                    \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));    \
}
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_USER */

#ifndef PMAP_ACTIVATE_KERNEL
#ifndef PMAP_ACTIVATE
#define PMAP_ACTIVATE_KERNEL(cpu)
#else   /* PMAP_ACTIVATE */
/* Activate the kernel pmap on "cpu" (no associated thread). */
#define PMAP_ACTIVATE_KERNEL(cpu)                       \
	        PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_ACTIVATE */
#endif  /* PMAP_ACTIVATE_KERNEL */

#ifndef PMAP_DEACTIVATE_KERNEL
#ifndef PMAP_DEACTIVATE
#define PMAP_DEACTIVATE_KERNEL(cpu)
#else   /* PMAP_DEACTIVATE */
/* Deactivate the kernel pmap on "cpu". */
#define PMAP_DEACTIVATE_KERNEL(cpu)                     \
	        PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
#endif  /* PMAP_DEACTIVATE */
#endif  /* PMAP_DEACTIVATE_KERNEL */
450 
#ifndef PMAP_ENTER
/*
 *	Macro to be used in place of pmap_enter()
 *
 *	Wraps pmap_enter_options(): derives PMAP_OPTIONS_INTERNAL from the
 *	page's object and PMAP_OPTIONS_REUSABLE from the page/object reuse
 *	state, then enters the mapping.  "result" receives the returned
 *	kern_return_t.  The page must not be in the error state
 *	(PMAP_ENTER_CHECK panics otherwise).
 */
#define PMAP_ENTER(pmap, virtual_address, page, protection, fault_type, \
	    flags, wired, result)                                \
	MACRO_BEGIN                                                     \
	pmap_t		__pmap = (pmap);                                \
	vm_page_t	__page = (page);                                \
	int		__options = 0;                                  \
	vm_object_t	__obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __options |= PMAP_OPTIONS_INTERNAL;                     \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __options |= PMAP_OPTIONS_REUSABLE;                     \
	}                                                               \
	result = pmap_enter_options(__pmap,                             \
	                            (virtual_address),                  \
	                            VM_PAGE_GET_PHYS_PAGE(__page),      \
	                            (protection),                               \
	                            (fault_type),                               \
	                            (flags),                            \
	                            (wired),                            \
	                            __options,                          \
	                            NULL);                              \
	MACRO_END
#endif  /* !PMAP_ENTER */
482 
#ifndef PMAP_ENTER_OPTIONS
/*
 * Macro form of pmap_enter_options_addr(): computes the physical address
 * from the vm_page plus "fault_phys_offset", derives
 * PMAP_OPTIONS_INTERNAL / PMAP_OPTIONS_REUSABLE from the page's object
 * and reuse state, and merges them into the caller-supplied "options".
 * "result" receives the returned kern_return_t.
 *
 * Fix: "fault_phys_offset" is now parenthesized in the expansion like
 * every other argument; previously an argument such as a conditional
 * expression would bind incorrectly against the "+".
 */
#define PMAP_ENTER_OPTIONS(pmap, virtual_address, fault_phys_offset,   \
	    page, protection,                                           \
	    fault_type, flags, wired, options, result)                  \
	MACRO_BEGIN                                                     \
	pmap_t		__pmap = (pmap);                                \
	vm_page_t	__page = (page);                                \
	int		__extra_options = 0;                            \
	vm_object_t	__obj;                                          \
                                                                        \
	PMAP_ENTER_CHECK(__pmap, __page)                                \
	__obj = VM_PAGE_OBJECT(__page);                                 \
	if (__obj->internal) {                                          \
	        __extra_options |= PMAP_OPTIONS_INTERNAL;               \
	}                                                               \
	if (__page->vmp_reusable || __obj->all_reusable) {              \
	        __extra_options |= PMAP_OPTIONS_REUSABLE;               \
	}                                                               \
	result = pmap_enter_options_addr(__pmap,                        \
	                            (virtual_address),                  \
	                            (((pmap_paddr_t)                    \
	                              VM_PAGE_GET_PHYS_PAGE(__page)     \
	                              << PAGE_SHIFT)                    \
	                             + (fault_phys_offset)),            \
	                            (protection),                       \
	                            (fault_type),                       \
	                            (flags),                            \
	                            (wired),                            \
	                            (options) | __extra_options,        \
	                            NULL);                              \
	MACRO_END
#endif  /* !PMAP_ENTER_OPTIONS */
515 
#ifndef PMAP_SET_CACHE_ATTR
/*
 * Apply cache attributes to a single vm_page unless the caller is
 * batching ("batch_pmap_op" true), in which case the work is deferred to
 * PMAP_BATCH_SET_CACHE_ATTR.  Also records on the owning object that a
 * cache attribute override is in effect.
 *
 * Fix: the macro arguments "batch_pmap_op", "cache_attr" and "object"
 * are now parenthesized in the expansion to avoid precedence surprises
 * with expression arguments.
 */
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	        if (!(batch_pmap_op)) {                                         \
	                pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), (cache_attr)); \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */
525 
#ifndef PMAP_BATCH_SET_CACHE_ATTR
#if     defined(__arm__) || defined(__arm64__)
/*
 * Batch-update cache attributes for "num_pages" pages described by
 * "user_page_list".  Pass 1 calls pmap_batch_set_cache_attributes() with
 * doit=FALSE for every page; if any call returns FALSE the batch is
 * abandoned.  Pass 2 either commits the batch (doit=TRUE) or falls back
 * to per-page pmap_set_cache_attributes().  Finally the object is marked
 * as having a cache-attribute override.
 *
 * Fix: the second loop's "__page_idx++" was mis-indented so that it
 * appeared to belong to the unbraced "else" branch; it actually executes
 * on every iteration (as required for the loop to terminate).  All
 * branches are now braced to make that explicit, and the macro arguments
 * are parenthesized in the expansion.
 */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                       \
	    cache_attr, num_pages, batch_pmap_op)   \
	MACRO_BEGIN                                                             \
	        if ((batch_pmap_op)) {                                          \
	                unsigned int __page_idx = 0;                            \
	                unsigned int res = 0;                                   \
	                boolean_t batch = TRUE;                                 \
	                while (__page_idx < (num_pages)) {                      \
	                        if (!pmap_batch_set_cache_attributes(           \
	                                (user_page_list)[__page_idx].phys_addr, \
	                                (cache_attr),                           \
	                                (num_pages),                            \
	                                (__page_idx),                           \
	                                FALSE,                                  \
	                                (&res))) {                              \
	                                batch = FALSE;                          \
	                                break;                                  \
	                        }                                               \
	                        __page_idx++;                                   \
	                }                                                       \
	                __page_idx = 0;                                         \
	                res = 0;                                                \
	                while (__page_idx < (num_pages)) {                      \
	                        if (batch) {                                    \
	                                (void)pmap_batch_set_cache_attributes(  \
	                                (user_page_list)[__page_idx].phys_addr, \
	                                (cache_attr),                           \
	                                (num_pages),                            \
	                                (__page_idx),                           \
	                                TRUE,                                   \
	                                (&res));                                \
	                        } else {                                        \
	                                pmap_set_cache_attributes(              \
	                                (user_page_list)[__page_idx].phys_addr, \
	                                (cache_attr));                          \
	                        }                                               \
	                        __page_idx++;                                   \
	                }                                                       \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#else
/*
 * Non-ARM fallback: no batched pmap primitive is available here, so the
 * attributes are simply applied page by page.
 */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                       \
	    cache_attr, num_pages, batch_pmap_op)   \
	MACRO_BEGIN                                                             \
	        if ((batch_pmap_op)) {                                          \
	                unsigned int __page_idx = 0;                            \
	                while (__page_idx < (num_pages)) {                      \
	                        pmap_set_cache_attributes(                      \
	                                (user_page_list)[__page_idx].phys_addr, \
	                                (cache_attr));                          \
	                        __page_idx++;                                   \
	                }                                                       \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#endif
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */
585 
/*
 * Sanity check used by the PMAP_ENTER* macros above: panic if the page
 * is in the error state.  The "pmap" argument is currently unused.
 * (Deliberately a bare block, not do-while(0): call sites in this file
 * invoke it without a trailing semicolon.)
 */
#define PMAP_ENTER_CHECK(pmap, page)                                    \
{                                                                       \
	if ((page)->vmp_error) {                                        \
	        panic("VM page %p should not have an error\n",          \
	                (page));                                        \
	}                                                               \
}
593 
/*
 *	Routines to manage reference/modify bits based on
 *	physical addresses, simulating them if not provided
 *	by the hardware.
 */
/*
 * Bookkeeping handed to pmap_flush_context_init()/pmap_flush().
 * NOTE(review): the field names suggest pfc_cpus tracks CPUs that still
 * need a flush and pfc_invalid_global flags a global-mapping
 * invalidation -- confirm against the implementation.
 */
struct pfc {
	long    pfc_cpus;
	long    pfc_invalid_global;
};

typedef struct pfc      pmap_flush_context;

/* Clear reference bit */
extern void             pmap_clear_reference(ppnum_t     pn);
/* Return reference bit */
extern boolean_t(pmap_is_referenced)(ppnum_t     pn);
/* Set modify bit */
extern void             pmap_set_modify(ppnum_t  pn);
/* Clear modify bit */
extern void             pmap_clear_modify(ppnum_t pn);
/* Return modify bit */
extern boolean_t        pmap_is_modified(ppnum_t pn);
/* Return modified and referenced bits */
extern unsigned int pmap_get_refmod(ppnum_t pn);
/* Clear modified and referenced bits (mask of VM_MEM_* bits below) */
extern void                     pmap_clear_refmod(ppnum_t pn, unsigned int mask);
#define VM_MEM_MODIFIED         0x01    /* Modified bit */
#define VM_MEM_REFERENCED       0x02    /* Referenced bit */
/* As pmap_clear_refmod, plus a PMAP_OPTIONS_* mask and opaque argument. */
extern void                     pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);

/*
 * Clears the reference and/or modified bits on a range of virtually
 * contiguous pages.
 * It returns true if the operation succeeded. If it returns false,
 * nothing has been modified.
 * This operation is only supported on some platforms, so callers MUST
 * handle the case where it returns false.
 */
extern bool
pmap_clear_refmod_range_options(
	pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int mask,
	unsigned int options);


extern void pmap_flush_context_init(pmap_flush_context *);
extern void pmap_flush(pmap_flush_context *);
643 
/*
 *	Routines that operate on ranges of virtual addresses.
 */
extern void             pmap_protect(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot);

extern void             pmap_protect_options(   /* Change protections. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	vm_prot_t       prot,
	unsigned int    options,        /* PMAP_OPTIONS_* mask */
	void            *arg);

extern void(pmap_pageable)(             /* Specify pageability of a range. */
	pmap_t          pmap,
	vm_map_offset_t start,
	vm_map_offset_t end,
	boolean_t       pageable);

/* Minimum supported shared-region size for this pmap. */
extern uint64_t pmap_shared_region_size_min(pmap_t map);

/*
 * Nest one pmap inside another.
 * NOTE(review): parameters are unnamed here; by analogy with pmap_trim()
 * below they appear to be (grand, subord, vaddr, size) -- confirm against
 * the implementation before relying on the order.
 */
extern kern_return_t pmap_nest(pmap_t,
    pmap_t,
    addr64_t,
    uint64_t);
extern kern_return_t pmap_unnest(pmap_t,
    addr64_t,
    uint64_t);

#define PMAP_UNNEST_CLEAN       1

/* As pmap_unnest, plus an option mask (PMAP_UNNEST_CLEAN). */
extern kern_return_t pmap_unnest_options(pmap_t,
    addr64_t,
    uint64_t,
    unsigned int);
extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
extern void             pmap_advise_pagezero_range(pmap_t, uint64_t);
685 #endif  /* MACH_KERNEL_PRIVATE */
686 
/* Per-page "no encrypt" attribute: query, set, clear. */
extern boolean_t        pmap_is_noencrypt(ppnum_t);
extern void             pmap_set_noencrypt(ppnum_t pn);
extern void             pmap_clear_noencrypt(ppnum_t pn);

/*
 * JMM - This portion is exported to other kernel components right now,
 * but will be pulled back in the future when the needed functionality
 * is provided in a cleaner manner.
 */

#if XNU_KERNEL_PRIVATE
/*
 * Note: because this is an API break we hide the constness of that pointer
 *       to kexts for now
 */
extern const pmap_t     kernel_pmap;            /* The kernel's map */
#else
extern pmap_t           kernel_pmap;            /* The kernel's map */
#endif
#define pmap_kernel()   (kernel_pmap)

/* Additional "flags" bits for pmap_enter() and friends. */
#define VM_MEM_SUPERPAGE        0x100           /* map a superpage instead of a base page */
#define VM_MEM_STACK            0x200
710 
/* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
 * definitions in i386/pmap_internal.h
 */
#define PMAP_CREATE_64BIT          0x1

#if __x86_64__

#define PMAP_CREATE_EPT            0x2          /* presumably an EPT (nested-paging) pmap -- confirm in MD code */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT)

#else

#define PMAP_CREATE_STAGE2         0
#if __arm64e__
#define PMAP_CREATE_DISABLE_JOP    0x4          /* see pmap_disable_user_jop() above */
#else
#define PMAP_CREATE_DISABLE_JOP    0
#endif
#if __ARM_MIXED_PAGE_SIZE__
#define PMAP_CREATE_FORCE_4K_PAGES 0x8
#else
#define PMAP_CREATE_FORCE_4K_PAGES 0
#endif /* __ARM_MIXED_PAGE_SIZE__ */
/*
 * NOTE(review): both branches of this conditional define
 * PMAP_CREATE_X86_64 to 0 -- presumably a placeholder for a
 * configuration not present in this source drop; left as-is.
 */
#if __arm64__
#define PMAP_CREATE_X86_64         0
#else
#define PMAP_CREATE_X86_64         0
#endif

/* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
#define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64)

#endif /* __x86_64__ */
744 
/* Option bits for the PMAP_OPTIONS_* "options" masks used above. */
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
	                                         * KERN_RESOURCE_SHORTAGE
	                                         * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
	                                         * but don't enter mapping
	                                         */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
	                                         * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200       /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400       /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
	                                            * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to be
	                                         * be upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif

#if     !defined(__LP64__)
/* 32-bit only: translate a virtual address in "pmap" to a physical address. */
extern vm_offset_t      pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void             pmap_change_wiring(     /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void             pmap_remove(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

extern void             pmap_remove_options(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);       /* PMAP_OPTIONS_* mask */

/* Fill physical page "pa" with the pattern "fill". */
extern void             fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
/* By name, pre-expand page-table structure covering "vaddr" -- confirm in MD code. */
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

/*
 * Report resident byte count for [s, e); if "compressed_bytes_p" is
 * non-NULL it presumably receives the compressed byte count as well.
 */
mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

/* Get/set whether this pmap's vm_map enforces code signing. */
extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);
820 
821 /*
822  * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
823  * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
824  * This is expected to only be called from kernel debugger context,
825  * so synchronization is not required.
826  */
827 
828 extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);
829 
830 /*
831  * Indicates if any special policy is applied to this protection by the pmap
832  * layer.
833  */
834 bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);
835 
836 /*
837  * Causes the pmap to return any available pages that it can return cheaply to
838  * the VM.
839  */
840 uint64_t pmap_release_pages_fast(void);
841 
842 #define PMAP_QUERY_PAGE_PRESENT                 0x01
843 #define PMAP_QUERY_PAGE_REUSABLE                0x02
844 #define PMAP_QUERY_PAGE_INTERNAL                0x04
845 #define PMAP_QUERY_PAGE_ALTACCT                 0x08
846 #define PMAP_QUERY_PAGE_COMPRESSED              0x10
847 #define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
848 extern kern_return_t pmap_query_page_info(
849 	pmap_t          pmap,
850 	vm_map_offset_t va,
851 	int             *disp);
852 
853 #ifdef PLATFORM_BridgeOS
854 struct pmap_legacy_trust_cache {
855 	struct pmap_legacy_trust_cache *next;
856 	uuid_t uuid;
857 	uint32_t num_hashes;
858 	uint8_t hashes[][CS_CDHASH_LEN];
859 };
860 #else
861 struct pmap_legacy_trust_cache;
862 #endif
863 
864 extern kern_return_t pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache *trust_cache,
865     const vm_size_t trust_cache_len);
866 
/*
 * Classes of image4 trust cache accepted by pmap_load_image4_trust_cache().
 * NOTE(review): the exact policy attached to each class is defined by the
 * pmap/AppleImage4 implementation -- confirm there before relying on it.
 */
typedef enum {
	PMAP_TC_TYPE_PERSONALIZED,
	PMAP_TC_TYPE_PDI,
	PMAP_TC_TYPE_CRYPTEX,
	PMAP_TC_TYPE_ENGINEERING,
	PMAP_TC_TYPE_GLOBAL_FF00,
	PMAP_TC_TYPE_GLOBAL_FF01,
	PMAP_TC_TYPE_GLOBAL_FF06,
	PMAP_TC_TYPE_DDI,
	PMAP_TC_TYPE_EPHEMERAL_CRYPTEX,
} pmap_tc_type_t;
878 
879 #define PMAP_IMAGE4_TRUST_CACHE_HAS_TYPE 1
880 struct pmap_image4_trust_cache {
881 	// Filled by pmap layer.
882 	struct pmap_image4_trust_cache const *next;             // linked list linkage
883 	struct trust_cache_module1 const *module;                       // pointer into module (within data below)
884 
885 	// Filled by caller.
886 	// data is either an image4,
887 	// or just the trust cache payload itself if the image4 manifest is external.
888 	pmap_tc_type_t type;
889 	size_t bnch_len;
890 	uint8_t const bnch[48];
891 	size_t data_len;
892 	uint8_t const data[];
893 };
894 
/*
 * Status codes returned by the image4 trust cache entry points.
 * PMAP_TC_SUCCESS is zero; every failure code is negative.
 */
typedef enum {
	PMAP_TC_SUCCESS = 0,
	PMAP_TC_UNKNOWN_FORMAT = -1,
	PMAP_TC_TOO_SMALL_FOR_HEADER = -2,
	PMAP_TC_TOO_SMALL_FOR_ENTRIES = -3,
	PMAP_TC_UNKNOWN_VERSION = -4,
	PMAP_TC_ALREADY_LOADED = -5,
	PMAP_TC_TOO_BIG = -6,
	PMAP_TC_RESOURCE_SHORTAGE = -7,
	PMAP_TC_MANIFEST_TOO_BIG = -8,
	PMAP_TC_MANIFEST_VIOLATION = -9,
	PMAP_TC_PAYLOAD_VIOLATION = -10,
	PMAP_TC_EXPIRED = -11,
	PMAP_TC_CRYPTO_WRONG = -12,
	PMAP_TC_OBJECT_WRONG = -13,
	PMAP_TC_UNKNOWN_CALLER = -14,
	PMAP_TC_NOT_SUPPORTED = -15,
	PMAP_TC_UNKNOWN_FAILURE = -16,
} pmap_tc_ret_t;
914 
915 #define PMAP_HAS_LOCKDOWN_IMAGE4_SLAB 1
916 extern void pmap_lockdown_image4_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);
917 
918 #define PMAP_HAS_LOCKDOWN_IMAGE4_LATE_SLAB 1
919 extern void pmap_lockdown_image4_late_slab(vm_offset_t slab, vm_size_t slab_len, uint64_t flags);
920 
921 extern pmap_tc_ret_t pmap_load_image4_trust_cache(
922 	struct pmap_image4_trust_cache *trust_cache, vm_size_t trust_cache_len,
923 	uint8_t const *img4_manifest,
924 	vm_size_t img4_manifest_buffer_len,
925 	vm_size_t img4_manifest_actual_len,
926 	bool dry_run);
927 
928 extern bool pmap_is_trust_cache_loaded(const uuid_t uuid);
929 extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
930 extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);
931 
932 extern void pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
933 extern bool pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN]);
934 
935 extern bool pmap_in_ppl(void);
936 extern bool pmap_has_ppl(void);
937 
938 extern void *pmap_claim_reserved_ppl_page(void);
939 extern void pmap_free_reserved_ppl_page(void *kva);
940 
941 extern void pmap_ledger_verify_size(size_t);
942 extern ledger_t pmap_ledger_alloc(void);
943 extern void pmap_ledger_free(ledger_t);
944 
945 extern bool pmap_is_bad_ram(ppnum_t ppn);
946 extern void pmap_retire_page(ppnum_t ppn);
947 extern kern_return_t pmap_cs_allow_invalid(pmap_t pmap);
948 
949 #if __arm64__
950 extern bool pmap_is_exotic(pmap_t pmap);
951 #else /* __arm64__ */
952 #define pmap_is_exotic(pmap) false
953 #endif /* __arm64__ */
954 
955 extern bool pmap_cs_enabled(void);
956 
957 
958 /*
959  * Returns a subset of pmap_cs non-default configuration,
960  * e.g. loosening up of some restrictions through pmap_cs or amfi
961  * boot-args. The return value is a bit field with possible bits
962  * described below. If default, the function will return 0. Note that
963  * this does not work the other way: 0 does not imply that pmap_cs
964  * runs in default configuration, and only a small configuration
965  * subset is returned by this function.
966  *
967  * Never assume the system is "secure" if this returns 0.
968  */
969 
970 extern int pmap_cs_configuration(void);
971 
972 extern kern_return_t pmap_cs_fork_prepare(
973 	pmap_t old_pmap,
974 	pmap_t new_pmap
975 	);
976 
977 /*
978  * The PMAP layer is responsible for holding on to the local signing key so that
979  * we can re-use the code for multiple different layers. By keeping our local
980  * signing public key here, we can safeguard it with PMAP_CS, and also use it
981  * within PMAP_CS for validation.
982  *
983  * Moreover, we present an API which can be used by AMFI to query the key when
984  * it needs to.
985  */
986 #define PMAP_ECC_P384_PUBLIC_KEY_SIZE 97
987 extern void pmap_set_local_signing_public_key(
988 	const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE]
989 	);
990 
991 extern uint8_t *pmap_get_local_signing_public_key(void);
992 
993 /*
994  * We require AMFI call into the PMAP layer to unrestrict a particular CDHash
995  * for local signing. This only needs to happen for arm devices since x86 devices
996  * don't have PMAP_CS.
997  *
998  * For now, we make the configuration available for x86 devices as well. When
999  * AMFI stop calling into this API, we'll remove it.
1000  */
1001 #define PMAP_SUPPORTS_RESTRICTED_LOCAL_SIGNING 1
1002 extern void pmap_unrestrict_local_signing(
1003 	const uint8_t cdhash[CS_CDHASH_LEN]
1004 	);
1005 
#if __has_include(<CoreEntitlements/CoreEntitlements.h>)
/*
 * The PMAP layer provides an API to query entitlements through the
 * CoreEntitlements layer.  Runs "query" (of "queryLength" operations)
 * against the entitlements of "pmap"; on success, "finalContext" receives
 * the resulting query context.
 */
extern bool pmap_query_entitlements(
	pmap_t pmap,
	CEQuery_t query,
	size_t queryLength,
	CEQueryContext_t finalContext
	);
#endif
1018 
1019 #endif  /* KERNEL_PRIVATE */
1020 
1021 #endif  /* _VM_PMAP_H_ */
1022