xref: /xnu-12377.61.12/osfmk/vm/pmap.h (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/pmap.h
60  *	Author:	Avadis Tevanian, Jr.
61  *	Date:	1985
62  *
63  *	Machine address mapping definitions -- machine-independent
64  *	section.  [For machine-dependent section, see "machine/pmap.h".]
65  */
66 
67 #ifndef _VM_PMAP_H_
68 #define _VM_PMAP_H_
69 
70 #include <mach/kern_return.h>
71 #include <mach/vm_param.h>
72 #include <mach/vm_types.h>
73 #include <mach/vm_attributes.h>
74 #include <mach/boolean.h>
75 #include <mach/vm_prot.h>
76 #include <kern/trustcache.h>
77 
78 #if __has_include(<CoreEntitlements/CoreEntitlements.h>)
79 #include <CoreEntitlements/CoreEntitlements.h>
80 #endif
81 
82 #ifdef  KERNEL_PRIVATE
83 
84 /*
85  *	The following is a description of the interface to the
86  *	machine-dependent "physical map" data structure.  The module
87  *	must provide a "pmap_t" data type that represents the
88  *	set of valid virtual-to-physical addresses for one user
89  *	address space.  [The kernel address space is represented
90  *	by a distinguished "pmap_t".]  The routines described manage
91  *	this type, install and update virtual-to-physical mappings,
92  *	and perform operations on physical addresses common to
93  *	many address spaces.
94  */
95 
96 /* Copy between a physical page and a virtual address */
97 /* LP64todo - switch to vm_map_offset_t when it grows */
98 extern kern_return_t    copypv(
99 	addr64_t source,
100 	addr64_t sink,
101 	unsigned int size,
102 	int which);
103 
104 /* bcopy_phys and bzero_phys flags. */
105 #define cppvPsnk                0x000000001     /* Destination is a physical address */
106 #define cppvPsnkb               31
107 #define cppvPsrc                0x000000002     /* Source is a physical address */
108 #define cppvPsrcb               30
109 #define cppvFsnk                0x000000004     /* Destination requires flushing (only on non-coherent I/O) */
110 #define cppvFsnkb               29
111 #define cppvFsrc                0x000000008     /* Source requires flushing (only on non-coherent I/O) */
112 #define cppvFsrcb               28
113 #define cppvNoModSnk            0x000000010     /* Ignored in bcopy_phys() */
114 #define cppvNoModSnkb           27
115 #define cppvNoRefSrc            0x000000020     /* Ignored in bcopy_phys() */
116 #define cppvNoRefSrcb           26
117 #define cppvKmap                0x000000040     /* Use the kernel's vm_map */
118 #define cppvKmapb               25
119 #if HAS_MTE
120 #define cppvCopyTags                0x000000080     /* Copy tag metadata along with the physical copy operation */
121 #define cppvDisableTagCheck         0x000000100     /* Perform the physical copy operation with tag checking disabled */
122 #define cppvFixupPhysmapTag         0x000000200     /* Fixup the physmap address with the tag stashed on the input pointer */
123 #define cppvZeroPageTags            0x000000400     /* Zero ATags as part of the operation. Operates in page sized chunks  */
124 #define cppvDenoteAccessMayFault    0x000000800     /* Hint that we expect a possibility of TCF during the copy */
125 #endif /* HAS_MTE */
126 
127 extern boolean_t pmap_has_managed_page(ppnum_t first, ppnum_t last);
128 
129 #if HAS_MTE
130 extern bool pmap_is_tagged_page(ppnum_t);
131 extern bool pmap_is_tagged_mapping(pmap_t, vm_map_offset_t);
132 #endif /* HAS_MTE */
133 
134 #if MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE
135 #include <mach/mach_types.h>
136 #include <vm/memory_types.h>
137 
138 /*
139  * Routines used during BSD process creation.
140  */
141 
142 extern pmap_t           pmap_create_options(    /* Create a pmap_t. */
143 	ledger_t        ledger,
144 	vm_map_size_t   size,
145 	unsigned int    flags);
146 
147 #if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
148 /**
149  * Informs the pmap layer that a process will be running with user JOP disabled,
150  * as if PMAP_CREATE_DISABLE_JOP had been passed during pmap creation.
151  *
152  * @note This function cannot be used once the target process has started
153  * executing code.  It is intended for cases where user JOP is disabled based on
154  * the code signature (e.g., special "keys-off" entitlements), which is too late
155  * to change the flags passed to pmap_create_options.
156  *
157  * @param pmap	The pmap belonging to the target process
158  */
159 extern void             pmap_disable_user_jop(
160 	pmap_t          pmap);
161 #endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */
162 #endif /* MACH_KERNEL_PRIVATE || BSD_KERNEL_PRIVATE */
163 
164 #ifdef  MACH_KERNEL_PRIVATE
165 
166 #include <mach_assert.h>
167 
168 #include <machine/pmap.h>
169 
170 #if CONFIG_SPTM
171 #include <arm64/sptm/sptm.h>
172 #endif
173 
174 /*
175  *	Routines used for initialization.
176  *	There is traditionally also a pmap_bootstrap,
177  *	used very early by machine-dependent code,
178  *	but it is not part of the interface.
179  *
180  *	LP64todo -
181  *	These interfaces are tied to the size of the
182  *	kernel pmap - and therefore use the "local"
183  *	vm_offset_t, etc... types.
184  */
185 
186 extern void *pmap_steal_memory(vm_size_t size, vm_size_t alignment); /* Early memory allocation */
187 extern void *pmap_steal_freeable_memory(vm_size_t size); /* Early memory allocation */
188 #if HAS_MTE
189 extern void *pmap_steal_zone_memory(vm_size_t size, vm_size_t alignment); /* Early zone-specific allocations */
190 #endif /* HAS_MTE */
191 
192 extern uint_t pmap_free_pages(void); /* report remaining unused physical pages */
193 #if defined(__arm__) || defined(__arm64__)
194 extern ppnum_t pmap_first_pnum;           /* the first valid physical page on the system == atop(gDramBase) */
195 extern uint_t pmap_free_pages_span(void); /* report phys address range of unused physical pages */
196 #endif /* defined(__arm__) || defined(__arm64__) */
197 
198 extern void pmap_startup(vm_offset_t *startp, vm_offset_t *endp); /* allocate vm_page structs */
199 
200 extern void pmap_init(void); /* Initialization, once we have kernel virtual memory.  */
201 
202 extern void mapping_adjust(void); /* Adjust free mapping count */
203 
204 extern void mapping_free_prime(void); /* Primes the mapping block release list */
205 
206 #ifndef MACHINE_PAGES
207 /*
208  *	If machine/pmap.h defines MACHINE_PAGES, it must implement
209  *	the above functions.  The pmap module has complete control.
210  *	Otherwise, it must implement the following functions:
211  *		pmap_free_pages
212  *		pmap_virtual_space
213  *		pmap_next_page
214  *		pmap_init
215  *	and vm/vm_resident.c implements pmap_steal_memory and pmap_startup
216  *	using pmap_free_pages, pmap_next_page, pmap_virtual_space,
217  *	and pmap_enter.  pmap_free_pages may over-estimate the number
218  *	of unused physical pages, and pmap_next_page may return FALSE
219  *	to indicate that there are no more unused pages to return.
220  *	However, for best performance pmap_free_pages should be accurate.
221  */
222 
223 /*
224  * Routines to return the next unused physical page.
225  */
226 extern boolean_t pmap_next_page(ppnum_t *pnum);
227 extern boolean_t pmap_next_page_hi(ppnum_t *pnum, boolean_t might_free);
228 #ifdef __x86_64__
229 extern kern_return_t pmap_next_page_large(ppnum_t *pnum);
230 extern void pmap_hi_pages_done(void);
231 #endif
232 
233 #if CONFIG_SPTM
/*
 * Mapping type passed to pmap_enter() and related entry points.
 * Under CONFIG_SPTM the enumerators are assigned directly from the SPTM
 * frame-type constants, so values can be handed to the SPTM without
 * translation.
 */
__enum_decl(pmap_mapping_type_t, uint8_t, {
	PMAP_MAPPING_TYPE_INFER = SPTM_UNTYPED,              /* no explicit type; frame type inferred (SPTM_UNTYPED) */
	PMAP_MAPPING_TYPE_DEFAULT = XNU_DEFAULT,             /* ordinary XNU-owned memory */
	PMAP_MAPPING_TYPE_ROZONE = XNU_ROZONE,               /* read-only zone memory */
	PMAP_MAPPING_TYPE_RESTRICTED = XNU_KERNEL_RESTRICTED /* kernel-restricted memory */
});
240 
/*
 * PMAP_PAGE_IS_USER_EXECUTABLE(m):
 * Evaluates to true iff the SPTM frame type of the physical page backing
 * vm_page_t "m" is a user-executable type (per sptm_type_is_user_executable()).
 * Implemented as a GNU statement expression; "m" is evaluated exactly once.
 */
#define PMAP_PAGE_IS_USER_EXECUTABLE(m) \
({ \
	const sptm_paddr_t __paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(m)); \
	const sptm_frame_type_t __frame_type = sptm_get_frame_type(__paddr); \
	sptm_type_is_user_executable(__frame_type); \
})
247 
248 extern bool pmap_will_retype(pmap_t pmap, vm_map_address_t vaddr, ppnum_t pn,
249     vm_prot_t prot, unsigned int options, pmap_mapping_type_t mapping_type);
250 
251 #else
/*
 * Mapping type passed to pmap_enter() and related entry points
 * (non-SPTM configurations).  The enumerators parallel the SPTM frame
 * types used when CONFIG_SPTM is enabled, but the numeric values here
 * are internal to the pmap layer.
 */
__enum_decl(pmap_mapping_type_t, uint8_t, {
	PMAP_MAPPING_TYPE_INFER = 0,      /* no explicit type requested */
	PMAP_MAPPING_TYPE_DEFAULT,        /* ordinary XNU-owned memory */
	PMAP_MAPPING_TYPE_ROZONE,         /* read-only zone memory */
	PMAP_MAPPING_TYPE_RESTRICTED      /* kernel-restricted memory */
});
258 #endif
259 
260 /*
261  * Report virtual space available for the kernel.
262  */
263 extern void pmap_virtual_space(
264 	vm_offset_t     *virtual_start,
265 	vm_offset_t     *virtual_end);
266 #endif  /* MACHINE_PAGES */
267 
268 /*
269  * Routines to manage the physical map data structure.
270  */
271 extern pmap_t(pmap_kernel)(void);               /* Return the kernel's pmap */
272 extern void             pmap_reference(pmap_t pmap);    /* Gain a reference. */
273 extern void             pmap_destroy(pmap_t pmap); /* Release a reference. */
274 extern void             pmap_switch(pmap_t pmap, thread_t thread);
275 extern void             pmap_require(pmap_t pmap);
276 
277 #if MACH_ASSERT
278 extern void pmap_set_process(pmap_t pmap,
279     int pid,
280     char *procname);
281 #endif /* MACH_ASSERT */
282 
283 extern kern_return_t    pmap_enter(     /* Enter a mapping */
284 	pmap_t          pmap,
285 	vm_map_offset_t v,
286 	ppnum_t         pn,
287 	vm_prot_t       prot,
288 	vm_prot_t       fault_type,
289 	unsigned int    flags,
290 	boolean_t       wired,
291 	pmap_mapping_type_t mapping_type);
292 
293 extern kern_return_t    pmap_enter_options(
294 	pmap_t pmap,
295 	vm_map_offset_t v,
296 	ppnum_t pn,
297 	vm_prot_t prot,
298 	vm_prot_t fault_type,
299 	unsigned int flags,
300 	boolean_t wired,
301 	unsigned int options,
302 	void *arg,
303 	pmap_mapping_type_t mapping_type);
304 extern kern_return_t    pmap_enter_options_addr(
305 	pmap_t pmap,
306 	vm_map_offset_t v,
307 	pmap_paddr_t pa,
308 	vm_prot_t prot,
309 	vm_prot_t fault_type,
310 	unsigned int flags,
311 	boolean_t wired,
312 	unsigned int options,
313 	void *arg,
314 	pmap_mapping_type_t mapping_type);
315 
316 extern void             pmap_remove_some_phys(
317 	pmap_t          pmap,
318 	ppnum_t         pn);
319 
320 extern void             pmap_lock_phys_page(
321 	ppnum_t         pn);
322 
323 extern void             pmap_unlock_phys_page(
324 	ppnum_t         pn);
325 
326 
327 /*
328  *	Routines that operate on physical addresses.
329  */
330 
331 extern void             pmap_page_protect(      /* Restrict access to page. */
332 	ppnum_t phys,
333 	vm_prot_t       prot);
334 
335 extern void             pmap_page_protect_options(      /* Restrict access to page. */
336 	ppnum_t phys,
337 	vm_prot_t       prot,
338 	unsigned int    options,
339 	void            *arg);
340 
341 extern void(pmap_zero_page)(
342 	ppnum_t         pn);
343 
344 extern void(pmap_zero_page_with_options)(
345 	ppnum_t         pn,
346 	int             options);
347 
348 extern void(pmap_zero_part_page)(
349 	ppnum_t         pn,
350 	vm_offset_t     offset,
351 	vm_size_t       len);
352 
353 extern void(pmap_copy_page)(
354 	ppnum_t         src,
355 	ppnum_t         dest,
356 	int             options);
357 
358 extern void(pmap_copy_part_page)(
359 	ppnum_t         src,
360 	vm_offset_t     src_offset,
361 	ppnum_t         dst,
362 	vm_offset_t     dst_offset,
363 	vm_size_t       len);
364 
365 extern void(pmap_copy_part_lpage)(
366 	vm_offset_t     src,
367 	ppnum_t         dst,
368 	vm_offset_t     dst_offset,
369 	vm_size_t       len);
370 
371 extern void(pmap_copy_part_rpage)(
372 	ppnum_t         src,
373 	vm_offset_t     src_offset,
374 	vm_offset_t     dst,
375 	vm_size_t       len);
376 
377 extern unsigned int(pmap_disconnect)(   /* disconnect mappings and return reference and change */
378 	ppnum_t         phys);
379 
380 extern unsigned int(pmap_disconnect_options)(   /* disconnect mappings and return reference and change */
381 	ppnum_t         phys,
382 	unsigned int    options,
383 	void            *arg);
384 
385 extern kern_return_t(pmap_attribute_cache_sync)(      /* Flush appropriate
386                                                        * cache based on
387                                                        * page number sent */
388 	ppnum_t         pn,
389 	vm_size_t       size,
390 	vm_machine_attribute_t attribute,
391 	vm_machine_attribute_val_t* value);
392 
393 extern unsigned int(pmap_cache_attributes)(
394 	ppnum_t         pn);
395 
396 /*
397  * Set (override) cache attributes for the specified physical page
398  */
399 extern  void            pmap_set_cache_attributes(
400 	ppnum_t,
401 	unsigned int);
402 
403 extern void            *pmap_map_compressor_page(
404 	ppnum_t);
405 
406 extern void             pmap_unmap_compressor_page(
407 	ppnum_t,
408 	void*);
409 
410 /**
411  * The following declarations are meant to provide a uniform interface by which the VM layer can
412  * pass batches of pages to the pmap layer directly, in the various page list formats natively
413  * used by the VM.  If a new type of list is to be added, the various structures and iterator
414  * functions below should be updated to understand it, and then it should "just work" with the
415  * pmap layer.
416  */
417 
418 /* The various supported page list types. */
/*
 * The various supported page list types.  This value is the discriminant
 * for the anonymous union inside unified_page_list_t: it selects which
 * union member is valid.
 */
__enum_decl(unified_page_list_type_t, uint8_t, {
	/* Universal page list array, essentially an array of ppnum_t. */
	UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY,
	/**
	 * Singly-linked list of vm_page_t, using vmp_snext field.
	 * This is typically used to construct local lists of pages to be freed.
	 */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST,
	/* Doubly-linked queue of vm_page_t's associated with a VM object, using vmp_listq field. */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q,
	/* Doubly-linked queue of vm_page_t's in a FIFO queue or global free list, using vmp_pageq field. */
	UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q,
});
432 
433 /* Uniform data structure encompassing the various page list types handled by the VM layer. */
/*
 * Uniform data structure encompassing the various page list types handled
 * by the VM layer.  Exactly one union member is meaningful at a time;
 * "type" records which one.  Iterate with the unified_page_list_iterator_*
 * functions declared below.
 */
typedef struct {
	union {
		/* Base address and size (in pages) of UPL array for type UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */
		struct {
			upl_page_info_array_t upl_info;
			unsigned int upl_size;
		} upl;
		/* Head of singly-linked vm_page_t list for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST */
		vm_page_t page_slist;
		/* Head of queue for UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q and UNIFIED_PAGE_LIST_TYPE_VM_PAGE_FIFO_Q */
		void *pageq; /* vm_page_queue_head_t* */
	};
	/* Discriminant: selects which union member above is valid. */
	unified_page_list_type_t type;
} unified_page_list_t;
448 
449 /* Uniform data structure representing an iterator position within a unified_page_list_t object. */
/*
 * Uniform data structure representing an iterator position within a
 * unified_page_list_t object.  Initialize with
 * unified_page_list_iterator_init(), advance with
 * unified_page_list_iterator_next(), and test for exhaustion with
 * unified_page_list_iterator_end().  Which union member tracks the
 * position is determined by list->type.
 */
typedef struct {
	/* Pointer to list structure from which this iterator was created. */
	const unified_page_list_t *list;
	union {
		/* Position within UPL array, for UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY */
		unsigned int upl_index;
		/* Position within page list or page queue, for all other types */
		vm_page_t pageq_pos;
	};
} unified_page_list_iterator_t;
460 
461 extern void unified_page_list_iterator_init(
462 	const unified_page_list_t *page_list,
463 	unified_page_list_iterator_t *iter);
464 
465 extern void unified_page_list_iterator_next(unified_page_list_iterator_t *iter);
466 
467 extern bool unified_page_list_iterator_end(const unified_page_list_iterator_t *iter);
468 
469 extern ppnum_t unified_page_list_iterator_page(
470 	const unified_page_list_iterator_t *iter,
471 	bool *is_fictitious);
472 
473 extern vm_page_t unified_page_list_iterator_vm_page(
474 	const unified_page_list_iterator_t *iter);
475 
476 extern void pmap_batch_set_cache_attributes(
477 	const unified_page_list_t *,
478 	unsigned int);
479 extern void pmap_sync_page_data_phys(ppnum_t pa);
480 extern void pmap_sync_page_attributes_phys(ppnum_t pa);
481 
482 #if HAS_MTE
483 extern pmap_paddr_t mte_tag_storage_start;
484 extern pmap_paddr_t mte_tag_storage_end;
485 extern uint_t       mte_tag_storage_count; /* in number of pages */
486 extern ppnum_t      mte_tag_storage_start_pnum;
487 
488 extern void pmap_make_tag_storage_page(ppnum_t);
489 extern void pmap_unmake_tag_storage_page(ppnum_t);
490 extern ppnum_t map_tag_ppnum_to_first_covered_ppnum(ppnum_t tag_ppnum);
491 extern void pmap_make_tagged_page(ppnum_t);
492 extern void pmap_make_tagged_pages(const unified_page_list_t *page_list);
493 extern void pmap_unmake_tagged_page(ppnum_t);
494 extern void pmap_unmake_tagged_pages(const unified_page_list_t *page_list);
495 extern bool pmap_is_tag_storage_page(ppnum_t pnum);
496 extern bool pmap_in_tag_storage_range(ppnum_t pnum) __pure2;
497 
498 /*
499  * Routines for classifying tag storage pages.  Recursive and unmanaged tag
500  * storage pages should never be used for tag storage.
501  */
502 
503 /*
504  * pmap_tag_storage_is_recursive:
505  * Given a tag storage page number, returns whether the tag storage page is
506  * recursive.
507  */
508 extern bool pmap_tag_storage_is_recursive(ppnum_t pnum) __pure2;
509 
510 /*
511  * pmap_tag_storage_is_unmanaged:
512  * Given a tag storage page number, returns whether the tag storage page is
513  * for unmanaged memory.
514  */
515 extern bool pmap_tag_storage_is_unmanaged(ppnum_t pnum) __pure2;
516 
517 /*
518  * pmap_tag_storage_is_discarded:
519  * Given a tag storage page number, returns whether the tag storage (and the associated
520  * pages) have been discarded by a maxmem= boot-arg.
521  */
522 extern bool pmap_tag_storage_is_discarded(ppnum_t pnum) __pure2;
523 #endif /* HAS_MTE */
524 
525 /**
526  * pmap entry point for performing platform-specific integrity checks and cleanup when
527  * the VM is about to free a page.  This function will typically at least validate
528  * that the page has no outstanding mappings or other references, and depending
529  * upon the platform may also take additional steps to reset page state.
530  *
531  * @param pn The page that is about to be freed by the VM.
532  */
533 extern void pmap_recycle_page(ppnum_t pn);
534 
535 /*
536  * debug/assertions. pmap_verify_free returns true iff
537  * the given physical page is mapped into no pmap.
538  * pmap_assert_free() will panic() if pn is not free.
539  */
540 extern bool pmap_verify_free(ppnum_t pn);
541 #if MACH_ASSERT
542 extern void pmap_assert_free(ppnum_t pn);
543 #endif
544 
545 
546 /*
547  *	Sundry required (internal) routines
548  */
549 #ifdef CURRENTLY_UNUSED_AND_UNTESTED
550 extern void             pmap_collect(pmap_t pmap);/* Perform garbage
551                                                    * collection, if any */
552 #endif
553 /*
554  *	Optional routines
555  */
556 extern void(pmap_copy)(                         /* Copy range of mappings,
557                                                  * if desired. */
558 	pmap_t          dest,
559 	pmap_t          source,
560 	vm_map_offset_t dest_va,
561 	vm_map_size_t   size,
562 	vm_map_offset_t source_va);
563 
564 extern kern_return_t(pmap_attribute)(           /* Get/Set special memory
565                                                  * attributes */
566 	pmap_t          pmap,
567 	vm_map_offset_t va,
568 	vm_map_size_t   size,
569 	vm_machine_attribute_t  attribute,
570 	vm_machine_attribute_val_t* value);
571 
572 /*
573  * Routines defined as macros.
574  */
575 #ifndef PMAP_ACTIVATE_USER
576 #ifndef PMAP_ACTIVATE
577 #define PMAP_ACTIVATE_USER(thr, cpu)
578 #else   /* PMAP_ACTIVATE */
/*
 * Activate the user pmap of thread "thr" on CPU "cpu" via PMAP_ACTIVATE,
 * skipping the kernel pmap.  Wrapped in do { } while (0) so the macro
 * expands to a single statement that is safe inside an unbraced
 * if/else body (CERT PRE10-C); the previous bare-brace form would
 * break "if (x) PMAP_ACTIVATE_USER(...); else ...".
 */
#define PMAP_ACTIVATE_USER(thr, cpu) do {                       \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if (pmap != pmap_kernel()) {                            \
	        PMAP_ACTIVATE(pmap, (thr), (cpu));              \
	}                                                       \
} while (0)
586 #endif  /* PMAP_ACTIVATE */
587 #endif  /* PMAP_ACTIVATE_USER */
588 
589 #ifndef PMAP_DEACTIVATE_USER
590 #ifndef PMAP_DEACTIVATE
591 #define PMAP_DEACTIVATE_USER(thr, cpu)
592 #else   /* PMAP_DEACTIVATE */
/*
 * Deactivate the user pmap of thread "thr" on CPU "cpu" via
 * PMAP_DEACTIVATE, skipping the kernel pmap.  Wrapped in
 * do { } while (0) so the macro expands to a single statement that is
 * safe inside an unbraced if/else body (CERT PRE10-C); the previous
 * bare-brace form would break
 * "if (x) PMAP_DEACTIVATE_USER(...); else ...".
 */
#define PMAP_DEACTIVATE_USER(thr, cpu) do {                     \
	pmap_t  pmap;                                           \
                                                                \
	pmap = (thr)->map->pmap;                                \
	if ((pmap) != pmap_kernel()) {                          \
	        PMAP_DEACTIVATE(pmap, (thr), (cpu));            \
	}                                                       \
} while (0)
600 #endif  /* PMAP_DEACTIVATE */
601 #endif  /* PMAP_DEACTIVATE_USER */
602 
603 #ifndef PMAP_ACTIVATE_KERNEL
604 #ifndef PMAP_ACTIVATE
605 #define PMAP_ACTIVATE_KERNEL(cpu)
606 #else   /* PMAP_ACTIVATE */
607 #define PMAP_ACTIVATE_KERNEL(cpu)                       \
608 	        PMAP_ACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
609 #endif  /* PMAP_ACTIVATE */
610 #endif  /* PMAP_ACTIVATE_KERNEL */
611 
612 #ifndef PMAP_DEACTIVATE_KERNEL
613 #ifndef PMAP_DEACTIVATE
614 #define PMAP_DEACTIVATE_KERNEL(cpu)
615 #else   /* PMAP_DEACTIVATE */
616 #define PMAP_DEACTIVATE_KERNEL(cpu)                     \
617 	        PMAP_DEACTIVATE(pmap_kernel(), THREAD_NULL, cpu)
618 #endif  /* PMAP_DEACTIVATE */
619 #endif  /* PMAP_DEACTIVATE_KERNEL */
620 
#ifndef PMAP_SET_CACHE_ATTR
/*
 * Set (override) cache attributes for the single page "mem", unless the
 * caller is performing a batched pmap operation (batch_pmap_op true), in
 * which case the whole set is expected to be handled by
 * PMAP_BATCH_SET_CACHE_ATTR instead.  Also records on "object" that its
 * pages have had cache attributes overridden (set_cache_attr flag).
 * MACRO_BEGIN/MACRO_END make the expansion a single statement.
 */
#define PMAP_SET_CACHE_ATTR(mem, object, cache_attr, batch_pmap_op)             \
	MACRO_BEGIN                                                             \
	        if (!batch_pmap_op) {                                           \
	                pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(mem), cache_attr); \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#endif  /* PMAP_SET_CACHE_ATTR */
630 
#ifndef PMAP_BATCH_SET_CACHE_ATTR
/*
 * Batched counterpart of PMAP_SET_CACHE_ATTR: when batch_pmap_op is true,
 * wrap the caller's UPL info array ("user_page_list" of "num_pages"
 * entries) in a unified_page_list_t of type UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY
 * and apply "cache_attr" to all pages in one
 * pmap_batch_set_cache_attributes() call, then record on "object" that its
 * pages have had cache attributes overridden.  No-op when batch_pmap_op is
 * false.
 */
#define PMAP_BATCH_SET_CACHE_ATTR(object, user_page_list,                   \
	    cache_attr, num_pages, batch_pmap_op)                               \
	MACRO_BEGIN                                                             \
	        if ((batch_pmap_op)) {                                          \
	                const unified_page_list_t __pmap_batch_list = {         \
	                        .upl = {.upl_info = (user_page_list),           \
	                                .upl_size = (num_pages),},              \
	                        .type = UNIFIED_PAGE_LIST_TYPE_UPL_ARRAY,       \
	                };                                                      \
	                pmap_batch_set_cache_attributes(                        \
	                                &__pmap_batch_list,                     \
	                                (cache_attr));                          \
	                (object)->set_cache_attr = TRUE;                        \
	        }                                                               \
	MACRO_END
#endif  /* PMAP_BATCH_SET_CACHE_ATTR */
648 
649 /*
650  *	Routines to manage reference/modify bits based on
651  *	physical addresses, simulating them if not provided
652  *	by the hardware.
653  */
/*
 * Context used to accumulate deferred pmap flush state across a batch of
 * operations; initialized with pmap_flush_context_init() and drained with
 * pmap_flush() (see PMAP_OPTIONS_NOFLUSH).
 */
struct pfc {
	long    pfc_cpus;           /* NOTE(review): presumably a set/mask of CPUs with pending flushes — confirm in pmap implementation */
	long    pfc_invalid_global; /* NOTE(review): presumably tracks invalidation of global mappings — confirm in pmap implementation */
};

typedef struct pfc      pmap_flush_context;
660 
661 /* Clear reference bit */
662 extern void             pmap_clear_reference(ppnum_t     pn);
663 /* Return reference bit */
664 extern boolean_t(pmap_is_referenced)(ppnum_t     pn);
665 /* Set modify bit */
666 extern void             pmap_set_modify(ppnum_t  pn);
667 /* Clear modify bit */
668 extern void             pmap_clear_modify(ppnum_t pn);
669 /* Return modify bit */
670 extern boolean_t        pmap_is_modified(ppnum_t pn);
671 /* Return modified and referenced bits */
672 extern unsigned int pmap_get_refmod(ppnum_t pn);
673 /* Clear modified and referenced bits */
674 extern void                     pmap_clear_refmod(ppnum_t pn, unsigned int mask);
675 #define VM_MEM_MODIFIED         0x01    /* Modified bit */
676 #define VM_MEM_REFERENCED       0x02    /* Referenced bit */
677 extern void                     pmap_clear_refmod_options(ppnum_t pn, unsigned int mask, unsigned int options, void *);
678 
679 /*
680  * Clears the reference and/or modified bits on a range of virtually
681  * contiguous pages.
682  * It returns true if the operation succeeded. If it returns false,
683  * nothing has been modified.
684  * This operation is only supported on some platforms, so callers MUST
685  * handle the case where it returns false.
686  */
687 extern bool
688 pmap_clear_refmod_range_options(
689 	pmap_t pmap,
690 	vm_map_address_t start,
691 	vm_map_address_t end,
692 	unsigned int mask,
693 	unsigned int options);
694 
695 
696 extern void pmap_flush_context_init(pmap_flush_context *);
697 extern void pmap_flush(pmap_flush_context *);
698 
699 /*
700  *	Routines that operate on ranges of virtual addresses.
701  */
702 extern void             pmap_protect(   /* Change protections. */
703 	pmap_t          map,
704 	vm_map_offset_t s,
705 	vm_map_offset_t e,
706 	vm_prot_t       prot);
707 
708 extern void             pmap_protect_options(   /* Change protections. */
709 	pmap_t          map,
710 	vm_map_offset_t s,
711 	vm_map_offset_t e,
712 	vm_prot_t       prot,
713 	unsigned int    options,
714 	void            *arg);
715 
716 extern void(pmap_pageable)(
717 	pmap_t          pmap,
718 	vm_map_offset_t start,
719 	vm_map_offset_t end,
720 	boolean_t       pageable);
721 
722 extern uint64_t pmap_shared_region_size_min(pmap_t map);
723 
724 extern void
725     pmap_set_shared_region(pmap_t,
726     pmap_t,
727     addr64_t,
728     uint64_t);
729 extern kern_return_t pmap_nest(pmap_t,
730     pmap_t,
731     addr64_t,
732     uint64_t);
733 extern kern_return_t pmap_unnest(pmap_t,
734     addr64_t,
735     uint64_t);
736 
737 #define PMAP_UNNEST_CLEAN       1
738 
739 extern kern_return_t pmap_fork_nest(
740 	pmap_t old_pmap,
741 	pmap_t new_pmap);
742 
743 extern kern_return_t pmap_unnest_options(pmap_t,
744     addr64_t,
745     uint64_t,
746     unsigned int);
747 extern boolean_t pmap_adjust_unnest_parameters(pmap_t, vm_map_offset_t *, vm_map_offset_t *);
748 extern void             pmap_advise_pagezero_range(pmap_t, uint64_t);
749 #endif  /* MACH_KERNEL_PRIVATE */
750 
751 extern boolean_t        pmap_is_noencrypt(ppnum_t);
752 extern void             pmap_set_noencrypt(ppnum_t pn);
753 extern void             pmap_clear_noencrypt(ppnum_t pn);
754 
755 /*
756  * JMM - This portion is exported to other kernel components right now,
757  * but will be pulled back in the future when the needed functionality
758  * is provided in a cleaner manner.
759  */
760 
761 extern const pmap_t     kernel_pmap;            /* The kernel's map */
762 #define pmap_kernel()   (kernel_pmap)
763 
764 #define VM_MEM_SUPERPAGE        0x100           /* map a superpage instead of a base page */
765 #define VM_MEM_STACK            0x200
766 #if HAS_MTE
767 #define VM_MEM_MAP_MTE          0x400           /* map an MTE enabled page */
768 #endif /* HAS_MTE */
769 
770 /* N.B. These use the same numerical space as the PMAP_EXPAND_OPTIONS
771  * definitions in i386/pmap_internal.h
772  */
773 #define PMAP_CREATE_64BIT          0x1
774 
775 #if __x86_64__
776 
777 #define PMAP_CREATE_EPT            0x2
778 #define PMAP_CREATE_TEST           0x4 /* pmap will be used for testing purposes only */
779 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_EPT | PMAP_CREATE_TEST)
780 
781 #define PMAP_CREATE_NESTED         0   /* this flag is a nop on x86 */
782 
783 #else
784 
785 #define PMAP_CREATE_STAGE2         0
786 #if __arm64e__
787 #define PMAP_CREATE_DISABLE_JOP    0x4
788 #else
789 #define PMAP_CREATE_DISABLE_JOP    0
790 #endif
791 #if __ARM_MIXED_PAGE_SIZE__
792 #define PMAP_CREATE_FORCE_4K_PAGES 0x8
793 #else
794 #define PMAP_CREATE_FORCE_4K_PAGES 0
795 #endif /* __ARM_MIXED_PAGE_SIZE__ */
796 #define PMAP_CREATE_X86_64         0
797 #if CONFIG_ROSETTA
798 #define PMAP_CREATE_ROSETTA        0x20
799 #else
800 #define PMAP_CREATE_ROSETTA        0
801 #endif /* CONFIG_ROSETTA */
802 
803 #define PMAP_CREATE_TEST           0x40 /* pmap will be used for testing purposes only */
804 
805 #define PMAP_CREATE_NESTED         0x80 /* pmap will not try to allocate a subpage root table to save space */
806 
807 /* Define PMAP_CREATE_KNOWN_FLAGS in terms of optional flags */
808 #define PMAP_CREATE_KNOWN_FLAGS (PMAP_CREATE_64BIT | PMAP_CREATE_STAGE2 | PMAP_CREATE_DISABLE_JOP | \
809     PMAP_CREATE_FORCE_4K_PAGES | PMAP_CREATE_X86_64 | PMAP_CREATE_ROSETTA | PMAP_CREATE_TEST | PMAP_CREATE_NESTED)
810 
811 #endif /* __x86_64__ */
812 
/*
 * Option flags accepted by various pmap operations (pmap_enter(),
 * pmap_remove(), pmap_disconnect(), ...).  They share a single flag
 * namespace, so values must remain distinct.
 */
#define PMAP_OPTIONS_NOWAIT     0x1             /* don't block, return
	                                         * KERN_RESOURCE_SHORTAGE
	                                         * instead */
#define PMAP_OPTIONS_NOENTER    0x2             /* expand pmap if needed
	                                         * but don't enter mapping
	                                         */
#define PMAP_OPTIONS_COMPRESSOR 0x4             /* credit the compressor for
	                                         * this operation */
#define PMAP_OPTIONS_INTERNAL   0x8             /* page from internal object */
#define PMAP_OPTIONS_REUSABLE   0x10            /* page is "reusable" */
#define PMAP_OPTIONS_NOFLUSH    0x20            /* delay flushing of pmap */
#define PMAP_OPTIONS_NOREFMOD   0x40            /* don't need ref/mod on disconnect */
#define PMAP_OPTIONS_ALT_ACCT   0x80            /* use alternate accounting scheme for page */
#define PMAP_OPTIONS_REMOVE     0x100           /* removing a mapping */
#define PMAP_OPTIONS_SET_REUSABLE   0x200       /* page is now "reusable" */
#define PMAP_OPTIONS_CLEAR_REUSABLE 0x400       /* page no longer "reusable" */
#define PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED 0x800 /* credit the compressor
	                                            * iff page was modified */
#define PMAP_OPTIONS_PROTECT_IMMEDIATE 0x1000   /* allow protections to be
	                                         * upgraded */
#define PMAP_OPTIONS_CLEAR_WRITE 0x2000
#define PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE 0x4000 /* Honor execute for translated processes */
#if defined(__arm__) || defined(__arm64__)
#define PMAP_OPTIONS_FF_LOCKED  0x8000
#define PMAP_OPTIONS_FF_WIRED   0x10000
#endif
#define PMAP_OPTIONS_XNU_USER_DEBUG 0x20000

/* Indicates that pmap_enter() or pmap_remove() is being called with preemption already disabled. */
#define PMAP_OPTIONS_NOPREEMPT  0x80000

#if CONFIG_SPTM
/* Requests pmap_disconnect() to reset the page frame type (only meaningful for SPTM systems) */
#define PMAP_OPTIONS_RETYPE 0x100000
#endif /* CONFIG_SPTM */

/* Note: value 0x40000 — defined out of numeric order relative to the flags above. */
#define PMAP_OPTIONS_MAP_TPRO 0x40000

#define PMAP_OPTIONS_RESERVED_MASK 0xFF000000   /* encoding space reserved for internal pmap use */
852 
#if     !defined(__LP64__)
/* Look up the physical address currently mapped at 'va' (32-bit kernels only). */
extern vm_offset_t      pmap_extract(pmap_t pmap,
    vm_map_offset_t va);
#endif
extern void             pmap_change_wiring(     /* Specify pageability */
	pmap_t          pmap,
	vm_map_offset_t va,
	boolean_t       wired);

/* LP64todo - switch to vm_map_offset_t when it grows */
extern void             pmap_remove(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e);

/* Like pmap_remove(), but accepts PMAP_OPTIONS_* flags (e.g. PMAP_OPTIONS_REMOVE). */
extern void             pmap_remove_options(    /* Remove mappings. */
	pmap_t          map,
	vm_map_offset_t s,
	vm_map_offset_t e,
	int             options);

/* Presumably fills physical page 'pa' with the 32-bit pattern 'fill' — confirm in implementation. */
extern void             fillPage(ppnum_t pa, unsigned int fill);

#if defined(__LP64__)
/* Pre-allocate the intermediate page table levels needed to map 'vaddr'. */
extern void pmap_pre_expand(pmap_t pmap, vm_map_offset_t vaddr);
/* As pmap_pre_expand(), for a large mapping; may fail, hence the return code. */
extern kern_return_t pmap_pre_expand_large(pmap_t pmap, vm_map_offset_t vaddr);
/* Return the page size in effect at 'vaddr' within 'map'. */
extern vm_size_t pmap_query_pagesize(pmap_t map, vm_map_offset_t vaddr);
#endif

/*
 * Report the number of resident bytes in [s, e); if 'compressed_bytes_p' is
 * non-NULL, also report the number of compressed bytes in that range.
 * NOTE(review): semantics inferred from the signature — confirm against the
 * implementation.
 */
mach_vm_size_t pmap_query_resident(pmap_t pmap,
    vm_map_offset_t s,
    vm_map_offset_t e,
    mach_vm_size_t *compressed_bytes_p);

/* Set/get whether the VM map backed by this pmap enforces code signing. */
extern void pmap_set_vm_map_cs_enforced(pmap_t pmap, bool new_value);
extern bool pmap_get_vm_map_cs_enforced(pmap_t pmap);

/* Inform the pmap layer that there is a JIT entry in this map. */
extern void pmap_set_jit_entitled(pmap_t pmap);

/* Ask the pmap layer if there is a JIT entry in this map. */
extern bool pmap_get_jit_entitled(pmap_t pmap);

/* Inform the pmap layer that the XO register is repurposed for this map */
extern void pmap_set_tpro(pmap_t pmap);

/* Ask the pmap layer if there is a TPRO entry in this map. */
extern bool pmap_get_tpro(pmap_t pmap);

/*
 * Tell the pmap layer what range within the nested region the VM intends to
 * use.
 */
extern void pmap_trim(pmap_t grand, pmap_t subord, addr64_t vstart, uint64_t size);

/* Ask whether 'pmap' is nested within another pmap. */
extern bool pmap_is_nested(pmap_t pmap);

/*
 * Dump page table contents into the specified buffer.  Returns KERN_INSUFFICIENT_BUFFER_SIZE
 * if insufficient space, KERN_NOT_SUPPORTED if unsupported in the current configuration.
 * This is expected to only be called from kernel debugger context,
 * so synchronization is not required.
 */

extern kern_return_t pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied);

/* Asks the pmap layer for number of bits used for VA address. */
extern uint32_t pmap_user_va_bits(pmap_t pmap);
extern uint32_t pmap_kernel_va_bits(void);

/*
 * Indicates if any special policy is applied to this protection by the pmap
 * layer.
 */
bool pmap_has_prot_policy(pmap_t pmap, bool translated_allow_execute, vm_prot_t prot);

/*
 * Causes the pmap to return any available pages that it can return cheaply to
 * the VM.
 */
uint64_t pmap_release_pages_fast(void);
934 
/*
 * Page disposition bits returned by pmap_query_page_info() in '*disp'.
 */
#define PMAP_QUERY_PAGE_PRESENT                 0x01
#define PMAP_QUERY_PAGE_REUSABLE                0x02
#define PMAP_QUERY_PAGE_INTERNAL                0x04
#define PMAP_QUERY_PAGE_ALTACCT                 0x08
#define PMAP_QUERY_PAGE_COMPRESSED              0x10
#define PMAP_QUERY_PAGE_COMPRESSED_ALTACCT      0x20
/* Report the disposition (PMAP_QUERY_PAGE_* bits) of the page mapped at 'va'. */
extern kern_return_t pmap_query_page_info(
	pmap_t          pmap,
	vm_map_offset_t va,
	int             *disp);

/* Ask whether the caller is currently executing inside the PPL. */
extern bool pmap_in_ppl(void);

/* Look up 'cdhash' in the static / loaded trust caches. */
extern uint32_t pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN]);
extern bool pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN]);

/**
 * Indicates whether the device supports register-level MMIO access control.
 *
 * @note Unlike the pmap-io-ranges mechanism, which enforces PPL-only register
 *       writability at page granularity, this mechanism allows specific registers
 *       on a read-mostly page to be written using a dedicated guarded mode trap
 *       without requiring a full PPL driver extension.
 *
 * @return True if the device supports register-level MMIO access control.
 */
extern bool pmap_has_iofilter_protected_write(void);

/**
 * Performs a write to the I/O register specified by addr on supported devices.
 *
 * @note On supported devices (determined by pmap_has_iofilter_protected_write()), this
 *       function goes over the sorted I/O filter entry table. If there is a hit, the
 *       write is performed from Guarded Mode. Otherwise, the write is performed from
 *       Normal Mode (kernel mode). Note that you can still hit an exception if the
 *       register is owned by PPL but not allowed by an io-filter-entry in the device tree.
 *
 * @note On unsupported devices, this function will panic.
 *
 * @param addr The address of the register.
 * @param value The value to be written.
 * @param width The width of the I/O register, supported values are 1, 2, 4 and 8.
 */
extern void pmap_iofilter_protected_write(vm_address_t addr, uint64_t value, uint64_t width);

/* Claim/release a page from the pmap's reserved PPL page pool. */
extern void *pmap_claim_reserved_ppl_page(void);
extern void pmap_free_reserved_ppl_page(void *kva);

/* Allocate/free/validate pmap ledgers through the pmap layer. */
extern void pmap_ledger_verify_size(size_t);
extern ledger_t pmap_ledger_alloc(void);
extern void pmap_ledger_free(ledger_t);

/* Ask whether physical page 'ppn' has been marked as bad RAM. */
extern bool pmap_is_bad_ram(ppnum_t ppn);

/* Ask whether physical page 'pn' is restricted (NOTE(review): restriction semantics not visible here — confirm). */
extern bool pmap_is_page_restricted(ppnum_t pn);

#if __arm64__
extern bool pmap_is_exotic(pmap_t pmap);
#else /* __arm64__ */
/* Non-arm64 configurations have no "exotic" pmaps. */
#define pmap_is_exotic(pmap) false
#endif /* __arm64__ */


/*
 * Returns a subset of pmap_cs non-default configuration,
 * e.g. loosening up of some restrictions through pmap_cs or amfi
 * boot-args. The return value is a bit field with possible bits
 * described below. If default, the function will return 0. Note that
 * this does not work the other way: 0 does not imply that pmap_cs
 * runs in default configuration, and only a small configuration
 * subset is returned by this function.
 *
 * Never assume the system is "secure" if this returns 0.
 */
extern int pmap_cs_configuration(void);
1010 
#if XNU_KERNEL_PRIVATE

/* Feature identifiers understood by pmap_supported_feature() (x86_64 only in this header). */
typedef enum {
	PMAP_FEAT_UEXEC = 1
} pmap_feature_flags_t;

#if defined(__x86_64__)

/* Ask whether 'pmap' supports the given optional feature. */
extern bool             pmap_supported_feature(pmap_t pmap, pmap_feature_flags_t feat);

#endif
#if defined(__arm64__)

/**
 * Check if a particular pmap is used for stage2 translations or not.
 */
extern bool
pmap_performs_stage2_translations(const pmap_t pmap);

#endif /* defined(__arm64__) */

/*
 * Look up a present mapping of 'vaddr' in the kernel pmap, returning its
 * physical page number.  NOTE(review): the out-parameters appear to report
 * the increment to the next page ('pvincr') and the physical address
 * ('pvphysaddr') — inferred from the names; confirm against the implementation.
 */
extern ppnum_t          kernel_pmap_present_mapping(uint64_t vaddr, uint64_t * pvincr, uintptr_t * pvphysaddr);

#endif /* XNU_KERNEL_PRIVATE */
1035 
#if CONFIG_SPTM
/*
 * The TrustedExecutionMonitor address space data structure is kept within the
 * pmap structure in order to provide a coherent API to the rest of the kernel
 * for working with code signing monitors.
 *
 * However, a lot of parts of the kernel don't have visibility into the pmap
 * data structure as they are opaque unless you're in the Mach portion of the
 * kernel. To alleviate this, we provide pmap APIs to the rest of the kernel.
 */
#include <TrustedExecutionMonitor/API.h>

/*
 * All pages allocated by TXM are also kept within the TXM VM object, which allows
 * tracking it for accounting and debugging purposes.
 */
extern vm_object_t txm_vm_object;

/**
 * Acquire the pointer of the kernel pmap being used for the system.
 */
extern pmap_t
pmap_txm_kernel_pmap(void);

/**
 * Acquire the TXM address space object stored within the pmap.
 */
extern TXMAddressSpace_t*
pmap_txm_addr_space(const pmap_t pmap);

/**
 * Set the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_addr_space(
	pmap_t pmap,
	TXMAddressSpace_t *txm_addr_space);

/**
 * Set the trust level of the TXM address space object within the pmap.
 */
extern void
pmap_txm_set_trust_level(
	pmap_t pmap,
	CSTrust_t trust_level);

/**
 * Get the trust level of the TXM address space object within the pmap.
 *
 * @note NOTE(review): the _kdp suffix and kern_return_t return (vs. a direct
 *       value) suggest this variant is safe for kernel-debugger context, like
 *       pmap_dump_page_tables() above — confirm.
 */
extern kern_return_t
pmap_txm_get_trust_level_kdp(
	pmap_t pmap,
	CSTrust_t *trust_level);

/**
 * Get the address range of the JIT region within the pmap, if any.
 */
kern_return_t
pmap_txm_get_jit_address_range_kdp(
	pmap_t pmap,
	uintptr_t *jit_region_start,
	uintptr_t *jit_region_end);

/**
 * Take a shared lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_shared_lock(pmap_t pmap);

/**
 * Release the shared lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_shared_lock(pmap_t pmap);

/**
 * Take an exclusive lock on the pmap in order to enforce safe concurrency for
 * an operation on the TXM address space object. Passing in NULL takes the lock
 * on the current pmap.
 */
extern void
pmap_txm_acquire_exclusive_lock(pmap_t pmap);

/**
 * Release the exclusive lock which was previously acquired for operations on
 * the TXM address space object. Passing in NULL releases the lock for the
 * current pmap.
 */
extern void
pmap_txm_release_exclusive_lock(pmap_t pmap);

/**
 * Transfer a page to the TXM_DEFAULT type after resolving its mapping from its
 * virtual to physical address.
 */
extern void
pmap_txm_transfer_page(const vm_address_t addr);

/**
 * Grab an available page from the VM free list, add it to the TXM VM object and
 * then transfer it to be owned by TXM.
 *
 * Returns the physical address of the page allocated.
 *
 * @note NOTE(review): the return type is vm_map_address_t although the comment
 *       says "physical address" — confirm which one callers actually receive.
 */
extern vm_map_address_t
pmap_txm_allocate_page(void);

#endif /* CONFIG_SPTM */
1148 
1149 
1150 #endif  /* KERNEL_PRIVATE */
1151 
1152 #endif  /* _VM_PMAP_H_ */
1153