/* xref: /xnu-12377.81.4/osfmk/arm64/sptm/pmap/pmap_pt_geometry.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796) */
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /**
29  * PMAP Page Table Geometry.
30  *
31  * This header file is used to store the types, and inline functions related to
32  * retrieving information about and parsing page table hierarchies.
33  *
34  * To prevent circular dependencies, this file shouldn't include any of the
35  * other internal osfmk/arm/pmap/ header files.
36  */
37 #ifndef _ARM_PMAP_PMAP_PT_GEOMETRY_H_
38 #define _ARM_PMAP_PMAP_PT_GEOMETRY_H_
39 
40 #include <stdint.h>
41 
42 #include <kern/debug.h>
43 #include <kern/locks.h>
44 #include <mach/vm_types.h>
45 #include <mach_assert.h>
46 
47 #include <arm64/proc_reg.h>
48 
49 /**
50  * arm64/sptm/pmap/pmap.h is safe to be included in this file since it shouldn't rely on any
51  * of the internal pmap header files (so no circular dependencies).
52  */
53 #include <arm64/sptm/pmap/pmap.h>
54 
55 /**
56  * Structure representing parameters of a single page table level. An array of
57  * these structures are used to represent the geometry for an entire page table
58  * hierarchy.
59  */
struct page_table_level_info {
	/* Bytes of VA spanned by a single entry at this level. */
	const uint64_t size;
	/* Offset mask within one entry's VA span (size - 1). */
	const uint64_t offmask;
	/* Bit position of this level's index field within a VA. */
	const uint64_t shift;
	/* Mask selecting the VA bits that form this level's table index. */
	const uint64_t index_mask;
	/* Descriptor bit(s) that mark an entry at this level as valid. */
	const uint64_t valid_mask;
	/* Descriptor bit(s) that encode the entry type (table vs. block). */
	const uint64_t type_mask;
	/* Value of the type field denoting a block mapping at this level. */
	const uint64_t type_block;
};
69 
/**
 * Operations that are dependent on the type of page table. This is useful, for
 * instance, when dealing with stage 1 vs stage 2 pmaps.
 */
struct page_table_ops {
	/* Allocate an address-space identifier for the pmap; returns success. */
	bool (*alloc_id)(pmap_t pmap);
	/* Release the pmap's address-space identifier. */
	void (*free_id)(pmap_t pmap);
	/* Start (without waiting for) a TLB flush of a VA range in the pmap. */
	void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only);
	/* Start (without waiting for) a TLB flush of the pmap's whole address space. */
	void (*flush_tlb_async)(pmap_t pmap);
	/* Translate a WIMG memory-attribute value into PTE bits for address `pa`. */
	pt_entry_t (*wimg_to_pte)(unsigned int wimg, pmap_paddr_t pa);
};
81 
/**
 * The Page Table Attribute structure is used for both parameterizing the
 * different possible page table geometries, but also for abstracting out the
 * differences between stage 1 and stage 2 page tables. This allows one set of
 * code to seamlessly handle the differences between various address space
 * layouts as well as stage 1 vs stage 2 page tables on the fly. See
 * doc/arm/arm_pmap.md for more details.
 *
 * Instead of accessing the fields in this structure directly, it is recommended
 * to use the page table attribute getter functions defined below.
 */
struct page_table_attr {
	/* Sizes and offsets for each level in the page table hierarchy. */
	const struct page_table_level_info * const pta_level_info;

	/* Operations that are dependent on the type of page table. */
	const struct page_table_ops * const pta_ops;

	/**
	 * The Access Permissions bits have different layouts within a page table
	 * entry depending on whether it's an entry for a stage 1 or stage 2 pmap.
	 *
	 * These fields describe the correct PTE bits to set to get the wanted
	 * permissions for the page tables described by this attribute structure.
	 */
	const uintptr_t ap_ro;   /* Kernel and user read-only. */
	const uintptr_t ap_rw;   /* Kernel and user read/write. */
	const uintptr_t ap_rona; /* Kernel read-only, no user access. */
	const uintptr_t ap_rwna; /* Kernel read/write, no user access. */
	const uintptr_t ap_xn;   /* Privileged and unprivileged execute-never. */
	const uintptr_t ap_x;    /* Privileged execute-never only. */

	/* The page table level at which the hierarchy begins. */
	const unsigned int pta_root_level;

	/* The page table level at which the commpage is nested into an address space. */
	const unsigned int pta_commpage_level;

	/* The last level in the page table hierarchy (ARM supports up to four levels). */
	const unsigned int pta_max_level;


	/**
	 * Value to set the Translation Control Register (TCR) to in order to inform
	 * the hardware of this page table geometry.
	 */
	const uint64_t pta_tcr_value;

	/* Page Table/Granule Size. */
	const uint64_t pta_page_size;

	/**
	 * How many bits to shift "1" by to get the page table size. Alternatively,
	 * could also be thought of as how many bits make up the page offset in a
	 * virtual address.
	 */
	const uint64_t pta_page_shift;

	/**
	 * SPTM page table geometry index.
	 */
	const uint8_t geometry_id;

	/**
	 * Mask of significant address bits. This is the mask needed to address the
	 * virtual page number portion of the VA.
	 */
	const uint64_t pta_va_valid_mask;
};

typedef struct page_table_attr pt_attr_t;

/* The default page table attributes for a system. */
extern const struct page_table_attr * const native_pt_attr;
156 
/**
 * Macros for getting pmap attributes/operations; not functions for const
 * propagation.
 */
#if ARM_PARAMETERIZED_PMAP

/* The page table attributes are linked to the pmap */
#define pmap_get_pt_attr(pmap) ((pmap)->pmap_pt_attr)
#define pmap_get_pt_ops(pmap) ((pmap)->pmap_pt_attr->pta_ops)

#else /* ARM_PARAMETERIZED_PMAP */

/* The page table attributes are fixed (to allow for const propagation) */
#define pmap_get_pt_attr(pmap) (native_pt_attr)
#define pmap_get_pt_ops(pmap) (&native_pt_ops)

#endif /* ARM_PARAMETERIZED_PMAP */

/* Defines representing a level in a page table hierarchy. */
#define PMAP_TT_L0_LEVEL 0x0
#define PMAP_TT_L1_LEVEL 0x1
#define PMAP_TT_L2_LEVEL 0x2
#define PMAP_TT_L3_LEVEL 0x3
180 
181 /**
182  * Inline functions exported for usage by other pmap modules.
183  *
184  * In an effort to not cause any performance regressions while breaking up the
185  * pmap, I'm keeping all functions originally marked as "static inline", as
186  * inline and moving them into header files to be shared across the pmap
187  * modules. In reality, many of these functions probably don't need to be inline
188  * and can be moved back into a .c file.
189  *
190  * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
191  */
192 
193 /**
194  * Keep the following in mind when looking at the available attribute getters:
195  *
196  * We tend to use standard terms to describe various levels in a page table
197  * hierarchy. The "root" level is the top of a hierarchy. The root page table is
198  * the one that will programmed into the Translation Table Base Register (TTBR)
199  * to inform the hardware of where to begin when performing page table walks.
200  * The "twig" level is always one up from the last level, and the "leaf" level
201  * is the last page table level in a hierarchy. The leaf page tables always
202  * contain block entries, but the higher levels can contain either table or
203  * block entries.
204  *
205  * ARM supports up to four levels of page tables. The levels start at L0 and
206  * increase to L3 the deeper into a hierarchy you get, although L0 isn't
207  * necessarily always the root level. For example, in a four-level hierarchy,
208  * the root would be L0, the twig would be L2, and the leaf would be L3. But for
209  * a three-level hierarchy, the root would be L1, the twig would be L2, and the
210  * leaf would be L3.
211  */
212 /* Page size getter. */
213 static inline uint64_t
pt_attr_page_size(const pt_attr_t * const pt_attr)214 pt_attr_page_size(const pt_attr_t * const pt_attr)
215 {
216 	return pt_attr->pta_page_size;
217 }
218 
219 /**
220  * Return the size of the virtual address space covered by a single TTE at a
221  * specified level in the hierarchy.
222  */
223 __unused static inline uint64_t
pt_attr_ln_size(const pt_attr_t * const pt_attr,unsigned int level)224 pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level)
225 {
226 	return pt_attr->pta_level_info[level].size;
227 }
228 
229 /**
230  * Return the page descriptor shift for a specified level in the hierarchy. This
231  * shift value can be used to get the index into a page table at this level in
232  * the hierarchy from a given virtual address.
233  */
234 __unused static inline uint64_t
pt_attr_ln_shift(const pt_attr_t * const pt_attr,unsigned int level)235 pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level)
236 {
237 	return pt_attr->pta_level_info[level].shift;
238 }
239 
240 /**
241  * Return a mask of the offset for a specified level in the hierarchy.
242  *
243  * This should be equivalent to the value returned by pt_attr_ln_size() - 1.
244  */
245 static inline uint64_t
pt_attr_ln_offmask(const pt_attr_t * const pt_attr,unsigned int level)246 pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level)
247 {
248 	return pt_attr->pta_level_info[level].offmask;
249 }
250 
251 /**
252  * On ARMv7 systems, the leaf page table size (1KB) is smaller than the page
253  * size (4KB). To simplify our code, leaf tables are operated on in bundles of
254  * four, so that four leaf page tables can be allocated with a single page.
255  * Because of that, each page of leaf tables takes up four root/twig entries.
256  *
257  * This function returns the offset mask for a given level with that taken into
258  * consideration. On ARMv8 systems, the granule size is identical to the page
259  * size so this doesn't need to be taken into account.
260  *
261  */
262 __unused static inline uint64_t
pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr,unsigned int level)263 pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr, unsigned int level)
264 {
265 	return pt_attr_ln_offmask(pt_attr, level);
266 }
267 
268 /**
269  * Return the mask for getting a page table index out of a virtual address for a
270  * specified level in the hierarchy. This can be combined with the value
271  * returned by pt_attr_ln_shift() to get the index into a page table.
272  */
273 __unused static inline uint64_t
pt_attr_ln_index_mask(const pt_attr_t * const pt_attr,unsigned int level)274 pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level)
275 {
276 	return pt_attr->pta_level_info[level].index_mask;
277 }
278 
279 /**
280  * Return the second to last page table level.
281  */
282 static inline unsigned int
pt_attr_twig_level(const pt_attr_t * const pt_attr)283 pt_attr_twig_level(const pt_attr_t * const pt_attr)
284 {
285 	return pt_attr->pta_max_level - 1;
286 }
287 
288 /**
289  * Return the first page table level. This is what will be programmed into the
290  * Translation Table Base Register (TTBR) to inform the hardware of where to
291  * begin page table walks.
292  */
293 static inline unsigned int
pt_attr_root_level(const pt_attr_t * const pt_attr)294 pt_attr_root_level(const pt_attr_t * const pt_attr)
295 {
296 	return pt_attr->pta_root_level;
297 }
298 
299 /**
300  * Return the level at which to nest the commpage pmap into userspace pmaps.
301  * Since the commpage is shared across all userspace address maps, memory is
302  * saved by sharing the commpage page tables with every userspace pmap. The
303  * level at which to nest the commpage is dependent on the page table geometry.
304  *
305  * Typically this is L1 for 4KB page tables, and L2 for 16KB page tables. In
306  * this way, the commpage's L2/L3 page tables are reused in every 4KB task, and
307  * the L3 page table is reused in every 16KB task.
308  */
309 static inline unsigned int
pt_attr_commpage_level(const pt_attr_t * const pt_attr)310 pt_attr_commpage_level(const pt_attr_t * const pt_attr)
311 {
312 	return pt_attr->pta_commpage_level;
313 }
314 
315 /**
316  * Return the size of the virtual address space covered by a single PTE at the
317  * leaf level.
318  */
319 static __unused inline uint64_t
pt_attr_leaf_size(const pt_attr_t * const pt_attr)320 pt_attr_leaf_size(const pt_attr_t * const pt_attr)
321 {
322 	return pt_attr->pta_level_info[pt_attr->pta_max_level].size;
323 }
324 
325 /**
326  * Return a mask of the offset for a leaf table.
327  *
328  * This should be equivalent to the value returned by pt_attr_leaf_size() - 1.
329  */
330 static __unused inline uint64_t
pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)331 pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)
332 {
333 	return pt_attr->pta_level_info[pt_attr->pta_max_level].offmask;
334 }
335 
336 /**
337  * Return the page descriptor shift for a leaf table entry. This shift value can
338  * be used to get the index into a leaf page table from a given virtual address.
339  */
340 static inline uint64_t
pt_attr_leaf_shift(const pt_attr_t * const pt_attr)341 pt_attr_leaf_shift(const pt_attr_t * const pt_attr)
342 {
343 	return pt_attr->pta_level_info[pt_attr->pta_max_level].shift;
344 }
345 
346 /**
347  * Return the mask for getting a leaf table index out of a virtual address. This
348  * can be combined with the value returned by pt_attr_leaf_shift() to get the
349  * index into a leaf table.
350  */
351 static __unused inline uint64_t
pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)352 pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)
353 {
354 	return pt_attr->pta_level_info[pt_attr->pta_max_level].index_mask;
355 }
356 
357 /**
358  * Return the size of the virtual address space covered by a single TTE at the
359  * twig level.
360  */
361 static inline uint64_t
pt_attr_twig_size(const pt_attr_t * const pt_attr)362 pt_attr_twig_size(const pt_attr_t * const pt_attr)
363 {
364 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].size;
365 }
366 
367 /**
368  * Return a mask of the offset for a twig table.
369  *
370  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
371  */
372 static inline uint64_t
pt_attr_twig_offmask(const pt_attr_t * const pt_attr)373 pt_attr_twig_offmask(const pt_attr_t * const pt_attr)
374 {
375 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].offmask;
376 }
377 
378 /**
379  * Return the page descriptor shift for a twig table entry. This shift value can
380  * be used to get the index into a twig page table from a given virtual address.
381  */
382 static inline uint64_t
pt_attr_twig_shift(const pt_attr_t * const pt_attr)383 pt_attr_twig_shift(const pt_attr_t * const pt_attr)
384 {
385 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].shift;
386 }
387 
388 /**
389  * Return the mask for getting a twig table index out of a virtual address. This
390  * can be combined with the value returned by pt_attr_twig_shift() to get the
391  * index into a twig table.
392  */
393 static __unused inline uint64_t
pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)394 pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)
395 {
396 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].index_mask;
397 }
398 
399 /**
400  * Return the amount of memory that a leaf table takes up. This is equivalent
401  * to the amount of virtual address space covered by a single twig TTE.
402  */
403 static inline uint64_t
pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)404 pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)
405 {
406 	return pt_attr_twig_size(pt_attr);
407 }
408 
409 /**
410  * Return the offset mask for the memory used by a leaf page table.
411  *
412  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
413  */
414 static inline uint64_t
pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)415 pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)
416 {
417 	return pt_attr_twig_offmask(pt_attr);
418 }
419 
420 /**
421  * Return the Access Permissions bits required to specify User and Kernel
422  * Read/Write permissions on a PTE in this type of page table hierarchy (stage 1
423  * vs stage 2).
424  */
425 static inline uintptr_t
pt_attr_leaf_rw(const pt_attr_t * const pt_attr)426 pt_attr_leaf_rw(const pt_attr_t * const pt_attr)
427 {
428 	return pt_attr->ap_rw;
429 }
430 
431 /**
432  * Return the Access Permissions bits required to specify User and Kernel
433  * Read-Only permissions on a PTE in this type of page table hierarchy (stage 1
434  * vs stage 2).
435  */
436 static inline uintptr_t
pt_attr_leaf_ro(const pt_attr_t * const pt_attr)437 pt_attr_leaf_ro(const pt_attr_t * const pt_attr)
438 {
439 	return pt_attr->ap_ro;
440 }
441 
442 /**
443  * Return the Access Permissions bits required to specify just Kernel Read-Only
444  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
445  * 2).
446  */
447 static inline uintptr_t
pt_attr_leaf_rona(const pt_attr_t * const pt_attr)448 pt_attr_leaf_rona(const pt_attr_t * const pt_attr)
449 {
450 	return pt_attr->ap_rona;
451 }
452 
453 /**
454  * Return the Access Permissions bits required to specify just Kernel Read/Write
455  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
456  * 2).
457  */
458 static inline uintptr_t
pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)459 pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)
460 {
461 	return pt_attr->ap_rwna;
462 }
463 
464 /**
465  * Return the mask of the page table entry bits required to set both the
466  * privileged and unprivileged execute never bits.
467  */
468 static inline uintptr_t
pt_attr_leaf_xn(const pt_attr_t * const pt_attr)469 pt_attr_leaf_xn(const pt_attr_t * const pt_attr)
470 {
471 	return pt_attr->ap_xn;
472 }
473 
474 /**
475  * Return the mask of the page table entry bits required to set just the
476  * privileged execute never bit.
477  */
478 static inline uintptr_t
pt_attr_leaf_x(const pt_attr_t * const pt_attr)479 pt_attr_leaf_x(const pt_attr_t * const pt_attr)
480 {
481 	return pt_attr->ap_x;
482 }
483 
484 
485 /**
486  * Return the last level in the page table hierarchy.
487  */
488 static inline unsigned int
pt_attr_leaf_level(const pt_attr_t * const pt_attr)489 pt_attr_leaf_level(const pt_attr_t * const pt_attr)
490 {
491 	return pt_attr_twig_level(pt_attr) + 1;
492 }
493 
494 /* Significant address bits in PTE */
495 static inline uint64_t
pt_attr_va_valid_mask(const pt_attr_t * const pt_attr)496 pt_attr_va_valid_mask(const pt_attr_t * const pt_attr)
497 {
498 	return pt_attr->pta_va_valid_mask;
499 }
500 
501 /**
502  * Return the index into a specific level of page table for a given virtual
503  * address.
504  *
505  * @param pt_attr Page table attribute structure describing the hierarchy.
506  * @param addr The virtual address to get the index from.
507  * @param pt_level The page table whose index should be returned.
508  */
509 static inline unsigned int
ttn_index(const pt_attr_t * const pt_attr,vm_map_address_t addr,unsigned int pt_level)510 ttn_index(const pt_attr_t * const pt_attr, vm_map_address_t addr, unsigned int pt_level)
511 {
512 	const uint64_t addr_masked = addr & pt_attr_va_valid_mask(pt_attr);
513 	const uint64_t index_unshifted = addr_masked & pt_attr_ln_index_mask(pt_attr, pt_level);
514 	return (unsigned int)(index_unshifted >> pt_attr_ln_shift(pt_attr, pt_level));
515 }
516 
/**
 * Return the index into a twig page table for a given virtual address.
 *
 * @note Hard-codes L2 as the twig level; this matches pt_attr_twig_level() for
 *       the supported arm64 geometries (3- or 4-level hierarchies whose leaf
 *       is L3) — presumably intentional; verify if new geometries are added.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
tte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L2_LEVEL);
}
528 
/**
 * Return the index into a leaf page table for a given virtual address.
 *
 * @note Hard-codes L3 as the leaf level; this matches pta_max_level for the
 *       supported arm64 geometries.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
pte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L3_LEVEL);
}
540 
541 /**
542  * Return true if a leaf-level PTE is valid.
543  *
544  * @note This will NOT work on non-leaf-level entries. Please use tte_is_valid()
545  *       instead.
546  */
547 static inline bool
pte_is_valid(pt_entry_t pte)548 pte_is_valid(pt_entry_t pte)
549 {
550 	return (pte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_VALID;
551 }
552 
553 /**
554  * Return true if a non-leaf-level TTE is valid and typed as a table.
555  *
556  * @note This will NOT work on leaf-level entries. Please use pte_is_valid()
557  *       instead.
558  *
559  * @note This will return false if the TTE represents a non-leaf-level block
560  *       mapping (instead of a table mapping).
561  */
562 static inline bool
tte_is_valid_table(tt_entry_t tte)563 tte_is_valid_table(tt_entry_t tte)
564 {
565 	return (tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID);
566 }
567 
568 /**
569  * Return true if a non-leaf-level TTE is valid and typed as a block mapping.
570  *
571  * @note This will NOT work on leaf-level entries. Please use pte_is_valid()
572  *       instead.
573  *
574  * @note This will return false if the TTE represents a non-leaf-level table
575  *       mapping (instead of a block mapping).
576  */
577 static inline bool
tte_is_valid_block(tt_entry_t tte)578 tte_is_valid_block(tt_entry_t tte)
579 {
580 	return (tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID);
581 }
582 
583 /**
584  * Return true if a non-leaf-level TTE is typed as a table (regardless of
585  * validity).
586  *
587  * @note This will NOT work on leaf-level entries.
588  */
589 static inline bool
tte_is_table(tt_entry_t tte)590 tte_is_table(tt_entry_t tte)
591 {
592 	return (tte & (ARM_TTE_TYPE_MASK)) == (ARM_TTE_TYPE_TABLE);
593 }
594 
595 /**
596  * Return true if a non-leaf-level TTE is typed as a block mapping (regardless
597  * of validity).
598  *
599  * @note This will NOT work on leaf-level entries.
600  */
601 static inline bool
tte_is_block(tt_entry_t tte)602 tte_is_block(tt_entry_t tte)
603 {
604 	return (tte & (ARM_TTE_TYPE_MASK)) == (ARM_TTE_TYPE_BLOCK);
605 }
606 
/**
 * Given an address and a map, compute the address of the table entry at the
 * specified page table level. If the address is invalid with respect to the map
 * then TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param target_level The page table level at which to stop parsing the
 *                     hierarchy at.
 * @param addr The virtual address to calculate the table indices off of.
 *
 * @return Pointer (KVA) to the entry covering `addr` at `target_level`, or
 *         TT_ENTRY_NULL if `addr` is out of range for the pmap or an
 *         intermediate table is not present.
 */
static inline tt_entry_t *
pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
{
	tt_entry_t *table_ttep = TT_ENTRY_NULL;
	tt_entry_t *ttep = TT_ENTRY_NULL;
	tt_entry_t tte = ARM_TTE_EMPTY;
	unsigned int cur_level;

	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);

	/* Addresses outside the pmap's VA bounds have no table entry. */
	if (__improbable((addr < pmap->min) || (addr >= pmap->max))) {
		return TT_ENTRY_NULL;
	}
	/* Start parsing at the root page table. */
	table_ttep = pmap->tte;

	assert(target_level <= pt_attr->pta_max_level);

	for (cur_level = pt_attr->pta_root_level; cur_level <= target_level; cur_level++) {
		/* Locate the entry within the current table that covers `addr`. */
		ttep = &table_ttep[ttn_index(pt_attr, addr, cur_level)];

		/* Reached the requested level; return without dereferencing. */
		if (cur_level == target_level) {
			break;
		}

		tte = *ttep;

#if MACH_ASSERT
		/*
		 * A valid block entry above the target level cannot be descended
		 * through; treating its output address as a table would be a bug.
		 */
		if (tte_is_valid_block(tte)) {
			panic("%s: Attempt to demote L%u block, tte=0x%llx, pmap=%p, target_level=%u, addr=%p",
			    __func__, cur_level, tte, pmap, target_level, (void*)addr);
		}
#endif
		/* Bail out if the next-level table was never installed. */
		if (!tte_is_valid_table(tte)) {
			return TT_ENTRY_NULL;
		}

		/* Descend: convert the next table's physical address into a KVA. */
		table_ttep = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
	}

	return ttep;
}
659 
/**
 * Given an address and a map, compute the address of the level 1 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt1e(pmap_t pmap, vm_map_address_t addr)
{
	/* Thin wrapper around pmap_ttne() fixed at L1. */
	return pmap_ttne(pmap, PMAP_TT_L1_LEVEL, addr);
}
673 
/**
 * Given an address and a map, compute the address of the level 2 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt2e(pmap_t pmap, vm_map_address_t addr)
{
	/* Thin wrapper around pmap_ttne() fixed at L2. */
	return pmap_ttne(pmap, PMAP_TT_L2_LEVEL, addr);
}
687 
/**
 * Given an address and a map, compute the address of the level 3 page table
 * entry. If the address is invalid with respect to the map then PT_ENTRY_NULL
 * is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_tt3e(pmap_t pmap, vm_map_address_t addr)
{
	/* L3 entries are PTEs, hence the cast from the generic TTE pointer. */
	return (pt_entry_t*)pmap_ttne(pmap, PMAP_TT_L3_LEVEL, addr);
}
701 
/**
 * Given an address and a map, compute the address of the twig translation table
 * entry. If the address is invalid with respect to the map then TT_ENTRY_NULL
 * is returned.
 *
 * @note Assumes the twig level is L2 (see pmap_tt2e()), which holds for the
 *       supported arm64 geometries.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt2e(pmap, addr);
}
715 
/**
 * Given an address and a map, compute the address of the leaf page table entry.
 * If the address is invalid with respect to the map then PT_ENTRY_NULL is
 * returned.
 *
 * @note Assumes the leaf level is L3 (see pmap_tt3e()), which holds for the
 *       supported arm64 geometries.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt3e(pmap, addr);
}
729 
730 /**
731  * Given a virtual address and a page hierarchy level, align the address such that
732  * it targets a TTE index that is page ratio-aligned. Normally used prior to
733  * calling SPTM table operations (map/unmap/nest/unnest), since the SPTM enforces
734  * this requirement.
735  *
736  * @param pt_attr Page table attribute structure associated with the address space at hand.
737  * @param level Page table level for which to align the address.
738  * @param va Virtual address to align.
739  *
740  * @return Aligned virtual address.
741  */
742 static inline vm_map_address_t
pt_attr_align_va(const pt_attr_t * const pt_attr,unsigned int level,vm_map_address_t va)743 pt_attr_align_va(const pt_attr_t * const pt_attr, unsigned int level, vm_map_address_t va)
744 {
745 	const uint64_t page_ratio = PAGE_SIZE / pt_attr_page_size(pt_attr);
746 	const uint64_t ln_shift = pt_attr_ln_shift(pt_attr, level);
747 
748 	return va & ~((page_ratio - 1) << ln_shift);
749 }
750 #endif /* _ARM_PMAP_PMAP_PT_GEOMETRY_H_ */
751