xref: /xnu-8796.141.3/osfmk/arm/pmap/pmap_pt_geometry.h (revision 1b191cb58250d0705d8a51287127505aa4bc0789)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /**
29  * PMAP Page Table Geometry.
30  *
31  * This header file is used to store the types, and inline functions related to
32  * retrieving information about and parsing page table hierarchies.
33  *
34  * To prevent circular dependencies, this file shouldn't include any of the
35  * other internal osfmk/arm/pmap/ header files.
36  */
37 #ifndef _ARM_PMAP_PMAP_PT_GEOMETRY_H_
38 #define _ARM_PMAP_PMAP_PT_GEOMETRY_H_
39 
40 #include <stdint.h>
41 
42 #include <kern/debug.h>
43 #include <kern/locks.h>
44 #include <mach/vm_types.h>
45 #include <mach_assert.h>
46 
47 #include <arm64/proc_reg.h>
48 
49 /**
50  * arm/pmap.h is safe to be included in this file since it shouldn't rely on any
51  * of the internal pmap header files (so no circular dependencies).
52  */
53 #include <arm/pmap.h>
54 
/**
 * Structure representing parameters of a single page table level. An array of
 * these structures are used to represent the geometry for an entire page table
 * hierarchy.
 *
 * NOTE(review): the valid/type field semantics are inferred from the names and
 * from the ARM_TTE_* checks performed in pmap_ttne(); confirm against the
 * geometry tables that instantiate this structure.
 */
struct page_table_level_info {
	/* Size, in bytes, of the VA region mapped by one entry at this level. */
	const uint64_t size;

	/* Mask of the VA offset within one entry's region; equals size - 1. */
	const uint64_t offmask;

	/* Right-shift applied to a VA to produce this level's table index. */
	const uint64_t shift;

	/* Mask selecting the VA bits that form this level's table index. */
	const uint64_t index_mask;

	/* TTE bit(s) marking an entry at this level as valid (e.g. ARM_TTE_VALID). */
	const uint64_t valid_mask;

	/* Mask covering the TTE type field (table vs. block) at this level. */
	const uint64_t type_mask;

	/* Value of the type field denoting a block (not table) entry. */
	const uint64_t type_block;
};
69 
/**
 * Operations that are dependent on the type of page table. This is useful, for
 * instance, when dealing with stage 1 vs stage 2 pmaps.
 *
 * NOTE(review): per-op descriptions below are inferred from names and
 * signatures; confirm against the stage 1 and stage 2 implementations.
 */
struct page_table_ops {
	/* Allocate this pmap's address-space identifier; returns true on success. */
	bool (*alloc_id)(pmap_t pmap);

	/* Release the identifier previously obtained through alloc_id(). */
	void (*free_id)(pmap_t pmap);

	/* Kick off an asynchronous TLB flush of [va, va + length) for this pmap. */
	void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only);

	/* Kick off an asynchronous TLB flush of all of this pmap's mappings. */
	void (*flush_tlb_async)(pmap_t pmap);

	/* Translate WIMG (cacheability/memory-type) bits into PTE bits for pa. */
	pt_entry_t (*wimg_to_pte)(unsigned int wimg, pmap_paddr_t pa);
};
81 
/**
 * The Page Table Attribute structure is used for both parameterizing the
 * different possible page table geometries, but also for abstracting out the
 * differences between stage 1 and stage 2 page tables. This allows one set of
 * code to seamlessly handle the differences between various address space
 * layouts as well as stage 1 vs stage 2 page tables on the fly. See
 * doc/arm_pmap.md for more details.
 *
 * Instead of accessing the fields in this structure directly, it is recommended
 * to use the page table attribute getter functions defined below.
 */
struct page_table_attr {
	/* Sizes and offsets for each level in the page table hierarchy. */
	const struct page_table_level_info * const pta_level_info;

	/* Operations that are dependent on the type of page table. */
	const struct page_table_ops * const pta_ops;

	/**
	 * The Access Permissions bits have different layouts within a page table
	 * entry depending on whether it's an entry for a stage 1 or stage 2 pmap.
	 *
	 * These fields describe the correct PTE bits to set to get the wanted
	 * permissions for the page tables described by this attribute structure.
	 */
	const uintptr_t ap_ro;   /* User and kernel read-only. */
	const uintptr_t ap_rw;   /* User and kernel read/write. */
	const uintptr_t ap_rona; /* Kernel read-only, no user access. */
	const uintptr_t ap_rwna; /* Kernel read/write, no user access. */
	const uintptr_t ap_xn;   /* Both privileged and unprivileged execute-never. */
	const uintptr_t ap_x;    /* Just the privileged execute-never bit. */

	/* The page table level at which the hierarchy begins. */
	const unsigned int pta_root_level;

	/* The page table level at which the commpage is nested into an address space. */
	const unsigned int pta_commpage_level;

	/* The last level in the page table hierarchy (ARM supports up to four levels). */
	const unsigned int pta_max_level;


	/**
	 * Value to set the Translation Control Register (TCR) to in order to inform
	 * the hardware of this page table geometry.
	 */
	const uint64_t pta_tcr_value;

	/* Page Table/Granule Size. */
	const uint64_t pta_page_size;

	/**
	 * How many bits to shift "1" by to get the page table size. Alternatively,
	 * could also be thought of as how many bits make up the page offset in a
	 * virtual address.
	 */
	const uint64_t pta_page_shift;
};
140 
/* Shorthand for the page table attribute structure used throughout the pmap. */
typedef struct page_table_attr pt_attr_t;

/* The default page table attributes for a system. */
extern const struct page_table_attr * const native_pt_attr;
/* The default (native) page table operations for a system. */
extern const struct page_table_ops native_pt_ops;
146 
/**
 * Macros for getting pmap attributes/operations; not functions for const
 * propagation.
 */
#if ARM_PARAMETERIZED_PMAP

/* The page table attributes are linked to the pmap */
#define pmap_get_pt_attr(pmap) ((pmap)->pmap_pt_attr)
#define pmap_get_pt_ops(pmap) ((pmap)->pmap_pt_attr->pta_ops)

#else /* ARM_PARAMETERIZED_PMAP */

/* The page table attributes are fixed (to allow for const propagation) */
#define pmap_get_pt_attr(pmap) (native_pt_attr)
#define pmap_get_pt_ops(pmap) (&native_pt_ops)

#endif /* ARM_PARAMETERIZED_PMAP */

/* Defines representing a level in a page table hierarchy. */
#define PMAP_TT_L0_LEVEL 0x0
#define PMAP_TT_L1_LEVEL 0x1
#define PMAP_TT_L2_LEVEL 0x2
#define PMAP_TT_L3_LEVEL 0x3
170 
171 /**
172  * Inline functions exported for usage by other pmap modules.
173  *
174  * In an effort to not cause any performance regressions while breaking up the
175  * pmap, I'm keeping all functions originally marked as "static inline", as
176  * inline and moving them into header files to be shared across the pmap
177  * modules. In reality, many of these functions probably don't need to be inline
178  * and can be moved back into a .c file.
179  *
180  * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
181  */
182 
183 /**
184  * Keep the following in mind when looking at the available attribute getters:
185  *
186  * We tend to use standard terms to describe various levels in a page table
187  * hierarchy. The "root" level is the top of a hierarchy. The root page table is
188  * the one that will programmed into the Translation Table Base Register (TTBR)
189  * to inform the hardware of where to begin when performing page table walks.
190  * The "twig" level is always one up from the last level, and the "leaf" level
191  * is the last page table level in a hierarchy. The leaf page tables always
192  * contain block entries, but the higher levels can contain either table or
193  * block entries.
194  *
195  * ARM supports up to four levels of page tables. The levels start at L0 and
196  * increase to L3 the deeper into a hierarchy you get, although L0 isn't
197  * necessarily always the root level. For example, in a four-level hierarchy,
198  * the root would be L0, the twig would be L2, and the leaf would be L3. But for
199  * a three-level hierarchy, the root would be L1, the twig would be L2, and the
200  * leaf would be L3.
201  */
202 /* Page size getter. */
203 static inline uint64_t
pt_attr_page_size(const pt_attr_t * const pt_attr)204 pt_attr_page_size(const pt_attr_t * const pt_attr)
205 {
206 	return pt_attr->pta_page_size;
207 }
208 
209 /**
210  * Return the size of the virtual address space covered by a single TTE at a
211  * specified level in the hierarchy.
212  */
213 __unused static inline uint64_t
pt_attr_ln_size(const pt_attr_t * const pt_attr,unsigned int level)214 pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level)
215 {
216 	return pt_attr->pta_level_info[level].size;
217 }
218 
219 /**
220  * Return the page descriptor shift for a specified level in the hierarchy. This
221  * shift value can be used to get the index into a page table at this level in
222  * the hierarchy from a given virtual address.
223  */
224 __unused static inline uint64_t
pt_attr_ln_shift(const pt_attr_t * const pt_attr,unsigned int level)225 pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level)
226 {
227 	return pt_attr->pta_level_info[level].shift;
228 }
229 
230 /**
231  * Return a mask of the offset for a specified level in the hierarchy.
232  *
233  * This should be equivalent to the value returned by pt_attr_ln_size() - 1.
234  */
235 static inline uint64_t
pt_attr_ln_offmask(const pt_attr_t * const pt_attr,unsigned int level)236 pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level)
237 {
238 	return pt_attr->pta_level_info[level].offmask;
239 }
240 
241 /**
242  * Return the mask for getting a page table index out of a virtual address for a
243  * specified level in the hierarchy. This can be combined with the value
244  * returned by pt_attr_ln_shift() to get the index into a page table.
245  */
246 __unused static inline uint64_t
pt_attr_ln_index_mask(const pt_attr_t * const pt_attr,unsigned int level)247 pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level)
248 {
249 	return pt_attr->pta_level_info[level].index_mask;
250 }
251 
252 /**
253  * Return the second to last page table level.
254  */
255 static inline unsigned int
pt_attr_twig_level(const pt_attr_t * const pt_attr)256 pt_attr_twig_level(const pt_attr_t * const pt_attr)
257 {
258 	return pt_attr->pta_max_level - 1;
259 }
260 
261 /**
262  * Return the first page table level. This is what will be programmed into the
263  * Translation Table Base Register (TTBR) to inform the hardware of where to
264  * begin page table walks.
265  */
266 static inline unsigned int
pt_attr_root_level(const pt_attr_t * const pt_attr)267 pt_attr_root_level(const pt_attr_t * const pt_attr)
268 {
269 	return pt_attr->pta_root_level;
270 }
271 
272 /**
273  * Return the level at which to nest the commpage pmap into userspace pmaps.
274  * Since the commpage is shared across all userspace address maps, memory is
275  * saved by sharing the commpage page tables with every userspace pmap. The
276  * level at which to nest the commpage is dependent on the page table geometry.
277  *
278  * Typically this is L1 for 4KB page tables, and L2 for 16KB page tables. In
279  * this way, the commpage's L2/L3 page tables are reused in every 4KB task, and
280  * the L3 page table is reused in every 16KB task.
281  */
282 static inline unsigned int
pt_attr_commpage_level(const pt_attr_t * const pt_attr)283 pt_attr_commpage_level(const pt_attr_t * const pt_attr)
284 {
285 	return pt_attr->pta_commpage_level;
286 }
287 
288 /**
289  * Return the size of the virtual address space covered by a single PTE at the
290  * leaf level.
291  */
292 static __unused inline uint64_t
pt_attr_leaf_size(const pt_attr_t * const pt_attr)293 pt_attr_leaf_size(const pt_attr_t * const pt_attr)
294 {
295 	return pt_attr->pta_level_info[pt_attr->pta_max_level].size;
296 }
297 
298 /**
299  * Return a mask of the offset for a leaf table.
300  *
301  * This should be equivalent to the value returned by pt_attr_leaf_size() - 1.
302  */
303 static __unused inline uint64_t
pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)304 pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)
305 {
306 	return pt_attr->pta_level_info[pt_attr->pta_max_level].offmask;
307 }
308 
309 /**
310  * Return the page descriptor shift for a leaf table entry. This shift value can
311  * be used to get the index into a leaf page table from a given virtual address.
312  */
313 static inline uint64_t
pt_attr_leaf_shift(const pt_attr_t * const pt_attr)314 pt_attr_leaf_shift(const pt_attr_t * const pt_attr)
315 {
316 	return pt_attr->pta_level_info[pt_attr->pta_max_level].shift;
317 }
318 
319 /**
320  * Return the mask for getting a leaf table index out of a virtual address. This
321  * can be combined with the value returned by pt_attr_leaf_shift() to get the
322  * index into a leaf table.
323  */
324 static __unused inline uint64_t
pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)325 pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)
326 {
327 	return pt_attr->pta_level_info[pt_attr->pta_max_level].index_mask;
328 }
329 
330 /**
331  * Return the size of the virtual address space covered by a single TTE at the
332  * twig level.
333  */
334 static inline uint64_t
pt_attr_twig_size(const pt_attr_t * const pt_attr)335 pt_attr_twig_size(const pt_attr_t * const pt_attr)
336 {
337 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].size;
338 }
339 
340 /**
341  * Return a mask of the offset for a twig table.
342  *
343  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
344  */
345 static inline uint64_t
pt_attr_twig_offmask(const pt_attr_t * const pt_attr)346 pt_attr_twig_offmask(const pt_attr_t * const pt_attr)
347 {
348 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].offmask;
349 }
350 
351 /**
352  * Return the page descriptor shift for a twig table entry. This shift value can
353  * be used to get the index into a twig page table from a given virtual address.
354  */
355 static inline uint64_t
pt_attr_twig_shift(const pt_attr_t * const pt_attr)356 pt_attr_twig_shift(const pt_attr_t * const pt_attr)
357 {
358 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].shift;
359 }
360 
361 /**
362  * Return the mask for getting a twig table index out of a virtual address. This
363  * can be combined with the value returned by pt_attr_twig_shift() to get the
364  * index into a twig table.
365  */
366 static __unused inline uint64_t
pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)367 pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)
368 {
369 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].index_mask;
370 }
371 
372 /**
373  * Return the amount of memory that a leaf table takes up. This is equivalent
374  * to the amount of virtual address space covered by a single twig TTE.
375  */
376 static inline uint64_t
pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)377 pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)
378 {
379 	return pt_attr_twig_size(pt_attr);
380 }
381 
382 /**
383  * Return the offset mask for the memory used by a leaf page table.
384  *
385  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
386  */
387 static inline uint64_t
pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)388 pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)
389 {
390 	return pt_attr_twig_offmask(pt_attr);
391 }
392 
393 /**
394  * Return the Access Permissions bits required to specify User and Kernel
395  * Read/Write permissions on a PTE in this type of page table hierarchy (stage 1
396  * vs stage 2).
397  */
398 static inline uintptr_t
pt_attr_leaf_rw(const pt_attr_t * const pt_attr)399 pt_attr_leaf_rw(const pt_attr_t * const pt_attr)
400 {
401 	return pt_attr->ap_rw;
402 }
403 
404 /**
405  * Return the Access Permissions bits required to specify User and Kernel
406  * Read-Only permissions on a PTE in this type of page table hierarchy (stage 1
407  * vs stage 2).
408  */
409 static inline uintptr_t
pt_attr_leaf_ro(const pt_attr_t * const pt_attr)410 pt_attr_leaf_ro(const pt_attr_t * const pt_attr)
411 {
412 	return pt_attr->ap_ro;
413 }
414 
415 /**
416  * Return the Access Permissions bits required to specify just Kernel Read-Only
417  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
418  * 2).
419  */
420 static inline uintptr_t
pt_attr_leaf_rona(const pt_attr_t * const pt_attr)421 pt_attr_leaf_rona(const pt_attr_t * const pt_attr)
422 {
423 	return pt_attr->ap_rona;
424 }
425 
426 /**
427  * Return the Access Permissions bits required to specify just Kernel Read/Write
428  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
429  * 2).
430  */
431 static inline uintptr_t
pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)432 pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)
433 {
434 	return pt_attr->ap_rwna;
435 }
436 
437 /**
438  * Return the mask of the page table entry bits required to set both the
439  * privileged and unprivileged execute never bits.
440  */
441 static inline uintptr_t
pt_attr_leaf_xn(const pt_attr_t * const pt_attr)442 pt_attr_leaf_xn(const pt_attr_t * const pt_attr)
443 {
444 	return pt_attr->ap_xn;
445 }
446 
447 /**
448  * Return the mask of the page table entry bits required to set just the
449  * privileged execute never bit.
450  */
451 static inline uintptr_t
pt_attr_leaf_x(const pt_attr_t * const pt_attr)452 pt_attr_leaf_x(const pt_attr_t * const pt_attr)
453 {
454 	return pt_attr->ap_x;
455 }
456 
457 
458 /**
459  * Return the last level in the page table hierarchy.
460  */
461 static inline unsigned int
pt_attr_leaf_level(const pt_attr_t * const pt_attr)462 pt_attr_leaf_level(const pt_attr_t * const pt_attr)
463 {
464 	return pt_attr_twig_level(pt_attr) + 1;
465 }
466 
467 
468 /**
469  * Return the index into a specific level of page table for a given virtual
470  * address.
471  *
472  * @param pt_attr Page table attribute structure describing the hierarchy.
473  * @param addr The virtual address to get the index from.
474  * @param pt_level The page table whose index should be returned.
475  */
476 static inline unsigned int
ttn_index(const pt_attr_t * const pt_attr,vm_map_address_t addr,unsigned int pt_level)477 ttn_index(const pt_attr_t * const pt_attr, vm_map_address_t addr, unsigned int pt_level)
478 {
479 	const uint64_t index_unshifted = addr & pt_attr_ln_index_mask(pt_attr, pt_level);
480 	return (unsigned int)(index_unshifted >> pt_attr_ln_shift(pt_attr, pt_level));
481 }
482 
483 /**
484  * Return the index into a twig page table for a given virtual address.
485  *
486  * @param pt_attr Page table attribute structure describing the hierarchy.
487  * @param addr The virtual address to get the index from.
488  */
489 static inline unsigned int
tte_index(const pt_attr_t * const pt_attr,vm_map_address_t addr)490 tte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
491 {
492 	return ttn_index(pt_attr, addr, PMAP_TT_L2_LEVEL);
493 }
494 
495 /**
496  * Return the index into a leaf page table for a given virtual address.
497  *
498  * @param pt_attr Page table attribute structure describing the hierarchy.
499  * @param addr The virtual address to get the index from.
500  */
501 static inline unsigned int
pte_index(const pt_attr_t * const pt_attr,vm_map_address_t addr)502 pte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
503 {
504 	return ttn_index(pt_attr, addr, PMAP_TT_L3_LEVEL);
505 }
506 
507 
508 
509 /**
510  * Given an address and a map, compute the address of the table entry at the
511  * specified page table level. If the address is invalid with respect to the map
512  * then TT_ENTRY_NULL is returned.
513  *
514  * @param pmap The pmap whose page tables to parse.
515  * @param target_level The page table level at which to stop parsing the
516  *                     hierarchy at.
517  * @param addr The virtual address to calculate the table indices off of.
518  */
519 static inline tt_entry_t *
pmap_ttne(pmap_t pmap,unsigned int target_level,vm_map_address_t addr)520 pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
521 {
522 	tt_entry_t *table_ttep = TT_ENTRY_NULL;
523 	tt_entry_t *ttep = TT_ENTRY_NULL;
524 	tt_entry_t tte = ARM_TTE_EMPTY;
525 	unsigned int cur_level;
526 
527 	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
528 
529 	if (__improbable((addr < pmap->min) || (addr >= pmap->max))) {
530 		return TT_ENTRY_NULL;
531 	}
532 	/* Start parsing at the root page table. */
533 	table_ttep = pmap->tte;
534 
535 	assert(target_level <= pt_attr->pta_max_level);
536 
537 	for (cur_level = pt_attr->pta_root_level; cur_level <= target_level; cur_level++) {
538 		ttep = &table_ttep[ttn_index(pt_attr, addr, cur_level)];
539 
540 		if (cur_level == target_level) {
541 			break;
542 		}
543 
544 		tte = *ttep;
545 
546 #if MACH_ASSERT
547 		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) {
548 			panic("%s: Attempt to demote L%u block, tte=0x%llx, pmap=%p, target_level=%u, addr=%p",
549 			    __func__, cur_level, tte, pmap, target_level, (void*)addr);
550 		}
551 #endif
552 		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) {
553 			return TT_ENTRY_NULL;
554 		}
555 
556 		table_ttep = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
557 	}
558 
559 	return ttep;
560 }
561 
562 /**
563  * Given an address and a map, compute the address of the level 1 translation
564  * table entry. If the address is invalid with respect to the map then
565  * TT_ENTRY_NULL is returned.
566  *
567  * @param pmap The pmap whose page tables to parse.
568  * @param addr The virtual address to calculate the table indices off of.
569  */
570 static inline tt_entry_t *
pmap_tt1e(pmap_t pmap,vm_map_address_t addr)571 pmap_tt1e(pmap_t pmap, vm_map_address_t addr)
572 {
573 	return pmap_ttne(pmap, PMAP_TT_L1_LEVEL, addr);
574 }
575 
576 /**
577  * Given an address and a map, compute the address of the level 2 translation
578  * table entry. If the address is invalid with respect to the map then
579  * TT_ENTRY_NULL is returned.
580  *
581  * @param pmap The pmap whose page tables to parse.
582  * @param addr The virtual address to calculate the table indices off of.
583  */
584 static inline tt_entry_t *
pmap_tt2e(pmap_t pmap,vm_map_address_t addr)585 pmap_tt2e(pmap_t pmap, vm_map_address_t addr)
586 {
587 	return pmap_ttne(pmap, PMAP_TT_L2_LEVEL, addr);
588 }
589 
590 /**
591  * Given an address and a map, compute the address of the level 3 page table
592  * entry. If the address is invalid with respect to the map then PT_ENTRY_NULL
593  * is returned.
594  *
595  * @param pmap The pmap whose page tables to parse.
596  * @param addr The virtual address to calculate the table indices off of.
597  */
598 static inline pt_entry_t *
pmap_tt3e(pmap_t pmap,vm_map_address_t addr)599 pmap_tt3e(pmap_t pmap, vm_map_address_t addr)
600 {
601 	return (pt_entry_t*)pmap_ttne(pmap, PMAP_TT_L3_LEVEL, addr);
602 }
603 
604 /**
605  * Given an address and a map, compute the address of the twig translation table
606  * entry. If the address is invalid with respect to the map then TT_ENTRY_NULL
607  * is returned.
608  *
609  * @param pmap The pmap whose page tables to parse.
610  * @param addr The virtual address to calculate the table indices off of.
611  */
612 static inline tt_entry_t *
pmap_tte(pmap_t pmap,vm_map_address_t addr)613 pmap_tte(pmap_t pmap, vm_map_address_t addr)
614 {
615 	return pmap_tt2e(pmap, addr);
616 }
617 
618 /**
619  * Given an address and a map, compute the address of the leaf page table entry.
620  * If the address is invalid with respect to the map then PT_ENTRY_NULL is
621  * returned.
622  *
623  * @param pmap The pmap whose page tables to parse.
624  * @param addr The virtual address to calculate the table indices off of.
625  */
626 static inline pt_entry_t *
pmap_pte(pmap_t pmap,vm_map_address_t addr)627 pmap_pte(pmap_t pmap, vm_map_address_t addr)
628 {
629 	return pmap_tt3e(pmap, addr);
630 }
631 
632 
633 #endif /* _ARM_PMAP_PMAP_PT_GEOMETRY_H_ */
634