xref: /xnu-11417.140.69/osfmk/arm64/sptm/pmap/pmap_pt_geometry.h (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /**
29  * PMAP Page Table Geometry.
30  *
31  * This header file is used to store the types, and inline functions related to
32  * retrieving information about and parsing page table hierarchies.
33  *
34  * To prevent circular dependencies, this file shouldn't include any of the
35  * other internal osfmk/arm/pmap/ header files.
36  */
37 #ifndef _ARM_PMAP_PMAP_PT_GEOMETRY_H_
38 #define _ARM_PMAP_PMAP_PT_GEOMETRY_H_
39 
40 #include <stdint.h>
41 
42 #include <kern/debug.h>
43 #include <kern/locks.h>
44 #include <mach/vm_types.h>
45 #include <mach_assert.h>
46 
47 #include <arm64/proc_reg.h>
48 
49 /**
50  * arm64/sptm/pmap/pmap.h is safe to be included in this file since it shouldn't rely on any
51  * of the internal pmap header files (so no circular dependencies).
52  */
53 #include <arm64/sptm/pmap/pmap.h>
54 
/**
 * Structure representing parameters of a single page table level. An array of
 * these structures are used to represent the geometry for an entire page table
 * hierarchy.
 */
struct page_table_level_info {
	/* Bytes of virtual address space covered by a single entry at this level. */
	const uint64_t size;
	/* Mask of the VA offset within one entry's region; equivalent to size - 1. */
	const uint64_t offmask;
	/* Amount to right-shift a VA (after masking) to get this level's table index. */
	const uint64_t shift;
	/* Mask applied to a VA (before shifting) to extract this level's index bits. */
	const uint64_t index_mask;
	/* Descriptor bit(s) that mark an entry at this level as valid. */
	const uint64_t valid_mask;
	/* Descriptor bit(s) encoding the entry type (table vs. block). */
	const uint64_t type_mask;
	/* Value of the type field that denotes a block (not table) entry. */
	const uint64_t type_block;
};
69 
/**
 * Operations that are dependent on the type of page table. This is useful, for
 * instance, when dealing with stage 1 vs stage 2 pmaps.
 */
struct page_table_ops {
	/* Allocate a HW address-space identifier for the pmap; returns success.
	 * NOTE(review): presumably an ASID/VMID — confirm in the op implementations. */
	bool (*alloc_id)(pmap_t pmap);
	/* Release the identifier previously obtained via alloc_id(). */
	void (*free_id)(pmap_t pmap);
	/* Asynchronously flush TLB entries covering [va, va + length) for the pmap;
	 * last_level_only restricts the flush to leaf-level translations. */
	void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only);
	/* Asynchronously flush all TLB entries associated with the pmap. */
	void (*flush_tlb_async)(pmap_t pmap);
	/* Convert a WIMG (cacheability/memory-attribute) value into PTE bits for
	 * the mapping of physical address pa. */
	pt_entry_t (*wimg_to_pte)(unsigned int wimg, pmap_paddr_t pa);
};
81 
/**
 * The Page Table Attribute structure is used for both parameterizing the
 * different possible page table geometries, but also for abstracting out the
 * differences between stage 1 and stage 2 page tables. This allows one set of
 * code to seamlessly handle the differences between various address space
 * layouts as well as stage 1 vs stage 2 page tables on the fly. See
 * doc/arm_pmap.md for more details.
 *
 * Instead of accessing the fields in this structure directly, it is recommended
 * to use the page table attribute getter functions defined below.
 */
struct page_table_attr {
	/* Sizes and offsets for each level in the page table hierarchy. */
	const struct page_table_level_info * const pta_level_info;

	/* Operations that are dependent on the type of page table. */
	const struct page_table_ops * const pta_ops;

	/**
	 * The Access Permissions bits have different layouts within a page table
	 * entry depending on whether it's an entry for a stage 1 or stage 2 pmap.
	 *
	 * These fields describe the correct PTE bits to set to get the wanted
	 * permissions for the page tables described by this attribute structure.
	 */
	const uintptr_t ap_ro;   /* User + kernel read-only. */
	const uintptr_t ap_rw;   /* User + kernel read/write. */
	const uintptr_t ap_rona; /* Kernel read-only, no user access. */
	const uintptr_t ap_rwna; /* Kernel read/write, no user access. */
	const uintptr_t ap_xn;   /* Privileged and unprivileged execute-never. */
	const uintptr_t ap_x;    /* Privileged execute-never only. */

	/* The page table level at which the hierarchy begins. */
	const unsigned int pta_root_level;

	/* The page table level at which the commpage is nested into an address space. */
	const unsigned int pta_commpage_level;

	/* The last level in the page table hierarchy (ARM supports up to four levels). */
	const unsigned int pta_max_level;


	/**
	 * Value to set the Translation Control Register (TCR) to in order to inform
	 * the hardware of this page table geometry.
	 */
	const uint64_t pta_tcr_value;

	/* Page Table/Granule Size. */
	const uint64_t pta_page_size;

	/**
	 * How many bits to shift "1" by to get the page table size. Alternatively,
	 * could also be thought of as how many bits make up the page offset in a
	 * virtual address.
	 */
	const uint64_t pta_page_shift;

	/**
	 * SPTM page table geometry index.
	 */
	const uint8_t geometry_id;
};
145 
146 typedef struct page_table_attr pt_attr_t;
147 
148 /* The default page table attributes for a system. */
149 extern const struct page_table_attr * const native_pt_attr;
150 
151 /**
152  * Macros for getting pmap attributes/operations; not functions for const
153  * propagation.
154  */
155 #if ARM_PARAMETERIZED_PMAP
156 
157 /* The page table attributes are linked to the pmap */
158 #define pmap_get_pt_attr(pmap) ((pmap)->pmap_pt_attr)
159 #define pmap_get_pt_ops(pmap) ((pmap)->pmap_pt_attr->pta_ops)
160 
161 #else /* ARM_PARAMETERIZED_PMAP */
162 
163 /* The page table attributes are fixed (to allow for const propagation) */
164 #define pmap_get_pt_attr(pmap) (native_pt_attr)
165 #define pmap_get_pt_ops(pmap) (&native_pt_ops)
166 
167 #endif /* ARM_PARAMETERIZED_PMAP */
168 
169 /* Defines representing a level in a page table hierarchy. */
170 #define PMAP_TT_L0_LEVEL 0x0
171 #define PMAP_TT_L1_LEVEL 0x1
172 #define PMAP_TT_L2_LEVEL 0x2
173 #define PMAP_TT_L3_LEVEL 0x3
174 
175 /**
176  * Inline functions exported for usage by other pmap modules.
177  *
178  * In an effort to not cause any performance regressions while breaking up the
179  * pmap, I'm keeping all functions originally marked as "static inline", as
180  * inline and moving them into header files to be shared across the pmap
181  * modules. In reality, many of these functions probably don't need to be inline
182  * and can be moved back into a .c file.
183  *
184  * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
185  */
186 
187 /**
188  * Keep the following in mind when looking at the available attribute getters:
189  *
190  * We tend to use standard terms to describe various levels in a page table
191  * hierarchy. The "root" level is the top of a hierarchy. The root page table is
192  * the one that will programmed into the Translation Table Base Register (TTBR)
193  * to inform the hardware of where to begin when performing page table walks.
194  * The "twig" level is always one up from the last level, and the "leaf" level
195  * is the last page table level in a hierarchy. The leaf page tables always
196  * contain block entries, but the higher levels can contain either table or
197  * block entries.
198  *
199  * ARM supports up to four levels of page tables. The levels start at L0 and
200  * increase to L3 the deeper into a hierarchy you get, although L0 isn't
201  * necessarily always the root level. For example, in a four-level hierarchy,
202  * the root would be L0, the twig would be L2, and the leaf would be L3. But for
203  * a three-level hierarchy, the root would be L1, the twig would be L2, and the
204  * leaf would be L3.
205  */
206 /* Page size getter. */
207 static inline uint64_t
pt_attr_page_size(const pt_attr_t * const pt_attr)208 pt_attr_page_size(const pt_attr_t * const pt_attr)
209 {
210 	return pt_attr->pta_page_size;
211 }
212 
213 /**
214  * Return the size of the virtual address space covered by a single TTE at a
215  * specified level in the hierarchy.
216  */
217 __unused static inline uint64_t
pt_attr_ln_size(const pt_attr_t * const pt_attr,unsigned int level)218 pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level)
219 {
220 	return pt_attr->pta_level_info[level].size;
221 }
222 
223 /**
224  * Return the page descriptor shift for a specified level in the hierarchy. This
225  * shift value can be used to get the index into a page table at this level in
226  * the hierarchy from a given virtual address.
227  */
228 __unused static inline uint64_t
pt_attr_ln_shift(const pt_attr_t * const pt_attr,unsigned int level)229 pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level)
230 {
231 	return pt_attr->pta_level_info[level].shift;
232 }
233 
234 /**
235  * Return a mask of the offset for a specified level in the hierarchy.
236  *
237  * This should be equivalent to the value returned by pt_attr_ln_size() - 1.
238  */
239 static inline uint64_t
pt_attr_ln_offmask(const pt_attr_t * const pt_attr,unsigned int level)240 pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level)
241 {
242 	return pt_attr->pta_level_info[level].offmask;
243 }
244 
245 /**
246  * On ARMv7 systems, the leaf page table size (1KB) is smaller than the page
247  * size (4KB). To simplify our code, leaf tables are operated on in bundles of
248  * four, so that four leaf page tables can be allocated with a single page.
249  * Because of that, each page of leaf tables takes up four root/twig entries.
250  *
251  * This function returns the offset mask for a given level with that taken into
252  * consideration. On ARMv8 systems, the granule size is identical to the page
253  * size so this doesn't need to be taken into account.
254  *
255  */
256 __unused static inline uint64_t
pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr,unsigned int level)257 pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr, unsigned int level)
258 {
259 	return pt_attr_ln_offmask(pt_attr, level);
260 }
261 
262 /**
263  * Return the mask for getting a page table index out of a virtual address for a
264  * specified level in the hierarchy. This can be combined with the value
265  * returned by pt_attr_ln_shift() to get the index into a page table.
266  */
267 __unused static inline uint64_t
pt_attr_ln_index_mask(const pt_attr_t * const pt_attr,unsigned int level)268 pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level)
269 {
270 	return pt_attr->pta_level_info[level].index_mask;
271 }
272 
273 /**
274  * Return the second to last page table level.
275  */
276 static inline unsigned int
pt_attr_twig_level(const pt_attr_t * const pt_attr)277 pt_attr_twig_level(const pt_attr_t * const pt_attr)
278 {
279 	return pt_attr->pta_max_level - 1;
280 }
281 
282 /**
283  * Return the first page table level. This is what will be programmed into the
284  * Translation Table Base Register (TTBR) to inform the hardware of where to
285  * begin page table walks.
286  */
287 static inline unsigned int
pt_attr_root_level(const pt_attr_t * const pt_attr)288 pt_attr_root_level(const pt_attr_t * const pt_attr)
289 {
290 	return pt_attr->pta_root_level;
291 }
292 
293 /**
294  * Return the level at which to nest the commpage pmap into userspace pmaps.
295  * Since the commpage is shared across all userspace address maps, memory is
296  * saved by sharing the commpage page tables with every userspace pmap. The
297  * level at which to nest the commpage is dependent on the page table geometry.
298  *
299  * Typically this is L1 for 4KB page tables, and L2 for 16KB page tables. In
300  * this way, the commpage's L2/L3 page tables are reused in every 4KB task, and
301  * the L3 page table is reused in every 16KB task.
302  */
303 static inline unsigned int
pt_attr_commpage_level(const pt_attr_t * const pt_attr)304 pt_attr_commpage_level(const pt_attr_t * const pt_attr)
305 {
306 	return pt_attr->pta_commpage_level;
307 }
308 
309 /**
310  * Return the size of the virtual address space covered by a single PTE at the
311  * leaf level.
312  */
313 static __unused inline uint64_t
pt_attr_leaf_size(const pt_attr_t * const pt_attr)314 pt_attr_leaf_size(const pt_attr_t * const pt_attr)
315 {
316 	return pt_attr->pta_level_info[pt_attr->pta_max_level].size;
317 }
318 
319 /**
320  * Return a mask of the offset for a leaf table.
321  *
322  * This should be equivalent to the value returned by pt_attr_leaf_size() - 1.
323  */
324 static __unused inline uint64_t
pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)325 pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)
326 {
327 	return pt_attr->pta_level_info[pt_attr->pta_max_level].offmask;
328 }
329 
330 /**
331  * Return the page descriptor shift for a leaf table entry. This shift value can
332  * be used to get the index into a leaf page table from a given virtual address.
333  */
334 static inline uint64_t
pt_attr_leaf_shift(const pt_attr_t * const pt_attr)335 pt_attr_leaf_shift(const pt_attr_t * const pt_attr)
336 {
337 	return pt_attr->pta_level_info[pt_attr->pta_max_level].shift;
338 }
339 
340 /**
341  * Return the mask for getting a leaf table index out of a virtual address. This
342  * can be combined with the value returned by pt_attr_leaf_shift() to get the
343  * index into a leaf table.
344  */
345 static __unused inline uint64_t
pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)346 pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)
347 {
348 	return pt_attr->pta_level_info[pt_attr->pta_max_level].index_mask;
349 }
350 
351 /**
352  * Return the size of the virtual address space covered by a single TTE at the
353  * twig level.
354  */
355 static inline uint64_t
pt_attr_twig_size(const pt_attr_t * const pt_attr)356 pt_attr_twig_size(const pt_attr_t * const pt_attr)
357 {
358 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].size;
359 }
360 
361 /**
362  * Return a mask of the offset for a twig table.
363  *
364  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
365  */
366 static inline uint64_t
pt_attr_twig_offmask(const pt_attr_t * const pt_attr)367 pt_attr_twig_offmask(const pt_attr_t * const pt_attr)
368 {
369 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].offmask;
370 }
371 
372 /**
373  * Return the page descriptor shift for a twig table entry. This shift value can
374  * be used to get the index into a twig page table from a given virtual address.
375  */
376 static inline uint64_t
pt_attr_twig_shift(const pt_attr_t * const pt_attr)377 pt_attr_twig_shift(const pt_attr_t * const pt_attr)
378 {
379 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].shift;
380 }
381 
382 /**
383  * Return the mask for getting a twig table index out of a virtual address. This
384  * can be combined with the value returned by pt_attr_twig_shift() to get the
385  * index into a twig table.
386  */
387 static __unused inline uint64_t
pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)388 pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)
389 {
390 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].index_mask;
391 }
392 
393 /**
394  * Return the amount of memory that a leaf table takes up. This is equivalent
395  * to the amount of virtual address space covered by a single twig TTE.
396  */
397 static inline uint64_t
pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)398 pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)
399 {
400 	return pt_attr_twig_size(pt_attr);
401 }
402 
403 /**
404  * Return the offset mask for the memory used by a leaf page table.
405  *
406  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
407  */
408 static inline uint64_t
pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)409 pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)
410 {
411 	return pt_attr_twig_offmask(pt_attr);
412 }
413 
414 /**
415  * Return the Access Permissions bits required to specify User and Kernel
416  * Read/Write permissions on a PTE in this type of page table hierarchy (stage 1
417  * vs stage 2).
418  */
419 static inline uintptr_t
pt_attr_leaf_rw(const pt_attr_t * const pt_attr)420 pt_attr_leaf_rw(const pt_attr_t * const pt_attr)
421 {
422 	return pt_attr->ap_rw;
423 }
424 
425 /**
426  * Return the Access Permissions bits required to specify User and Kernel
427  * Read-Only permissions on a PTE in this type of page table hierarchy (stage 1
428  * vs stage 2).
429  */
430 static inline uintptr_t
pt_attr_leaf_ro(const pt_attr_t * const pt_attr)431 pt_attr_leaf_ro(const pt_attr_t * const pt_attr)
432 {
433 	return pt_attr->ap_ro;
434 }
435 
436 /**
437  * Return the Access Permissions bits required to specify just Kernel Read-Only
438  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
439  * 2).
440  */
441 static inline uintptr_t
pt_attr_leaf_rona(const pt_attr_t * const pt_attr)442 pt_attr_leaf_rona(const pt_attr_t * const pt_attr)
443 {
444 	return pt_attr->ap_rona;
445 }
446 
447 /**
448  * Return the Access Permissions bits required to specify just Kernel Read/Write
449  * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
450  * 2).
451  */
452 static inline uintptr_t
pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)453 pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)
454 {
455 	return pt_attr->ap_rwna;
456 }
457 
458 /**
459  * Return the mask of the page table entry bits required to set both the
460  * privileged and unprivileged execute never bits.
461  */
462 static inline uintptr_t
pt_attr_leaf_xn(const pt_attr_t * const pt_attr)463 pt_attr_leaf_xn(const pt_attr_t * const pt_attr)
464 {
465 	return pt_attr->ap_xn;
466 }
467 
468 /**
469  * Return the mask of the page table entry bits required to set just the
470  * privileged execute never bit.
471  */
472 static inline uintptr_t
pt_attr_leaf_x(const pt_attr_t * const pt_attr)473 pt_attr_leaf_x(const pt_attr_t * const pt_attr)
474 {
475 	return pt_attr->ap_x;
476 }
477 
478 
479 /**
480  * Return the last level in the page table hierarchy.
481  */
482 static inline unsigned int
pt_attr_leaf_level(const pt_attr_t * const pt_attr)483 pt_attr_leaf_level(const pt_attr_t * const pt_attr)
484 {
485 	return pt_attr_twig_level(pt_attr) + 1;
486 }
487 
488 
489 /**
490  * Return the index into a specific level of page table for a given virtual
491  * address.
492  *
493  * @param pt_attr Page table attribute structure describing the hierarchy.
494  * @param addr The virtual address to get the index from.
495  * @param pt_level The page table whose index should be returned.
496  */
497 static inline unsigned int
ttn_index(const pt_attr_t * const pt_attr,vm_map_address_t addr,unsigned int pt_level)498 ttn_index(const pt_attr_t * const pt_attr, vm_map_address_t addr, unsigned int pt_level)
499 {
500 	const uint64_t index_unshifted = addr & pt_attr_ln_index_mask(pt_attr, pt_level);
501 	return (unsigned int)(index_unshifted >> pt_attr_ln_shift(pt_attr, pt_level));
502 }
503 
504 /**
505  * Return the index into a twig page table for a given virtual address.
506  *
507  * @param pt_attr Page table attribute structure describing the hierarchy.
508  * @param addr The virtual address to get the index from.
509  */
510 static inline unsigned int
tte_index(const pt_attr_t * const pt_attr,vm_map_address_t addr)511 tte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
512 {
513 	return ttn_index(pt_attr, addr, PMAP_TT_L2_LEVEL);
514 }
515 
516 /**
517  * Return the index into a leaf page table for a given virtual address.
518  *
519  * @param pt_attr Page table attribute structure describing the hierarchy.
520  * @param addr The virtual address to get the index from.
521  */
522 static inline unsigned int
pte_index(const pt_attr_t * const pt_attr,vm_map_address_t addr)523 pte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
524 {
525 	return ttn_index(pt_attr, addr, PMAP_TT_L3_LEVEL);
526 }
527 
528 /**
529  * Return true if a leaf-level PTE is valid.
530  *
531  * @note This will NOT work on non-leaf-level entries. Please use tte_is_valid()
532  *       instead.
533  */
534 static inline bool
pte_is_valid(pt_entry_t pte)535 pte_is_valid(pt_entry_t pte)
536 {
537 	return (pte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_VALID;
538 }
539 
540 /**
541  * Return true if a non-leaf-level TTE is valid and typed as a table.
542  *
543  * @note This will NOT work on leaf-level entries. Please use pte_is_valid()
544  *       instead.
545  *
546  * @note This will return false if the TTE represents a non-leaf-level block
547  *       mapping (instead of a table mapping).
548  */
549 static inline bool
tte_is_valid_table(tt_entry_t tte)550 tte_is_valid_table(tt_entry_t tte)
551 {
552 	return (tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID);
553 }
554 
555 /**
556  * Return true if a non-leaf-level TTE is valid and typed as a block mapping.
557  *
558  * @note This will NOT work on leaf-level entries. Please use pte_is_valid()
559  *       instead.
560  *
561  * @note This will return false if the TTE represents a non-leaf-level table
562  *       mapping (instead of a block mapping).
563  */
564 static inline bool
tte_is_valid_block(tt_entry_t tte)565 tte_is_valid_block(tt_entry_t tte)
566 {
567 	return (tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID);
568 }
569 
570 /**
571  * Return true if a non-leaf-level TTE is typed as a table (regardless of
572  * validity).
573  *
574  * @note This will NOT work on leaf-level entries.
575  */
576 static inline bool
tte_is_table(tt_entry_t tte)577 tte_is_table(tt_entry_t tte)
578 {
579 	return (tte & (ARM_TTE_TYPE_MASK)) == (ARM_TTE_TYPE_TABLE);
580 }
581 
582 /**
583  * Return true if a non-leaf-level TTE is typed as a block mapping (regardless
584  * of validity).
585  *
586  * @note This will NOT work on leaf-level entries.
587  */
588 static inline bool
tte_is_block(tt_entry_t tte)589 tte_is_block(tt_entry_t tte)
590 {
591 	return (tte & (ARM_TTE_TYPE_MASK)) == (ARM_TTE_TYPE_BLOCK);
592 }
593 
594 /**
595  * Given an address and a map, compute the address of the table entry at the
596  * specified page table level. If the address is invalid with respect to the map
597  * then TT_ENTRY_NULL is returned.
598  *
599  * @param pmap The pmap whose page tables to parse.
600  * @param target_level The page table level at which to stop parsing the
601  *                     hierarchy at.
602  * @param addr The virtual address to calculate the table indices off of.
603  */
604 static inline tt_entry_t *
pmap_ttne(pmap_t pmap,unsigned int target_level,vm_map_address_t addr)605 pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
606 {
607 	tt_entry_t *table_ttep = TT_ENTRY_NULL;
608 	tt_entry_t *ttep = TT_ENTRY_NULL;
609 	tt_entry_t tte = ARM_TTE_EMPTY;
610 	unsigned int cur_level;
611 
612 	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
613 
614 	if (__improbable((addr < pmap->min) || (addr >= pmap->max))) {
615 		return TT_ENTRY_NULL;
616 	}
617 	/* Start parsing at the root page table. */
618 	table_ttep = pmap->tte;
619 
620 	assert(target_level <= pt_attr->pta_max_level);
621 
622 	for (cur_level = pt_attr->pta_root_level; cur_level <= target_level; cur_level++) {
623 		ttep = &table_ttep[ttn_index(pt_attr, addr, cur_level)];
624 
625 		if (cur_level == target_level) {
626 			break;
627 		}
628 
629 		tte = *ttep;
630 
631 #if MACH_ASSERT
632 		if (tte_is_valid_block(tte)) {
633 			panic("%s: Attempt to demote L%u block, tte=0x%llx, pmap=%p, target_level=%u, addr=%p",
634 			    __func__, cur_level, tte, pmap, target_level, (void*)addr);
635 		}
636 #endif
637 		if (!tte_is_valid_table(tte)) {
638 			return TT_ENTRY_NULL;
639 		}
640 
641 		table_ttep = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
642 	}
643 
644 	return ttep;
645 }
646 
647 /**
648  * Given an address and a map, compute the address of the level 1 translation
649  * table entry. If the address is invalid with respect to the map then
650  * TT_ENTRY_NULL is returned.
651  *
652  * @param pmap The pmap whose page tables to parse.
653  * @param addr The virtual address to calculate the table indices off of.
654  */
655 static inline tt_entry_t *
pmap_tt1e(pmap_t pmap,vm_map_address_t addr)656 pmap_tt1e(pmap_t pmap, vm_map_address_t addr)
657 {
658 	return pmap_ttne(pmap, PMAP_TT_L1_LEVEL, addr);
659 }
660 
661 /**
662  * Given an address and a map, compute the address of the level 2 translation
663  * table entry. If the address is invalid with respect to the map then
664  * TT_ENTRY_NULL is returned.
665  *
666  * @param pmap The pmap whose page tables to parse.
667  * @param addr The virtual address to calculate the table indices off of.
668  */
669 static inline tt_entry_t *
pmap_tt2e(pmap_t pmap,vm_map_address_t addr)670 pmap_tt2e(pmap_t pmap, vm_map_address_t addr)
671 {
672 	return pmap_ttne(pmap, PMAP_TT_L2_LEVEL, addr);
673 }
674 
675 /**
676  * Given an address and a map, compute the address of the level 3 page table
677  * entry. If the address is invalid with respect to the map then PT_ENTRY_NULL
678  * is returned.
679  *
680  * @param pmap The pmap whose page tables to parse.
681  * @param addr The virtual address to calculate the table indices off of.
682  */
683 static inline pt_entry_t *
pmap_tt3e(pmap_t pmap,vm_map_address_t addr)684 pmap_tt3e(pmap_t pmap, vm_map_address_t addr)
685 {
686 	return (pt_entry_t*)pmap_ttne(pmap, PMAP_TT_L3_LEVEL, addr);
687 }
688 
689 /**
690  * Given an address and a map, compute the address of the twig translation table
691  * entry. If the address is invalid with respect to the map then TT_ENTRY_NULL
692  * is returned.
693  *
694  * @param pmap The pmap whose page tables to parse.
695  * @param addr The virtual address to calculate the table indices off of.
696  */
697 static inline tt_entry_t *
pmap_tte(pmap_t pmap,vm_map_address_t addr)698 pmap_tte(pmap_t pmap, vm_map_address_t addr)
699 {
700 	return pmap_tt2e(pmap, addr);
701 }
702 
703 /**
704  * Given an address and a map, compute the address of the leaf page table entry.
705  * If the address is invalid with respect to the map then PT_ENTRY_NULL is
706  * returned.
707  *
708  * @param pmap The pmap whose page tables to parse.
709  * @param addr The virtual address to calculate the table indices off of.
710  */
711 static inline pt_entry_t *
pmap_pte(pmap_t pmap,vm_map_address_t addr)712 pmap_pte(pmap_t pmap, vm_map_address_t addr)
713 {
714 	return pmap_tt3e(pmap, addr);
715 }
716 
717 /**
718  * Given a virtual address and a page hierarchy level, align the address such that
719  * it targets a TTE index that is page ratio-aligned. Normally used prior to
720  * calling SPTM table operations (map/unmap/nest/unnest), since the SPTM enforces
721  * this requirement.
722  *
723  * @param pt_attr Page table attribute structure associated with the address space at hand.
724  * @param level Page table level for which to align the address.
725  * @param va Virtual address to align.
726  *
727  * @return Aligned virtual address.
728  */
729 static inline vm_map_address_t
pt_attr_align_va(const pt_attr_t * const pt_attr,unsigned int level,vm_map_address_t va)730 pt_attr_align_va(const pt_attr_t * const pt_attr, unsigned int level, vm_map_address_t va)
731 {
732 	const uint64_t page_ratio = PAGE_SIZE / pt_attr_page_size(pt_attr);
733 	const uint64_t ln_shift = pt_attr_ln_shift(pt_attr, level);
734 
735 	return va & ~((page_ratio - 1) << ln_shift);
736 }
737 #endif /* _ARM_PMAP_PMAP_PT_GEOMETRY_H_ */
738