xref: /xnu-8020.121.3/osfmk/arm/pmap/pmap_pt_geometry.h (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /**
29  * PMAP Page Table Geometry.
30  *
31  * This header file is used to store the types, and inline functions related to
32  * retrieving information about and parsing page table hierarchies.
33  *
34  * To prevent circular dependencies, this file shouldn't include any of the
35  * other internal osfmk/arm/pmap/ header files.
36  */
37 #ifndef _ARM_PMAP_PMAP_PT_GEOMETRY_H_
38 #define _ARM_PMAP_PMAP_PT_GEOMETRY_H_
39 
40 #include <stdint.h>
41 
42 #include <kern/debug.h>
43 #include <kern/locks.h>
44 #include <mach/vm_types.h>
45 #include <mach_assert.h>
46 
47 #include <arm/proc_reg.h>
48 #if defined(__arm64__)
49 #include <arm64/proc_reg.h>
50 #endif /* defined(__arm64__) */
51 
52 /**
53  * arm/pmap.h is safe to be included in this file since it shouldn't rely on any
54  * of the internal pmap header files (so no circular dependencies).
55  */
56 #include <arm/pmap.h>
57 
/**
 * Structure representing parameters of a single page table level. An array of
 * these structures are used to represent the geometry for an entire page table
 * hierarchy.
 */
struct page_table_level_info {
	/* Size, in bytes, of the VA region mapped by a single entry at this level. */
	const uint64_t size;
	/* Mask of the VA bits below this level's entry; equivalent to size - 1. */
	const uint64_t offmask;
	/* Right-shift applied to a VA to bring this level's index bits down to bit 0. */
	const uint64_t shift;
	/* Mask of the VA bits that select an entry within this level's table. */
	const uint64_t index_mask;
	/* TTE bit(s) that mark an entry at this level as valid. */
	const uint64_t valid_mask;
	/* TTE bit(s) encoding the entry type (table vs. block). */
	const uint64_t type_mask;
	/* Value of the type_mask bits that denotes a block (leaf) mapping. */
	const uint64_t type_block;
};
72 
/**
 * Operations that are dependent on the type of page table. This is useful, for
 * instance, when dealing with stage 1 vs stage 2 pmaps.
 */
struct page_table_ops {
	/* Allocate an address space ID (e.g. ASID/VMID) for the given pmap. */
	bool (*alloc_id)(pmap_t pmap);
	/* Release the address space ID previously allocated for the given pmap. */
	void (*free_id)(pmap_t pmap);
	/* Begin an (asynchronous) TLB flush for a VA range within the given pmap. */
	void (*flush_tlb_region_async)(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only);
	/* Begin an (asynchronous) TLB flush of all entries for the given pmap. */
	void (*flush_tlb_async)(pmap_t pmap);
	/* Convert WIMG cacheability bits into the matching PTE attribute bits. */
	pt_entry_t (*wimg_to_pte)(unsigned int wimg, pmap_paddr_t pa);
};
84 
/**
 * The Page Table Attribute structure is used for both parameterizing the
 * different possible page table geometries, but also for abstracting out the
 * differences between stage 1 and stage 2 page tables. This allows one set of
 * code to seamlessly handle the differences between various address space
 * layouts as well as stage 1 vs stage 2 page tables on the fly. See
 * doc/arm_pmap.md for more details.
 *
 * Instead of accessing the fields in this structure directly, it is recommended
 * to use the page table attribute getter functions defined below.
 */
struct page_table_attr {
	/* Sizes and offsets for each level in the page table hierarchy. */
	const struct page_table_level_info * const pta_level_info;

	/* Operations that are dependent on the type of page table. */
	const struct page_table_ops * const pta_ops;

	/**
	 * The Access Permissions bits have different layouts within a page table
	 * entry depending on whether it's an entry for a stage 1 or stage 2 pmap.
	 *
	 * These fields describe the correct PTE bits to set to get the wanted
	 * permissions for the page tables described by this attribute structure.
	 */
	const uintptr_t ap_ro;   /* User and kernel read-only. */
	const uintptr_t ap_rw;   /* User and kernel read/write. */
	const uintptr_t ap_rona; /* Kernel read-only, no user access. */
	const uintptr_t ap_rwna; /* Kernel read/write, no user access. */
	const uintptr_t ap_xn;   /* Privileged and unprivileged execute-never. */
	const uintptr_t ap_x;    /* Privileged execute-never only. */

	/* The page table level at which the hierarchy begins. */
	const unsigned int pta_root_level;

	/* The page table level at which the commpage is nested into an address space. */
	const unsigned int pta_commpage_level;

	/* The last level in the page table hierarchy (ARM supports up to four levels). */
	const unsigned int pta_max_level;


#if __ARM_MIXED_PAGE_SIZE__
	/**
	 * Value to set the Translation Control Register (TCR) to in order to inform
	 * the hardware of this page table geometry.
	 */
	const uint64_t pta_tcr_value;
#endif /* __ARM_MIXED_PAGE_SIZE__ */

	/* Page Table/Granule Size. */
	const uint64_t pta_page_size;

	/**
	 * How many bits to shift "1" by to get the page table size. Alternatively,
	 * could also be thought of as how many bits make up the page offset in a
	 * virtual address.
	 */
	const uint64_t pta_page_shift;
};
145 
146 typedef struct page_table_attr pt_attr_t;
147 
148 /* The default page table attributes for a system. */
149 extern const struct page_table_attr * const native_pt_attr;
150 
151 /**
152  * Macros for getting pmap attributes/operations; not functions for const
153  * propagation.
154  */
155 #if ARM_PARAMETERIZED_PMAP
156 
157 /* The page table attributes are linked to the pmap */
158 #define pmap_get_pt_attr(pmap) ((pmap)->pmap_pt_attr)
159 #define pmap_get_pt_ops(pmap) ((pmap)->pmap_pt_attr->pta_ops)
160 
161 #else /* ARM_PARAMETERIZED_PMAP */
162 
163 /* The page table attributes are fixed (to allow for const propagation) */
164 #define pmap_get_pt_attr(pmap) (native_pt_attr)
165 #define pmap_get_pt_ops(pmap) (&native_pt_ops)
166 
167 #endif /* ARM_PARAMETERIZED_PMAP */
168 
169 /* Defines representing a level in a page table hierarchy. */
170 #define PMAP_TT_L0_LEVEL 0x0
171 #define PMAP_TT_L1_LEVEL 0x1
172 #define PMAP_TT_L2_LEVEL 0x2
173 #define PMAP_TT_L3_LEVEL 0x3
174 
175 /**
176  * Inline functions exported for usage by other pmap modules.
177  *
178  * In an effort to not cause any performance regressions while breaking up the
179  * pmap, I'm keeping all functions originally marked as "static inline", as
180  * inline and moving them into header files to be shared across the pmap
181  * modules. In reality, many of these functions probably don't need to be inline
182  * and can be moved back into a .c file.
183  *
184  * TODO: rdar://70538514 (PMAP Cleanup: re-evaluate whether inline functions should actually be inline)
185  */
186 
187 /**
188  * Keep the following in mind when looking at the available attribute getters:
189  *
190  * We tend to use standard terms to describe various levels in a page table
191  * hierarchy. The "root" level is the top of a hierarchy. The root page table is
192  * the one that will be programmed into the Translation Table Base Register (TTBR)
193  * to inform the hardware of where to begin when performing page table walks.
194  * The "twig" level is always one up from the last level, and the "leaf" level
195  * is the last page table level in a hierarchy. The leaf page tables always
196  * contain block entries, but the higher levels can contain either table or
197  * block entries.
198  *
199  * ARM supports up to four levels of page tables. The levels start at L0 and
200  * increase to L3 the deeper into a hierarchy you get, although L0 isn't
201  * necessarily always the root level. For example, in a four-level hierarchy,
202  * the root would be L0, the twig would be L2, and the leaf would be L3. But for
203  * a three-level hierarchy, the root would be L1, the twig would be L2, and the
204  * leaf would be L3.
205  */
206 #if (__ARM_VMSA__ > 7)
/* Return the page/granule size, in bytes, of this page table geometry. */
static inline uint64_t
pt_attr_page_size(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_page_size;
}
213 
/**
 * Return the size of the virtual address space covered by a single TTE at a
 * specified level in the hierarchy.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param level The page table level to query.
 */
__unused static inline uint64_t
pt_attr_ln_size(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].size;
}
223 
/**
 * Return the page descriptor shift for a specified level in the hierarchy. This
 * shift value can be used to get the index into a page table at this level in
 * the hierarchy from a given virtual address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param level The page table level to query.
 */
__unused static inline uint64_t
pt_attr_ln_shift(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].shift;
}
234 
/**
 * Return a mask of the offset for a specified level in the hierarchy.
 *
 * This should be equivalent to the value returned by pt_attr_ln_size() - 1.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param level The page table level to query.
 */
static inline uint64_t
pt_attr_ln_offmask(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].offmask;
}
245 
/**
 * On ARMv7 systems, the leaf page table size (1KB) is smaller than the page
 * size (4KB). To simplify our code, leaf tables are operated on in bundles of
 * four, so that four leaf page tables can be allocated with a single page.
 * Because of that, each page of leaf tables takes up four root/twig entries.
 *
 * This function returns the offset mask for a given level with that taken into
 * consideration. On ARMv8 systems, the granule size is identical to the page
 * size so this doesn't need to be taken into account, and the plain level
 * offset mask is returned.
 *
 * TODO __ARM_VMSA__ == 7: Remove this function when we remove ARMv7 from xnu.
 */
__unused static inline uint64_t
pt_attr_ln_pt_offmask(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr_ln_offmask(pt_attr, level);
}
263 
/**
 * Return the mask for getting a page table index out of a virtual address for a
 * specified level in the hierarchy. This can be combined with the value
 * returned by pt_attr_ln_shift() to get the index into a page table.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param level The page table level to query.
 */
__unused static inline uint64_t
pt_attr_ln_index_mask(const pt_attr_t * const pt_attr, unsigned int level)
{
	return pt_attr->pta_level_info[level].index_mask;
}
274 
/**
 * Return the second to last page table level (one level up from the leaf).
 */
static inline unsigned int
pt_attr_twig_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_max_level - 1;
}
283 
/**
 * Return the first page table level. This is what will be programmed into the
 * Translation Table Base Register (TTBR) to inform the hardware of where to
 * begin page table walks.
 */
static inline unsigned int
pt_attr_root_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_root_level;
}
294 
/**
 * Return the level at which to nest the commpage pmap into userspace pmaps.
 * Since the commpage is shared across all userspace address maps, memory is
 * saved by sharing the commpage page tables with every userspace pmap. The
 * level at which to nest the commpage is dependent on the page table geometry.
 *
 * Typically this is L1 for 4KB page tables, and L2 for 16KB page tables. In
 * this way, the commpage's L2/L3 page tables are reused in every 4KB task, and
 * the L3 page table is reused in every 16KB task.
 */
static inline unsigned int
pt_attr_commpage_level(const pt_attr_t * const pt_attr)
{
	return pt_attr->pta_commpage_level;
}
310 
311 /**
312  * Return the size of the virtual address space covered by a single PTE at the
313  * leaf level.
314  */
315 static __unused inline uint64_t
pt_attr_leaf_size(const pt_attr_t * const pt_attr)316 pt_attr_leaf_size(const pt_attr_t * const pt_attr)
317 {
318 	return pt_attr->pta_level_info[pt_attr->pta_max_level].size;
319 }
320 
321 /**
322  * Return a mask of the offset for a leaf table.
323  *
324  * This should be equivalent to the value returned by pt_attr_leaf_size() - 1.
325  */
326 static __unused inline uint64_t
pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)327 pt_attr_leaf_offmask(const pt_attr_t * const pt_attr)
328 {
329 	return pt_attr->pta_level_info[pt_attr->pta_max_level].offmask;
330 }
331 
332 /**
333  * Return the page descriptor shift for a leaf table entry. This shift value can
334  * be used to get the index into a leaf page table from a given virtual address.
335  */
336 static inline uint64_t
pt_attr_leaf_shift(const pt_attr_t * const pt_attr)337 pt_attr_leaf_shift(const pt_attr_t * const pt_attr)
338 {
339 	return pt_attr->pta_level_info[pt_attr->pta_max_level].shift;
340 }
341 
342 /**
343  * Return the mask for getting a leaf table index out of a virtual address. This
344  * can be combined with the value returned by pt_attr_leaf_shift() to get the
345  * index into a leaf table.
346  */
347 static __unused inline uint64_t
pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)348 pt_attr_leaf_index_mask(const pt_attr_t * const pt_attr)
349 {
350 	return pt_attr->pta_level_info[pt_attr->pta_max_level].index_mask;
351 }
352 
353 /**
354  * Return the size of the virtual address space covered by a single TTE at the
355  * twig level.
356  */
357 static inline uint64_t
pt_attr_twig_size(const pt_attr_t * const pt_attr)358 pt_attr_twig_size(const pt_attr_t * const pt_attr)
359 {
360 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].size;
361 }
362 
363 /**
364  * Return a mask of the offset for a twig table.
365  *
366  * This should be equivalent to the value returned by pt_attr_twig_size() - 1.
367  */
368 static inline uint64_t
pt_attr_twig_offmask(const pt_attr_t * const pt_attr)369 pt_attr_twig_offmask(const pt_attr_t * const pt_attr)
370 {
371 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].offmask;
372 }
373 
374 /**
375  * Return the page descriptor shift for a twig table entry. This shift value can
376  * be used to get the index into a twig page table from a given virtual address.
377  */
378 static inline uint64_t
pt_attr_twig_shift(const pt_attr_t * const pt_attr)379 pt_attr_twig_shift(const pt_attr_t * const pt_attr)
380 {
381 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].shift;
382 }
383 
384 /**
385  * Return the mask for getting a twig table index out of a virtual address. This
386  * can be combined with the value returned by pt_attr_twig_shift() to get the
387  * index into a twig table.
388  */
389 static __unused inline uint64_t
pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)390 pt_attr_twig_index_mask(const pt_attr_t * const pt_attr)
391 {
392 	return pt_attr->pta_level_info[pt_attr->pta_max_level - 1].index_mask;
393 }
394 
/**
 * Return the amount of memory that a leaf table takes up. This is equivalent
 * to the amount of virtual address space covered by a single twig TTE.
 */
static inline uint64_t
pt_attr_leaf_table_size(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_size(pt_attr);
}
404 
/**
 * Return the offset mask for the memory used by a leaf page table.
 *
 * This should be equivalent to the value returned by pt_attr_leaf_table_size() - 1.
 */
static inline uint64_t
pt_attr_leaf_table_offmask(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_offmask(pt_attr);
}
415 
/**
 * Return the Access Permissions bits required to specify User and Kernel
 * Read/Write permissions on a PTE in this type of page table hierarchy (stage 1
 * vs stage 2).
 */
static inline uintptr_t
pt_attr_leaf_rw(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rw;
}
426 
/**
 * Return the Access Permissions bits required to specify User and Kernel
 * Read-Only permissions on a PTE in this type of page table hierarchy (stage 1
 * vs stage 2).
 */
static inline uintptr_t
pt_attr_leaf_ro(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_ro;
}
437 
/**
 * Return the Access Permissions bits required to specify just Kernel Read-Only
 * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
 * 2).
 */
static inline uintptr_t
pt_attr_leaf_rona(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rona;
}
448 
/**
 * Return the Access Permissions bits required to specify just Kernel Read/Write
 * permissions on a PTE in this type of page table hierarchy (stage 1 vs stage
 * 2).
 */
static inline uintptr_t
pt_attr_leaf_rwna(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_rwna;
}
459 
/**
 * Return the mask of the page table entry bits required to set both the
 * privileged and unprivileged execute never bits.
 */
static inline uintptr_t
pt_attr_leaf_xn(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_xn;
}
469 
/**
 * Return the mask of the page table entry bits required to set just the
 * privileged execute never bit (leaving the mapping user-executable).
 */
static inline uintptr_t
pt_attr_leaf_x(const pt_attr_t * const pt_attr)
{
	return pt_attr->ap_x;
}
479 
480 #else /* (__ARM_VMSA__ > 7) */
481 
482 /**
483  * Only the ARMv8 page tables are parameterized. ARMv7 page tables have a fixed
484  * geometry.
485  *
486  * For documentation, see the ARMv8 equivalents to these functions above.
487  */
488 
/* ARMv7 fixed geometry: the page size is the kernel's PAGE_SIZE. */
static inline uint64_t
pt_attr_page_size(__unused const pt_attr_t * const pt_attr)
{
	return PAGE_SIZE;
}
494 
/* ARMv7 fixed geometry: the hierarchy always starts at L1. */
__unused static inline unsigned int
pt_attr_root_level(__unused const pt_attr_t * const pt_attr)
{
	return PMAP_TT_L1_LEVEL;
}
500 
/* ARMv7 fixed geometry: the commpage is nested at L1. */
__unused static inline unsigned int
pt_attr_commpage_level(__unused const pt_attr_t * const pt_attr)
{
	return PMAP_TT_L1_LEVEL;
}
506 
/* ARMv7 fixed geometry: the twig (second-to-last) level is L1. */
static inline unsigned int
pt_attr_twig_level(__unused const pt_attr_t * const pt_attr)
{
	return PMAP_TT_L1_LEVEL;
}
512 
/* VA space covered by a single twig (L1) TTE. */
static inline uint64_t
pt_attr_twig_size(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_TWIG_SIZE;
}
518 
/* Offset mask for a twig TTE; equivalent to pt_attr_twig_size() - 1. */
static inline uint64_t
pt_attr_twig_offmask(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_TWIG_OFFMASK;
}
524 
/* Shift used to extract the twig table index from a virtual address. */
static inline uint64_t
pt_attr_twig_shift(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_TWIG_SHIFT;
}
530 
/* Mask used (with pt_attr_twig_shift()) to extract the twig table index. */
static __unused inline uint64_t
pt_attr_twig_index_mask(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_TWIG_INDEX_MASK;
}
536 
/* VA space covered by a single leaf (L2) PTE. */
__unused static inline uint64_t
pt_attr_leaf_size(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_LEAF_SIZE;
}
542 
/* Offset mask for a leaf PTE; equivalent to pt_attr_leaf_size() - 1. */
__unused static inline uint64_t
pt_attr_leaf_offmask(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_LEAF_OFFMASK;
}
548 
/* Shift used to extract the leaf table index from a virtual address. */
static inline uint64_t
pt_attr_leaf_shift(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_LEAF_SHIFT;
}
554 
/* Mask used (with pt_attr_leaf_shift()) to extract the leaf table index. */
static __unused inline uint64_t
pt_attr_leaf_index_mask(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_LEAF_INDEX_MASK;
}
560 
/* Amount of memory occupied by a leaf page table (an L1 entry's worth). */
static inline uint64_t
pt_attr_leaf_table_size(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_L1_PT_SIZE;
}
566 
/* Offset mask for the memory used by a leaf page table. */
static inline uint64_t
pt_attr_leaf_table_offmask(__unused const pt_attr_t * const pt_attr)
{
	return ARM_TT_L1_PT_OFFMASK;
}
572 
/* AP bits for User and Kernel Read/Write. */
static inline uintptr_t
pt_attr_leaf_rw(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_AP(AP_RWRW);
}
578 
/* AP bits for User and Kernel Read-Only. */
static inline uintptr_t
pt_attr_leaf_ro(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_AP(AP_RORO);
}
584 
/* AP bits for Kernel Read-Only, no user access. */
static inline uintptr_t
pt_attr_leaf_rona(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_AP(AP_RONA);
}
590 
/* AP bits for Kernel Read/Write, no user access. */
static inline uintptr_t
pt_attr_leaf_rwna(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_AP(AP_RWNA);
}
596 
/* PTE bits to make a mapping execute-never. */
static inline uintptr_t
pt_attr_leaf_xn(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_NX;
}
602 
/* PTE bits to make a mapping privileged-execute-never only. */
static inline uintptr_t
pt_attr_leaf_x(__unused const pt_attr_t * const pt_attr)
{
	return ARM_PTE_PNX;
}
608 
609 __unused static inline uintptr_t
pt_attr_ln_offmask(__unused const pt_attr_t * const pt_attr,unsigned int level)610 pt_attr_ln_offmask(__unused const pt_attr_t * const pt_attr, unsigned int level)
611 {
612 	if (level == PMAP_TT_L1_LEVEL) {
613 		return ARM_TT_L1_OFFMASK;
614 	} else if (level == PMAP_TT_L2_LEVEL) {
615 		return ARM_TT_L2_OFFMASK;
616 	}
617 
618 	return 0;
619 }
620 
621 static inline uintptr_t
pt_attr_ln_pt_offmask(__unused const pt_attr_t * const pt_attr,unsigned int level)622 pt_attr_ln_pt_offmask(__unused const pt_attr_t * const pt_attr, unsigned int level)
623 {
624 	if (level == PMAP_TT_L1_LEVEL) {
625 		return ARM_TT_L1_PT_OFFMASK;
626 	} else if (level == PMAP_TT_L2_LEVEL) {
627 		return ARM_TT_L2_OFFMASK;
628 	}
629 
630 	return 0;
631 }
632 
633 #endif /* (__ARM_VMSA__ > 7) */
634 
/**
 * Return the last level in the page table hierarchy (one below the twig).
 */
static inline unsigned int
pt_attr_leaf_level(const pt_attr_t * const pt_attr)
{
	return pt_attr_twig_level(pt_attr) + 1;
}
643 
644 #if (__ARM_VMSA__ == 7)
645 
/**
 * Return the index into a root/twig page table for a given virtual address.
 *
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
tte_index(__unused const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttenum(addr);
}
656 
/**
 * Return the index into a leaf page table for a given virtual address.
 *
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
pte_index(__unused const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ptenum(addr);
}
667 
668 #else /* (__ARM_VMSA__ == 7) */
669 
670 /**
671  * Return the index into a specific level of page table for a given virtual
672  * address.
673  *
674  * @param pt_attr Page table attribute structure describing the hierarchy.
675  * @param addr The virtual address to get the index from.
676  * @param pt_level The page table whose index should be returned.
677  */
678 static inline unsigned int
ttn_index(const pt_attr_t * const pt_attr,vm_map_address_t addr,unsigned int pt_level)679 ttn_index(const pt_attr_t * const pt_attr, vm_map_address_t addr, unsigned int pt_level)
680 {
681 	const uint64_t index_unshifted = addr & pt_attr_ln_index_mask(pt_attr, pt_level);
682 	return (unsigned int)(index_unshifted >> pt_attr_ln_shift(pt_attr, pt_level));
683 }
684 
/**
 * Return the index into a twig page table for a given virtual address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
tte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L2_LEVEL);
}
696 
/**
 * Return the index into a leaf page table for a given virtual address.
 *
 * @param pt_attr Page table attribute structure describing the hierarchy.
 * @param addr The virtual address to get the index from.
 */
static inline unsigned int
pte_index(const pt_attr_t * const pt_attr, vm_map_address_t addr)
{
	return ttn_index(pt_attr, addr, PMAP_TT_L3_LEVEL);
}
708 
709 #endif /* (__ARM_VMSA__ == 7) */
710 
711 #if (__ARM_VMSA__ == 7)
712 
/**
 * Given an address and a map, compute the address of the corresponding
 * root/twig translation table entry.
 *
 * @param pmap The pmap whose root table to use.
 * @param addr The virtual address to calculate the root index off of.
 *
 * @return Pointer to the root TTE, or TT_ENTRY_NULL if the address falls
 *         beyond the portion of the root table this pmap has allocated.
 */
static inline tt_entry_t *
pmap_tte(pmap_t pmap, vm_map_address_t addr)
{
	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);

	/**
	 * A full root table can address up to 4GB of virtual address space, but not
	 * all pmaps support the full address space.
	 */
	if (tte_index(pt_attr, addr) >= pmap->tte_index_max) {
		return TT_ENTRY_NULL;
	}

	return &pmap->tte[tte_index(pt_attr, addr)];
}
735 
736 
737 /**
738  * Given an address and a map, compute the address of the leaf page table entry.
739  * If the address is invalid with respect to the map then PT_ENTRY_NULL is
740  * returned (and the map may need to grow).
741  *
742  * @param pmap The pmap whose page tables to parse.
743  * @param addr The virtual address to calculate the table indices off of.
744  */
745 static inline pt_entry_t *
pmap_pte(pmap_t pmap,vm_map_address_t addr)746 pmap_pte(pmap_t pmap, vm_map_address_t addr)
747 {
748 	pt_entry_t *ptep = PT_ENTRY_NULL;
749 	tt_entry_t *ttep = TT_ENTRY_NULL;
750 	tt_entry_t tte = ARM_TTE_EMPTY;
751 
752 	ttep = pmap_tte(pmap, addr);
753 	if (ttep == TT_ENTRY_NULL) {
754 		return PT_ENTRY_NULL;
755 	}
756 
757 	tte = *ttep;
758 
759 #if MACH_ASSERT
760 	if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
761 		panic("%s: Attempt to demote L1 block, tte=0x%lx, pmap=%p, addr=%p",
762 		    __func__, (unsigned long)tte, pmap, (void*)addr);
763 	}
764 #endif
765 
766 	if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
767 		return PT_ENTRY_NULL;
768 	}
769 
770 	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
771 	ptep = (pt_entry_t *) ttetokv(tte) + pte_index(pt_attr, addr);
772 	return ptep;
773 }
774 
775 /**
776  * Given an address and a map, compute the address of the table entry at the
777  * specified page table level. If the address is invalid with respect to the map
778  * then TT_ENTRY_NULL is returned.
779  *
780  * @param pmap The pmap whose page tables to parse.
781  * @param target_level The page table level at which to stop parsing the
782  *                     hierarchy at.
783  * @param addr The virtual address to calculate the table indices off of.
784  */
785 static inline tt_entry_t *
pmap_ttne(pmap_t pmap,unsigned int target_level,vm_map_address_t addr)786 pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
787 {
788 	tt_entry_t *ret_ttep = TT_ENTRY_NULL;
789 
790 	switch (target_level) {
791 	case PMAP_TT_L1_LEVEL:
792 		ret_ttep = pmap_tte(pmap, addr);
793 		break;
794 	case PMAP_TT_L2_LEVEL:
795 		ret_ttep = (tt_entry_t *)pmap_pte(pmap, addr);
796 		break;
797 	default:
798 		panic("%s: bad level, pmap=%p, target_level=%u, addr=%p",
799 		    __func__, pmap, target_level, (void *)addr);
800 	}
801 
802 	return ret_ttep;
803 }
804 
805 #else /* (__ARM_VMSA__ == 7) */
806 
/**
 * Given an address and a map, compute the address of the table entry at the
 * specified page table level. If the address is invalid with respect to the map
 * then TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param target_level The page table level at which to stop parsing the
 *                     hierarchy.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_ttne(pmap_t pmap, unsigned int target_level, vm_map_address_t addr)
{
	tt_entry_t *table_ttep = TT_ENTRY_NULL;
	tt_entry_t *ttep = TT_ENTRY_NULL;
	tt_entry_t tte = ARM_TTE_EMPTY;
	unsigned int cur_level;

	const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);

	/* Addresses outside the pmap's VA range have no table entries at all. */
	if (__improbable((addr < pmap->min) || (addr >= pmap->max))) {
		return TT_ENTRY_NULL;
	}
	/* Start parsing at the root page table. */
	table_ttep = pmap->tte;

	assert(target_level <= pt_attr->pta_max_level);

	/* Walk down the hierarchy one level at a time until the target level. */
	for (cur_level = pt_attr->pta_root_level; cur_level <= target_level; cur_level++) {
		ttep = &table_ttep[ttn_index(pt_attr, addr, cur_level)];

		/* Reached the requested level; return this entry without dereferencing it. */
		if (cur_level == target_level) {
			break;
		}

		tte = *ttep;

#if MACH_ASSERT
		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) == (ARM_TTE_TYPE_BLOCK | ARM_TTE_VALID)) {
			panic("%s: Attempt to demote L%u block, tte=0x%llx, pmap=%p, target_level=%u, addr=%p",
			    __func__, cur_level, tte, pmap, target_level, (void*)addr);
		}
#endif
		/* The walk can only continue through a valid table descriptor. */
		if ((tte & (ARM_TTE_TYPE_MASK | ARM_TTE_VALID)) != (ARM_TTE_TYPE_TABLE | ARM_TTE_VALID)) {
			return TT_ENTRY_NULL;
		}

		/* Follow the table pointer to the next-level page table. */
		table_ttep = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
	}

	return ttep;
}
859 
/**
 * Given an address and a map, compute the address of the level 1 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt1e(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_ttne(pmap, PMAP_TT_L1_LEVEL, addr);
}
873 
/**
 * Given an address and a map, compute the address of the level 2 translation
 * table entry. If the address is invalid with respect to the map then
 * TT_ENTRY_NULL is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tt2e(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_ttne(pmap, PMAP_TT_L2_LEVEL, addr);
}
887 
/**
 * Given an address and a map, compute the address of the level 3 page table
 * entry. If the address is invalid with respect to the map then PT_ENTRY_NULL
 * is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_tt3e(pmap_t pmap, vm_map_address_t addr)
{
	return (pt_entry_t*)pmap_ttne(pmap, PMAP_TT_L3_LEVEL, addr);
}
901 
/**
 * Given an address and a map, compute the address of the twig translation table
 * entry. If the address is invalid with respect to the map then TT_ENTRY_NULL
 * is returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline tt_entry_t *
pmap_tte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt2e(pmap, addr);
}
915 
/**
 * Given an address and a map, compute the address of the leaf page table entry.
 * If the address is invalid with respect to the map then PT_ENTRY_NULL is
 * returned.
 *
 * @param pmap The pmap whose page tables to parse.
 * @param addr The virtual address to calculate the table indices off of.
 */
static inline pt_entry_t *
pmap_pte(pmap_t pmap, vm_map_address_t addr)
{
	return pmap_tt3e(pmap, addr);
}
929 
930 #endif /* (__ARM_VMSA__ == 7) */
931 
932 #endif /* _ARM_PMAP_PMAP_PT_GEOMETRY_H_ */
933