/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * This header stores the types, prototypes, and inline functions that define
 * some of the most important data structures used in the pmap. This header is
 * only meant for sharing types within the pmap; if a type is meant to be used
 * by the rest of the kernel, then put it into osfmk/arm/pmap.h.
 */
#ifndef _ARM_PMAP_PMAP_DATA_H_
#define _ARM_PMAP_PMAP_DATA_H_

#include <stdint.h>

#include <kern/ledger.h>
#include <mach/vm_types.h>
#include <mach_assert.h>
#include <vm/vm_page.h>

#include <arm/cpu_data.h>
#include <arm/machine_routines.h>
#include <arm64/proc_reg.h>

/* Temporary include before moving all ledger functions into pmap_data.c */
#include <os/refcnt.h>

/**
 * These headers are safe to include in this file since they shouldn't rely on
 * any of the internal pmap header files (so no circular dependencies).
 */
#include <arm/pmap.h>
#include <arm/pmap/pmap_pt_geometry.h>

/**
 * These values represent the first and last kernel-managed physical addresses.
 * We keep track of extra metadata on kernel-managed pages compared to other
 * pages (usually memory carved out by iBoot, or I/O memory).
 */
extern pmap_paddr_t vm_first_phys, vm_last_phys;

/**
 * Return whether the given address represents a kernel-managed physical page.
 *
 * Whether a page is considered "kernel-managed" is determined by the BootArgs
 * passed by the bootloader. Typically memory carved out by the bootloader as
 * well as I/O memory should return false.
 *
 * @param pa The physical address to check.
 */
static inline bool
pa_valid(pmap_paddr_t pa)
{
	return (pa >= vm_first_phys) && (pa < vm_last_phys);
}

/**
 * The pmap has a variety of data structures (pv_head_table/pp_attr_table) that
 * contain an entry for every kernel-managed page in the system. These
 * structures are indexed with physical address indices ("pai") generated by
 * this function.
 *
 * The logic is simple since there should be one entry in each of these data
 * structures for each kernel-managed physical page in the system. These data
 * structures are allocated on boot based on the amount of memory available.
 *
 * @note PAIs are defined using the VM page size, which might not be identical
 *       to the underlying hardware page size for an arbitrary address space.
 *       This means that the data structures relying on PAIs will contain one
 *       entry for each VM page, not hardware page.
 *
 * @note This function is only valid for physical addresses that are
 *       kernel-managed.
 */
static inline unsigned int
pa_index(pmap_paddr_t pa)
{
	return (unsigned int)atop(pa - vm_first_phys);
}
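
/**
 * Illustrative sketch (not part of the original header): pa_valid() should
 * gate any pa_index() conversion, since the per-page data structures only have
 * entries for kernel-managed pages. The helper name below is hypothetical.
 */
static inline bool
pa_example_get_index(pmap_paddr_t pa, unsigned int *pai_out)
{
	if (!pa_valid(pa)) {
		/* Not kernel-managed: no pv_head_table/pp_attr_table entry exists. */
		return false;
	}
	*pai_out = pa_index(pa);
	return true;
}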

/* See the definition of pv_head_table for more information. */
extern pv_entry_t **pv_head_table;

/* Represents a NULL entry in the pv_head_table. */
#define PV_ENTRY_NULL ((pv_entry_t *) 0)

/**
 * Given a physical address index, return the corresponding pv_head_table entry.
 *
 * @note Despite returning a pointer to a pv_entry_t pointer, the entry might
 *       actually be a different type of pointer (pt_entry_t or pt_desc_t)
 *       depending on the type for this entry. Determine the type using
 *       pvh_test_type().
 *
 * @param pai The index returned by pa_index() for the page whose pv_head_table
 *            entry should be retrieved.
 */
static inline pv_entry_t **
pai_to_pvh(unsigned int pai)
{
	return &pv_head_table[pai];
}

/**
 * Each pv_head_table entry can be one of four different types:
 *
 * - PVH_TYPE_NULL: No mappings to the physical page exist outside of the
 *                  physical aperture. Physical aperture mappings are not
 *                  tracked in the pv_head_table.
 *
 * - PVH_TYPE_PVEP: There are multiple mappings to the physical page.
 *                  These entries are linked lists of pv_entry_t objects (which
 *                  each contain a pointer to the associated PTE and a pointer
 *                  to the next entry in the list).
 *
 * - PVH_TYPE_PTEP: There is a single mapping to the physical page. Once more
 *                  mappings are created, this entry will get upgraded to an
 *                  entry of type PVH_TYPE_PVEP. These entries are pointers
 *                  directly to the page table entry that contains the mapping
 *                  (pt_entry_t*).
 *
 * - PVH_TYPE_PTDP: The physical page is being used as a page table. These
 *                  entries are pointers to page table descriptor structures
 *                  (pt_desc_t) which contain metadata related to each page
 *                  table.
 *
 * The type is stored in the bottom two bits of each pv_head_table entry. That
 * type needs to be checked before dereferencing the pointer to determine which
 * pointer type to dereference as.
 */
#define PVH_TYPE_NULL 0x0UL
#define PVH_TYPE_PVEP 0x1UL
#define PVH_TYPE_PTEP 0x2UL
#define PVH_TYPE_PTDP 0x3UL

#define PVH_TYPE_MASK (0x3UL)

#if defined(__arm64__)

/**
 * PV_HEAD_TABLE Flags.
 *
 * All flags listed below are stored in the pv_head_table entry/pointer
 * (per-physical-page) unless otherwise noted.
 *
 * Please update the pv_walk LLDB macro if these flags are changed or added to.
 */

/**
 * This flag is set for every mapping created by an IOMMU.
 *
 * Stored in each PTE pointer (for PVH_TYPE_PVEP lists), or in the pv_head_table
 * entry/pointer for single-PTE entries (PVH_TYPE_PTEP).
 */
#define PVH_FLAG_IOMMU 0x4UL

/**
 * This flag is only valid when PVH_FLAG_IOMMU is set. For an IOMMU mapping, if
 * this bit is set, then the PTE pointer points directly into the IOMMU page
 * table for this mapping. If this bit is cleared, then the "PTE pointer" is
 * actually a pointer to the IOMMU descriptor object that owns this mapping.
 *
 * There are cases where it's not easy to tie an IOMMU mapping directly to a
 * specific page table, so this allows us to at least get a pointer to which
 * IOMMU created this mapping, which is useful for debugging purposes.
 *
 * Stored in each PTE pointer (for PVH_TYPE_PVEP lists), or in the pv_head_table
 * entry/pointer for single-PTE entries (PVH_TYPE_PTEP).
 */
#define PVH_FLAG_IOMMU_TABLE (1ULL << 63)

/**
 * This flag is set when the first CPU (non-IOMMU) mapping is created. This is
 * important to keep track of because various accounting statistics are based on
 * the options specified for the first CPU mapping. This flag, and thus the
 * accounting statistics, will persist as long as there are *any* mappings of
 * the page (including IOMMU mappings). This works because the accounting for a
 * page should not need to change until the page is recycled by the VM layer,
 * and we double-check that there are no mappings (CPU or IOMMU) when a page is
 * recycled (see: pmap_verify_free()).
 */
#define PVH_FLAG_CPU (1ULL << 62)

/* This bit is used as a lock when modifying a pv_head_table entry. */
#define PVH_LOCK_BIT 61
#define PVH_FLAG_LOCK (1ULL << PVH_LOCK_BIT)

/**
 * This flag is set when there are any executable mappings to this physical
 * page. This is used to prevent any writable mappings from being created at
 * the same time an executable mapping exists.
 */
#define PVH_FLAG_EXEC (1ULL << 60)

/**
 * Marking a pv_head_table entry with this flag denotes that this page is a
 * kernelcache text or data page that shouldn't have dynamically-created
 * mappings. See PVH_FLAG_LOCKDOWN_MASK for more details.
 */
#define PVH_FLAG_LOCKDOWN_KC (1ULL << 59)

/**
 * This flag is used to mark that a page has been hashed into the hibernation
 * image.
 *
 * The hibernation driver will use this to ensure that all PPL-owned memory is
 * correctly included in the hibernation image (a missing PPL page could be a
 * security concern when coming out of hibernation).
 */
#define PVH_FLAG_HASHED (1ULL << 58)

/**
 * Marking a pv_head_table entry with this flag denotes that this page is a
 * code signature page that shouldn't have dynamically-created mappings.
 * See PVH_FLAG_LOCKDOWN_MASK for more details.
 */
#define PVH_FLAG_LOCKDOWN_CS (1ULL << 57)

/**
 * Marking a pv_head_table entry with this flag denotes that this page is a
 * read-only allocator page that shouldn't have dynamically-created mappings.
 * See PVH_FLAG_LOCKDOWN_MASK for more details.
 */
#define PVH_FLAG_LOCKDOWN_RO (1ULL << 56)

/**
 * Marking a pv_head_table entry with this flag denotes that this page is
 * retired without any mappings and should never be mapped again.
 */
#define PVH_FLAG_RETIRED (1ULL << 55)

/**
 * Flags which disallow a new mapping to a page.
 */
#define PVH_FLAG_NOMAP_MASK (PVH_FLAG_RETIRED)

/**
 * Marking a pv_head_table entry with this flag denotes that this page has
 * been mapped into a non-coherent coprocessor address space and requires a
 * cache flush operation once all mappings have been removed.
 */
#define PVH_FLAG_FLUSH_NEEDED (1ULL << 52)

/**
 * Marking a pv_head_table entry with any bit in this mask denotes that this
 * page has been locked down by the PPL. Locked down pages can't have new
 * mappings created or existing mappings removed, and all existing mappings
 * will have been converted to read-only. This essentially makes the page
 * immutable.
 */
#define PVH_FLAG_LOCKDOWN_MASK (PVH_FLAG_LOCKDOWN_KC | PVH_FLAG_LOCKDOWN_CS | PVH_FLAG_LOCKDOWN_RO)


/**
 * These bits need to be set to safely dereference a pv_head_table
 * entry/pointer.
 *
 * Any change to this #define should also update the copy located in the pmap.py
 * LLDB macros file.
 */
#define PVH_HIGH_FLAGS (PVH_FLAG_CPU | PVH_FLAG_LOCK | PVH_FLAG_EXEC | PVH_FLAG_LOCKDOWN_MASK | \
    PVH_FLAG_HASHED | PVH_FLAG_FLUSH_NEEDED | PVH_FLAG_RETIRED)

#endif /* defined(__arm64__) */

/* Mask used to clear out the TYPE bits from a pv_head_table entry/pointer. */
#define PVH_LIST_MASK (~PVH_TYPE_MASK)

/* Which 32-bit word in each pv_head_table entry/pointer contains the LOCK bit. */
#if defined(__arm64__)
#define PVH_LOCK_WORD 1 /* Assumes little-endian */
#endif /* defined(__arm64__) */

/**
 * Assert that a pv_head_table entry is locked. Will panic if the lock isn't
 * acquired.
 *
 * @param index The physical address index to check.
 */
static inline void
pvh_assert_locked(__assert_only unsigned int index)
{
	assert((vm_offset_t)(pv_head_table[index]) & PVH_FLAG_LOCK);
}


/**
 * Lock a pv_head_table entry.
 *
 * @param index The physical address index of the pv_head_table entry to lock.
 */
static inline void
pvh_lock(unsigned int index)
{
	pmap_lock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD,
	    PVH_LOCK_BIT - (PVH_LOCK_WORD * 32));
}

/**
 * Unlock a pv_head_table entry.
 *
 * @param index The physical address index of the pv_head_table entry to unlock.
 */
static inline void
pvh_unlock(unsigned int index)
{
	pvh_assert_locked(index);

	pmap_unlock_bit((uint32_t*)(&pv_head_table[index]) + PVH_LOCK_WORD,
	    PVH_LOCK_BIT - (PVH_LOCK_WORD * 32));
}
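
/**
 * Illustrative sketch (not part of the original header): pv_head_table entries
 * are examined and modified under the per-entry lock, so a typical caller
 * brackets its work with pvh_lock()/pvh_unlock(). The helper name below is
 * hypothetical.
 */
static inline vm_offset_t
pvh_example_read_locked(unsigned int index)
{
	pvh_lock(index);
	pvh_assert_locked(index);
	/* Raw entry value, including the TYPE bits and high flags. */
	const vm_offset_t raw = (vm_offset_t)pv_head_table[index];
	pvh_unlock(index);
	return raw;
}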

/**
 * Check that a pv_head_table entry/pointer is a specific type.
 *
 * @param pvh The pv_head_table entry/pointer to check.
 * @param type The type to check for.
 *
 * @return True if the pv_head_table entry is of the passed in type, false
 *         otherwise.
 */
static inline bool
pvh_test_type(pv_entry_t **pvh, vm_offset_t type)
{
	return ((*(vm_offset_t *)pvh) & PVH_TYPE_MASK) == type;
}

/**
 * Convert a pv_head_table entry/pointer into a page table entry pointer. This
 * should only be done if the type of this entry is PVH_TYPE_PTEP.
 *
 * @param pvh The pv_head_table entry/pointer to convert into a pt_entry_t*.
 *
 * @return A safe-to-dereference pointer to the single mapping of this physical
 *         page, obtained by masking off the TYPE bits and adding any missing
 *         flags to the upper portion of the pointer.
 */
static inline pt_entry_t*
pvh_ptep(pv_entry_t **pvh)
{
	return (pt_entry_t *)(((*(vm_offset_t *)pvh) & PVH_LIST_MASK) | PVH_HIGH_FLAGS);
}

/**
 * Convert a pv_head_table entry/pointer into a PVE list pointer. This
 * should only be done if the type of this entry is PVH_TYPE_PVEP.
 *
 * @param pvh The pv_head_table entry/pointer to convert into a safe to
 *            dereference pv_entry_t*.
 *
 * @return A safe-to-dereference pointer to the first mapping of this physical
 *         page, obtained by masking off the TYPE bits and adding any missing
 *         flags to the upper portion of the pointer.
 */
static inline pv_entry_t*
pvh_pve_list(pv_entry_t **pvh)
{
	return (pv_entry_t *)(((*(vm_offset_t *)pvh) & PVH_LIST_MASK) | PVH_HIGH_FLAGS);
}
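
/**
 * Illustrative sketch (not part of the original header): the TYPE bits must be
 * checked before choosing an accessor. Only a PVH_TYPE_PTEP entry can be
 * handed straight to pvh_ptep(); the helper name below is hypothetical.
 */
static inline pt_entry_t *
pvh_example_single_ptep(pv_entry_t **pvh)
{
	if (pvh_test_type(pvh, PVH_TYPE_PTEP)) {
		return pvh_ptep(pvh);
	}
	/* PVH_TYPE_PVEP entries must instead be walked via pvh_pve_list(). */
	return PT_ENTRY_NULL;
}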

/**
 * Return the flags associated with a pv_head_table entry/pointer.
 *
 * @param pvh The pv_head_table entry whose flags to get.
 */
static inline vm_offset_t
pvh_get_flags(pv_entry_t **pvh)
{
	return (*(vm_offset_t *)pvh) & PVH_HIGH_FLAGS;
}

/**
 * Atomically set the flags associated with a pv_head_table entry/pointer.
 *
 * @param pvh The pv_head_table entry whose flags are getting set.
 */
static inline void
pvh_set_flags(pv_entry_t **pvh, vm_offset_t flags)
{
	os_atomic_store((vm_offset_t *)pvh, ((*(vm_offset_t *)pvh) & ~PVH_HIGH_FLAGS) | flags, relaxed);
}

/**
 * Update a pv_head_table entry/pointer to be a different type and/or point to
 * a different object.
 *
 * @note The pv_head_table entry MUST already be locked.
 *
 * @note This function will clobber any existing flags stored in the PVH pointer
 *       (except PVH_FLAG_LOCK). It's up to the caller to preserve flags if that
 *       functionality is needed (either by ensuring `pvep` contains those
 *       flags, or by manually setting the flags after this call).
 *
 * @param pvh The pv_head_table entry/pointer to update.
 * @param pvep The new entry to use. This could be either a pt_entry_t*,
 *             pv_entry_t*, or pt_desc_t* depending on the type.
 * @param type The type of the new entry.
 */
static inline void
pvh_update_head(pv_entry_t **pvh, void *pvep, unsigned int type)
{
	assert((*(vm_offset_t *)pvh) & PVH_FLAG_LOCK);
	os_atomic_store((vm_offset_t *)pvh, (vm_offset_t)pvep | type | PVH_FLAG_LOCK, relaxed);
}
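
/**
 * Illustrative sketch (not part of the original header): since
 * pvh_update_head() clobbers the high flags, a caller that needs to keep them
 * typically saves and restores the flags around the update while holding the
 * entry lock. The helper name below is hypothetical.
 */
static inline void
pvh_example_update_keep_flags(pv_entry_t **pvh, void *pvep, unsigned int type)
{
	/* PVH_FLAG_LOCK is part of PVH_HIGH_FLAGS, so it round-trips too. */
	const vm_offset_t flags = pvh_get_flags(pvh);
	pvh_update_head(pvh, pvep, type);
	pvh_set_flags(pvh, flags);
}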

/**
 * Update a pv_head_table entry/pointer to be a different type and/or point to
 * a different object.
 *
 * @note The pv_head_table entry CAN'T already be locked.
 *
 * @note This function will clobber any existing flags stored in the PVH
 *       pointer. It's up to the caller to preserve flags if that functionality
 *       is needed (either by ensuring `pvep` contains those flags, or by
 *       manually setting the flags after this call).
 *
 * @param pvh The pv_head_table entry/pointer to update.
 * @param pvep The new entry to use. This could be either a pt_entry_t*,
 *             pv_entry_t*, or pt_desc_t* depending on the type.
 * @param type The type of the new entry.
 */
static inline void
pvh_update_head_unlocked(pv_entry_t **pvh, void *pvep, unsigned int type)
{
	assert(!((*(vm_offset_t *)pvh) & PVH_FLAG_LOCK));
	*(vm_offset_t *)pvh = ((vm_offset_t)pvep | type) & ~PVH_FLAG_LOCK;
}

/**
 * Given a page table entry pointer retrieved from the pv_head_table (from an
 * entry of type PVH_TYPE_PTEP or PVH_TYPE_PVEP), return whether the PTE is an
 * IOMMU mapping.
 *
 * @note The way this function determines whether the passed in pointer is
 *       pointing to an IOMMU PTE is by checking for a special flag stored in
 *       the lower bits of the pointer. This flag is only set on pointers stored
 *       in the pv_head_table, and as such, this function will only work on
 *       pointers retrieved from the pv_head_table. If a pointer to a PTE was
 *       directly retrieved from an IOMMU's page tables, this function would
 *       always return false despite it actually being an IOMMU PTE.
 *
 * @param ptep A PTE pointer obtained from the pv_head_table to check.
 *
 * @return True if the entry is an IOMMU mapping, false otherwise.
 */
static inline bool
pvh_ptep_is_iommu(const pt_entry_t *ptep)
{
#ifdef PVH_FLAG_IOMMU
	return (vm_offset_t)ptep & PVH_FLAG_IOMMU;
#else /* PVH_FLAG_IOMMU */
#pragma unused(ptep)
	return false;
#endif /* PVH_FLAG_IOMMU */
}

/**
 * Sometimes the PTE pointers retrieved from the pv_head_table (from an entry of
 * type PVH_TYPE_PTEP or PVH_TYPE_PVEP) contain flags themselves. This function
 * strips out those flags and returns a dereferenceable pointer.
 *
 * @param ptep The PTE pointer from which to strip the unwanted flags.
 *
 * @return A valid dereferenceable pointer to the page table entry.
 */
static inline const pt_entry_t*
pvh_strip_ptep(const pt_entry_t *ptep)
{
#ifdef PVH_FLAG_IOMMU
	const vm_offset_t pte_va = (vm_offset_t)ptep;
	return (const pt_entry_t*)((pte_va & ~PVH_FLAG_IOMMU) | PVH_FLAG_IOMMU_TABLE);
#else /* PVH_FLAG_IOMMU */
	return ptep;
#endif /* PVH_FLAG_IOMMU */
}
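
/**
 * Illustrative sketch (not part of the original header): a PTE pointer taken
 * from the pv_head_table is checked for the IOMMU marker and stripped before
 * it is dereferenced. The helper name is hypothetical; a real caller would
 * likely handle IOMMU entries specially rather than skip them.
 */
static inline pt_entry_t
pvh_example_read_pte(const pt_entry_t *ptep)
{
	if (pvh_ptep_is_iommu(ptep)) {
		/* Without PVH_FLAG_IOMMU_TABLE this may point at an IOMMU
		 * descriptor rather than a real PTE, so don't dereference it. */
		return 0;
	}
	return *pvh_strip_ptep(ptep);
}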

/**
 * PVH_TYPE_PVEP Helper Functions.
 *
 * The following are methods used to manipulate PVE lists. This is the type of
 * pv_head_table entry used when there are multiple mappings to a single
 * physical page.
 */

/**
 * Whether a physical page is using "alternate accounting" (ALTACCT) for its
 * ledger statistics is something that needs to be tracked on a per-mapping
 * basis, not on a per-physical-page basis. Because of that, it's tracked
 * differently depending on whether there's a single mapping to a page
 * (PVH_TYPE_PTEP) or multiple (PVH_TYPE_PVEP). For single mappings, the bit is
 * tracked in the pp_attr_table. But when there are multiple mappings, the least
 * significant bit of the corresponding "pve_pte" pointer in each pv_entry object
 * is used as a marker for pages using alternate accounting.
 *
 * @note See the definition for PP_ATTR_ALTACCT for a more detailed description
 *       of what "alternate accounting" actually means with respect to the
 *       footprint ledger.
 *
 * Since some code (KernelDiskImages, e.g.) might map a physical page as
 * "device" memory (i.e. external) while it's also being used as regular
 * "anonymous" memory (i.e. internal) in user space, we have to manage the
 * "internal" attribute per mapping rather than per physical page.
 * When there are multiple mappings, we use the next least significant bit of
 * the corresponding "pve_pte" pointer for that.
 */
#define PVE_PTEP_ALTACCT ((uintptr_t) 0x1)
#define PVE_PTEP_INTERNAL ((uintptr_t) 0x2)
#define PVE_PTEP_FLAGS (PVE_PTEP_ALTACCT | PVE_PTEP_INTERNAL)

/**
 * Set the ALTACCT bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
pve_set_altacct(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	pvep->pve_ptep[idx] = (pt_entry_t *)((uintptr_t)pvep->pve_ptep[idx] | PVE_PTEP_ALTACCT);
}

/**
 * Set the INTERNAL bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
pve_set_internal(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	pvep->pve_ptep[idx] = (pt_entry_t *)((uintptr_t)pvep->pve_ptep[idx] | PVE_PTEP_INTERNAL);
}

/**
 * Clear the ALTACCT bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
pve_clr_altacct(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	pvep->pve_ptep[idx] = (pt_entry_t *)((uintptr_t)pvep->pve_ptep[idx] & ~PVE_PTEP_ALTACCT);
}

/**
 * Clear the INTERNAL bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
pve_clr_internal(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	pvep->pve_ptep[idx] = (pt_entry_t *)((uintptr_t)pvep->pve_ptep[idx] & ~PVE_PTEP_INTERNAL);
}

/**
 * Return the ALTACCT bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline bool
pve_get_altacct(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	return (uintptr_t)pvep->pve_ptep[idx] & PVE_PTEP_ALTACCT;
}

/**
 * Return the INTERNAL bit for a specific PTE pointer.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline bool
pve_get_internal(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	return (uintptr_t)pvep->pve_ptep[idx] & PVE_PTEP_INTERNAL;
}

/**
 * Return the next mapping (pv_entry) in a linked list of mappings. This applies
 * to pv_head_table entries of type PVH_TYPE_PVEP.
 *
 * @param pvep A pointer to the current pv_entry mapping in the linked list of
 *             mappings.
 *
 * @return The next virtual mapping for a physical page, or PV_ENTRY_NULL if the
 *         end of the list has been reached.
 */
static inline pv_entry_t *
pve_next(pv_entry_t *pvep)
{
	return pvep->pve_next;
}

/**
 * Return a pointer to the pve_next field in a pv_entry. This value is used
 * when adding entries to and removing entries from a PVE list.
 *
 * @param pvep The pv_entry whose pve_next field is being accessed.
 *
 * @return Pointer to the pve_next field.
 */
static inline pv_entry_t **
pve_next_ptr(pv_entry_t *pvep)
{
	return &pvep->pve_next;
}

/**
 * Return a pointer to the page table entry for this mapping.
 *
 * @param pvep The pv_entry whose pve_ptep field is to be returned.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 *
 * @return Pointer to the page table entry.
 */
static inline pt_entry_t *
pve_get_ptep(pv_entry_t *pvep, unsigned idx)
{
	assert(idx < PTE_PER_PVE);
	return (pt_entry_t *)((uintptr_t)pvep->pve_ptep[idx] & ~PVE_PTEP_FLAGS);
}

/**
 * Update the page table entry for a specific physical to virtual mapping.
 *
 * @param pvep The pv_entry to update.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 * @param ptep_new The new page table entry.
 */
static inline void
pve_set_ptep(pv_entry_t *pvep, unsigned idx, pt_entry_t *ptep_new)
{
	assert(idx < PTE_PER_PVE);
	pvep->pve_ptep[idx] = ptep_new;
}

/**
 * Initialize all fields in a PVE to NULL.
 *
 * @param pvep The pv_entry to initialize.
 */
static inline void
pve_init(pv_entry_t *pvep)
{
	pvep->pve_next = PV_ENTRY_NULL;
	for (int i = 0; i < PTE_PER_PVE; i++) {
		pvep->pve_ptep[i] = PT_ENTRY_NULL;
	}
}

/**
 * Find a PTE pointer in a PVE and return its index.
 *
 * @param pvep The PVE to search.
 * @param ptep PTE to search for.
 *
 * @return Index of the found entry, or -1 if no entry exists.
 */
static inline int
pve_find_ptep_index(pv_entry_t *pvep, pt_entry_t *ptep)
{
	for (unsigned int i = 0; i < PTE_PER_PVE; i++) {
		if (pve_get_ptep(pvep, i) == ptep) {
			return (int)i;
		}
	}

	return -1;
}

/**
 * Check whether no PTEs are currently associated with this PVE.
 *
 * @param pvep The PVE to search.
 *
 * @return True if no PTEs are currently associated with this PVE, false
 *         otherwise.
 */
static inline bool
pve_is_empty(pv_entry_t *pvep)
{
	for (unsigned int i = 0; i < PTE_PER_PVE; i++) {
		if (pve_get_ptep(pvep, i) != PT_ENTRY_NULL) {
			return false;
		}
	}

	return true;
}

/**
 * Prepend a new pv_entry node to a PVE list.
 *
 * @note This function will clobber any existing flags stored in the PVH
 *       pointer. It's up to the caller to preserve flags if that functionality
 *       is needed (either by ensuring `pvep` contains those flags, or by
 *       manually setting the flags after this call).
 *
 * @param pvh The linked list of mappings to update.
 * @param pvep The new mapping to add to the linked list.
 */
static inline void
pve_add(pv_entry_t **pvh, pv_entry_t *pvep)
{
	assert(pvh_test_type(pvh, PVH_TYPE_PVEP));

	pvep->pve_next = pvh_pve_list(pvh);
	pvh_update_head(pvh, pvep, PVH_TYPE_PVEP);
}

/**
 * Remove an entry from a PVE list of mappings.
 *
 * @note This function will clobber any existing flags stored in the PVH
 *       pointer. It's up to the caller to preserve flags if that functionality
 *       is needed.
 *
 * @param pvh The pv_head_table entry of the PVE list to remove a mapping from.
 *            This is the first entry in the list of pv_entry_t mappings.
 * @param pvepp A pointer to the pv_entry_t* that's being removed. If this entry
 *              is the first in the linked list of mappings, then this should be
 *              identical to the pv_head_table entry. If the mapping isn't the
 *              first, then this is a pointer to the pve_next field in the
 *              previous mapping.
 * @param pvep The entry that should be removed. Should be identical to a
 *             dereference of the pvepp parameter (unless it's the pv_head_table
 *             entry).
 */
static inline void
pve_remove(pv_entry_t **pvh, pv_entry_t **pvepp, pv_entry_t *pvep)
{
	assert(pvh_test_type(pvh, PVH_TYPE_PVEP));

	if (pvepp == pvh) {
		if (pve_next(pvep) == PV_ENTRY_NULL) {
			/* The last mapping to this page is being removed. */
			pvh_update_head(pvh, PV_ENTRY_NULL, PVH_TYPE_NULL);
		} else {
			/**
			 * There are still mappings left, make the next one the new head of
			 * the list. This effectively removes the first entry from the list.
			 */
			pvh_update_head(pvh, pve_next(pvep), PVH_TYPE_PVEP);
		}
	} else {
		/**
		 * Move the previous entry's next field to the entry after the one being
		 * removed. This will clobber the ALTACCT and INTERNAL bits.
		 */
		*pvepp = pve_next(pvep);
	}
}
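
/**
 * Illustrative sketch (not part of the original header): unlinking the mapping
 * for a given PTE from a PVH_TYPE_PVEP list combines the helpers above. The
 * entry must be locked, and a real caller would also free the unlinked PVE and
 * preserve the PVH flags. The helper name is hypothetical.
 */
static inline bool
pve_example_remove_ptep(pv_entry_t **pvh, pt_entry_t *ptep)
{
	pv_entry_t **pvepp = pvh;
	pv_entry_t *pvep = pvh_pve_list(pvh);

	while (pvep != PV_ENTRY_NULL) {
		const int idx = pve_find_ptep_index(pvep, ptep);
		if (idx >= 0) {
			pve_set_ptep(pvep, (unsigned)idx, PT_ENTRY_NULL);
			/* Only unlink the PVE once no PTE slots remain in use. */
			if (pve_is_empty(pvep)) {
				pve_remove(pvh, pvepp, pvep);
			}
			return true;
		}
		pvepp = pve_next_ptr(pvep);
		pvep = pve_next(pvep);
	}

	return false;
}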

/**
 * PVH_TYPE_PTDP Types and Helper Functions.
 *
 * The following are types and methods used to manipulate page table descriptor
 * (PTD) objects. This is the type of pv_head_table entry used when a page is
 * being used as a page table.
 */

/**
 * When the pmap layer allocates memory, it always does so in chunks of the VM
 * page size (which are represented by the PAGE_SIZE/PAGE_SHIFT macros). The VM
 * page size might not match up with the hardware page size for a given address
 * space (this is especially true on systems that support more than one page
 * size).
 *
 * The pv_head_table is allocated to have one entry per VM page, not hardware
 * page (which can change depending on the address space). Because of that, a
 * single VM-page-sized region (single pv_head_table entry) can potentially hold
 * up to four page tables. Only one page table descriptor (PTD) is allocated per
 * pv_head_table entry (per VM page), so on some systems, one PTD might have to
 * keep track of up to four different page tables.
 */

#if __ARM_MIXED_PAGE_SIZE__
#define PT_INDEX_MAX (ARM_PGBYTES / 4096)
#elif (ARM_PGSHIFT == 14)
#define PT_INDEX_MAX 1
#elif (ARM_PGSHIFT == 12)
#define PT_INDEX_MAX 4
#else
#error Unsupported ARM_PGSHIFT
#endif /* __ARM_MIXED_PAGE_SIZE__ || ARM_PGSHIFT == 14 || ARM_PGSHIFT == 12 */
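
/**
 * For example (illustrative, not in the original header): with 16K kernel
 * pages (ARM_PGBYTES == 16384) and mixed page size support, PT_INDEX_MAX is 4
 * and one PTD can track up to four 4K page tables; when the VM and hardware
 * page sizes match, PT_INDEX_MAX is 1.
 */
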
/**
 * Page table descriptor (PTD) info structure.
 *
 * Contains information about a page table. These pieces of data are separate
 * from the PTD itself because in address spaces where the VM page size doesn't
 * match the underlying hardware page size, one PTD could represent multiple
 * page tables (and so will need multiple PTD info structures).
 *
 * These fields are also in their own struct so that they can be allocated
 * separately from the associated pt_desc_t object. This allows us to allocate
 * the counts in this structure in a way that ensures they don't fall within the
 * same cache line as the main pt_desc_t object. This is important because the
 * fields in this structure are atomically updated which could cause false
 * sharing cache performance issues with the "va" field in pt_desc_t if all of
 * the fields were within the same structure.
 */
typedef struct {
	/**
	 * Pre-defined sentinel values for ptd_info_t.refcnt. If these refcnt values
	 * change, make sure to update the showpte LLDB macro to reflect the
	 * changes.
	 */
#define PT_DESC_REFCOUNT 0x4000U
#define PT_DESC_IOMMU_GRANTED_REFCOUNT 0x8000U
#define PT_DESC_IOMMU_ACCEPTED_REFCOUNT 0x8001U

	/*
	 * For non-leaf pagetables, should always be PT_DESC_REFCOUNT.
	 * For leaf pagetables, should reflect the number of non-empty PTEs.
	 * For IOMMU pages, should always be either PT_DESC_IOMMU_GRANTED_REFCOUNT
	 * or PT_DESC_IOMMU_ACCEPTED_REFCOUNT.
	 */
	unsigned short refcnt;

	/*
	 * For non-leaf pagetables, should be 0.
	 * For leaf pagetables, should reflect the number of wired entries.
	 * For IOMMU pages, may optionally reflect a driver-defined refcount (IOMMU
	 * operations are implicitly wired).
	 */
	unsigned short wiredcnt;
} ptd_info_t;

/**
 * Page Table Descriptor (PTD).
 *
 * Provides a per-table data structure and a way of keeping track of all page
 * tables in the system.
 *
 * This structure is also used as a convenient way of keeping track of IOMMU
 * pages (which may or may not be used as page tables). In that case the "iommu"
 * field will point to the owner of the page, ptd_info[0].refcnt will be
 * PT_DESC_IOMMU_GRANTED_REFCOUNT or PT_DESC_IOMMU_ACCEPTED_REFCOUNT, and
 * ptd_info[0].wiredcnt can be used as an arbitrary refcnt controlled by the
 * IOMMU driver.
 */
typedef struct pt_desc {
	/**
	 * This queue chain provides a mechanism for keeping a list of pages
	 * being used as page tables. This is used to potentially reclaim userspace
	 * page tables as a fast way of "allocating" a page.
	 *
	 * Refer to osfmk/kern/queue.h for more information about queue chains.
	 */
	queue_chain_t pt_page;

	/* Each page table is either owned by a pmap or a specific IOMMU. */
	union {
		struct pmap *pmap;
	};

	/**
	 * The following fields contain per-page-table properties, and as such,
	 * might have multiple elements each. This is due to a single PTD
	 * potentially representing multiple page tables (in address spaces where
	 * the VM page size differs from the hardware page size). Use the
	 * ptd_get_index() function to get the correct index for a specific page
	 * table.
	 */

	/**
	 * The first address of the virtual address space this page table is
	 * translating for, or a value set by an IOMMU driver if this PTD is being
	 * used to track an IOMMU page.
	 */
	vm_offset_t va[PT_INDEX_MAX];

	/**
	 * ptd_info_t's are allocated separately so as to reduce false sharing
	 * with the va field. This is desirable because ptd_info_t's are updated
	 * atomically from all CPUs.
	 */
	ptd_info_t *ptd_info;
} pt_desc_t;

/**
 * Convert a pv_head_table entry/pointer into a page table descriptor pointer.
 * This should only be done if the type of this entry is PVH_TYPE_PTDP.
 *
 * @param pvh The pv_head_table entry/pointer to convert into a safe to
 *            dereference pt_desc_t*.
 *
 * @return A safe-to-dereference pointer to the page table descriptor for this
 *         physical page, obtained by masking off the TYPE bits and adding any
 *         missing flags to the upper portion of the pointer.
 */
static inline pt_desc_t*
pvh_ptd(pv_entry_t **pvh)
{
	return (pt_desc_t *)(((*(vm_offset_t *)pvh) & PVH_LIST_MASK) | PVH_HIGH_FLAGS);
}

/**
 * Given an arbitrary page table entry, return the page table descriptor (PTD)
 * object for the page table that contains that entry.
 *
 * @param ptep Pointer to a PTE whose page table descriptor object to return.
 *
 * @return The PTD object for the passed in page table.
 */
static inline pt_desc_t *
ptep_get_ptd(const pt_entry_t *ptep)
{
	assert(ptep != NULL);

	const vm_offset_t pt_base_va = (vm_offset_t)ptep;
	pv_entry_t **pvh = pai_to_pvh(pa_index(ml_static_vtop(pt_base_va)));

	if (__improbable(!pvh_test_type(pvh, PVH_TYPE_PTDP))) {
		panic("%s: invalid PV head 0x%llx for PTE %p", __func__, (uint64_t)(*pvh), ptep);
	}

	return pvh_ptd(pvh);
}

/**
 * Given an arbitrary page table entry, return the pmap that owns that page
 * table.
 *
 * @note This won't work correctly for page tables owned by IOMMUs, because
 *       those tables aren't owned by any specific pmap.
 *
 * @param ptep Pointer to a page table entry whose owner we're trying to return.
 *
 * @return The pmap that owns the given page table entry.
 */
static inline struct pmap *
ptep_get_pmap(const pt_entry_t *ptep)
{
	return ptep_get_ptd(ptep)->pmap;
}


/**
 * Given an arbitrary translation table entry, get the page table descriptor
 * (PTD) object for the page table pointed to by the TTE.
 *
 * @param tte The translation table entry to parse. For instance, if this is an
 *            L2 TTE, then the PTD for the L3 table this entry points to will be
 *            returned.
 *
 * @return The page table descriptor (PTD) for the page table pointed to by this
 *         TTE.
 */
static inline pt_desc_t *
tte_get_ptd(const tt_entry_t tte)
{
	const vm_offset_t pt_base_va = (vm_offset_t)(tte & ~((tt_entry_t)PAGE_MASK));
	pv_entry_t **pvh = pai_to_pvh(pa_index(pt_base_va));

	if (__improbable(!pvh_test_type(pvh, PVH_TYPE_PTDP))) {
		panic("%s: invalid PV head 0x%llx for TTE 0x%llx", __func__, (uint64_t)(*pvh), (uint64_t)tte);
	}

	return pvh_ptd(pvh);
}

/**
 * In address spaces where the VM page size doesn't match the underlying
 * hardware page size, one PTD could represent multiple page tables. This
 * function returns the correct index value depending on which page table is
 * being accessed. That index value can then be used to access the
 * per-page-table properties stored within a PTD.
 *
 * @note See the description above the PT_INDEX_MAX definition for a more
 *       detailed explanation of why multiple page tables can be represented
 *       by a single PTD object in the pv_head_table.
 *
 * @param ptd The page table descriptor that's being accessed.
 * @param ttep Pointer to the translation table entry that's being accessed.
 *
 * @return The correct index value for a specific, hardware-sized page table.
 */
static inline unsigned
ptd_get_index(__unused const pt_desc_t *ptd, __unused const tt_entry_t *ttep)
{
#if PT_INDEX_MAX == 1
	return 0;
#else
	assert(ptd != NULL);

	const uint64_t pmap_page_shift = pt_attr_leaf_shift(pmap_get_pt_attr(ptd->pmap));
	const vm_offset_t ttep_page = (vm_offset_t)ttep >> pmap_page_shift;

	/**
	 * Use the difference between the VM page shift and the hardware page shift
	 * to get the index of the correct page table. In practice, this equates to
	 * masking out the bottom two bits of the L3 table index in address spaces
	 * where the VM page size is greater than the hardware page size. In address
	 * spaces where they're identical, the index will always be zero.
	 */
	const unsigned int ttep_index = ttep_page & ((1U << (PAGE_SHIFT - pmap_page_shift)) - 1);
	assert(ttep_index < PT_INDEX_MAX);

	return ttep_index;
#endif /* PT_INDEX_MAX == 1 */
}
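
/**
 * Worked example (illustrative, not in the original header): with 16K VM pages
 * (PAGE_SHIFT == 14) and a pmap using 4K page tables (pmap_page_shift == 12),
 * ttep_index is bits [13:12] of the TTE's address, i.e. which of the four 4K
 * tables within the VM page contains the entry. When the page sizes match, the
 * mask is zero and the index is always 0.
 */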

/**
 * In address spaces where the VM page size doesn't match the underlying
 * hardware page size, one PTD could represent multiple page tables. This
 * function returns the correct ptd_info_t structure depending on which page
 * table is being accessed.
 *
 * @note See the description above the PT_INDEX_MAX definition for a more
 *       detailed explanation of why multiple page tables can be represented
 *       by a single PTD object in the pv_head_table.
 *
 * @param ptd The page table descriptor that's being accessed.
 * @param ttep Pointer to the translation table entry that's being accessed.
 *
 * @return The correct ptd_info_t structure for a specific, hardware-sized page
 *         table.
 */
static inline ptd_info_t *
ptd_get_info(pt_desc_t *ptd, const tt_entry_t *ttep)
{
	assert((ptd != NULL) && (ptd->ptd_info[0].refcnt < PT_DESC_IOMMU_GRANTED_REFCOUNT));

	return &ptd->ptd_info[ptd_get_index(ptd, ttep)];
}

/**
 * Given a pointer to a page table entry, return the ptd_info structure for the
 * page table that contains that entry.
 *
 * @param ptep Pointer to a PTE whose ptd_info object to return.
 *
 * @return The ptd_info object for the page table that contains the passed in
 *         page table entry.
 */
static inline ptd_info_t *
ptep_get_info(const pt_entry_t *ptep)
{
	return ptd_get_info(ptep_get_ptd(ptep), ptep);
}

/**
 * Return the virtual address mapped by the passed in leaf page table entry,
 * using an already-retrieved page table descriptor.
 *
 * @param ptdp Pointer to the descriptor for the page table containing ptep.
 * @param ptep Pointer to a PTE to parse.
 */
static inline vm_map_address_t
ptd_get_va(const pt_desc_t *ptdp, const pt_entry_t *ptep)
{
	const pt_attr_t * const pt_attr = pmap_get_pt_attr(ptdp->pmap);

	vm_map_address_t va = ptdp->va[ptd_get_index(ptdp, ptep)];
	vm_offset_t ptep_index = ((vm_offset_t)ptep & pt_attr_leaf_offmask(pt_attr)) / sizeof(*ptep);

	va += (ptep_index << pt_attr_leaf_shift(pt_attr));

	return va;
}
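
/**
 * Worked example (illustrative, not in the original header): for a 16K pmap
 * (leaf shift of 14), a ptep that sits 0x30 bytes into its table is entry
 * index 6 (0x30 / sizeof(pt_entry_t)), so ptd_get_va() returns the table's
 * base VA plus 6 << 14.
 */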

/**
 * Return the virtual address that is being mapped by the passed in leaf page
 * table entry.
 *
 * @param ptep Pointer to a PTE to parse.
 */
static inline vm_map_address_t
ptep_get_va(const pt_entry_t *ptep)
{
	return ptd_get_va(ptep_get_ptd(ptep), ptep);
}

/**
 * Physical Page Attribute Table (pp_attr_table) defines and helper functions.
 */

/* How many bits to use for flags on a per-VM-page basis. */
typedef uint16_t pp_attr_t;

/* See the definition of pp_attr_table for more information. */
extern volatile pp_attr_t* pp_attr_table;

/**
 * Flags stored in the pp_attr_table on a per-physical-page basis.
 *
 * Please update the pv_walk LLDB macro if these flags are changed or added to.
 */

/**
 * The bottom 6 bits are used to store the default WIMG (cacheability and memory
 * type) setting for this physical page. This can be changed by calling
 * pmap_set_cache_attributes().
 *
 * If a default WIMG setting isn't set for a page, then the default is Normal,
 * Cached memory (VM_WIMG_DEFAULT).
 */
#define PP_ATTR_WIMG_MASK 0x003F
#define PP_ATTR_WIMG(x) ((x) & PP_ATTR_WIMG_MASK)

/**
 * The reference and modify bits keep track of whether a page has been accessed
 * or modified since the last time the bits were cleared. These bits are used to
 * enforce policy decisions in the VM layer.
 */
#define PP_ATTR_REFERENCED 0x0040
#define PP_ATTR_MODIFIED 0x0080

/**
 * This physical page is being used as anonymous memory that's internally
 * managed by the VM and is not connected to an external pager. This flag is
 * only set/cleared on the first CPU mapping of a page (see PVH_FLAG_CPU). Any
 * subsequent mappings won't set/clear this flag until all mappings are removed
 * and a new CPU mapping is added.
 */
#define PP_ATTR_INTERNAL 0x0100

/**
 * This flag is used to keep track of pages that are still resident but are not
 * considered dirty and can be reclaimed under memory pressure. These pages do
 * not count as a part of the memory footprint, so the footprint ledger does not
 * need to be updated for these pages. This is hinted to the VM by the
 * `madvise(MADV_FREE_REUSABLE)` system call.
 */
#define PP_ATTR_REUSABLE 0x0200

/**
 * This flag denotes that a page is utilizing "alternate accounting". This means
 * that the pmap doesn't need to keep track of these pages with regards to the
 * footprint ledger because the VM is already accounting for them in a different
 * way. These include IOKit mappings (VM adds their entire virtual size to the
 * footprint), and purgeable pages (VM counts them only when non-volatile and
 * only for one "owner"), among others.
 *
 * Note that alternate accounting status is tracked on a per-mapping basis (not
 * per-page). Because of that the ALTACCT flag in the pp_attr_table is only used
 * when there's a single mapping to a page. When there are multiple mappings,
 * the status of this flag is tracked in the pv_head_table (see PVE_PTEP_ALTACCT
 * above).
 */
#define PP_ATTR_ALTACCT 0x0400

/**
 * This bit was originally used on x86 to keep track of what pages to not
 * encrypt during the hibernation process as a performance optimization when
 * encryption was done in software. This doesn't apply to the ARM
 * hibernation process because all pages are automatically encrypted using
 * hardware acceleration. Despite that, the pmap still keeps track of this flag
 * as a debugging aid on internal builds.
 *
 * TODO: This bit can probably be reclaimed:
 * rdar://70740650 (PMAP Cleanup: Potentially reclaim the PP_ATTR_NOENCRYPT bit on ARM)
 */
#define PP_ATTR_NOENCRYPT 0x0800

/**
 * These bits denote that a physical page is expecting the next access or
 * modification to set the PP_ATTR_REFERENCED and PP_ATTR_MODIFIED flags
 * respectively.
 */
#define PP_ATTR_REFFAULT 0x1000
#define PP_ATTR_MODFAULT 0x2000

#if XNU_MONITOR
/**
 * Denotes that a page is owned by the PPL. This is modified/checked with the
 * PVH lock held, to avoid ownership related races. This does not need to be a
 * PP_ATTR bit (as we have the lock), but for now this is a convenient place to
 * put the bit.
 */
#define PP_ATTR_MONITOR 0x4000

/**
 * Denotes that a page *cannot* be owned by the PPL. This is required in order
 * to temporarily 'pin' kernel pages that are used to store PPL output
 * parameters. Otherwise a malicious or buggy caller could pass PPL-owned memory
 * for these parameters and in so doing stage a write gadget against the PPL.
 */
#define PP_ATTR_NO_MONITOR 0x8000

/**
 * All of the bits owned by the PPL; kernel requests to set or clear these bits
 * are illegal.
 */
#define PP_ATTR_PPL_OWNED_BITS (PP_ATTR_MONITOR | PP_ATTR_NO_MONITOR)
#endif /* XNU_MONITOR */

/**
 * Atomically set some flags in a pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 * @param bits The flags to set in the entry.
 */
static inline void
ppattr_set_bits(unsigned int pai, pp_attr_t bits)
{
	volatile pp_attr_t *ppattr = &pp_attr_table[pai];
	os_atomic_or(ppattr, bits, acq_rel);
}

/**
 * Atomically clear some flags in a pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 * @param bits The flags to clear in the entry.
 */
static inline void
ppattr_clear_bits(unsigned int pai, pp_attr_t bits)
{
	volatile pp_attr_t *ppattr = &pp_attr_table[pai];
	os_atomic_andnot(ppattr, bits, acq_rel);
}

/**
 * Return true if the pp_attr_table entry contains the passed in bits.
 *
 * @param pai The physical address index for the entry to test.
 * @param bits The flags to check for.
 */
static inline bool
ppattr_test_bits(unsigned int pai, pp_attr_t bits)
{
	const volatile pp_attr_t *ppattr = &pp_attr_table[pai];
	return (*ppattr & bits) == bits;
}
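
/**
 * Illustrative sketch (not part of the original header): the bit helpers are
 * individually atomic, so a "test then set" sequence like the one below is not
 * atomic as a whole; callers must tolerate the bits changing between the two
 * calls. The helper name is hypothetical.
 */
static inline void
ppattr_example_mark_modified(unsigned int pai)
{
	if (!ppattr_test_bits(pai, PP_ATTR_MODIFIED)) {
		ppattr_set_bits(pai, PP_ATTR_MODIFIED | PP_ATTR_REFERENCED);
	}
}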

/**
 * Only set some flags in a pp_attr_table entry if the passed in physical
 * address is a kernel-managed address.
 *
 * @param pa The physical address for the entry to update.
 * @param bits The flags to set in the entry.
 */
static inline void
ppattr_pa_set_bits(pmap_paddr_t pa, pp_attr_t bits)
{
	if (pa_valid(pa)) {
		ppattr_set_bits(pa_index(pa), bits);
	}
}

/**
 * Only clear some flags in a pp_attr_table entry if the passed in physical
 * address is a kernel-managed address.
 *
 * @param pa The physical address for the entry to update.
 * @param bits The flags to clear in the entry.
 */
static inline void
ppattr_pa_clear_bits(pmap_paddr_t pa, pp_attr_t bits)
{
	if (pa_valid(pa)) {
		ppattr_clear_bits(pa_index(pa), bits);
	}
}

/**
 * Only test flags in a pp_attr_table entry if the passed in physical address
 * is a kernel-managed page.
 *
 * @param pa The physical address for the entry to test.
 * @param bits The flags to check for.
 *
 * @return False if the PA isn't a kernel-managed page, otherwise true/false
 *         depending on whether the bits are set.
 */
static inline bool
ppattr_pa_test_bits(pmap_paddr_t pa, pp_attr_t bits)
{
	return pa_valid(pa) ? ppattr_test_bits(pa_index(pa), bits) : false;
}

/**
 * Set the PP_ATTR_MODIFIED flag on a specific pp_attr_table entry if the passed
 * in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_set_modify(pmap_paddr_t pa)
{
	ppattr_pa_set_bits(pa, PP_ATTR_MODIFIED);
}

/**
 * Clear the PP_ATTR_MODIFIED flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_clear_modify(pmap_paddr_t pa)
{
	ppattr_pa_clear_bits(pa, PP_ATTR_MODIFIED);
}

/**
 * Set the PP_ATTR_REFERENCED flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_set_reference(pmap_paddr_t pa)
{
	ppattr_pa_set_bits(pa, PP_ATTR_REFERENCED);
}

/**
 * Clear the PP_ATTR_REFERENCED flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_clear_reference(pmap_paddr_t pa)
{
	ppattr_pa_clear_bits(pa, PP_ATTR_REFERENCED);
}

#if XNU_MONITOR

/**
 * Set the PP_ATTR_MONITOR flag on a specific pp_attr_table entry if the passed
 * in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_set_monitor(pmap_paddr_t pa)
{
	ppattr_pa_set_bits(pa, PP_ATTR_MONITOR);
}

/**
 * Clear the PP_ATTR_MONITOR flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_clear_monitor(pmap_paddr_t pa)
{
	ppattr_pa_clear_bits(pa, PP_ATTR_MONITOR);
}

/**
 * Only test for the PP_ATTR_MONITOR flag in a pp_attr_table entry if the passed
 * in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to test.
 *
 * @return False if the PA isn't a kernel-managed page, otherwise true/false
 *         depending on whether PP_ATTR_MONITOR is set.
 */
static inline bool
ppattr_pa_test_monitor(pmap_paddr_t pa)
{
	return ppattr_pa_test_bits(pa, PP_ATTR_MONITOR);
}

/**
 * Set the PP_ATTR_NO_MONITOR flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_set_no_monitor(pmap_paddr_t pa)
{
	ppattr_pa_set_bits(pa, PP_ATTR_NO_MONITOR);
}

/**
 * Clear the PP_ATTR_NO_MONITOR flag on a specific pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to update.
 */
static inline void
ppattr_pa_clear_no_monitor(pmap_paddr_t pa)
{
	ppattr_pa_clear_bits(pa, PP_ATTR_NO_MONITOR);
}

/**
 * Only test for the PP_ATTR_NO_MONITOR flag in a pp_attr_table entry if the
 * passed in physical address is a kernel-managed page.
 *
 * @param pa The physical address for the entry to test.
 *
 * @return False if the PA isn't a kernel-managed page, otherwise true/false
 *         depending on whether PP_ATTR_NO_MONITOR is set.
 */
static inline bool
ppattr_pa_test_no_monitor(pmap_paddr_t pa)
{
	return ppattr_pa_test_bits(pa, PP_ATTR_NO_MONITOR);
}

#endif /* XNU_MONITOR */

/**
 * Set the PP_ATTR_INTERNAL flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_set_internal(unsigned int pai)
{
	ppattr_set_bits(pai, PP_ATTR_INTERNAL);
}

/**
 * Clear the PP_ATTR_INTERNAL flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_clear_internal(unsigned int pai)
{
	ppattr_clear_bits(pai, PP_ATTR_INTERNAL);
}

/**
 * Return true if the pp_attr_table entry has the PP_ATTR_INTERNAL flag set.
 *
 * @param pai The physical address index for the entry to test.
 */
static inline bool
ppattr_test_internal(unsigned int pai)
{
	return ppattr_test_bits(pai, PP_ATTR_INTERNAL);
}

/**
 * Set the PP_ATTR_REUSABLE flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_set_reusable(unsigned int pai)
{
	ppattr_set_bits(pai, PP_ATTR_REUSABLE);
}

/**
 * Clear the PP_ATTR_REUSABLE flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_clear_reusable(unsigned int pai)
{
	ppattr_clear_bits(pai, PP_ATTR_REUSABLE);
}

/**
 * Return true if the pp_attr_table entry has the PP_ATTR_REUSABLE flag set.
 *
 * @param pai The physical address index for the entry to test.
 */
static inline bool
ppattr_test_reusable(unsigned int pai)
{
	return ppattr_test_bits(pai, PP_ATTR_REUSABLE);
}
1506
1507 /**
1508 * Set the PP_ATTR_ALTACCT flag on a specific pp_attr_table entry.
1509 *
1510 * @note This is only valid when the ALTACCT flag is being tracked using the
1511 * pp_attr_table. See the descriptions above the PVE_PTEP_ALTACCT and
1512 * PP_ATTR_ALTACCT definitions for more information.
1513 *
1514 * @param pai The physical address index for the entry to update.
1515 */
1516 static inline void
ppattr_set_altacct(unsigned int pai)1517 ppattr_set_altacct(unsigned int pai)
1518 {
1519 ppattr_set_bits(pai, PP_ATTR_ALTACCT);
1520 }
1521
1522 /**
1523 * Clear the PP_ATTR_ALTACCT flag on a specific pp_attr_table entry.
1524 *
1525 * @note This is only valid when the ALTACCT flag is being tracked using the
1526 * pp_attr_table. See the descriptions above the PVE_PTEP_ALTACCT and
1527 * PP_ATTR_ALTACCT definitions for more information.
1528 *
1529 * @param pai The physical address index for the entry to update.
1530 */
1531 static inline void
ppattr_clear_altacct(unsigned int pai)1532 ppattr_clear_altacct(unsigned int pai)
1533 {
1534 ppattr_clear_bits(pai, PP_ATTR_ALTACCT);
1535 }
1536
1537 /**
1538 * Get the PP_ATTR_ALTACCT flag on a specific pp_attr_table entry.
1539 *
1540 * @note This is only valid when the ALTACCT flag is being tracked using the
1541 * pp_attr_table. See the descriptions above the PVE_PTEP_ALTACCT and
1542 * PP_ATTR_ALTACCT definitions for more information.
1543 *
1544 * @param pai The physical address index for the entry to test.
1545 *
1546 * @return True if the passed in page uses alternate accounting, false
1547 * otherwise.
1548 */
1549 static inline bool
ppattr_is_altacct(unsigned int pai)1550 ppattr_is_altacct(unsigned int pai)
1551 {
1552 return ppattr_test_bits(pai, PP_ATTR_ALTACCT);
1553 }
/**
 * Get the PP_ATTR_INTERNAL flag on a specific pp_attr_table entry.
 *
 * @note This is only valid when the INTERNAL flag is being tracked using the
 *       pp_attr_table. See the descriptions above the PVE_PTEP_INTERNAL and
 *       PP_ATTR_INTERNAL definitions for more information.
 *
 * @param pai The physical address index for the entry to test.
 *
 * @return True if the passed in page is accounted for as "internal", false
 *         otherwise.
 */
static inline bool
ppattr_is_internal(unsigned int pai)
{
	return ppattr_test_bits(pai, PP_ATTR_INTERNAL);
}

/**
 * The "alternate accounting" (ALTACCT) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of
 * determining whether alternate accounting is set for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_ALTACCT and PP_ATTR_ALTACCT
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to test.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 *
 * @return True if the passed in page uses alternate accounting, false
 *         otherwise.
 */
static inline bool
ppattr_pve_is_altacct(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	return (pvep == PV_ENTRY_NULL) ? ppattr_is_altacct(pai) : pve_get_altacct(pvep, idx);
}

/**
 * The "internal" (INTERNAL) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of
 * determining whether "internal" is set for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_INTERNAL and PP_ATTR_INTERNAL
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to test.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 *
 * @return True if the passed in page is "internal", false otherwise.
 */
static inline bool
ppattr_pve_is_internal(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	return (pvep == PV_ENTRY_NULL) ? ppattr_is_internal(pai) : pve_get_internal(pvep, idx);
}

/**
 * The "alternate accounting" (ALTACCT) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of setting
 * the alternate accounting status for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_ALTACCT and PP_ATTR_ALTACCT
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to update.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
ppattr_pve_set_altacct(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	if (pvep == PV_ENTRY_NULL) {
		ppattr_set_altacct(pai);
	} else {
		pve_set_altacct(pvep, idx);
	}
}

/**
 * The "internal" (INTERNAL) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of setting
 * the "internal" status for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_INTERNAL and PP_ATTR_INTERNAL
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to update.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
ppattr_pve_set_internal(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	if (pvep == PV_ENTRY_NULL) {
		ppattr_set_internal(pai);
	} else {
		pve_set_internal(pvep, idx);
	}
}

/**
 * The "alternate accounting" (ALTACCT) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of clearing
 * the alternate accounting status for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_ALTACCT and PP_ATTR_ALTACCT
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to update.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
ppattr_pve_clr_altacct(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	if (pvep == PV_ENTRY_NULL) {
		ppattr_clear_altacct(pai);
	} else {
		pve_clr_altacct(pvep, idx);
	}
}

/**
 * The "internal" (INTERNAL) status for a page is tracked differently
 * depending on whether the page has one mapping or multiple mappings. This
 * function abstracts away that difference and provides a single way of clearing
 * the "internal" status for a mapping.
 *
 * @note See the descriptions above the PVE_PTEP_INTERNAL and PP_ATTR_INTERNAL
 *       definitions for more information.
 *
 * @param pai The physical address index for the entry to update.
 * @param pvep Pointer to the pv_entry_t object containing that mapping.
 * @param idx Index of the chosen PTE pointer inside the PVE.
 */
static inline void
ppattr_pve_clr_internal(unsigned int pai, pv_entry_t *pvep, unsigned idx)
{
	if (pvep == PV_ENTRY_NULL) {
		ppattr_clear_internal(pai);
	} else {
		pve_clr_internal(pvep, idx);
	}
}
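
/*
 * Illustrative sketch (not part of the original header): callers of the
 * ppattr_pve_* accessors don't need to know where a page's status is tracked.
 * They pass whatever pv_entry_t they hold (or PV_ENTRY_NULL when the single
 * mapping is tracked in the pp_attr_table) and the right backing store is
 * consulted/updated. The variables shown here are assumptions.
 *
 *	if (ppattr_pve_is_internal(pai, pvep, pve_ptep_idx) &&
 *	    !ppattr_pve_is_altacct(pai, pvep, pve_ptep_idx)) {
 *		// Accounted as a regular "internal" page.
 *	}
 *	// Switch the mapping over to alternate accounting.
 *	ppattr_pve_set_altacct(pai, pvep, pve_ptep_idx);
 */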

/**
 * Set the PP_ATTR_REFFAULT flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_set_reffault(unsigned int pai)
{
	ppattr_set_bits(pai, PP_ATTR_REFFAULT);
}

/**
 * Clear the PP_ATTR_REFFAULT flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_clear_reffault(unsigned int pai)
{
	ppattr_clear_bits(pai, PP_ATTR_REFFAULT);
}

/**
 * Return true if the pp_attr_table entry has the PP_ATTR_REFFAULT flag set.
 *
 * @param pai The physical address index for the entry to test.
 */
static inline bool
ppattr_test_reffault(unsigned int pai)
{
	return ppattr_test_bits(pai, PP_ATTR_REFFAULT);
}

/**
 * Set the PP_ATTR_MODFAULT flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_set_modfault(unsigned int pai)
{
	ppattr_set_bits(pai, PP_ATTR_MODFAULT);
}

/**
 * Clear the PP_ATTR_MODFAULT flag on a specific pp_attr_table entry.
 *
 * @param pai The physical address index for the entry to update.
 */
static inline void
ppattr_clear_modfault(unsigned int pai)
{
	ppattr_clear_bits(pai, PP_ATTR_MODFAULT);
}

/**
 * Return true if the pp_attr_table entry has the PP_ATTR_MODFAULT flag set.
 *
 * @param pai The physical address index for the entry to test.
 */
static inline bool
ppattr_test_modfault(unsigned int pai)
{
	return ppattr_test_bits(pai, PP_ATTR_MODFAULT);
}

/**
 * Return true if the current context is preemptible, or if it's early enough
 * in boot that the lack of preemption doesn't matter.
 */
static inline boolean_t
pmap_is_preemptible(void)
{
	return preemption_enabled() || (startup_phase < STARTUP_SUB_EARLY_BOOT);
}

/**
 * This helper function ensures that potentially-long-running batched PPL
 * operations are called in a preemptible context before entering the PPL, so
 * that the PPL call may periodically exit to allow pending urgent ASTs to be
 * taken.
 */
static inline void
pmap_verify_preemptible(void)
{
	assert(pmap_is_preemptible());
}
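
/*
 * Illustrative sketch (assumed calling pattern, not from the original source):
 * a batched operation asserts preemptibility once, outside the PPL, before
 * starting a potentially long-running sequence of PPL calls.
 *
 *	pmap_verify_preemptible();
 *	while (more_work) {
 *		// Each PPL entry processes one bounded batch and then returns,
 *		// giving pending urgent ASTs a chance to be taken.
 *		more_work = ppl_batched_op(pmap, &cursor);  // hypothetical PPL call
 *	}
 */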

/**
 * The minimum number of pages to keep in the PPL page free list.
 *
 * We define our target as 8 pages: enough for 2 page table pages, a PTD page,
 * and a PV page; in essence, twice as many pages as may be necessary to satisfy
 * a single pmap_enter request.
 */
#define PMAP_MIN_FREE_PPL_PAGES 8

/**
 * Flags passed to various page allocation functions, usually accessed through
 * the pmap_pages_alloc_zeroed() API. Each function that accepts these flags in
 * its options field will describe them in its function header.
 */

/**
 * Instruct the allocation function to return immediately if no pages are
 * currently available. Without this flag, the function will spin and wait for
 * a page to become available. This flag may be required in some circumstances
 * (for instance, when allocating pages from within the PPL).
 */
#define PMAP_PAGES_ALLOCATE_NOWAIT 0x1

/**
 * Instruct the allocation function to fall back to reclaiming a userspace page
 * table if it fails to allocate a page from the free lists. This can be useful
 * when allocating from within the PPL, because refilling the free lists
 * requires exiting and re-entering the PPL (which incurs extra latency).
 *
 * This is a quick way of allocating a page at the expense of having to
 * reallocate the table the next time one of its mappings is accessed.
 */
#define PMAP_PAGE_RECLAIM_NOWAIT 0x2
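
/*
 * Illustrative sketch (not part of the original header): a PPL-side caller
 * that cannot block might combine both flags. The argument order for
 * pmap_pages_alloc_zeroed() (address out-pointer, size, options) is assumed
 * from its prototype below.
 *
 *	pmap_paddr_t pa = 0;
 *	kern_return_t kr = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE,
 *	    PMAP_PAGES_ALLOCATE_NOWAIT | PMAP_PAGE_RECLAIM_NOWAIT);
 *	if (kr != KERN_SUCCESS) {
 *		// No free page and no reclaimable page table; the caller must
 *		// exit the PPL, refill the free lists, and retry.
 *	}
 */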

/**
 * Global variables exported to the rest of the internal pmap implementation.
 */
#if XNU_MONITOR
extern uint64_t pmap_ppl_free_page_count;
extern pmap_paddr_t pmap_stacks_start_pa;
extern pmap_paddr_t pmap_stacks_end_pa;
extern pmap_paddr_t ppl_cpu_save_area_start;
extern pmap_paddr_t ppl_cpu_save_area_end;
#endif /* XNU_MONITOR */
extern unsigned int inuse_pmap_pages_count;
extern vm_object_t pmap_object;
extern uint32_t pv_alloc_initial_target;
extern uint32_t pv_kern_alloc_initial_target;

/**
 * Functions exported to the rest of the internal pmap implementation.
 */
extern void pmap_data_bootstrap(void);
extern void pmap_enqueue_pages(vm_page_t);
extern kern_return_t pmap_pages_alloc_zeroed(pmap_paddr_t *, unsigned, unsigned);
extern void pmap_pages_free(pmap_paddr_t, unsigned);

#if XNU_MONITOR

extern void pmap_mark_page_as_ppl_page_internal(pmap_paddr_t, bool);
extern void pmap_mark_page_as_ppl_page(pmap_paddr_t);
extern void pmap_mark_page_as_kernel_page(pmap_paddr_t);
extern pmap_paddr_t pmap_alloc_page_for_kern(unsigned int);
extern void pmap_alloc_page_for_ppl(unsigned int);
extern uint64_t pmap_release_ppl_pages_to_kernel(void);

extern uint64_t pmap_ledger_validate(const volatile void *);
void pmap_ledger_retain(ledger_t ledger);
void pmap_ledger_release(ledger_t ledger);
extern void pmap_ledger_check_balance(pmap_t pmap);

kern_return_t pmap_alloc_pmap(pmap_t *pmap);
void pmap_free_pmap(pmap_t pmap);

#endif /* XNU_MONITOR */

/**
 * The modes in which a pmap lock can be acquired. Note that shared access
 * doesn't necessarily mean "read-only". As long as data is updated atomically
 * (to account for multi-CPU accesses), it can still be written with a shared
 * lock held. Care just needs to be taken so as to not introduce any race
 * conditions when there are multiple writers.
 *
 * This lives in pmap_data.h because it's a needed parameter for pv_alloc()
 * and pmap_enter_pv(). This header is always included in pmap_internal.h
 * before the rest of the pmap locking code is defined, so there shouldn't be
 * any issues with missing types.
 */
OS_ENUM(pmap_lock_mode, uint8_t,
	PMAP_LOCK_SHARED,
	PMAP_LOCK_EXCLUSIVE);
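
/*
 * Illustrative sketch (hypothetical lock/unlock helper names; the real locking
 * code is defined later in pmap_internal.h): shared mode permits concurrent
 * holders, so any state written under it must be updated atomically.
 *
 *	pmap_lock(pmap, PMAP_LOCK_SHARED);
 *	// ...atomically update per-page state shared with other CPUs...
 *	pmap_unlock(pmap, PMAP_LOCK_SHARED);
 */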

/**
 * Possible return values for pv_alloc(). See the pv_alloc() function header
 * for a description of each of these values.
 */
typedef enum {
	PV_ALLOC_SUCCESS,
	PV_ALLOC_RETRY,
	PV_ALLOC_FAIL
} pv_alloc_return_t;
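
/*
 * Illustrative sketch (assumed semantics and argument meanings, inferred from
 * the prototype below; the pv_alloc() function header is authoritative):
 * PV_ALLOC_RETRY typically indicates that locks were dropped while refilling
 * the free lists, so the caller must revalidate its state and retry, while
 * PV_ALLOC_FAIL indicates no entry could be allocated.
 *
 *	switch (pv_alloc(pmap, pai, PMAP_LOCK_EXCLUSIVE, options, &pvep)) {
 *	case PV_ALLOC_SUCCESS:
 *		break;                  // pvep now points to a usable entry
 *	case PV_ALLOC_RETRY:
 *		goto retry;             // locks were dropped; start over
 *	case PV_ALLOC_FAIL:
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 */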

extern pv_alloc_return_t pv_alloc(
	pmap_t, unsigned int, pmap_lock_mode_t, unsigned int, pv_entry_t **);
extern void pv_free(pv_entry_t *);
extern void pv_list_free(pv_entry_t *, pv_entry_t *, int);
extern void pmap_compute_pv_targets(void);
extern pv_alloc_return_t pmap_enter_pv(
	pmap_t, pt_entry_t *, int, unsigned int, pmap_lock_mode_t, pv_entry_t **, int *new_pve_ptep_idx);
extern void pmap_remove_pv(pmap_t, pt_entry_t *, int, bool, bool *, bool *);

extern void ptd_bootstrap(pt_desc_t *, unsigned int);
extern pt_desc_t *ptd_alloc_unlinked(void);
extern pt_desc_t *ptd_alloc(pmap_t);
extern void ptd_deallocate(pt_desc_t *);
extern void ptd_info_init(
	pt_desc_t *, pmap_t, vm_map_address_t, unsigned int, pt_entry_t *);

extern kern_return_t pmap_ledger_credit(pmap_t, int, ledger_amount_t);
extern kern_return_t pmap_ledger_debit(pmap_t, int, ledger_amount_t);

extern void validate_pmap_internal(const volatile struct pmap *, const char *);
extern void validate_pmap_mutable_internal(const volatile struct pmap *, const char *);

/**
 * Macro function wrappers around pmap validation so that the calling function
 * can be printed in the panic strings for easier validation failure debugging.
 */
#define validate_pmap(x) validate_pmap_internal(x, __func__)
#define validate_pmap_mutable(x) validate_pmap_mutable_internal(x, __func__)

/**
 * This structure describes a PPL-owned I/O range.
 *
 * @note This doesn't necessarily have to represent "I/O" only; it can also
 *       represent non-kernel-managed DRAM (e.g., iBoot carveouts). Any physical
 *       address region that isn't considered "kernel-managed" is fair game.
 *
 * @note The layout of this structure needs to map 1-to-1 with the pmap-io-range
 *       device tree nodes. Astris (through the LowGlobals) also depends on the
 *       consistency of this structure.
 */
typedef struct pmap_io_range {
	/* Physical address of the PPL-owned I/O range. */
	uint64_t addr;

	/**
	 * Length (in bytes) of the PPL-owned I/O range. This must be the size
	 * of a page if the range will be referred to by pmap_io_filter_entries.
	 */
	uint64_t len;

	/* Strong DSB required for pages in this range. */
#define PMAP_IO_RANGE_STRONG_SYNC (1UL << 31)

	/* Corresponds to memory carved out by the bootloader. */
#define PMAP_IO_RANGE_CARVEOUT (1UL << 30)

	/* Pages in this range need to be included in the hibernation image. */
#define PMAP_IO_RANGE_NEEDS_HIBERNATING (1UL << 29)

	/* Mark the range as 'owned' by a given subsystem. */
#define PMAP_IO_RANGE_OWNED (1UL << 28)

	/**
	 * Lower 16 bits treated as pp_attr_t, upper 16 bits contain additional
	 * mapping flags (defined above).
	 */
	uint32_t wimg;

	/**
	 * 4 Character Code (4CC) describing what this range is.
	 *
	 * This must be unique for each "type" of page (i.e., pages sharing the
	 * same register layout) if it is used for the I/O filter descriptors
	 * below. Otherwise it doesn't matter.
	 */
	uint32_t signature;
} pmap_io_range_t;

/* Reminder: be sure to change all relevant device trees if you change the layout of pmap_io_range_t. */
_Static_assert(sizeof(pmap_io_range_t) == 24, "unexpected size for pmap_io_range_t");
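
/*
 * Illustrative sketch (not part of the original header): decomposing the wimg
 * field of a matched range into its pp_attr_t portion (lower 16 bits) and its
 * additional mapping flags (upper 16 bits).
 *
 *	pmap_io_range_t *range = pmap_find_io_attr(paddr);
 *	if (range != NULL) {
 *		pp_attr_t attrs = (pp_attr_t)(range->wimg & 0xFFFF);
 *		bool strong_sync = (range->wimg & PMAP_IO_RANGE_STRONG_SYNC) != 0;
 *	}
 */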

extern pmap_io_range_t* pmap_find_io_attr(pmap_paddr_t);

/**
 * This structure describes a sub-page-size I/O region that is owned by the PPL
 * but that the kernel can write to.
 *
 * @note I/O filter software will use a collection of these data structures to
 *       determine access permissions to a page owned by the PPL.
 *
 * @note The {signature, offset} key is used to index a collection of these
 *       data structures to optimize for space in the case where one page
 *       layout is repeated for many devices, such as the memory controller
 *       channels.
 */
typedef struct pmap_io_filter_entry {
	/* 4 Character Code (4CC) describing what this range (page) is. */
	uint32_t signature;

	/* Offset within the page. It has to be within [0, PAGE_SIZE). */
	uint16_t offset;

	/* Length of the range; (offset + length) has to be within [0, PAGE_SIZE). */
	uint16_t length;
} pmap_io_filter_entry_t;

_Static_assert(sizeof(pmap_io_filter_entry_t) == 8, "unexpected size for pmap_io_filter_entry_t");

extern pmap_io_filter_entry_t *pmap_find_io_filter_entry(pmap_paddr_t, uint64_t, const pmap_io_range_t **);
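
/*
 * Illustrative sketch (argument meanings assumed from the prototype above):
 * checking whether a kernel write of `width` bytes at `paddr` falls inside a
 * PPL-approved sub-page window of a PPL-owned page.
 *
 *	const pmap_io_range_t *range = NULL;
 *	pmap_io_filter_entry_t *entry =
 *	    pmap_find_io_filter_entry(paddr, width, &range);
 *	if (entry == NULL) {
 *		// No matching {signature, offset} window; reject the write.
 *	}
 */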

extern void pmap_cpu_data_init_internal(unsigned int);

/**
 * Flush a single 16K page from noncoherent coprocessor caches.
 *
 * @note Noncoherent cache flushes are only guaranteed to work if the participating coprocessor(s)
 *       do not have any active VA translations for the page being flushed. Since coprocessor
 *       mappings should always be controlled by some PPL IOMMU extension, they should always
 *       have PV list entries. This flush should therefore be performed at a point when the PV
 *       list is known to be either empty or at least to not contain any IOMMU entries. For
 *       the purposes of our security model, it is sufficient to wait for the PV list to become
 *       empty, as we really want to protect PPL-sensitive pages from malicious/accidental
 *       coprocessor cacheline evictions, and the PV list must be empty before a page can be
 *       handed to the PPL.
 *
 * @param paddr The base physical address of the page to flush.
 */
extern void pmap_flush_noncoherent_page(pmap_paddr_t paddr);
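
/*
 * Illustrative ordering sketch (hypothetical helper names; the real checks
 * live in the PPL page-transfer path): flush only once the page's PV list is
 * known to be empty, then hand the page to the PPL.
 *
 *	assert(pvh_is_empty(pa_index(paddr)));  // hypothetical emptiness check
 *	pmap_flush_noncoherent_page(paddr);
 *	pmap_mark_page_as_ppl_page(paddr);
 */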

#endif /* _ARM_PMAP_PMAP_DATA_H_ */