/*
 * Copyright (c) 2007-2021, 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * Machine-dependent structures for the physical map module.
 *
 * This header file contains the types and prototypes that make up the public
 * pmap API that's exposed to the rest of the kernel. Any types/prototypes used
 * strictly by the pmap itself should be placed into one of the osfmk/arm/pmap/
 * header files.
 *
 * To prevent circular dependencies and exposing anything not needed by the
 * rest of the kernel, this file shouldn't include ANY of the internal
 * osfmk/arm/pmap/ header files.
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_

#include <mach_assert.h>
#include <arm64/proc_reg.h>

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <kern/ast.h>
#include <mach/arm/thread_status.h>

#if defined(__arm64__)
#include <arm64/tlb.h>
#else /* defined(__arm64__) */
#include <arm/tlb.h>
#endif /* defined(__arm64__) */


/* Shift for 2048 max virtual ASIDs (2048 pmaps). */
#define ASID_SHIFT (11)

/* Max supported ASIDs (can be virtual). */
#define MAX_ASIDS (1 << ASID_SHIFT)

/* Shift for the maximum ARM ASID value (256 or 65536). */
#ifndef ARM_ASID_SHIFT
#if HAS_16BIT_ASID
#define ARM_ASID_SHIFT (16)
#else
#define ARM_ASID_SHIFT (8)
#endif /* HAS_16BIT_ASID */
#endif /* ARM_ASID_SHIFT */

/* Max ASIDs supported by the hardware. */
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT)

/* Number of bits in a byte. */
#define NBBY (8)

/**
 * The maximum number of hardware ASIDs used by the pmap for user address spaces.
 *
 * One ASID is always dedicated to the kernel (ASID 0). On systems with
 * software-based Spectre/Meltdown mitigations, each address space technically
 * uses two hardware ASIDs (one for EL1 and one for EL0), so the total number
 * of ASIDs available to user processes is halved on those systems.
 */
#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else /* __ARM_KERNEL_PROTECT__ */
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif /* __ARM_KERNEL_PROTECT__ */
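
/*
 * Worked example (assuming 8-bit hardware ASIDs, i.e. ARM_ASID_SHIFT == 8):
 * ARM_MAX_ASIDS is 1 << 8 == 256. Without __ARM_KERNEL_PROTECT__, one ASID is
 * reserved for the kernel, leaving MAX_HW_ASIDS == 255. With
 * __ARM_KERNEL_PROTECT__, each address space consumes an EL0/EL1 pair of
 * ASIDs, so only (256 >> 1) - 1 == 127 user address spaces can hold a
 * hardware ASID at any one time.
 */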

/* Maximum number of Virtual Machine IDs */
#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif /* ARM_VMID_SHIFT */
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT)

/* XPRR virtual register map */

/* Maximum number of CPU windows per-cpu. */
#define CPUWINDOWS_MAX 4

#if defined(__arm64__)

#if defined(ARM_LARGE_MEMORY)
/*
 * Upper limit on the number of pages consumed by bootstrap page tables:
 * 2 L1 tables (linear KVA and V=P), plus 2*16 L2 tables, which map up to
 * 1TB (16 * 64GB) of DRAM.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else /* defined(ARM_LARGE_MEMORY) */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif /* defined(ARM_LARGE_MEMORY) */

typedef uint64_t tt_entry_t; /* translation table entry type */
typedef uint64_t pt_entry_t; /* page table entry type */
#else /* defined(__arm64__) */
#error unknown arch
#endif /* defined(__arm64__) */

/* Used to represent a NULL page/translation table entry pointer. */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

/**
 * Number of PTE pointers in a single PVE. This must be 2, since the algorithm
 * has been optimized to that case. Should this change in the future, both
 * enter_pv() and remove_pv() will need to be modified accordingly. In addition
 * to this, the documentation and the LLDB macros that walk PV lists will also
 * need to be adapted.
 */
#define PTE_PER_PVE 2
_Static_assert(PTE_PER_PVE == 2, "PTE_PER_PVE is not 2");

/**
 * Structure to track the active mappings for a given page. This structure is
 * used in the pv_head_table when a physical page has more than one mapping to
 * it. Each entry in this linked list of structures can represent
 * up to PTE_PER_PVE mappings.
 */
typedef struct pv_entry {
    /* Linked list to the next mapping of the physical page. */
    struct pv_entry *pve_next;

    /* Pointer to the page table entry for this mapping. */
    pt_entry_t *pve_ptep[PTE_PER_PVE];
} pv_entry_t;
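
/*
 * Illustrative sketch (not part of the pmap API): walking every PTE pointer
 * recorded in a PV list. "pve_head" is a hypothetical pointer already
 * obtained from the pv_head_table; a slot may be PT_ENTRY_NULL when a
 * pv_entry has only one of its PTE_PER_PVE slots populated.
 *
 *     for (pv_entry_t *pve = pve_head; pve != NULL; pve = pve->pve_next) {
 *         for (int i = 0; i < PTE_PER_PVE; i++) {
 *             pt_entry_t *ptep = pve->pve_ptep[i];
 *             if (ptep == PT_ENTRY_NULL) {
 *                 continue;
 *             }
 *             // *ptep is one active mapping of the physical page
 *         }
 *     }
 */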

/**
 * Structure that tracks free pv_entry nodes for the pv_head_table. Each one
 * of these nodes represents a single mapping to a physical page, so a new node
 * is allocated whenever a new mapping is created.
 */
typedef struct {
    pv_entry_t *list;
    uint32_t count;
} pv_free_list_t;

/**
 * Forward declaration of the structure that controls page table geometry and
 * TTE/PTE format.
 */
struct page_table_attr;

struct pmap_cpu_data {
#if XNU_MONITOR
    const volatile struct pmap * _Atomic active_pmap;
    const volatile struct pmap * _Atomic inflight_pmap;
    uint64_t pvh_info[4];
    void *ppl_kern_saved_sp;
    void *ppl_stack;
    arm_context_t *save_area;
    unsigned int ppl_state;

#if HAS_GUARDED_IO_FILTER
    void *iofilter_stack;
    void *iofilter_saved_sp;
#endif

    void *scratch_page;
#endif /* XNU_MONITOR */
    pmap_t cpu_nested_pmap;
#if __ARM_MIXED_PAGE_SIZE__
    uint64_t commpage_page_shift;
#endif
#if defined(__arm64__)
    const struct page_table_attr *cpu_nested_pmap_attr;
    vm_map_address_t cpu_nested_region_addr;
    vm_map_offset_t cpu_nested_region_size;
#else /* defined(__arm64__) */
    pmap_t cpu_user_pmap;
#endif /* defined(__arm64__) */
    unsigned int cpu_number;
    bool copywindow_strong_sync[CPUWINDOWS_MAX];
    bool inflight_disconnect;
    pv_free_list_t pv_free;
    pv_entry_t *pv_free_spill_marker;

#if !HAS_16BIT_ASID
    /*
     * This supports overloading of ARM ASIDs by the pmap. The field needs
     * to be wide enough to cover all the virtual bits in a virtual ASID.
     * With 256 physical ASIDs, 8-bit fields let us support up to 65536
     * Virtual ASIDs, minus all that would map on to 0 (as 0 is a global
     * ASID).
     *
     * If we were to use bitfield shenanigans here, we could save a bit of
     * memory by only having enough bits to support MAX_ASIDS. However, such
     * an implementation would be more error prone.
     */
    uint8_t cpu_sw_asids[MAX_HW_ASIDS];
#endif /* !HAS_16BIT_ASID */
};
typedef struct pmap_cpu_data pmap_cpu_data_t;

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else /* defined(ARM_LARGE_MEMORY) */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif /* defined(ARM_LARGE_MEMORY) */

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else /* defined(ARM_LARGE_MEMORY) */
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif /* defined(ARM_LARGE_MEMORY) */
#else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
#if defined(ARM_LARGE_MEMORY)
/* For large memory systems with no KTRR/CTRR such as virtual machines */
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

/**
 * For setups where the VM page size does not match the hardware page size (the
 * VM page size must be a multiple of the hardware page size), we will need to
 * determine what the page ratio is.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
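
/*
 * Worked example: with a 16KB VM page size (PAGE_SHIFT == 14) on 4KB hardware
 * pages (ARM_PGSHIFT == 12), PAGE_RATIO is (1 << 14) >> 12 == 4 and
 * TEST_PAGE_RATIO_4 is true. When the two sizes match, PAGE_RATIO is 1.
 */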



/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpages support */

/* Convert addresses to pages and vice versa. No rounding is used. */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)

/**
 * Round off or truncate to the nearest page. These will work for either
 * addresses or counts (i.e. 1 byte rounds up to 1 page's worth of bytes).
 */
#define arm_round_page(x) ((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
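
/*
 * Example (assuming 4KB hardware pages, ARM_PGMASK == 0xFFF):
 * arm_round_page(1) == 0x1000, arm_trunc_page(0x1fff) == 0x1000,
 * arm_atop(0x2000) == 2, and arm_ptoa(2) == 0x2000.
 */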

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else /* defined(__arm64__) */
#error Unsupported architecture
#endif /* defined(__arm64__) */


extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);


/* Convert an address offset to a translation table index. */
#define ttel0num(a) (((a) & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) (((a) & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) (((a) & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))
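
/*
 * Example (illustrative): pulling the physical page base and the AP
 * permission bits out of a leaf PTE. "ptep" is a hypothetical PTE pointer.
 *
 *     pt_entry_t pte = *ptep;
 *     pmap_paddr_t pa = pte_to_pa(pte);   // physical page base address
 *     uint64_t ap = pte_to_ap(pte);       // AP bits, shifted down
 */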

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))



#define pmap_cs_log(level, fmt, args...)
#define pmap_cs_log_debug(fmt, args...)
#define pmap_cs_log_info(fmt, args...)
#define pmap_cs_log_error(fmt, args...)
#define pmap_cs_log_force(level, fmt, args...)




/* Convert translation/page table entry to kernel virtual address. */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))
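
/*
 * Example (illustrative): inspecting the next-level table that a valid
 * non-leaf TTE points to, via the physical aperture. "ttep" is a
 * hypothetical TTE pointer.
 *
 *     tt_entry_t tte = *ttep;
 *     tt_entry_t *next_level = (tt_entry_t *)ttetokv(tte);
 */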

struct pmap {
    /* Pointer to the root translation table. */
    tt_entry_t *tte;

    /* Physical page of the root translation table. */
    pmap_paddr_t ttep;

    /*
     * The min and max fields represent the lowest and highest addressable VAs
     * as dictated strictly by the paging hierarchy (root level + root table size)
     * in conjunction with whether the root table is used with TTBR0, TTBR1, or VTTBR.
     * These fields do not encapsulate any higher-level address-space partitioning
     * policies.
     */

    /* Lowest supported VA (inclusive) */
    vm_map_address_t min;

    /* Highest supported VA (exclusive) */
    vm_map_address_t max;

#if ARM_PARAMETERIZED_PMAP
    /* Details about the page table layout. */
    const struct page_table_attr * pmap_pt_attr;
#endif /* ARM_PARAMETERIZED_PMAP */

    /* Ledger tracking phys mappings */
    ledger_t ledger;

    decl_lck_rw_data(, rwlock);

    /* Global list of pmaps */
    queue_chain_t pmaps;

    /* Free list of translation table pages. */
    tt_entry_t *tt_entry_free;

    /* Information representing the "nested" (shared) region in this pmap. */
    struct pmap *nested_pmap;
    vm_map_address_t nested_region_addr;
    vm_map_offset_t nested_region_size;
    vm_map_offset_t nested_region_true_start;
    vm_map_offset_t nested_region_true_end;
    unsigned int *nested_region_unnested_table_bitmap;
    unsigned int nested_region_unnested_table_bitmap_size;


    void * reserved0;
    void * reserved1;
    uint8_t reserved12;
    uint64_t reserved2;
    uint64_t reserved3;

    /* PMAP reference count */
    _Atomic int32_t ref_count;

#if XNU_MONITOR
    /* Number of pmaps in which this pmap is nested. */
    _Atomic int32_t nested_count;
#endif

    /* Number of pmaps that nested this pmap without bounds set. */
    uint32_t nested_no_bounds_refcnt;

    /**
     * Represents the real hardware ASID inserted into each TLB entry within
     * this address space.
     */
    uint16_t hw_asid;

    /**
     * Represents the virtual "software" ASID. Any real hardware ASID can have
     * multiple software ASIDs associated with it. This is used to know when to
     * perform TLB flushes during context switches.
     */
    uint8_t sw_asid;

#if MACH_ASSERT
    int pmap_pid;
    char pmap_procname[17];
#endif /* MACH_ASSERT */

    bool reserved4;

    bool pmap_vm_map_cs_enforced;

    bool reserved5;
    unsigned int reserved6;
    unsigned int reserved7;

    bool reserved8;
    bool reserved9;

#if defined(CONFIG_ROSETTA)
    /* Whether the pmap is used for Rosetta. */
    bool is_rosetta;
#else
    bool reserved10;
#endif /* defined(CONFIG_ROSETTA) */

#if DEVELOPMENT || DEBUG
    bool footprint_suspended;
    bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */

    /* Whether the No-Execute functionality is enabled. */
    bool nx_enabled;

    /* Whether this pmap represents a 64-bit address space. */
    bool is_64bit;

    enum : uint8_t {
        /**
         * pmap contains no lingering mappings outside the established
         * bounds of pmap->nested_pmap, and its reference has been removed
         * from pmap->nested_pmap->nested_no_bounds_refcnt.
         */
        NESTED_NO_BOUNDS_REF_NONE = 0,
        /**
         * pmap's mappings outside the established bounds of pmap->nested_pmap
         * have been removed, but pmap->nested_pmap->nested_no_bounds_refcnt
         * still contains pmap's reference.
         */
        NESTED_NO_BOUNDS_REF_SUBORD,
        /**
         * pmap contains mappings after the end of the established bounds
         * of pmap->nested_pmap.
         */
        NESTED_NO_BOUNDS_REF_AFTER,
        /**
         * pmap contains mappings before the beginning and after the end of
         * the established bounds of pmap->nested_pmap.
         */
        NESTED_NO_BOUNDS_REF_BEFORE_AND_AFTER,
    } nested_no_bounds_ref_state;

    /* The nesting bounds have been set. */
    bool nested_bounds_set;

#if HAS_APPLE_PAC
    bool disable_jop;
#else
    bool reserved11;
#endif /* HAS_APPLE_PAC */

    bool reserved13;

#define PMAP_TYPE_USER 0 /* ordinary pmap */
#define PMAP_TYPE_KERNEL 1 /* kernel pmap */
#define PMAP_TYPE_COMMPAGE 2 /* commpage pmap */
#define PMAP_TYPE_NESTED 3 /* pmap nested within another pmap */
    uint8_t type;
};

#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | ((pmap)->hw_asid))
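
/*
 * Example: a pmap with hw_asid == 0x002a and sw_asid == 0x01 yields
 * PMAP_VASID(pmap) == 0x0001002a. Keeping the software ASID in the upper half
 * lets distinct virtual ASIDs that share one hardware ASID remain
 * distinguishable.
 */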

#if VM_DEBUG
extern int pmap_list_resident_pages(
    pmap_t pmap,
    vm_offset_t *listp,
    int space);
#else /* VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* Globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;     /* First CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;   /* Physical translation table addr */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif /* __arm64__ */

#if __arm64__
extern tt_entry_t *invalid_tte; /* Global invalid translation table */
extern pmap_paddr_t invalid_ttep; /* Physical invalid translation table addr */
#endif /* __arm64__ */

#define PMAP_CONTEXT(pmap, thread)

/**
 * Platform-dependent prototypes.
 */
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_switch_user(thread_t th, vm_map_t map);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_gc(void);
#if HAS_APPLE_PAC
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
#endif /* HAS_APPLE_PAC */

/**
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) pmap_switch_user((th), (new_map))

#define pmap_kernel() (kernel_pmap)

#define pmap_kernel_va(VA) \
    (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) (KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) copyin(from, to, cnt)
#define copyoutmsg(from, to, cnt) copyout(from, to, cnt)

/* Unimplemented interfaces. */
#define MACRO_NOOP
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) MACRO_NOOP
#define pmap_pageable(pmap, start, end, pageable) MACRO_NOOP

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern kern_return_t pmap_map_block_addr(pmap_t pmap, addr64_t va, pmap_paddr_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x0
#define PMAP_MAP_BD_WCOMB 0x1
#define PMAP_MAP_BD_POSTED 0x2
#define PMAP_MAP_BD_POSTED_REORDERED 0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK 0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_commpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
    vm_map_address_t *kernel_ro_data_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_commpage(pmap_t pmap);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

static inline vm_offset_t
pmap_ro_zone_align(vm_offset_t value)
{
    return value;
}

extern void pmap_ro_zone_memcpy(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    vm_offset_t new_data, vm_size_t new_data_size);
extern uint64_t pmap_ro_zone_atomic_op(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    uint32_t op, uint64_t value);
extern void pmap_ro_zone_bzero(zone_id_t zid, vm_offset_t va, vm_offset_t offset, vm_size_t size);

#if XNU_MONITOR
/* Exposed for use by the HMAC SHA driver. */
extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx,
    void (*callback)(void *ctx, ppnum_t page_number, const void *page));
extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len));
extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr);
extern void pmap_clear_ppl_hashed_flag_all(void);
extern void pmap_check_ppl_hashed_flag_all(void);
#endif /* XNU_MONITOR */

extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);

extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10
#if XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT
#define ARM_PMAP_MAX_OFFSET_EXTRA_JUMBO 0x20
#endif /* XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT */

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes);
void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);

void pmap_abandon_measurement(void);



/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_COMMPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_VERIFY_SIZE_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC */

#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66


#if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */


#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX 73
#define PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX 74
#define PMAP_NOP_INDEX 75

#define PMAP_RO_ZONE_MEMCPY_INDEX 76
#define PMAP_RO_ZONE_ATOMIC_OP_INDEX 77

#if DEVELOPMENT || DEBUG
#define PMAP_TEST_TEXT_CORRUPTION_INDEX 79
#endif /* DEVELOPMENT || DEBUG */



#define PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX 84
#define PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX 85


#define PMAP_RO_ZONE_BZERO_INDEX 90




#define PMAP_LOAD_TRUST_CACHE_WITH_TYPE_INDEX 98
#define PMAP_QUERY_TRUST_CACHE_INDEX 99
#define PMAP_TOGGLE_DEVELOPER_MODE_INDEX 100
#define PMAP_REGISTER_PROVISIONING_PROFILE_INDEX 101
#define PMAP_UNREGISTER_PROVISIONING_PROFILE_INDEX 102
#define PMAP_ASSOCIATE_PROVISIONING_PROFILE_INDEX 103
#define PMAP_DISASSOCIATE_PROVISIONING_PROFILE_INDEX 104

/* HW read-only/read-write trusted path support */
#define PMAP_SET_TPRO_INDEX 105

#define PMAP_ASSOCIATE_KERNEL_ENTITLEMENTS_INDEX 106
#define PMAP_RESOLVE_KERNEL_ENTITLEMENTS_INDEX 107
#define PMAP_ACCELERATE_ENTITLEMENTS_INDEX 108
#define PMAP_CHECK_TRUST_CACHE_RUNTIME_FOR_UUID_INDEX 109
#define PMAP_IMAGE4_MONITOR_TRAP_INDEX 110

#define PMAP_COUNT 111


/**
 * Value used when initializing pmap per-cpu data to denote that the structure
 * hasn't been initialized with its associated CPU number yet.
 */
#define PMAP_INVALID_CPU_NUM (~0U)

/**
 * Align the pmap per-cpu data to the L2 cache line size for each individual
 * CPU's data. This prevents false sharing, where accesses from one CPU would
 * affect cache lines used by another, especially when atomically updating
 * fields.
 */
struct pmap_cpu_data_array_entry {
    pmap_cpu_data_t cpu_data;
} __attribute__((aligned(MAX_L2_CLINE_BYTES)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

/* Get the pmap per-CPU data for an arbitrary CPU. */
extern pmap_cpu_data_t *pmap_get_remote_cpu_data(unsigned int cpu);
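
/*
 * Illustrative sketch (hypothetical caller): reading the CPU number recorded
 * in the per-CPU data, which holds PMAP_INVALID_CPU_NUM until
 * pmap_cpu_data_init() has run on that CPU.
 *
 *     pmap_cpu_data_t *cpu_data = pmap_get_cpu_data();
 *     if (cpu_data->cpu_number != PMAP_INVALID_CPU_NUM) {
 *         // safe to use cpu_data->cpu_number
 *     }
 */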

/*
 * For long-running PV list operations, we pick a reasonable maximum chunk size
 * beyond which we will exit to preemptible context to avoid excessive preemption
 * latency and PVH lock timeouts.
 */
#define PMAP_MAX_PV_LIST_CHUNK_SIZE 64

/*
 * For most batched page operations, we pick a sane default page count
 * interval at which to check for pending preemption and exit the PPL if found.
 */
#define PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL 64

static inline bool
_pmap_pending_preemption_real(void)
{
    return !!(*((volatile ast_t*)ast_pending()) & AST_URGENT);
}

#if SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT)
bool pmap_pending_preemption(void); // more complicated, so externally defined
#else /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
#define pmap_pending_preemption _pmap_pending_preemption_real
#endif /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
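
/*
 * Illustrative sketch (not a real pmap routine): how a batched operation
 * might pair PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL with
 * pmap_pending_preemption() to bound preemption latency. Names prefixed with
 * "example_" are hypothetical.
 *
 *     unsigned int pages_done = 0;
 *     while (example_more_pages_to_process()) {
 *         example_process_one_page();
 *         if ((++pages_done % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL == 0) &&
 *             pmap_pending_preemption()) {
 *             break; // return to preemptible context and retry later
 *         }
 *     }
 */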

#if XNU_MONITOR
extern boolean_t pmap_ppl_locked_down;

/*
 * Denotes the bounds of the PPL stacks. These are visible so that other code
 * can check if addresses are part of the PPL stacks.
 */
extern void *pmap_stacks_start;
extern void *pmap_stacks_end;

#if HAS_GUARDED_IO_FILTER
extern void *iofilter_stacks_start;
extern void *iofilter_stacks_end;
#endif

/* Asks if a page belongs to the monitor. */
extern boolean_t pmap_is_monitor(ppnum_t pn);

/*
 * Indicates that we are done with our static bootstrap
 * allocations, so the monitor may now mark the pages
 * that it owns.
 */
extern void pmap_static_allocations_done(void);


#ifdef KASAN
#define PPL_STACK_SIZE (PAGE_SIZE << 2)
#else /* KASAN */
#define PPL_STACK_SIZE PAGE_SIZE
#endif /* KASAN */

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define PPL_STACK_REGION_SIZE ((MAX_CPUS * (PPL_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)

/* We don't expect heavy stack usage by the I/O filter, so one page of stack suffices even for KASAN. */
#define IOFILTER_STACK_SIZE PAGE_SIZE

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define IOFILTER_STACK_REGION_SIZE ((MAX_CPUS * (IOFILTER_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)
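
/*
 * Worked example (assuming MAX_CPUS == 4, 16KB pages so ARM_PGBYTES ==
 * PAGE_SIZE == 16KB, and a non-KASAN build): PPL_STACK_SIZE is one 16KB page,
 * so PPL_STACK_REGION_SIZE is (4 * (16KB + 16KB)) + 16KB == 144KB; each stack
 * is preceded by its guard page, with one final guard page above the last
 * stack.
 */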

#define PPL_DATA_SEGMENT_SECTION_NAME "__PPLDATA,__data"
#define PPL_TEXT_SEGMENT_SECTION_NAME "__PPLTEXT,__text,regular,pure_instructions"
#define PPL_DATACONST_SEGMENT_SECTION_NAME "__PPLDATA,__const"

#define MARK_AS_PMAP_DATA \
    __PLACE_IN_SECTION(PPL_DATA_SEGMENT_SECTION_NAME)
#define MARK_AS_PMAP_TEXT \
    __attribute__((used, section(PPL_TEXT_SEGMENT_SECTION_NAME), noinline))
#define MARK_AS_PMAP_RODATA \
    __PLACE_IN_SECTION(PPL_DATACONST_SEGMENT_SECTION_NAME)

#else /* XNU_MONITOR */

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

#endif /* XNU_MONITOR */

/*
 * Indicates that we are done mutating sensitive state in the system, and that
 * the pmap may now restrict access as dictated by system security policy.
 */
extern void pmap_lockdown_ppl(void);


extern void pmap_nop(pmap_t);

extern lck_grp_t pmap_lck_grp;

extern void CleanPoC_DcacheRegion_Force_nopreempt_nohid(vm_offset_t va, size_t length);

#if XNU_MONITOR
extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length);
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz)
#define pmap_simple_lock(l) simple_lock_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock_nopreempt(l)
#define pmap_simple_lock_try(l) simple_lock_try_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit_nopreempt(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit_nopreempt(l, i)
#else /* XNU_MONITOR */
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)
#endif /* XNU_MONITOR */
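
/*
 * Illustrative sketch: the wrappers above let pmap code take simple locks
 * without caring whether it runs inside the PPL (where preemption is already
 * disabled, hence the _nopreempt variants). A hypothetical caller:
 *
 *     decl_simple_lock_data(static, example_lock);
 *
 *     pmap_simple_lock(&example_lock);
 *     // ... critical section ...
 *     pmap_simple_unlock(&example_lock);
 */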

#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */