/*
 * Copyright (c) 2007-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * Machine-dependent structures for the physical map module.
 *
 * This header file contains the types and prototypes that make up the public
 * pmap API that's exposed to the rest of the kernel. Any types/prototypes used
 * strictly by the pmap itself should be placed into one of the osfmk/arm/pmap/
 * header files.
 *
 * To prevent circular dependencies and exposing anything not needed by the
 * rest of the kernel, this file shouldn't include ANY of the internal
 * osfmk/arm/pmap/ header files.
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_

#include <mach_assert.h>
#include <arm64/proc_reg.h>

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm/pmap_public.h>
#include <kern/ast.h>
#include <mach/arm/thread_status.h>

#if defined(__arm64__)
#include <arm64/tlb.h>
#else /* defined(__arm64__) */
#include <arm/tlb.h>
#endif /* defined(__arm64__) */

/* Shift for 2048 max virtual ASIDs (2048 pmaps). */
#define ASID_SHIFT (11)

/* Max supported ASIDs (can be virtual). */
#define MAX_ASIDS (1 << ASID_SHIFT)

/* Shift for the maximum ARM ASID value (256). */
#ifndef ARM_ASID_SHIFT
#define ARM_ASID_SHIFT (8)
#endif /* ARM_ASID_SHIFT */

/* Max ASIDs supported by the hardware. */
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT)

/* Number of bits in a byte. */
#define NBBY (8)

/**
 * The maximum number of hardware ASIDs used by the pmap for user address spaces.
 *
 * One ASID is always dedicated to the kernel (ASID 0). On systems with software-
 * based spectre/meltdown mitigations, each address space technically uses two
 * hardware ASIDs (one for EL1 and one for EL0), so the number of ASIDs
 * available to user processes is halved on those systems.
 */
#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS ((ARM_MAX_ASIDS >> 1) - 1)
#else /* __ARM_KERNEL_PROTECT__ */
#define MAX_HW_ASIDS (ARM_MAX_ASIDS - 1)
#endif /* __ARM_KERNEL_PROTECT__ */
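
/*
 * Worked example (a sketch, assuming the default ARM_ASID_SHIFT of 8):
 * ARM_MAX_ASIDS = 1 << 8 = 256. Without __ARM_KERNEL_PROTECT__, one ASID is
 * reserved for the kernel, leaving MAX_HW_ASIDS = 256 - 1 = 255. With
 * __ARM_KERNEL_PROTECT__, ASIDs are consumed in EL1/EL0 pairs, so
 * MAX_HW_ASIDS = (256 >> 1) - 1 = 127.
 */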

/* Maximum number of Virtual Machine IDs. */
#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (8)
#endif /* ARM_VMID_SHIFT */
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT)

/* XPRR virtual register map */

/* Maximum number of CPU copy windows per CPU. */
#define CPUWINDOWS_MAX 4

#if defined(__arm64__)

#if defined(ARM_LARGE_MEMORY)
/*
 * Upper limit on how many pages can be consumed by bootstrap page tables:
 * 2 L1 tables (linear KVA and V=P), plus 2*16 L2 tables, which map up to
 * 16 * 64GB = 1TB of DRAM. That totals 2 + 32 = 34 pages.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else /* defined(ARM_LARGE_MEMORY) */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif /* defined(ARM_LARGE_MEMORY) */

typedef uint64_t tt_entry_t; /* translation table entry type */
typedef uint64_t pt_entry_t; /* page table entry type */
#else /* defined(__arm64__) */
#error unknown arch
#endif /* defined(__arm64__) */

/* Used to represent a NULL page/translation table entry pointer. */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

/**
 * Number of PTE pointers in a single PVE. This must be 2, since the algorithm
 * has been optimized to that case. Should this change in the future, both
 * enter_pv() and remove_pv() will need to be modified accordingly. In addition
 * to this, the documentation and the LLDB macros that walk PV lists will also
 * need to be adapted.
 */
#define PTE_PER_PVE 2
_Static_assert(PTE_PER_PVE == 2, "PTE_PER_PVE is not 2");

/**
 * Structure to track the active mappings for a given page. This structure is
 * used in the pv_head_table when a physical page has more than one mapping to
 * it. Each entry in this linked list of structures can represent
 * up to PTE_PER_PVE mappings.
 */
typedef struct pv_entry {
        /* Linked list to the next mapping of the physical page. */
        struct pv_entry *pve_next;

        /* Pointers to the page table entries for this mapping. */
        pt_entry_t *pve_ptep[PTE_PER_PVE];
} pv_entry_t;
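
/*
 * Illustrative sketch of walking a PV list. This is a hand-rolled loop, not
 * the pmap's internal accessors (which may also pack flag bits into these
 * pointers); "pveh" is a hypothetical pointer to the first pv_entry_t for a
 * page. Each node carries up to PTE_PER_PVE PTE pointers, with unused slots
 * left as PT_ENTRY_NULL:
 *
 *     for (pv_entry_t *pve = pveh; pve != NULL; pve = pve->pve_next) {
 *         for (unsigned int i = 0; i < PTE_PER_PVE; i++) {
 *             pt_entry_t *ptep = pve->pve_ptep[i];
 *             if (ptep != PT_ENTRY_NULL) {
 *                 // *ptep is one PTE mapping the physical page
 *             }
 *         }
 *     }
 */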

/**
 * Structure that tracks free pv_entry nodes for the pv_head_table. Each one
 * of these nodes represents a single mapping to a physical page, so a new node
 * is allocated whenever a new mapping is created.
 */
typedef struct {
        pv_entry_t *list;
        uint32_t count;
} pv_free_list_t;

/**
 * Forward declaration of the structure that controls page table geometry and
 * TTE/PTE format.
 */
struct page_table_attr;

struct pmap_cpu_data {
#if XNU_MONITOR
        const volatile struct pmap * _Atomic active_pmap;
        const volatile struct pmap * _Atomic inflight_pmap;
        uint64_t pvh_info[4];
        void *ppl_kern_saved_sp;
        void *ppl_stack;
        arm_context_t *save_area;
        unsigned int ppl_state;

#if HAS_GUARDED_IO_FILTER
        void *iofilter_stack;
        void *iofilter_saved_sp;
#endif
#endif /* XNU_MONITOR */
        pmap_t cpu_nested_pmap;
#if __ARM_MIXED_PAGE_SIZE__
        uint64_t commpage_page_shift;
#endif
#if defined(__arm64__)
        const struct page_table_attr *cpu_nested_pmap_attr;
        vm_map_address_t cpu_nested_region_addr;
        vm_map_offset_t cpu_nested_region_size;
#else /* defined(__arm64__) */
        pmap_t cpu_user_pmap;
#endif /* defined(__arm64__) */
        unsigned int cpu_number;
        bool copywindow_strong_sync[CPUWINDOWS_MAX];
        bool inflight_disconnect;
        pv_free_list_t pv_free;
        pv_entry_t *pv_free_spill_marker;

        /*
         * This supports overloading of ARM ASIDs by the pmap. The field needs
         * to be wide enough to cover all the virtual bits in a virtual ASID.
         * With 256 physical ASIDs, 8-bit fields let us support up to 65536
         * virtual ASIDs, minus all that would map onto 0 (as 0 is a global
         * ASID).
         *
         * If we were to use bitfield shenanigans here, we could save a bit of
         * memory by only having enough bits to support MAX_ASIDS. However, such
         * an implementation would be more error-prone.
         */
        uint8_t cpu_sw_asids[MAX_HW_ASIDS];
};
typedef struct pmap_cpu_data pmap_cpu_data_t;

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>

#include <sys/cdefs.h>

/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else /* defined(ARM_LARGE_MEMORY) */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif /* defined(ARM_LARGE_MEMORY) */

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else /* defined(ARM_LARGE_MEMORY) */
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif /* defined(ARM_LARGE_MEMORY) */
#else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
#if defined(ARM_LARGE_MEMORY)
/* For large-memory systems with no KTRR/CTRR, such as virtual machines. */
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */

/**
 * For setups where the VM page size does not match the hardware page size (the
 * VM page size must be a multiple of the hardware page size), we will need to
 * determine what the page ratio is.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)
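
/*
 * Worked example (a sketch, assuming 16KB VM pages on 4KB hardware pages):
 * PAGE_SHIFT = 14 and ARM_PGSHIFT = 12, so PAGE_RATIO = (1 << 14) >> 12 = 4
 * and TEST_PAGE_RATIO_4 is true; each VM page is backed by four contiguous
 * hardware pages.
 */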

/* Superpages. */
#define SUPERPAGE_NBASEPAGES 1 /* No superpage support. */

/* Convert addresses to pages and vice versa. No rounding is used. */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)

/**
 * Round up or truncate to the nearest page boundary. These will work for
 * either addresses or counts (i.e., 1 byte rounds up to 1 page's worth of
 * bytes).
 */
#define arm_round_page(x) ((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
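
/*
 * Examples, assuming 4KB hardware pages (ARM_PGMASK = 0xFFF):
 * arm_round_page(0x2001) == 0x3000, arm_round_page(1) == 0x1000, and
 * arm_trunc_page(0x2FFF) == 0x2000. Likewise arm_atop(0x3000) == 3 and
 * arm_ptoa(3) == 0x3000.
 */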

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

#if defined(__arm64__)
extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);
#else /* defined(__arm64__) */
#error Unsupported architecture
#endif /* defined(__arm64__) */

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);

/* Convert an address offset to a translation table index. */
#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))

#define pmap_cs_log(level, fmt, args...)
#define pmap_cs_log_debug(fmt, args...)
#define pmap_cs_log_info(fmt, args...)
#define pmap_cs_log_error(fmt, args...)
#define pmap_cs_log_force(level, fmt, args...)

/* Convert a translation/page table entry to a kernel virtual address. */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))
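
/*
 * Illustrative sketch: translating a leaf PTE into the kernel virtual address
 * of the page it maps. The masks come from the proc_reg headers; the variable
 * names here are hypothetical.
 *
 *     pt_entry_t pte = *ptep;               // fetch the entry
 *     pmap_paddr_t pa = pte_to_pa(pte);     // pte & ARM_PTE_PAGE_MASK
 *     vm_map_address_t kva = ptetokv(pte);  // phystokv(pte_to_pa(pte))
 */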

struct pmap {
        /* Pointer to the root translation table. */
        tt_entry_t *tte;

        /* Physical page of the root translation table. */
        pmap_paddr_t ttep;

        /*
         * The min and max fields represent the lowest and highest addressable VAs
         * as dictated strictly by the paging hierarchy (root level + root table size)
         * in conjunction with whether the root table is used with TTBR0, TTBR1, or
         * VTTBR. These fields do not encapsulate any higher-level address-space
         * partitioning policies.
         */

        /* Lowest supported VA (inclusive). */
        vm_map_address_t min;

        /* Highest supported VA (exclusive). */
        vm_map_address_t max;

#if ARM_PARAMETERIZED_PMAP
        /* Details about the page table layout. */
        const struct page_table_attr * pmap_pt_attr;
#endif /* ARM_PARAMETERIZED_PMAP */

        /* Ledger tracking phys mappings. */
        ledger_t ledger;

        decl_lck_rw_data(, rwlock);

        /* Global list of pmaps. */
        queue_chain_t pmaps;

        /* Free list of translation table pages. */
        tt_entry_t *tt_entry_free;

        /* Information representing the "nested" (shared) region in this pmap. */
        struct pmap *nested_pmap;
        vm_map_address_t nested_region_addr;
        vm_map_offset_t nested_region_size;
        vm_map_offset_t nested_region_true_start;
        vm_map_offset_t nested_region_true_end;
        unsigned int *nested_region_asid_bitmap;
        unsigned int nested_region_asid_bitmap_size;

        void * reserved0;
        void * reserved1;
        uint64_t reserved2;
        uint64_t reserved3;

        /* pmap reference count. */
        _Atomic int32_t ref_count;

#if XNU_MONITOR
        /* Number of pmaps in which this pmap is nested. */
        _Atomic int32_t nested_count;
#endif

        /* Number of pmaps that nested this pmap without bounds set. */
        uint32_t nested_no_bounds_refcnt;

        /**
         * Represents the real hardware ASID inserted into each TLB entry within
         * this address space.
         */
        uint16_t hw_asid;

        /**
         * Represents the virtual "software" ASID. Any real hardware ASID can have
         * multiple software ASIDs associated with it. This is used to know when to
         * perform TLB flushes during context switches.
         */
        uint8_t sw_asid;

#if MACH_ASSERT
        int pmap_pid;
        char pmap_procname[17];
#endif /* MACH_ASSERT */

        bool reserved4;

        bool pmap_vm_map_cs_enforced;

        bool reserved5;
        unsigned int reserved6;
        unsigned int reserved7;

        bool reserved8;
        bool reserved9;

#if defined(CONFIG_ROSETTA)
        /* Whether the pmap is used for Rosetta. */
        bool is_rosetta;
#else
        bool reserved10;
#endif /* defined(CONFIG_ROSETTA) */

#if DEVELOPMENT || DEBUG
        bool footprint_suspended;
        bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */

        /* Whether the No-Execute functionality is enabled. */
        bool nx_enabled;

        /* Whether this pmap represents a 64-bit address space. */
        bool is_64bit;

        /* Nested a pmap when the bounds were not set. */
        bool nested_has_no_bounds_ref;

        /* The nesting bounds have been set. */
        bool nested_bounds_set;

#if HAS_APPLE_PAC
        bool disable_jop;
#else
        bool reserved11;
#endif /* HAS_APPLE_PAC */

        bool reserved12;

#define PMAP_TYPE_USER 0 /* Ordinary pmap. */
#define PMAP_TYPE_KERNEL 1 /* Kernel pmap. */
#define PMAP_TYPE_COMMPAGE 2 /* Commpage pmap. */
#define PMAP_TYPE_NESTED 3 /* Pmap nested within another pmap. */
        uint8_t type;
};

#define PMAP_VASID(pmap) (((uint32_t)((pmap)->sw_asid) << 16) | (pmap)->hw_asid)
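
/*
 * Example: PMAP_VASID packs the 8-bit software ASID into bits 16-23 above the
 * 16-bit hardware ASID. For a pmap with sw_asid 0x2A and hw_asid 0x0013,
 * PMAP_VASID(pmap) == (0x2A << 16) | 0x0013 == 0x002A0013.
 */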

#if VM_DEBUG
extern int pmap_list_resident_pages(
        pmap_t pmap,
        vm_offset_t *listp,
        int space);
#else /* VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* Globals shared between arm_vm_init and pmap. */
extern tt_entry_t *cpu_tte; /* First CPU's translation table (shared with kernel pmap). */
extern pmap_paddr_t cpu_ttep; /* Physical translation table address. */

#if __arm64__
extern void *ropagetable_begin;
extern void *ropagetable_end;
#endif /* __arm64__ */

#if __arm64__
extern tt_entry_t *invalid_tte; /* Global invalid translation table. */
extern pmap_paddr_t invalid_ttep; /* Physical invalid translation table address. */
#endif /* __arm64__ */

#define PMAP_CONTEXT(pmap, thread)

/**
 * Platform-dependent prototypes.
 */
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_switch_user(thread_t th, vm_map_t map);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_gc(void);
#if HAS_APPLE_PAC
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
#endif /* HAS_APPLE_PAC */

/**
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) pmap_switch_user((th), (new_map))

#define pmap_kernel() (kernel_pmap)

#define pmap_kernel_va(VA) \
        (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) (KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) copyin(from, to, cnt)
#define copyoutmsg(from, to, cnt) copyout(from, to, cnt)

/* Unimplemented interfaces. */
#define MACRO_NOOP
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) MACRO_NOOP
#define pmap_pageable(pmap, start, end, pageable) MACRO_NOOP

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern kern_return_t pmap_map_block_addr(pmap_t pmap, addr64_t va, pmap_paddr_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x0
#define PMAP_MAP_BD_WCOMB 0x1
#define PMAP_MAP_BD_POSTED 0x2
#define PMAP_MAP_BD_POSTED_REORDERED 0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK 0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_commpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
    vm_map_address_t *kernel_ro_data_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_commpage(pmap_t pmap);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);
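
/*
 * Typical usage sketch (hypothetical page number "pn" and "wimg_bits"; error
 * handling elided): map a physical page into one of the current CPU's copy
 * windows, operate on it through the returned window, then tear it down.
 *
 *     unsigned int index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
 *     vm_offset_t va = pmap_cpu_windows_copy_addr(cpu_number(), index);
 *     // ... read the page's contents through va ...
 *     pmap_unmap_cpu_windows_copy(index);
 */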

static inline vm_offset_t
pmap_ro_zone_align(vm_offset_t value)
{
        return value;
}

extern void pmap_ro_zone_memcpy(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    vm_offset_t new_data, vm_size_t new_data_size);
extern uint64_t pmap_ro_zone_atomic_op(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    uint32_t op, uint64_t value);
extern void pmap_ro_zone_bzero(zone_id_t zid, vm_offset_t va, vm_offset_t offset, vm_size_t size);

#if XNU_MONITOR
/* Exposed for use by the HMAC SHA driver. */
extern void pmap_invoke_with_page(ppnum_t page_number, void *ctx,
    void (*callback)(void *ctx, ppnum_t page_number, const void *page));
extern void pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len));
extern void pmap_set_ppl_hashed_flag(const pmap_paddr_t addr);
extern void pmap_clear_ppl_hashed_flag_all(void);
extern void pmap_check_ppl_hashed_flag_all(void);
#endif /* XNU_MONITOR */

extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);

extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes);
void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);

void pmap_abandon_measurement(void);

/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used. */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_COMMPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36

#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_VERIFY_SIZE_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC */

#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX 73
#define PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX 74
#define PMAP_NOP_INDEX 75

#define PMAP_RO_ZONE_MEMCPY_INDEX 76
#define PMAP_RO_ZONE_ATOMIC_OP_INDEX 77

#if DEVELOPMENT || DEBUG
#define PMAP_TEST_TEXT_CORRUPTION_INDEX 79
#endif /* DEVELOPMENT || DEBUG */

#define PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX 84
#define PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX 85

#define PMAP_RO_ZONE_BZERO_INDEX 90

#define PMAP_LOAD_TRUST_CACHE_WITH_TYPE_INDEX 98
#define PMAP_QUERY_TRUST_CACHE_INDEX 99
#define PMAP_TOGGLE_DEVELOPER_MODE_INDEX 100
#define PMAP_REGISTER_PROVISIONING_PROFILE_INDEX 101
#define PMAP_UNREGISTER_PROVISIONING_PROFILE_INDEX 102
#define PMAP_ASSOCIATE_PROVISIONING_PROFILE_INDEX 103
#define PMAP_DISASSOCIATE_PROVISIONING_PROFILE_INDEX 104

/* HW read-only/read-write trusted path support */
#define PMAP_SET_TPRO_INDEX 105

#define PMAP_ASSOCIATE_KERNEL_ENTITLEMENTS_INDEX 106
#define PMAP_RESOLVE_KERNEL_ENTITLEMENTS_INDEX 107
#define PMAP_ACCELERATE_ENTITLEMENTS_INDEX 108
#define PMAP_CHECK_TRUST_CACHE_RUNTIME_FOR_UUID_INDEX 109

#define PMAP_COUNT 110

/**
 * Value used when initializing pmap per-cpu data to denote that the structure
 * hasn't been initialized with its associated CPU number yet.
 */
#define PMAP_INVALID_CPU_NUM (~0U)

/**
 * Align the pmap per-cpu data to the L2 cache size for each individual CPU's
 * data. This prevents accesses from one CPU affecting another, especially
 * when atomically updating fields.
 */
struct pmap_cpu_data_array_entry {
        pmap_cpu_data_t cpu_data;
} __attribute__((aligned(MAX_L2_CLINE_BYTES)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

/* Get the pmap per-CPU data for an arbitrary CPU. */
extern pmap_cpu_data_t *pmap_get_remote_cpu_data(unsigned int cpu);

/*
 * For long-running PV list operations, we pick a reasonable maximum chunk size
 * beyond which we will exit to preemptible context to avoid excessive preemption
 * latency and PVH lock timeouts.
 */
#define PMAP_MAX_PV_LIST_CHUNK_SIZE 64

/*
 * For most batched page operations, we pick a sane default page count
 * interval at which to check for pending preemption and exit the PPL if found.
 */
#define PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL 64

static inline bool
_pmap_pending_preemption_real(void)
{
        return !!(*((volatile ast_t*)ast_pending()) & AST_URGENT);
}

#if SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT)
bool pmap_pending_preemption(void); /* More complicated, so externally defined. */
#else /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
#define pmap_pending_preemption _pmap_pending_preemption_real
#endif /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
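
/*
 * Usage sketch (a hypothetical loop, not a real pmap routine): a batched page
 * operation checks for pending urgent preemption every
 * PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL pages and bails out so the
 * caller can retry from preemptible context.
 *
 *     for (unsigned int i = 0; i < npages; i++) {
 *         process_one_page(...);  // hypothetical per-page work
 *         if ((((i + 1) % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) == 0) &&
 *             pmap_pending_preemption()) {
 *             return KERN_ABORTED;  // caller resumes after preemption
 *         }
 *     }
 */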

#if XNU_MONITOR
extern boolean_t pmap_ppl_locked_down;

/*
 * Denotes the bounds of the PPL stacks. These are visible so that other code
 * can check if addresses are part of the PPL stacks.
 */
extern void *pmap_stacks_start;
extern void *pmap_stacks_end;

#if HAS_GUARDED_IO_FILTER
extern void *iofilter_stacks_start;
extern void *iofilter_stacks_end;
#endif

/* Asks if a page belongs to the monitor. */
extern boolean_t pmap_is_monitor(ppnum_t pn);

/*
 * Indicates that we are done with our static bootstrap
 * allocations, so the monitor may now mark the pages
 * that it owns.
 */
extern void pmap_static_allocations_done(void);

#ifdef KASAN
#define PPL_STACK_SIZE (PAGE_SIZE << 2)
#else /* KASAN */
#define PPL_STACK_SIZE PAGE_SIZE
#endif /* KASAN */

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define PPL_STACK_REGION_SIZE ((MAX_CPUS * (PPL_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)

/* We don't expect heavy stack usage by the I/O filter, so one page of stack even for KASAN. */
#define IOFILTER_STACK_SIZE PAGE_SIZE

/* One stack for each CPU, plus a guard page below each stack and above the last stack. */
#define IOFILTER_STACK_REGION_SIZE ((MAX_CPUS * (IOFILTER_STACK_SIZE + ARM_PGBYTES)) + ARM_PGBYTES)
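
/*
 * Worked example (a sketch, assuming MAX_CPUS = 8, 16KB hardware pages, and
 * no KASAN): PPL_STACK_SIZE = 16KB, so PPL_STACK_REGION_SIZE =
 * (8 * (16KB + 16KB)) + 16KB = 272KB: eight stacks, a guard page below each,
 * and one final guard page above the last stack.
 */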

#define PPL_DATA_SEGMENT_SECTION_NAME "__PPLDATA,__data"
#define PPL_TEXT_SEGMENT_SECTION_NAME "__PPLTEXT,__text,regular,pure_instructions"
#define PPL_DATACONST_SEGMENT_SECTION_NAME "__PPLDATA,__const"

#define MARK_AS_PMAP_DATA \
        __PLACE_IN_SECTION(PPL_DATA_SEGMENT_SECTION_NAME)
#define MARK_AS_PMAP_TEXT \
        __attribute__((used, section(PPL_TEXT_SEGMENT_SECTION_NAME), noinline))
#define MARK_AS_PMAP_RODATA \
        __PLACE_IN_SECTION(PPL_DATACONST_SEGMENT_SECTION_NAME)

#else /* XNU_MONITOR */

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

#endif /* XNU_MONITOR */
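
/*
 * Usage sketch (hypothetical symbol names): placing pmap-owned state into the
 * PPL segments so the monitor can protect it at lockdown. On non-XNU_MONITOR
 * builds the attributes expand to nothing and the symbols land in their
 * normal sections.
 *
 *     static uint64_t pmap_foo_count MARK_AS_PMAP_DATA;  // hypothetical
 *
 *     MARK_AS_PMAP_TEXT static void
 *     pmap_foo_internal(void)                            // hypothetical
 *     {
 *         pmap_foo_count++;
 *     }
 */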

/*
 * Indicates that we are done mutating sensitive state in the system, and that
 * the pmap may now restrict access as dictated by system security policy.
 */
extern void pmap_lockdown_ppl(void);

extern void pmap_nop(pmap_t);

extern lck_grp_t pmap_lck_grp;

extern void CleanPoC_DcacheRegion_Force_nopreempt_nohid(vm_offset_t va, size_t length);

#if XNU_MONITOR
extern void CleanPoC_DcacheRegion_Force_nopreempt(vm_offset_t va, size_t length);
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force_nopreempt(va, sz)
#define pmap_simple_lock(l) simple_lock_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock_nopreempt(l)
#define pmap_simple_lock_try(l) simple_lock_try_nopreempt(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit_nopreempt(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit_nopreempt(l, i)
#else /* XNU_MONITOR */
#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)
#define pmap_lock_bit(l, i) hw_lock_bit(l, i, &pmap_lck_grp)
#define pmap_unlock_bit(l, i) hw_unlock_bit(l, i)
#endif /* XNU_MONITOR */

#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */