/*
 * Copyright (c) 2007-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/**
 * Machine-dependent structures for the physical map module.
 *
 * This header file contains the types and prototypes that make up the public
 * pmap API that's exposed to the rest of the kernel. Any types/prototypes used
 * strictly by the pmap itself should be placed into one of the osfmk/arm/pmap/
 * header files.
 *
 * To prevent circular dependencies and exposing anything not needed by the
 * rest of the kernel, this file shouldn't include ANY of the internal
 * osfmk/arm/pmap/ header files.
 */
#ifndef _ARM_PMAP_H_
#define _ARM_PMAP_H_

#include <mach_assert.h>

#include <arm64/proc_reg.h>

#ifndef ASSEMBLER

#include <stdatomic.h>
#include <stdbool.h>
#include <libkern/section_keywords.h>
#include <mach/kern_return.h>
#include <mach/machine/vm_types.h>
#include <arm64/sptm/pmap/pmap_public.h>
#include <kern/ast.h>
#include <mach/arm/thread_status.h>
#include <os/refcnt.h>

#include <arm64/tlb.h>


/* Shift for 2048 max virtual ASIDs (2048 pmaps). */
#define ASID_SHIFT (11)

/* Max supported ASIDs (can be virtual). */
#define MAX_ASIDS (1 << ASID_SHIFT)

/* Shift for the maximum ARM ASID value (256 or 65536). */
#ifndef ARM_ASID_SHIFT
#if HAS_16BIT_ASID
#define ARM_ASID_SHIFT (16)
#else
#define ARM_ASID_SHIFT (8)
#endif /* HAS_16BIT_ASID */
#endif /* ARM_ASID_SHIFT */

/* Max ASIDs supported by the hardware. */
#define ARM_MAX_ASIDS (1 << ARM_ASID_SHIFT)

/* Number of bits in a byte. */
#define NBBY (8)
/**
 * The maximum number of hardware ASIDs used by the pmap for user address spaces.
 *
 * One ASID is always dedicated to the kernel (ASID 0). On systems with
 * software-based Spectre/Meltdown mitigations, each address space technically
 * uses two hardware ASIDs (one for EL1 and one for EL0), so the number of
 * ASIDs available to user processes is halved on those systems.
 */
#if __ARM_KERNEL_PROTECT__
#define MAX_HW_ASIDS (ARM_MAX_ASIDS >> 1)
#else
#define MAX_HW_ASIDS ARM_MAX_ASIDS
#endif /* __ARM_KERNEL_PROTECT__ */

/**
 * Maximum number of Virtual Machine IDs.
 *
 * All even-numbered physical VMIDs are reserved for SK usage, so only 128
 * logical VMIDs are available. Software converts a logical VMID to the
 * corresponding odd-numbered physical VMID when allocating/freeing VMIDs.
 */
#ifndef ARM_VMID_SHIFT
#define ARM_VMID_SHIFT (7)
#endif /* ARM_VMID_SHIFT */
#define ARM_MAX_VMIDS (1 << ARM_VMID_SHIFT)
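
/*
 * Sketch of the logical-to-physical VMID conversion described above. The
 * exact mapping is private to the pmap implementation; the shift-and-set
 * scheme below is only one plausible way to land on odd-numbered VMIDs.
 */
static inline uint16_t
pmap_logical_to_physical_vmid_example(uint16_t logical_vmid)
{
        /* Even physical VMIDs are reserved for SK; produce an odd one. */
        return (uint16_t)((logical_vmid << 1) | 1);
}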

/* XPRR virtual register map */

/* Maximum number of CPU copy windows per CPU. */
#define CPUWINDOWS_MAX 4


#if defined(ARM_LARGE_MEMORY)
/*
 * Upper limit on how many pages the bootstrap page tables can consume:
 * 2 L1 tables (linear KVA and V=P), plus 2*16 L2 tables, which map up to
 * 16 * 64GB = 1TB of DRAM.
 */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 34)
#else /* defined(ARM_LARGE_MEMORY) */
#define BOOTSTRAP_TABLE_SIZE (ARM_PGBYTES * 8)
#endif /* defined(ARM_LARGE_MEMORY) */

typedef uint64_t tt_entry_t;    /* translation table entry type */
typedef uint64_t pt_entry_t;    /* page table entry type */

/* Used to represent a NULL page/translation table entry pointer. */
#define PT_ENTRY_NULL ((pt_entry_t *) 0)
#define TT_ENTRY_NULL ((tt_entry_t *) 0)

/**
 * Number of PTE pointers in a single PVE. This must be 2, since the algorithm
 * has been optimized to that case. Should this change in the future, both
 * enter_pv() and remove_pv() will need to be modified accordingly. In addition
 * to this, the documentation and the LLDB macros that walk PV lists will also
 * need to be adapted.
 */
#define PTE_PER_PVE 2
_Static_assert(PTE_PER_PVE == 2, "PTE_PER_PVE is not 2");

/**
 * Structure to track the active mappings for a given page. This structure is
 * used in the pv_head_table when a physical page has more than one mapping to
 * it. Each entry in this linked list of structures can represent
 * up to PTE_PER_PVE mappings.
 */
typedef struct pv_entry {
        /* Linked list to the next mapping of the physical page. */
        struct pv_entry *pve_next;

        /* Pointer to the page table entry for this mapping. */
        pt_entry_t *pve_ptep[PTE_PER_PVE];
} pv_entry_t;
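
/*
 * Sketch of walking a PV list built from pv_entry structures. The real
 * traversal in the pmap also handles the other encodings of pv_head_table
 * entries (e.g. a single direct PTE pointer); pve_head is a hypothetical
 * starting point obtained from such an entry.
 */
static inline void
pve_list_walk_example(pv_entry_t *pve_head)
{
        for (pv_entry_t *pve = pve_head; pve != NULL; pve = pve->pve_next) {
                for (unsigned int i = 0; i < PTE_PER_PVE; i++) {
                        pt_entry_t *ptep = pve->pve_ptep[i];
                        if (ptep == PT_ENTRY_NULL) {
                                continue; /* Unused slot within this PVE. */
                        }
                        /* ...examine or update *ptep here... */
                }
        }
}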

/**
 * Structure that tracks free pv_entry nodes for the pv_head_table. Each one
 * of these nodes represents a single mapping to a physical page, so a new node
 * is allocated whenever a new mapping is created.
 */
typedef struct {
        pv_entry_t *list;
        uint32_t count;
} pv_free_list_t;

/**
 * Forward declaration of the structure that controls page table geometry and
 * TTE/PTE format.
 */
struct page_table_attr;

struct pmap_cpu_data {
        unsigned int cpu_number;
        bool copywindow_strong_sync[CPUWINDOWS_MAX];
        pv_free_list_t pv_free;
        pv_entry_t *pv_free_spill_marker;
};
typedef struct pmap_cpu_data pmap_cpu_data_t;

#include <mach/vm_prot.h>
#include <mach/vm_statistics.h>
#include <mach/machine/vm_param.h>
#include <kern/kern_types.h>
#include <kern/thread.h>
#include <kern/queue.h>


#include <sys/cdefs.h>

/* Base address for low globals. */
#if defined(ARM_LARGE_MEMORY)
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffe0000000000ULL
#else /* defined(ARM_LARGE_MEMORY) */
#define LOW_GLOBAL_BASE_ADDRESS 0xfffffff000000000ULL
#endif /* defined(ARM_LARGE_MEMORY) */

/*
 * This indicates (roughly) where there is free space for the VM
 * to use for the heap; this does not need to be precise.
 */
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
#if defined(ARM_LARGE_MEMORY)
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else /* defined(ARM_LARGE_MEMORY) */
#define KERNEL_PMAP_HEAP_RANGE_START VM_MIN_KERNEL_AND_KEXT_ADDRESS
#endif /* defined(ARM_LARGE_MEMORY) */
#else /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
#if defined(ARM_LARGE_MEMORY)
/* For large memory systems with no KTRR/CTRR such as virtual machines */
#define KERNEL_PMAP_HEAP_RANGE_START (VM_MIN_KERNEL_AND_KEXT_ADDRESS+ARM_TT_L1_SIZE)
#else
#define KERNEL_PMAP_HEAP_RANGE_START LOW_GLOBAL_BASE_ADDRESS
#endif
#endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR) */
/**
 * For configurations where the VM page size does not match the hardware page
 * size (the VM page size must be a multiple of the hardware page size), we
 * need to determine the page ratio: for example, a 16KB VM page size on 4KB
 * hardware pages yields a ratio of 4.
 */
#define PAGE_RATIO ((1 << PAGE_SHIFT) >> ARM_PGSHIFT)
#define TEST_PAGE_RATIO_4 (PAGE_RATIO == 4)


/* superpages */
#define SUPERPAGE_NBASEPAGES 1 /* No superpage support */

/* Convert addresses to pages and vice versa. No rounding is used. */
#define arm_atop(x) (((vm_map_address_t)(x)) >> ARM_PGSHIFT)
#define arm_ptoa(x) (((vm_map_address_t)(x)) << ARM_PGSHIFT)

/**
 * Round up or truncate to the nearest page. These work for either addresses
 * or counts (e.g., rounding up 1 byte yields one full page of bytes).
 */
#define arm_round_page(x) ((((vm_map_address_t)(x)) + ARM_PGMASK) & ~ARM_PGMASK)
#define arm_trunc_page(x) (((vm_map_address_t)(x)) & ~ARM_PGMASK)
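
/*
 * Worked examples, assuming 4KB hardware pages (ARM_PGSHIFT == 12,
 * ARM_PGMASK == 0xFFF):
 *
 *      arm_trunc_page(0x1234) == 0x1000   // truncate down to the page base
 *      arm_round_page(0x1234) == 0x2000   // round up to the next page
 *      arm_round_page(1)      == 0x1000   // one byte rounds to one page
 *      arm_atop(0x3000)       == 3        // byte address to page number
 *      arm_ptoa(3)            == 0x3000   // page number back to byte address
 */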

extern void flush_mmu_tlb_region(vm_offset_t va, unsigned length);

extern uint64_t get_mmu_control(void);
extern uint64_t get_aux_control(void);
extern void set_aux_control(uint64_t);
extern void set_mmu_ttb(uint64_t);
extern void set_mmu_ttb_alternate(uint64_t);
extern uint64_t get_tcr(void);
extern void set_tcr(uint64_t);
extern uint64_t pmap_get_arm64_prot(pmap_t, vm_offset_t);

#if HAS_MTE
extern bool is_mte_enabled;
extern bool panic_on_user_induced_iomd_kernel_faults;
#endif /* HAS_MTE */

extern pmap_paddr_t get_mmu_ttb(void);
extern pmap_paddr_t mmu_kvtop(vm_offset_t va);
extern pmap_paddr_t mmu_kvtop_wpreflight(vm_offset_t va);
extern pmap_paddr_t mmu_uvtop(vm_offset_t va);


/* Convert address offset to translation table index. */
#define ttel0num(a) ((a & ARM_TTE_L0_MASK) >> ARM_TT_L0_SHIFT)
#define ttel1num(a) ((a & ARM_TTE_L1_MASK) >> ARM_TT_L1_SHIFT)
#define ttel2num(a) ((a & ARM_TTE_L2_MASK) >> ARM_TT_L2_SHIFT)

#define pa_to_tte(a) ((a) & ARM_TTE_TABLE_MASK)
#define tte_to_pa(p) ((p) & ARM_TTE_TABLE_MASK)

#define pa_to_pte(a) ((a) & ARM_PTE_PAGE_MASK)
#define pte_to_pa(p) ((p) & ARM_PTE_PAGE_MASK)
#define pte_to_ap(p) (((p) & ARM_PTE_APMASK) >> ARM_PTE_APSHIFT)
#define pte_increment_pa(p) ((p) += ptoa(1))

#define TLBFLUSH_SIZE (ARM_TTE_MAX/((sizeof(unsigned int))*BYTE_SIZE))


#define pmap_cs_log(level, fmt, args...)
#define pmap_cs_log_debug(fmt, args...)
#define pmap_cs_log_info(fmt, args...)
#define pmap_cs_log_error(fmt, args...)
#define pmap_cs_log_force(level, fmt, args...)

/**
 * Wrapper struct that represents a locked entry in the PV head table.
 * This struct should only be obtained as the return value from pvh_try_lock()
 * or the pvh_lock* functions.
 */
typedef struct {
        /* The pv_head_table entry obtained from the lock operation. */
        uintptr_t pvh;
        /**
         * Token obtained from thread_priority_floor_start(), if the lock was
         * placed in sleep mode using pvh_lock_enter_sleep_mode().
         */
        thread_pri_floor_t pri_token;
        /* The index of the locked physical page. */
        unsigned int pai;
} locked_pvh_t;



/* Convert translation/page table entry to kernel virtual address. */
#define ttetokv(a) (phystokv(tte_to_pa(a)))
#define ptetokv(a) (phystokv(pte_to_pa(a)))
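
/*
 * Example of the conversion chain, assuming a valid leaf PTE (real callers
 * check the validity bits first):
 *
 *      pmap_paddr_t pa = pte_to_pa(pte);    // mask off permission/attribute bits
 *      vm_map_address_t kva = ptetokv(pte); // equivalent to phystokv(pa)
 */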

struct pmap {
        /* Pointer to the root translation table. */
        tt_entry_t *tte;

        /* Physical page of the root translation table. */
        pmap_paddr_t ttep;

        /*
         * The min and max fields represent the lowest and highest addressable VAs
         * as dictated strictly by the paging hierarchy (root level + root table size)
         * in conjunction with whether the root table is used with TTBR0, TTBR1, or VTTBR.
         * These fields do not encapsulate any higher-level address-space partitioning
         * policies.
         */

        /* Lowest supported VA (inclusive) */
        vm_map_address_t min;

        /* Highest supported VA (exclusive) */
        vm_map_address_t max;

#if ARM_PARAMETERIZED_PMAP
        /* Details about the page table layout. */
        const struct page_table_attr *pmap_pt_attr;
#endif /* ARM_PARAMETERIZED_PMAP */

        /* Ledger tracking phys mappings */
        ledger_t ledger;

        decl_lck_rw_data(, rwlock);

        /* Global list of pmaps */
        queue_chain_t pmaps;

        /* Information representing the "nested" (shared) region in this pmap. */
        vm_map_address_t nested_region_addr;
        vm_map_offset_t nested_region_size;
        vm_map_offset_t nested_region_true_start;
        vm_map_offset_t nested_region_true_end;
        union {
                struct pmap *nested_pmap;
                bitmap_t *nested_region_unnested_table_bitmap;
        };

        /* PMAP reference count */
        os_ref_atomic_t ref_count;

        union {
                /**
                 * Represents the address space identifier (ASID) for this pmap.
                 * The value 0 is reserved for the kernel pmap; this field will
                 * also be 0 for nested pmaps as those pmaps are never directly
                 * activated on a CPU. This represents a virtual ASID that
                 * is used to globally identify an address space on
                 * the system. Depending upon hardware configuration, this
                 * identifier may have a 1:1 correspondence with the hardware
                 * ASID.
                 */
                uint16_t asid;

                /**
                 * Represents the virtual machine identifier (VMID) for this pmap.
                 * The value 0 is reserved.
                 */
                uint16_t vmid;
        };

#if MACH_ASSERT
        int pmap_pid;
        char pmap_procname[17];
#endif /* MACH_ASSERT */

        bool reserved0;

        bool pmap_vm_map_cs_enforced;

        bool reserved1;
        unsigned int reserved2;
        unsigned int reserved3;

#if defined(CONFIG_ROSETTA)
        /* Whether the pmap is used for Rosetta. */
        bool is_rosetta;
#else
        bool reserved4;
#endif /* defined(CONFIG_ROSETTA) */

#if DEVELOPMENT || DEBUG
        bool footprint_suspended;
        bool footprint_was_suspended;
#endif /* DEVELOPMENT || DEBUG */

        /* Whether the No-Execute functionality is enabled. */
        bool nx_enabled;

        /* Whether this pmap represents a 64-bit address space. */
        bool is_64bit;

#if HAS_APPLE_PAC
        bool disable_jop;
#else
        bool reserved5;
#endif /* HAS_APPLE_PAC */

        bool reserved6;

#define PMAP_TYPE_USER 0     /* ordinary pmap */
#define PMAP_TYPE_KERNEL 1   /* kernel pmap */
#define PMAP_TYPE_COMMPAGE 2 /* commpage pmap */
#define PMAP_TYPE_NESTED 3   /* pmap nested within another pmap */
        uint8_t type;

        /*
         * TrustedExecutionMonitor manages its own address-space data structure,
         * and the pmap acts as the owning structure that holds it.
         */
        uint32_t reserved7[4];
        void *reserved8;
        uint8_t reserved9;

        /* The ID of the vm_map that this pmap is backing, if any. */
        vm_map_serial_t associated_vm_map_serial_id;
#if HAS_MTE
        /*
         * Whether this pmap is marked as being explicitly disallowed from
         * receiving aliases to untagged memory from other actors.
         */
        bool restrict_receiving_aliases_to_tagged_memory;
#endif /* HAS_MTE */
};

#define PMAP_VASID(pmap) ((pmap)->asid)
#define PMAP_HWASID(pmap) ((pmap)->asid & (MAX_HW_ASIDS - 1))
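
/*
 * Sketch of how the two macros relate: virtual ASIDs alias onto hardware
 * ASIDs modulo MAX_HW_ASIDS, so two pmaps may share a hardware ASID while
 * having distinct virtual ASIDs (in which case switching between them
 * requires TLB maintenance).
 */
static inline bool
pmaps_share_hw_asid_example(pmap_t a, pmap_t b)
{
        return (PMAP_HWASID(a) == PMAP_HWASID(b)) &&
            (PMAP_VASID(a) != PMAP_VASID(b));
}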

#if VM_DEBUG
extern int pmap_list_resident_pages(
        pmap_t pmap,
        vm_offset_t *listp,
        int space);
#else /* VM_DEBUG */
#define pmap_list_resident_pages(pmap, listp, space) (0)
#endif /* VM_DEBUG */

extern int copysafe(vm_map_address_t from, vm_map_address_t to, uint32_t cnt, int type, uint32_t *bytes_copied);

/* Globals shared between arm_vm_init and pmap */
extern tt_entry_t *cpu_tte;     /* First CPU's translation table (shared with kernel pmap) */
extern pmap_paddr_t cpu_ttep;   /* Physical translation table addr */

extern void *ropagetable_begin;
extern void *ropagetable_end;


extern tt_entry_t *invalid_tte;   /* Global invalid translation table */
extern pmap_paddr_t invalid_ttep; /* Physical invalid translation table addr */

#define PMAP_CONTEXT(pmap, thread)

/**
 * Platform-dependent prototypes.
 */
extern void pmap_clear_user_ttb(void);
extern void pmap_bootstrap(vm_offset_t);
extern vm_map_address_t pmap_ptov(pmap_t, ppnum_t);
extern pmap_paddr_t pmap_find_pa(pmap_t map, addr64_t va);
extern pmap_paddr_t pmap_find_pa_nofault(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys(pmap_t map, addr64_t va);
extern ppnum_t pmap_find_phys_nofault(pmap_t map, addr64_t va);
extern void pmap_switch_user(thread_t th, vm_map_t map);
extern void pmap_set_pmap(pmap_t pmap, thread_t thread);
extern void pmap_gc(void);
#if HAS_APPLE_PAC
extern void * pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern void * pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t data, uint64_t jop_key);
extern bool pmap_batch_sign_user_ptr(void *location, void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key);
#endif /* HAS_APPLE_PAC */
#if HAS_MTE
/* Inform the pmap layer that the process has tag checks enabled. */
extern void pmap_set_tag_check_enabled(pmap_t pmap);

/* Inform the pmap layer that the process has EL0 tag check faults disabled. */
extern void pmap_set_user_tag_check_faults_disabled(pmap_t pmap);
#endif /* HAS_MTE */

/**
 * Interfaces implemented as macros.
 */

#define PMAP_SWITCH_USER(th, new_map, my_cpu) pmap_switch_user((th), (new_map))

#define pmap_kernel() (kernel_pmap)

#define pmap_kernel_va(VA) \
        (((VA) >= VM_MIN_KERNEL_ADDRESS) && ((VA) <= VM_MAX_KERNEL_ADDRESS))

#define pmap_attribute(pmap, addr, size, attr, value) (KERN_INVALID_ADDRESS)

#define copyinmsg(from, to, cnt) copyin(from, to, cnt)
#define copyoutmsg(from, to, cnt) copyout(from, to, cnt)

/* Unimplemented interfaces. */
#define MACRO_NOOP
#define pmap_copy(dst_pmap, src_pmap, dst_addr, len, src_addr) MACRO_NOOP
#define pmap_pageable(pmap, start, end, pageable) MACRO_NOOP

extern pmap_paddr_t kvtophys(vm_offset_t va);
extern pmap_paddr_t kvtophys_nofail(vm_offset_t va);
extern vm_map_address_t phystokv(pmap_paddr_t pa);
extern vm_map_address_t phystokv_range(pmap_paddr_t pa, vm_size_t *max_len);

extern vm_map_address_t pmap_map(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, unsigned int flags);
extern vm_map_address_t pmap_map_high_window_bd(vm_offset_t pa, vm_size_t len, vm_prot_t prot);
extern kern_return_t pmap_map_block(pmap_t pmap, addr64_t va, ppnum_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern kern_return_t pmap_map_block_addr(pmap_t pmap, addr64_t va, pmap_paddr_t pa, uint32_t size, vm_prot_t prot, int attr, unsigned int flags);
extern void pmap_map_globals(void);

#define PMAP_MAP_BD_DEVICE 0x0
#define PMAP_MAP_BD_WCOMB 0x1
#define PMAP_MAP_BD_POSTED 0x2
#define PMAP_MAP_BD_POSTED_REORDERED 0x3
#define PMAP_MAP_BD_POSTED_COMBINED_REORDERED 0x4
#define PMAP_MAP_BD_MASK 0x7

extern vm_map_address_t pmap_map_bd_with_options(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot, int32_t options);
extern vm_map_address_t pmap_map_bd(vm_map_address_t va, vm_offset_t sa, vm_offset_t ea, vm_prot_t prot);

extern void pmap_init_pte_page(pmap_t, pt_entry_t *, vm_offset_t, unsigned int ttlevel, boolean_t alloc_ptd);

extern boolean_t pmap_valid_address(pmap_paddr_t addr);
extern void pmap_disable_NX(pmap_t pmap);
extern void pmap_set_nested(pmap_t pmap);
extern void pmap_create_commpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
    vm_map_address_t *kernel_ro_data_addr, vm_map_address_t *user_text_addr);
extern void pmap_insert_commpage(pmap_t pmap);

extern vm_offset_t pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index);
extern unsigned int pmap_map_cpu_windows_copy(ppnum_t pn, vm_prot_t prot, unsigned int wimg_bits);
extern void pmap_unmap_cpu_windows_copy(unsigned int index);

extern vm_offset_t pmap_ro_zone_align(vm_offset_t);
extern void pmap_ro_zone_memcpy(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    vm_offset_t new_data, vm_size_t new_data_size);
extern uint64_t pmap_ro_zone_atomic_op(zone_id_t zid, vm_offset_t va, vm_offset_t offset,
    uint32_t op, uint64_t value);
extern void pmap_ro_zone_bzero(zone_id_t zid, vm_offset_t va, vm_offset_t offset, vm_size_t size);

extern boolean_t pmap_valid_page(ppnum_t pn);
extern boolean_t pmap_bootloader_page(ppnum_t pn);

extern boolean_t pmap_is_empty(pmap_t pmap, vm_map_offset_t start, vm_map_offset_t end);

#if HAS_MTE || HAS_MTE_EMULATION_SHIMS
/*
 * Strip a pointer of all metadata bits based on its pmap.
 * This function returns a pointer cleared of any bit that is not part of the
 * pmap's VA size. "Cleared" is meant in the canonicalization sense: those
 * bits are replaced with the value of bit 55.
 */
extern vm_map_address_t pmap_strip_addr(pmap_t pmap, vm_map_address_t ptr);
#endif /* HAS_MTE || HAS_MTE_EMULATION_SHIMS */
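
/*
 * Sketch of the canonicalization pmap_strip_addr() performs, with the VA
 * size passed explicitly as a hypothetical va_bits parameter (the real
 * function derives it from the pmap).
 */
static inline uint64_t
pmap_strip_addr_example(uint64_t ptr, unsigned int va_bits)
{
        const uint64_t va_mask = (1ULL << va_bits) - 1;

        /* Replace every bit above the VA size with the value of bit 55. */
        return (ptr & va_mask) | (((ptr >> 55) & 1) ? ~va_mask : 0);
}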


#define ARM_PMAP_MAX_OFFSET_DEFAULT 0x01
#define ARM_PMAP_MAX_OFFSET_MIN 0x02
#define ARM_PMAP_MAX_OFFSET_MAX 0x04
#define ARM_PMAP_MAX_OFFSET_DEVICE 0x08
#define ARM_PMAP_MAX_OFFSET_JUMBO 0x10
#if XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT
#define ARM_PMAP_MAX_OFFSET_EXTRA_JUMBO 0x20
#endif /* XNU_PLATFORM_iPhoneOS && EXTENDED_USER_VA_SUPPORT */

extern vm_map_offset_t pmap_max_offset(boolean_t is64, unsigned int option);
extern vm_map_offset_t pmap_max_64bit_offset(unsigned int option);
extern vm_map_offset_t pmap_max_32bit_offset(unsigned int option);

boolean_t pmap_virtual_region(unsigned int region_select, vm_map_offset_t *startp, vm_map_size_t *size);

boolean_t pmap_enforces_execute_only(pmap_t pmap);

void pmap_abandon_measurement(void);



/* pmap dispatch indices */
#define ARM_FAST_FAULT_INDEX 0
#define ARM_FORCE_FAST_FAULT_INDEX 1
#define MAPPING_FREE_PRIME_INDEX 2
#define MAPPING_REPLENISH_INDEX 3
#define PHYS_ATTRIBUTE_CLEAR_INDEX 4
#define PHYS_ATTRIBUTE_SET_INDEX 5
#define PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX 6
#define PMAP_CHANGE_WIRING_INDEX 7
#define PMAP_CREATE_INDEX 8
#define PMAP_DESTROY_INDEX 9
#define PMAP_ENTER_OPTIONS_INDEX 10
/* #define PMAP_EXTRACT_INDEX 11 -- Not used */
#define PMAP_FIND_PA_INDEX 12
#define PMAP_INSERT_COMMPAGE_INDEX 13
#define PMAP_IS_EMPTY_INDEX 14
#define PMAP_MAP_CPU_WINDOWS_COPY_INDEX 15
#define PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX 16
#define PMAP_NEST_INDEX 17
#define PMAP_PAGE_PROTECT_OPTIONS_INDEX 18
#define PMAP_PROTECT_OPTIONS_INDEX 19
#define PMAP_QUERY_PAGE_INFO_INDEX 20
#define PMAP_QUERY_RESIDENT_INDEX 21
#define PMAP_REFERENCE_INDEX 22
#define PMAP_REMOVE_OPTIONS_INDEX 23
#define PMAP_SET_CACHE_ATTRIBUTES_INDEX 25
#define PMAP_SET_NESTED_INDEX 26
#define PMAP_SET_PROCESS_INDEX 27
#define PMAP_SWITCH_INDEX 28
#define PMAP_SWITCH_USER_TTB_INDEX 29
#define PMAP_CLEAR_USER_TTB_INDEX 30
#define PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX 31
#define PMAP_UNNEST_OPTIONS_INDEX 32
#define PMAP_FOOTPRINT_SUSPEND_INDEX 33
#define PMAP_CPU_DATA_INIT_INDEX 34
#define PMAP_RELEASE_PAGES_TO_KERNEL_INDEX 35
#define PMAP_SET_JIT_ENTITLED_INDEX 36


#define PMAP_UPDATE_COMPRESSOR_PAGE_INDEX 55
#define PMAP_TRIM_INDEX 56
#define PMAP_LEDGER_VERIFY_SIZE_INDEX 57
#define PMAP_LEDGER_ALLOC_INDEX 58
#define PMAP_LEDGER_FREE_INDEX 59

#if HAS_APPLE_PAC
#define PMAP_SIGN_USER_PTR 60
#define PMAP_AUTH_USER_PTR 61
#endif /* HAS_APPLE_PAC */

#define PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX 66


#if __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG))
#define PMAP_DISABLE_USER_JOP_INDEX 69
#endif /* __has_feature(ptrauth_calls) && (defined(XNU_TARGET_OS_OSX) || (DEVELOPMENT || DEBUG)) */



#define PMAP_SET_VM_MAP_CS_ENFORCED_INDEX 72

#define PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX 73
#define PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX 74
#define PMAP_NOP_INDEX 75

#define PMAP_RO_ZONE_MEMCPY_INDEX 76
#define PMAP_RO_ZONE_ATOMIC_OP_INDEX 77

#if DEVELOPMENT || DEBUG
#define PMAP_TEST_TEXT_CORRUPTION_INDEX 79
#endif /* DEVELOPMENT || DEBUG */



#define PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX 84
#define PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX 85



#define PMAP_RO_ZONE_BZERO_INDEX 90




#define PMAP_SET_TPRO_INDEX 98

#define PMAP_COUNT 99

/**
 * Value used when initializing pmap per-cpu data to denote that the structure
 * hasn't been initialized with its associated CPU number yet.
 */
#define PMAP_INVALID_CPU_NUM (~0U)

/**
 * Align the pmap per-cpu data to the L2 cache line size for each individual
 * CPU's data. This prevents false sharing, where accesses from one CPU affect
 * another, especially when atomically updating fields.
 */
struct pmap_cpu_data_array_entry {
        pmap_cpu_data_t cpu_data;
} __attribute__((aligned(MAX_L2_CLINE_BYTES)));

/* Initialize the pmap per-CPU data for the current CPU. */
extern void pmap_cpu_data_init(void);

/* Get the pmap per-CPU data for the current CPU. */
extern pmap_cpu_data_t *pmap_get_cpu_data(void);

/* Get the pmap per-CPU data for an arbitrary CPU. */
extern pmap_cpu_data_t *pmap_get_remote_cpu_data(unsigned int cpu);

/*
 * For long-running PV list operations, we pick a reasonable maximum chunk size
 * beyond which we will exit to preemptible context to avoid excessive preemption
 * latency and PVH lock timeouts.
 */
#define PMAP_MAX_PV_LIST_CHUNK_SIZE 64

/*
 * For most batched page operations, we pick a sane default page count
 * interval at which to check for pending preemption and exit the PPL if found.
 */
#define PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL 64

static inline bool
_pmap_pending_preemption_real(void)
{
        return !!(*((volatile ast_t*)ast_pending()) & AST_URGENT);
}

#if SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT)
bool pmap_pending_preemption(void); // more complicated, so externally defined
#else /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
#define pmap_pending_preemption _pmap_pending_preemption_real
#endif /* SCHED_HYGIENE_DEBUG && (DEBUG || DEVELOPMENT) */
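
/*
 * Sketch of how the preemption-check interval is meant to be used: a batched
 * page operation (the loop body and page count here are hypothetical) checks
 * for pending urgent ASTs once per interval rather than on every iteration.
 */
static inline void
pmap_batched_op_example(unsigned int npages)
{
        for (unsigned int i = 0; i < npages; i++) {
                /* ...operate on page i... */
                if (((i + 1) % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL == 0) &&
                    pmap_pending_preemption()) {
                        break; /* Exit so the caller can re-enter preemptible context. */
                }
        }
}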

#define MARK_AS_PMAP_TEXT
#define MARK_AS_PMAP_DATA
#define MARK_AS_PMAP_RODATA

extern void pmap_nop(pmap_t);

extern lck_grp_t pmap_lck_grp;

extern void CleanPoC_DcacheRegion_Force_nopreempt_nohid_nobarrier(vm_offset_t va, size_t length);

#define pmap_force_dcache_clean(va, sz) CleanPoC_DcacheRegion_Force(va, sz)
#define pmap_simple_lock(l) simple_lock(l, &pmap_lck_grp)
#define pmap_simple_unlock(l) simple_unlock(l)
#define pmap_simple_lock_try(l) simple_lock_try(l, &pmap_lck_grp)
#define pmap_simple_lock_assert(l, t) simple_lock_assert(l, t)

#if DEVELOPMENT || DEBUG
extern kern_return_t pmap_test_text_corruption(pmap_paddr_t);
#endif /* DEVELOPMENT || DEBUG */

/* Check if a page has any mappings. */
extern bool pmap_is_page_free(pmap_paddr_t paddr);

#endif /* #ifndef ASSEMBLER */

#if __ARM_KERNEL_PROTECT__
/*
 * The exception vector mappings start at the middle of the kernel page table
 * range (so that the EL0 mapping can be located at the base of the range).
 * The expression below is the two's-complement negation of half the root
 * table's VA span, i.e., the address of that midpoint.
 */
#define ARM_KERNEL_PROTECT_EXCEPTION_START ((~((ARM_TT_ROOT_SIZE + ARM_TT_ROOT_INDEX_MASK) / 2ULL)) + 1ULL)
#endif /* __ARM_KERNEL_PROTECT__ */

#endif /* #ifndef _ARM_PMAP_H_ */