/*
 * Copyright (c) 2011-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <mach_assert.h>
#include <mach_ldebug.h>

#include <mach/shared_region.h>
#include <mach/vm_param.h>
#include <mach/vm_prot.h>
#include <mach/vm_map.h>
#include <mach/machine/vm_param.h>
#include <mach/machine/vm_types.h>

#include <mach/boolean.h>
#include <kern/bits.h>
#include <kern/thread.h>
#include <kern/sched.h>
#include <kern/zalloc.h>
#include <kern/zalloc_internal.h>
#include <kern/kalloc.h>
#include <kern/spl.h>
#include <kern/startup.h>
#include <kern/trustcache.h>

#include <os/overflow.h>

#include <vm/pmap.h>
#include <vm/pmap_cs.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/cpm.h>

#include <libkern/img4/interface.h>
#include <libkern/section_keywords.h>
#include <sys/errno.h>

#include <machine/atomic.h>
#include <machine/thread.h>
#include <machine/lowglobals.h>

#include <arm/caches_internal.h>
#include <arm/cpu_data.h>
#include <arm/cpu_data_internal.h>
#include <arm/cpu_capabilities.h>
#include <arm/cpu_number.h>
#include <arm/machine_cpu.h>
#include <arm/misc_protos.h>
#include <arm/pmap/pmap_internal.h>
#include <arm/trap.h>

#if (__ARM_VMSA__ > 7)
#include <arm64/proc_reg.h>
#include <pexpert/arm64/boot.h>
#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#include <arm64/amcc_rorgn.h>
#endif // defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
#endif

#include <pexpert/device_tree.h>

#include <san/kasan.h>
#include <sys/cdefs.h>

#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#endif

#ifdef CONFIG_XNUPOST
#include <tests/xnupost.h>
#endif

#if HIBERNATION
#include <IOKit/IOHibernatePrivate.h>
#endif /* HIBERNATION */

#ifdef __ARM64_PMAP_SUBPAGE_L1__
#if (__ARM_VMSA__ <= 7)
#error This is not supported for old-style page tables
#endif
#define PMAP_ROOT_ALLOC_SIZE (((ARM_TT_L1_INDEX_MASK >> ARM_TT_L1_SHIFT) + 1) * sizeof(tt_entry_t))
#else
#if (__ARM_VMSA__ <= 7)
#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES * 2)
#else
#define PMAP_ROOT_ALLOC_SIZE (ARM_PGBYTES)
#endif
#endif

extern u_int32_t random(void);	/* from <libkern/libkern.h> */

static bool alloc_asid(pmap_t pmap);
static void free_asid(pmap_t pmap);
static void flush_mmu_tlb_region_asid_async(vm_offset_t va, size_t length, pmap_t pmap, bool last_level_only);
static void flush_mmu_tlb_full_asid_async(pmap_t pmap);
static pt_entry_t wimg_to_pte(unsigned int wimg, pmap_paddr_t pa);

static const struct page_table_ops native_pt_ops =
{
	.alloc_id = alloc_asid,
	.free_id = free_asid,
	.flush_tlb_region_async = flush_mmu_tlb_region_asid_async,
	.flush_tlb_async = flush_mmu_tlb_full_asid_async,
	.wimg_to_pte = wimg_to_pte,
};
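
/*
 * Illustrative sketch (not a new API): through this ops table, generic pmap
 * code can flush TLBs without knowing which page-table flavor backs the
 * pmap, exactly as PMAP_UPDATE_TLBS below does:
 *
 *	pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, len, pmap, true);
 *	sync_tlb_flush();
 */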

#if (__ARM_VMSA__ > 7)
const struct page_table_level_info pmap_table_level_info_16k[] =
{
	[0] = {
		.size = ARM_16K_TT_L0_SIZE,
		.offmask = ARM_16K_TT_L0_OFFMASK,
		.shift = ARM_16K_TT_L0_SHIFT,
		.index_mask = ARM_16K_TT_L0_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[1] = {
		.size = ARM_16K_TT_L1_SIZE,
		.offmask = ARM_16K_TT_L1_OFFMASK,
		.shift = ARM_16K_TT_L1_SHIFT,
		.index_mask = ARM_16K_TT_L1_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[2] = {
		.size = ARM_16K_TT_L2_SIZE,
		.offmask = ARM_16K_TT_L2_OFFMASK,
		.shift = ARM_16K_TT_L2_SHIFT,
		.index_mask = ARM_16K_TT_L2_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[3] = {
		.size = ARM_16K_TT_L3_SIZE,
		.offmask = ARM_16K_TT_L3_OFFMASK,
		.shift = ARM_16K_TT_L3_SHIFT,
		.index_mask = ARM_16K_TT_L3_INDEX_MASK,
		.valid_mask = ARM_PTE_TYPE_VALID,
		.type_mask = ARM_PTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_L3BLOCK
	}
};

const struct page_table_level_info pmap_table_level_info_4k[] =
{
	[0] = {
		.size = ARM_4K_TT_L0_SIZE,
		.offmask = ARM_4K_TT_L0_OFFMASK,
		.shift = ARM_4K_TT_L0_SHIFT,
		.index_mask = ARM_4K_TT_L0_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[1] = {
		.size = ARM_4K_TT_L1_SIZE,
		.offmask = ARM_4K_TT_L1_OFFMASK,
		.shift = ARM_4K_TT_L1_SHIFT,
		.index_mask = ARM_4K_TT_L1_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[2] = {
		.size = ARM_4K_TT_L2_SIZE,
		.offmask = ARM_4K_TT_L2_OFFMASK,
		.shift = ARM_4K_TT_L2_SHIFT,
		.index_mask = ARM_4K_TT_L2_INDEX_MASK,
		.valid_mask = ARM_TTE_VALID,
		.type_mask = ARM_TTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_BLOCK
	},
	[3] = {
		.size = ARM_4K_TT_L3_SIZE,
		.offmask = ARM_4K_TT_L3_OFFMASK,
		.shift = ARM_4K_TT_L3_SHIFT,
		.index_mask = ARM_4K_TT_L3_INDEX_MASK,
		.valid_mask = ARM_PTE_TYPE_VALID,
		.type_mask = ARM_PTE_TYPE_MASK,
		.type_block = ARM_TTE_TYPE_L3BLOCK
	}
};

const struct page_table_attr pmap_pt_attr_4k = {
	.pta_level_info = pmap_table_level_info_4k,
	.pta_root_level = (T0SZ_BOOT - 16) / 9,
#if __ARM_MIXED_PAGE_SIZE__
	.pta_commpage_level = PMAP_TT_L2_LEVEL,
#else /* __ARM_MIXED_PAGE_SIZE__ */
#if __ARM_16K_PG__
	.pta_commpage_level = PMAP_TT_L2_LEVEL,
#else /* __ARM_16K_PG__ */
	.pta_commpage_level = PMAP_TT_L1_LEVEL,
#endif /* __ARM_16K_PG__ */
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	.pta_max_level = PMAP_TT_L3_LEVEL,
	.pta_ops = &native_pt_ops,
	.ap_ro = ARM_PTE_AP(AP_RORO),
	.ap_rw = ARM_PTE_AP(AP_RWRW),
	.ap_rona = ARM_PTE_AP(AP_RONA),
	.ap_rwna = ARM_PTE_AP(AP_RWNA),
	.ap_xn = ARM_PTE_PNX | ARM_PTE_NX,
	.ap_x = ARM_PTE_PNX,
#if __ARM_MIXED_PAGE_SIZE__
	.pta_tcr_value = TCR_EL1_4KB,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	.pta_page_size = 4096,
	.pta_page_shift = 12,
};
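
/*
 * Worked example (illustrative; assumes T0SZ_BOOT == 25): a 4K granule
 * resolves 9 index bits per level, so pta_root_level = (25 - 16) / 9 = 1,
 * i.e. walks start at L1, whose 512 entries cover the 39-bit VA space.
 */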

const struct page_table_attr pmap_pt_attr_16k = {
	.pta_level_info = pmap_table_level_info_16k,
	.pta_root_level = PMAP_TT_L1_LEVEL,
	.pta_commpage_level = PMAP_TT_L2_LEVEL,
	.pta_max_level = PMAP_TT_L3_LEVEL,
	.pta_ops = &native_pt_ops,
	.ap_ro = ARM_PTE_AP(AP_RORO),
	.ap_rw = ARM_PTE_AP(AP_RWRW),
	.ap_rona = ARM_PTE_AP(AP_RONA),
	.ap_rwna = ARM_PTE_AP(AP_RWNA),
	.ap_xn = ARM_PTE_PNX | ARM_PTE_NX,
	.ap_x = ARM_PTE_PNX,
#if __ARM_MIXED_PAGE_SIZE__
	.pta_tcr_value = TCR_EL1_16KB,
#endif /* __ARM_MIXED_PAGE_SIZE__ */
	.pta_page_size = 16384,
	.pta_page_shift = 14,
};

#if __ARM_16K_PG__
const struct page_table_attr * const native_pt_attr = &pmap_pt_attr_16k;
#else /* !__ARM_16K_PG__ */
const struct page_table_attr * const native_pt_attr = &pmap_pt_attr_4k;
#endif /* !__ARM_16K_PG__ */

#else /* (__ARM_VMSA__ > 7) */
/*
 * We don't support pmap parameterization for VMSA7, so use an opaque
 * page_table_attr structure.
 */
const struct page_table_attr * const native_pt_attr = NULL;
#endif /* (__ARM_VMSA__ > 7) */

static inline void
pmap_sync_tlb(bool strong __unused)
{
	sync_tlb_flush();
}

#if MACH_ASSERT
int vm_footprint_suspend_allowed = 1;

extern int pmap_ledgers_panic;
extern int pmap_ledgers_panic_leeway;

#endif /* MACH_ASSERT */

#if DEVELOPMENT || DEBUG
#define PMAP_FOOTPRINT_SUSPENDED(pmap) \
	(current_thread()->pmap_footprint_suspended)
#else /* DEVELOPMENT || DEBUG */
#define PMAP_FOOTPRINT_SUSPENDED(pmap) (FALSE)
#endif /* DEVELOPMENT || DEBUG */

#ifdef PLATFORM_BridgeOS
static struct pmap_legacy_trust_cache *pmap_legacy_trust_caches MARK_AS_PMAP_DATA = NULL;
#endif
static struct pmap_image4_trust_cache *pmap_image4_trust_caches MARK_AS_PMAP_DATA = NULL;

MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_loaded_trust_caches_lock, 0);

SECURITY_READ_ONLY_LATE(int) srd_fused = 0;

/*
 * Represents a TLB range that will be flushed before exiting the PPL.
 * Used by phys_attribute_clear_range to defer flushing pages in this
 * range until the end of the operation.
 */
typedef struct pmap_tlb_flush_range {
	pmap_t ptfr_pmap;
	vm_map_address_t ptfr_start;
	vm_map_address_t ptfr_end;
	bool ptfr_flush_needed;
} pmap_tlb_flush_range_t;
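
/*
 * Illustrative lifecycle (hypothetical caller): the batched operation fills
 * in the range up front, per-page work sets ptfr_flush_needed whenever a
 * live PTE is modified, and one flush is issued at the end:
 *
 *	pmap_tlb_flush_range_t r = {
 *		.ptfr_pmap = pmap, .ptfr_start = start, .ptfr_end = end,
 *	};
 *	// ... modify PTEs, setting r.ptfr_flush_needed as required ...
 *	if (r.ptfr_flush_needed) {
 *		pmap_get_pt_ops(r.ptfr_pmap)->flush_tlb_region_async(r.ptfr_start,
 *		    (size_t)(r.ptfr_end - r.ptfr_start), r.ptfr_pmap, true);
 *		sync_tlb_flush();
 *	}
 */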

#if XNU_MONITOR
/*
 * PPL External References.
 */
extern vm_offset_t segPPLDATAB;
extern unsigned long segSizePPLDATA;
extern vm_offset_t segPPLTEXTB;
extern unsigned long segSizePPLTEXT;
extern vm_offset_t segPPLDATACONSTB;
extern unsigned long segSizePPLDATACONST;

/*
 * PPL Global Variables
 */

#if (DEVELOPMENT || DEBUG) || CONFIG_CSR_FROM_DT
/* Indicates if the PPL will enforce mapping policies; set by -unsafe_kernel_text */
SECURITY_READ_ONLY_LATE(boolean_t) pmap_ppl_disable = FALSE;
#else
const boolean_t pmap_ppl_disable = FALSE;
#endif

/*
 * Indicates if the PPL has started applying APRR.
 * This variable is accessed from various assembly trampolines, so be sure to change
 * those if you change the size or layout of this variable.
 */
boolean_t pmap_ppl_locked_down MARK_AS_PMAP_DATA = FALSE;

extern void *pmap_stacks_start;
extern void *pmap_stacks_end;

#endif /* XNU_MONITOR */

/* Virtual memory region for early allocation */
#if (__ARM_VMSA__ == 7)
#define VREGION1_HIGH_WINDOW (0)
#else
#define VREGION1_HIGH_WINDOW (PE_EARLY_BOOT_VA)
#endif
#define VREGION1_START ((VM_MAX_KERNEL_ADDRESS & CPUWINDOWS_BASE_MASK) - VREGION1_HIGH_WINDOW)
#define VREGION1_SIZE (trunc_page(VM_MAX_KERNEL_ADDRESS - (VREGION1_START)))

extern uint8_t bootstrap_pagetables[];

extern unsigned int not_in_kdp;

extern vm_offset_t first_avail;

extern vm_offset_t virtual_space_start;	/* Next available kernel VA */
extern vm_offset_t virtual_space_end;	/* End of kernel address space */
extern vm_offset_t static_memory_end;

extern const vm_map_address_t physmap_base;
extern const vm_map_address_t physmap_end;

extern int maxproc, hard_maxproc;

vm_address_t MARK_AS_PMAP_DATA image4_slab = 0;
vm_address_t MARK_AS_PMAP_DATA image4_late_slab = 0;

#if (__ARM_VMSA__ > 7)
/* The number of address bits one TTBR can cover. */
#define PGTABLE_ADDR_BITS (64ULL - T0SZ_BOOT)

/*
 * The bounds on our TTBRs. These are for sanity checking that
 * an address is accessible by a TTBR before we attempt to map it.
 */

/* The level of the root of a page table. */
const uint64_t arm64_root_pgtable_level = (3 - ((PGTABLE_ADDR_BITS - 1 - ARM_PGSHIFT) / (ARM_PGSHIFT - TTE_SHIFT)));

/* The number of entries in the root TT of a page table. */
const uint64_t arm64_root_pgtable_num_ttes = (2 << ((PGTABLE_ADDR_BITS - 1 - ARM_PGSHIFT) % (ARM_PGSHIFT - TTE_SHIFT)));
#else
const uint64_t arm64_root_pgtable_level = 0;
const uint64_t arm64_root_pgtable_num_ttes = 0;
#endif
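
/*
 * Worked example (illustrative; assumes 4K pages with T0SZ_BOOT == 25, so
 * PGTABLE_ADDR_BITS == 39, ARM_PGSHIFT == 12, and 9 index bits per level):
 * (39 - 1 - 12) / 9 == 2, so arm64_root_pgtable_level == 3 - 2 == 1, and
 * arm64_root_pgtable_num_ttes == 2 << (26 % 9) == 2 << 8 == 512 entries.
 */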

struct pmap kernel_pmap_store MARK_AS_PMAP_DATA;
const pmap_t kernel_pmap = &kernel_pmap_store;

static SECURITY_READ_ONLY_LATE(zone_t) pmap_zone;	/* zone of pmap structures */

MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmaps_lock, 0);
MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(tt1_lock, 0);
unsigned int pmap_stamp MARK_AS_PMAP_DATA;
queue_head_t map_pmap_list MARK_AS_PMAP_DATA;

typedef struct tt_free_entry {
	struct tt_free_entry *next;
} tt_free_entry_t;

#define TT_FREE_ENTRY_NULL ((tt_free_entry_t *) 0)

tt_free_entry_t *free_page_size_tt_list MARK_AS_PMAP_DATA;
unsigned int free_page_size_tt_count MARK_AS_PMAP_DATA;
unsigned int free_page_size_tt_max MARK_AS_PMAP_DATA;
#define FREE_PAGE_SIZE_TT_MAX 4
tt_free_entry_t *free_two_page_size_tt_list MARK_AS_PMAP_DATA;
unsigned int free_two_page_size_tt_count MARK_AS_PMAP_DATA;
unsigned int free_two_page_size_tt_max MARK_AS_PMAP_DATA;
#define FREE_TWO_PAGE_SIZE_TT_MAX 4
tt_free_entry_t *free_tt_list MARK_AS_PMAP_DATA;
unsigned int free_tt_count MARK_AS_PMAP_DATA;
unsigned int free_tt_max MARK_AS_PMAP_DATA;

boolean_t pmap_gc_allowed MARK_AS_PMAP_DATA = TRUE;
boolean_t pmap_gc_forced MARK_AS_PMAP_DATA = FALSE;
boolean_t pmap_gc_allowed_by_time_throttle = TRUE;

unsigned int inuse_user_ttepages_count MARK_AS_PMAP_DATA = 0;	/* non-root, non-leaf user pagetable pages, in units of PAGE_SIZE */
unsigned int inuse_user_ptepages_count MARK_AS_PMAP_DATA = 0;	/* leaf user pagetable pages, in units of PAGE_SIZE */
unsigned int inuse_user_tteroot_count MARK_AS_PMAP_DATA = 0;	/* root user pagetables, in units of PMAP_ROOT_ALLOC_SIZE */
unsigned int inuse_kernel_ttepages_count MARK_AS_PMAP_DATA = 0;	/* non-root, non-leaf kernel pagetable pages, in units of PAGE_SIZE */
unsigned int inuse_kernel_ptepages_count MARK_AS_PMAP_DATA = 0;	/* leaf kernel pagetable pages, in units of PAGE_SIZE */
unsigned int inuse_kernel_tteroot_count MARK_AS_PMAP_DATA = 0;	/* root kernel pagetables, in units of PMAP_ROOT_ALLOC_SIZE */

SECURITY_READ_ONLY_LATE(tt_entry_t *) invalid_tte = 0;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) invalid_ttep = 0;

SECURITY_READ_ONLY_LATE(tt_entry_t *) cpu_tte = 0;	/* set by arm_vm_init() - keep out of bss */
SECURITY_READ_ONLY_LATE(pmap_paddr_t) cpu_ttep = 0;	/* set by arm_vm_init() - phys tte addr */

/* Lock group used for all pmap object locks. */
lck_grp_t pmap_lck_grp MARK_AS_PMAP_DATA;

#if DEVELOPMENT || DEBUG
int nx_enabled = 1;	/* enable no-execute protection */
int allow_data_exec = 0;	/* No apps may execute data */
int allow_stack_exec = 0;	/* No apps may execute from the stack */
unsigned long pmap_asid_flushes MARK_AS_PMAP_DATA = 0;
unsigned long pmap_asid_hits MARK_AS_PMAP_DATA = 0;
unsigned long pmap_asid_misses MARK_AS_PMAP_DATA = 0;
#else /* DEVELOPMENT || DEBUG */
const int nx_enabled = 1;	/* enable no-execute protection */
const int allow_data_exec = 0;	/* No apps may execute data */
const int allow_stack_exec = 0;	/* No apps may execute from the stack */
#endif /* DEVELOPMENT || DEBUG */

/**
 * This variable is set to true during hibernation entry to protect pmap data
 * structures during image copying, and reset to false on hibernation exit.
 */
bool hib_entry_pmap_lockdown MARK_AS_PMAP_DATA = false;

#if MACH_ASSERT
static void pmap_check_ledgers(pmap_t pmap);
#else
static inline void
pmap_check_ledgers(__unused pmap_t pmap)
{
}
#endif /* MACH_ASSERT */

/**
 * This helper function ensures that potentially-long-running batched PPL operations are
 * called in preemptible context before entering the PPL, so that the PPL call may
 * periodically exit to allow pending urgent ASTs to be taken.
 */
static inline void
pmap_verify_preemptible(void)
{
	assert(preemption_enabled() || (startup_phase < STARTUP_SUB_EARLY_BOOT));
}

SIMPLE_LOCK_DECLARE(phys_backup_lock, 0);

SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_first_phys = (pmap_paddr_t) 0;
SECURITY_READ_ONLY_LATE(pmap_paddr_t) vm_last_phys = (pmap_paddr_t) 0;

SECURITY_READ_ONLY_LATE(boolean_t) pmap_initialized = FALSE;	/* Has pmap_init completed? */

SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm_pmap_max_offset_default = 0x0;
#if defined(__arm64__)
# ifdef XNU_TARGET_OS_OSX
SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = MACH_VM_MAX_ADDRESS;
# else
SECURITY_READ_ONLY_LATE(vm_map_offset_t) arm64_pmap_max_offset_default = 0x0;
# endif
#endif /* __arm64__ */

#if PMAP_PANIC_DEV_WIMG_ON_MANAGED && (DEVELOPMENT || DEBUG)
SECURITY_READ_ONLY_LATE(boolean_t) pmap_panic_dev_wimg_on_managed = TRUE;
#else
SECURITY_READ_ONLY_LATE(boolean_t) pmap_panic_dev_wimg_on_managed = FALSE;
#endif

MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(asid_lock, 0);
SECURITY_READ_ONLY_LATE(uint32_t) pmap_max_asids = 0;
SECURITY_READ_ONLY_LATE(int) pmap_asid_plru = 1;
SECURITY_READ_ONLY_LATE(uint16_t) asid_chunk_size = 0;
SECURITY_READ_ONLY_LATE(static bitmap_t*) asid_bitmap;
static bitmap_t asid_plru_bitmap[BITMAP_LEN(MAX_HW_ASIDS)] MARK_AS_PMAP_DATA;
static uint64_t asid_plru_generation[BITMAP_LEN(MAX_HW_ASIDS)] MARK_AS_PMAP_DATA = {0};
static uint64_t asid_plru_gencount MARK_AS_PMAP_DATA = 0;

#if (__ARM_VMSA__ > 7)
#if __ARM_MIXED_PAGE_SIZE__
SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap_4k;
#endif
SECURITY_READ_ONLY_LATE(pmap_t) sharedpage_pmap_default;
#endif

/* PTE Define Macros */

#define pte_is_wired(pte) \
	(((pte) & ARM_PTE_WIRED_MASK) == ARM_PTE_WIRED)

#define pte_was_writeable(pte) \
	(((pte) & ARM_PTE_WRITEABLE) == ARM_PTE_WRITEABLE)

#define pte_set_was_writeable(pte, was_writeable) \
	do { \
		if ((was_writeable)) { \
			(pte) |= ARM_PTE_WRITEABLE; \
		} else { \
			(pte) &= ~ARM_PTE_WRITEABLE; \
		} \
	} while(0)
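
/*
 * Sketch (illustrative; macro/field names assumed from elsewhere in the
 * pmap): when a writable mapping is temporarily made read-only, e.g. for
 * copy-on-write or modified-bit emulation, the software bit records the
 * original intent so the fast-fault path can later restore write access:
 *
 *	pt_entry_t tmplate = *pte_p;
 *	tmplate = (tmplate & ~ARM_PTE_APMASK) | pt_attr->ap_ro;
 *	pte_set_was_writeable(tmplate, true);	// remembered for arm_fast_fault
 */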

static inline void
pte_set_wired(pmap_t pmap, pt_entry_t *ptep, boolean_t wired)
{
	if (wired) {
		*ptep |= ARM_PTE_WIRED;
	} else {
		*ptep &= ~ARM_PTE_WIRED;
	}
	/*
	 * Do not track wired page count for kernel pagetable pages. Kernel mappings are
	 * not guaranteed to have PTDs in the first place, and kernel pagetable pages are
	 * never reclaimed.
	 */
	if (pmap == kernel_pmap) {
		return;
	}
	unsigned short *ptd_wiredcnt_ptr;
	ptd_wiredcnt_ptr = &(ptep_get_info(ptep)->wiredcnt);
	if (wired) {
		os_atomic_add(ptd_wiredcnt_ptr, (unsigned short)1, relaxed);
	} else {
		unsigned short prev_wired = os_atomic_sub_orig(ptd_wiredcnt_ptr, (unsigned short)1, relaxed);
		if (__improbable(prev_wired == 0)) {
			panic("pmap %p (pte %p): wired count underflow", pmap, ptep);
		}
	}
}

#define PMAP_UPDATE_TLBS(pmap, s, e, strong, last_level_only) { \
	pmap_get_pt_ops(pmap)->flush_tlb_region_async(s, (size_t)((e) - (s)), pmap, last_level_only); \
	pmap_sync_tlb(strong); \
}

/*
 * Synchronize updates to PTEs that were previously invalid or had the AF bit cleared,
 * therefore not requiring TLBI. Use a store-load barrier to ensure subsequent loads
 * will observe the updated PTE.
 */
#define FLUSH_PTE() \
	__builtin_arm_dmb(DMB_ISH);

/*
 * Synchronize updates to PTEs that were previously valid and thus may be cached in
 * TLBs. DSB is required to ensure the PTE stores have completed prior to the ensuing
 * TLBI. This should only require a store-store barrier, as subsequent accesses in
 * program order will not issue until the DSB completes. Prior loads may be reordered
 * after the barrier, but their behavior should not be materially affected by the
 * reordering. For fault-driven PTE updates such as COW, PTE contents should not
 * matter for loads until the access is re-driven well after the TLB update is
 * synchronized. For "involuntary" PTE access restriction due to paging lifecycle,
 * we should be in a position to handle access faults. For "voluntary" PTE access
 * restriction due to unmapping or protection, the decision to restrict access should
 * have a data dependency on prior loads in order to avoid a data race.
 */
#define FLUSH_PTE_STRONG() \
	__builtin_arm_dsb(DSB_ISHST);

/**
 * Write enough page table entries to map a single VM page. On systems where the
 * VM page size does not match the hardware page size, multiple page table
 * entries will need to be written.
 *
 * @note This function does not emit a barrier to ensure these page table writes
 *       have completed before continuing; callers commonly need one. When a DMB
 *       or DSB barrier is needed, use the write_pte() and write_pte_strong()
 *       functions respectively instead of this one.
 *
 * @param ptep Pointer to the first page table entry to update.
 * @param pte The value to write into each page table entry. In the case that
 *            multiple PTEs are updated to a non-empty value, then the address
 *            in this value will automatically be incremented for each PTE
 *            write.
 */
static void
write_pte_fast(pt_entry_t *ptep, pt_entry_t pte)
{
	/**
	 * The PAGE_SHIFT (and in turn, the PAGE_RATIO) can vary on some systems,
	 * which is why it's checked at runtime instead of at compile time. The
	 * "unreachable" warning needs to be suppressed because it still is a
	 * compile-time constant on other systems.
	 */
	__unreachable_ok_push
	if (TEST_PAGE_RATIO_4) {
		if (((uintptr_t)ptep) & 0x1f) {
			panic("%s: PTE write is unaligned, ptep=%p, pte=%p",
			    __func__, ptep, (void*)pte);
		}

		if ((pte & ~ARM_PTE_COMPRESSED_MASK) == ARM_PTE_EMPTY) {
			/**
			 * If we're writing an empty/compressed PTE value, then don't
			 * auto-increment the address for each PTE write.
			 */
			*ptep = pte;
			*(ptep + 1) = pte;
			*(ptep + 2) = pte;
			*(ptep + 3) = pte;
		} else {
			*ptep = pte;
			*(ptep + 1) = pte | 0x1000;
			*(ptep + 2) = pte | 0x2000;
			*(ptep + 3) = pte | 0x3000;
		}
	} else {
		*ptep = pte;
	}
	__unreachable_ok_pop
}
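
/*
 * Example (illustrative; "attrs" is a placeholder for real PTE attribute
 * bits): with TEST_PAGE_RATIO_4, i.e. 16K VM pages backed by 4K hardware
 * pages, a single call writes all four hardware PTEs:
 *
 *	write_pte_fast(ptep, pa_to_pte(pa) | attrs);
 *	// stores mappings for pa, pa + 0x1000, pa + 0x2000, pa + 0x3000
 */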

/**
 * Writes enough page table entries to map a single VM page and then ensures
 * those writes complete by executing a Data Memory Barrier.
 *
 * @note The DMB issued by this function is not strong enough to protect against
 *       TLB invalidates from being reordered above the PTE writes. If a TLBI
 *       instruction is going to immediately be called after this write, it's
 *       recommended to call write_pte_strong() instead of this function.
 *
 * See the function header for write_pte_fast() for more details on the
 * parameters.
 */
void
write_pte(pt_entry_t *ptep, pt_entry_t pte)
{
	write_pte_fast(ptep, pte);
	FLUSH_PTE();
}

/**
 * Writes enough page table entries to map a single VM page and then ensures
 * those writes complete by executing a Data Synchronization Barrier. This
 * barrier provides stronger guarantees than the DMB executed by write_pte().
 *
 * @note This function is useful if you're going to immediately flush the TLB
 *       after making the PTE write. A DSB is required to protect against the
 *       TLB invalidate being reordered before the PTE write.
 *
 * See the function header for write_pte_fast() for more details on the
 * parameters.
 */
static void
write_pte_strong(pt_entry_t *ptep, pt_entry_t pte)
{
	write_pte_fast(ptep, pte);
	FLUSH_PTE_STRONG();
}
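
/*
 * Choosing a variant (illustrative summary of the three writers above):
 *
 *	write_pte_fast(ptep, pte);	// caller batches and issues its own barrier
 *	write_pte(ptep, pte);		// + DMB: publish the PTE, no TLBI planned
 *	write_pte_strong(ptep, pte);	// + DSB: safe to issue a TLBI immediately
 */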

/**
 * Retrieve the pmap structure for the thread running on the current CPU.
 */
pmap_t
current_pmap()
{
	const pmap_t current = vm_map_pmap(current_thread()->map);

	assert(current != NULL);

#if XNU_MONITOR
	/**
	 * On PPL-enabled systems, it's important that PPL policy decisions aren't
	 * decided by kernel-writable memory. This function is used in various parts
	 * of the PPL, and besides validating that the pointer returned by this
	 * function is indeed a pmap structure, it's also important to ensure that
	 * it's actually the current thread's pmap. This is because different pmaps
	 * will have access to different entitlements based on the code signature of
	 * their loaded process. So if a different user pmap is set in the current
	 * thread structure (in an effort to bypass code signing restrictions), even
	 * though the structure would validate correctly as it is a real pmap
	 * structure, it should fail here.
	 *
	 * This only needs to occur for user pmaps because the kernel pmap's root
	 * page table is always the same as TTBR1 (it's set during bootstrap and not
	 * changed so it'd be redundant to check), and its code signing fields are
	 * always set to NULL. The PMAP CS logic won't operate on the kernel pmap so
	 * it shouldn't be possible to set those fields. Due to that, an attacker
	 * setting the current thread's pmap to the kernel pmap as a way to bypass
	 * this check won't accomplish anything as it doesn't provide any extra code
	 * signing entitlements.
	 */
	if ((current != kernel_pmap) &&
	    ((get_mmu_ttb() & TTBR_BADDR_MASK) != (current->ttep))) {
		panic_plain("%s: Current thread's pmap doesn't match up with TTBR0 "
		    "%#llx %#llx", __func__, get_mmu_ttb(), current->ttep);
	}
#endif /* XNU_MONITOR */

	return current;
}

#if DEVELOPMENT || DEBUG

/*
 * Trace levels are controlled by a bitmask in which each level can be
 * enabled/disabled by the (1 << level) position in the boot-arg:
 * Level 0: PPL extension functionality
 * Level 1: pmap lifecycle (create/destroy/switch)
 * Level 2: mapping lifecycle (enter/remove/protect/nest/unnest)
 * Level 3: internal state management (attributes/fast-fault)
 * Level 4-7: TTE traces for paging levels 0-3. TTBs are traced at level 4.
 */

SECURITY_READ_ONLY_LATE(unsigned int) pmap_trace_mask = 0;

#define PMAP_TRACE(level, ...) \
	if (__improbable((1 << (level)) & pmap_trace_mask)) { \
		KDBG_RELEASE(__VA_ARGS__); \
	}
#else /* DEVELOPMENT || DEBUG */

#define PMAP_TRACE(level, ...)

#endif /* DEVELOPMENT || DEBUG */
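
/*
 * Example (illustrative; the boot-arg name is assumed, not defined in this
 * file): booting with pmap_trace=0x6 sets bits (1 << 1) | (1 << 2), enabling
 * pmap-lifecycle and mapping-lifecycle traces while leaving other levels off.
 */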

/*
 * Internal function prototypes (forward declarations).
 */

static vm_map_size_t pmap_user_va_size(pmap_t pmap);

static void pmap_set_reference(ppnum_t pn);

pmap_paddr_t pmap_vtophys(pmap_t pmap, addr64_t va);

static void pmap_switch_user_ttb(pmap_t pmap, pmap_cpu_data_t *cpu_data_ptr);

static kern_return_t pmap_expand(
	pmap_t, vm_map_address_t, unsigned int options, unsigned int level);

static int pmap_remove_range(
	pmap_t, vm_map_address_t, pt_entry_t *, pt_entry_t *);

static tt_entry_t *pmap_tt1_allocate(
	pmap_t, vm_size_t, unsigned int);

#define PMAP_TT_ALLOCATE_NOWAIT 0x1

static void pmap_tt1_deallocate(
	pmap_t, tt_entry_t *, vm_size_t, unsigned int);

#define PMAP_TT_DEALLOCATE_NOBLOCK 0x1

static kern_return_t pmap_tt_allocate(
	pmap_t, tt_entry_t **, unsigned int, unsigned int);

const unsigned int arm_hardware_page_size = ARM_PGBYTES;
const unsigned int arm_pt_desc_size = sizeof(pt_desc_t);
const unsigned int arm_pt_root_size = PMAP_ROOT_ALLOC_SIZE;

#if (__ARM_VMSA__ > 7)

static void pmap_unmap_sharedpage(
	pmap_t pmap);

static boolean_t
pmap_is_64bit(pmap_t);

#endif /* (__ARM_VMSA__ > 7) */

static void pmap_update_cache_attributes_locked(
	ppnum_t, unsigned);

static boolean_t arm_clear_fast_fault(
	ppnum_t ppnum,
	vm_prot_t fault_type,
	pt_entry_t *pte_p);

static void pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes);

static void pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes);

static void pmap_trim_self(pmap_t pmap);
static void pmap_trim_subord(pmap_t subord);

/*
 * Temporary prototypes, while we wait for pmap_enter to move to taking an
 * address instead of a page number.
 */
static kern_return_t
pmap_enter_addr(
	pmap_t pmap,
	vm_map_address_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired);

kern_return_t
pmap_enter_options_addr(
	pmap_t pmap,
	vm_map_address_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options,
	__unused void *arg);

#ifdef CONFIG_XNUPOST
kern_return_t pmap_test(void);
#endif /* CONFIG_XNUPOST */

PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	arm_fast_fault, (pmap_t pmap,
	vm_map_address_t va,
	vm_prot_t fault_type,
	bool was_af_fault,
	bool from_user), ARM_FAST_FAULT_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	boolean_t,
	arm_force_fast_fault, (ppnum_t ppnum,
	vm_prot_t allow_mode,
	int options), ARM_FORCE_FAST_FAULT_INDEX);

MARK_AS_PMAP_TEXT static boolean_t
arm_force_fast_fault_with_flush_range(
	ppnum_t ppnum,
	vm_prot_t allow_mode,
	int options,
	pmap_tlb_flush_range_t *flush_range);

PMAP_SUPPORT_PROTOTYPES(
	boolean_t,
	pmap_batch_set_cache_attributes, (ppnum_t pn,
	unsigned int cacheattr,
	unsigned int page_cnt,
	unsigned int page_index,
	boolean_t doit,
	unsigned int *res), PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_change_wiring, (pmap_t pmap,
	vm_map_address_t v,
	boolean_t wired), PMAP_CHANGE_WIRING_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	pmap_t,
	pmap_create_options, (ledger_t ledger,
	vm_map_size_t size,
	unsigned int flags,
	kern_return_t * kr), PMAP_CREATE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_destroy, (pmap_t pmap), PMAP_DESTROY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	pmap_enter_options, (pmap_t pmap,
	vm_map_address_t v,
	pmap_paddr_t pa,
	vm_prot_t prot,
	vm_prot_t fault_type,
	unsigned int flags,
	boolean_t wired,
	unsigned int options), PMAP_ENTER_OPTIONS_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	pmap_paddr_t,
	pmap_find_pa, (pmap_t pmap,
	addr64_t va), PMAP_FIND_PA_INDEX);

#if (__ARM_VMSA__ > 7)
PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	pmap_insert_sharedpage, (pmap_t pmap), PMAP_INSERT_SHAREDPAGE_INDEX);
#endif

PMAP_SUPPORT_PROTOTYPES(
	boolean_t,
	pmap_is_empty, (pmap_t pmap,
	vm_map_offset_t va_start,
	vm_map_offset_t va_end), PMAP_IS_EMPTY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	unsigned int,
	pmap_map_cpu_windows_copy, (ppnum_t pn,
	vm_prot_t prot,
	unsigned int wimg_bits), PMAP_MAP_CPU_WINDOWS_COPY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_ro_zone_memcpy, (zone_id_t zid,
	vm_offset_t va,
	vm_offset_t offset,
	const vm_offset_t new_data,
	vm_size_t new_data_size), PMAP_RO_ZONE_MEMCPY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	uint64_t,
	pmap_ro_zone_atomic_op, (zone_id_t zid,
	vm_offset_t va,
	vm_offset_t offset,
	zro_atomic_op_t op,
	uint64_t value), PMAP_RO_ZONE_ATOMIC_OP_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_ro_zone_bzero, (zone_id_t zid,
	vm_offset_t va,
	vm_offset_t offset,
	vm_size_t size), PMAP_RO_ZONE_BZERO_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	vm_map_offset_t,
	pmap_nest, (pmap_t grand,
	pmap_t subord,
	addr64_t vstart,
	uint64_t size,
	vm_map_offset_t vrestart,
	kern_return_t * krp), PMAP_NEST_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_page_protect_options, (ppnum_t ppnum,
	vm_prot_t prot,
	unsigned int options,
	void *arg), PMAP_PAGE_PROTECT_OPTIONS_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	vm_map_address_t,
	pmap_protect_options, (pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	vm_prot_t prot,
	unsigned int options,
	void *args), PMAP_PROTECT_OPTIONS_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	pmap_query_page_info, (pmap_t pmap,
	vm_map_offset_t va,
	int *disp_p), PMAP_QUERY_PAGE_INFO_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	mach_vm_size_t,
	pmap_query_resident, (pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	mach_vm_size_t * compressed_bytes_p), PMAP_QUERY_RESIDENT_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_reference, (pmap_t pmap), PMAP_REFERENCE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	vm_map_address_t,
	pmap_remove_options, (pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	int options), PMAP_REMOVE_OPTIONS_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_cache_attributes, (ppnum_t pn,
	unsigned int cacheattr), PMAP_SET_CACHE_ATTRIBUTES_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_update_compressor_page, (ppnum_t pn,
	unsigned int prev_cacheattr, unsigned int new_cacheattr), PMAP_UPDATE_COMPRESSOR_PAGE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_nested, (pmap_t pmap), PMAP_SET_NESTED_INDEX);

#if MACH_ASSERT || XNU_MONITOR
PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_process, (pmap_t pmap,
	int pid,
	char *procname), PMAP_SET_PROCESS_INDEX);
#endif

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_unmap_cpu_windows_copy, (unsigned int index), PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	vm_map_offset_t,
	pmap_unnest_options, (pmap_t grand,
	addr64_t vaddr,
	uint64_t size,
	vm_map_offset_t vrestart,
	unsigned int option), PMAP_UNNEST_OPTIONS_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	phys_attribute_set, (ppnum_t pn,
	unsigned int bits), PHYS_ATTRIBUTE_SET_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	phys_attribute_clear, (ppnum_t pn,
	unsigned int bits,
	int options,
	void *arg), PHYS_ATTRIBUTE_CLEAR_INDEX);

#if __ARM_RANGE_TLBI__
PMAP_SUPPORT_PROTOTYPES(
	vm_map_address_t,
	phys_attribute_clear_range, (pmap_t pmap,
	vm_map_address_t start,
	vm_map_address_t end,
	unsigned int bits,
	unsigned int options), PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX);
#endif /* __ARM_RANGE_TLBI__ */

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_switch, (pmap_t pmap), PMAP_SWITCH_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_clear_user_ttb, (void), PMAP_CLEAR_USER_TTB_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_vm_map_cs_enforced, (pmap_t pmap, bool new_value), PMAP_SET_VM_MAP_CS_ENFORCED_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_jit_entitled, (pmap_t pmap), PMAP_SET_JIT_ENTITLED_INDEX);

#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_disable_user_jop, (pmap_t pmap), PMAP_DISABLE_USER_JOP_INDEX);
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_trim, (pmap_t grand,
	pmap_t subord,
	addr64_t vstart,
	uint64_t size), PMAP_TRIM_INDEX);

#if HAS_APPLE_PAC
PMAP_SUPPORT_PROTOTYPES(
	void *,
	pmap_sign_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_SIGN_USER_PTR);
PMAP_SUPPORT_PROTOTYPES(
	void *,
	pmap_auth_user_ptr, (void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key), PMAP_AUTH_USER_PTR);
#endif /* HAS_APPLE_PAC */

PMAP_SUPPORT_PROTOTYPES(
	bool,
	pmap_is_trust_cache_loaded, (const uuid_t uuid), PMAP_IS_TRUST_CACHE_LOADED_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	uint32_t,
	pmap_lookup_in_static_trust_cache, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_STATIC_TRUST_CACHE_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	bool,
	pmap_lookup_in_loaded_trust_caches, (const uint8_t cdhash[CS_CDHASH_LEN]), PMAP_LOOKUP_IN_LOADED_TRUST_CACHES_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_compilation_service_cdhash, (const uint8_t cdhash[CS_CDHASH_LEN]),
	PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	bool,
	pmap_match_compilation_service_cdhash, (const uint8_t cdhash[CS_CDHASH_LEN]),
	PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_set_local_signing_public_key, (const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE]),
	PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_unrestrict_local_signing, (const uint8_t cdhash[CS_CDHASH_LEN]),
	PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX);

PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_nop, (pmap_t pmap), PMAP_NOP_INDEX);

void pmap_footprint_suspend(vm_map_t map,
    boolean_t suspend);
PMAP_SUPPORT_PROTOTYPES(
	void,
	pmap_footprint_suspend, (vm_map_t map,
	boolean_t suspend),
	PMAP_FOOTPRINT_SUSPEND_INDEX);

#if DEVELOPMENT || DEBUG
PMAP_SUPPORT_PROTOTYPES(
	kern_return_t,
	pmap_test_text_corruption, (pmap_paddr_t),
	PMAP_TEST_TEXT_CORRUPTION_INDEX);
#endif /* DEVELOPMENT || DEBUG */

#if (__ARM_VMSA__ > 7)
/*
 * The low global vector page is mapped at a fixed alias.
 * Since the page size is 16k for H8 and newer we map the globals to a 16k
 * aligned address. Readers of the globals (e.g. lldb, panic server) need
 * to check both addresses anyway for backward compatibility. So for now
 * we leave H6 and H7 where they were.
 */
#if (ARM_PGSHIFT == 14)
#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x4000)
#else
#define LOWGLOBAL_ALIAS (LOW_GLOBAL_BASE_ADDRESS + 0x2000)
#endif

#else
#define LOWGLOBAL_ALIAS (0xFFFF1000)
#endif

long long alloc_tteroot_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL;
long long alloc_ttepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL;
long long alloc_ptepages_count __attribute__((aligned(8))) MARK_AS_PMAP_DATA = 0LL;

#if XNU_MONITOR

#if __has_feature(ptrauth_calls)
#define __ptrauth_ppl_handler __ptrauth(ptrauth_key_function_pointer, true, 0)
#else
#define __ptrauth_ppl_handler
#endif

/*
 * Table of function pointers used for PPL dispatch.
 */
const void * __ptrauth_ppl_handler const ppl_handler_table[PMAP_COUNT] = {
	[ARM_FAST_FAULT_INDEX] = arm_fast_fault_internal,
	[ARM_FORCE_FAST_FAULT_INDEX] = arm_force_fast_fault_internal,
	[MAPPING_FREE_PRIME_INDEX] = mapping_free_prime_internal,
	[PHYS_ATTRIBUTE_CLEAR_INDEX] = phys_attribute_clear_internal,
	[PHYS_ATTRIBUTE_SET_INDEX] = phys_attribute_set_internal,
	[PMAP_BATCH_SET_CACHE_ATTRIBUTES_INDEX] = pmap_batch_set_cache_attributes_internal,
	[PMAP_CHANGE_WIRING_INDEX] = pmap_change_wiring_internal,
	[PMAP_CREATE_INDEX] = pmap_create_options_internal,
	[PMAP_DESTROY_INDEX] = pmap_destroy_internal,
	[PMAP_ENTER_OPTIONS_INDEX] = pmap_enter_options_internal,
	[PMAP_FIND_PA_INDEX] = pmap_find_pa_internal,
	[PMAP_INSERT_SHAREDPAGE_INDEX] = pmap_insert_sharedpage_internal,
	[PMAP_IS_EMPTY_INDEX] = pmap_is_empty_internal,
	[PMAP_MAP_CPU_WINDOWS_COPY_INDEX] = pmap_map_cpu_windows_copy_internal,
	[PMAP_RO_ZONE_MEMCPY_INDEX] = pmap_ro_zone_memcpy_internal,
	[PMAP_RO_ZONE_ATOMIC_OP_INDEX] = pmap_ro_zone_atomic_op_internal,
	[PMAP_RO_ZONE_BZERO_INDEX] = pmap_ro_zone_bzero_internal,
	[PMAP_MARK_PAGE_AS_PMAP_PAGE_INDEX] = pmap_mark_page_as_ppl_page_internal,
	[PMAP_NEST_INDEX] = pmap_nest_internal,
	[PMAP_PAGE_PROTECT_OPTIONS_INDEX] = pmap_page_protect_options_internal,
	[PMAP_PROTECT_OPTIONS_INDEX] = pmap_protect_options_internal,
	[PMAP_QUERY_PAGE_INFO_INDEX] = pmap_query_page_info_internal,
	[PMAP_QUERY_RESIDENT_INDEX] = pmap_query_resident_internal,
	[PMAP_REFERENCE_INDEX] = pmap_reference_internal,
	[PMAP_REMOVE_OPTIONS_INDEX] = pmap_remove_options_internal,
	[PMAP_SET_CACHE_ATTRIBUTES_INDEX] = pmap_set_cache_attributes_internal,
	[PMAP_UPDATE_COMPRESSOR_PAGE_INDEX] = pmap_update_compressor_page_internal,
	[PMAP_SET_NESTED_INDEX] = pmap_set_nested_internal,
	[PMAP_SET_PROCESS_INDEX] = pmap_set_process_internal,
	[PMAP_SWITCH_INDEX] = pmap_switch_internal,
	[PMAP_CLEAR_USER_TTB_INDEX] = pmap_clear_user_ttb_internal,
	[PMAP_UNMAP_CPU_WINDOWS_COPY_INDEX] = pmap_unmap_cpu_windows_copy_internal,
	[PMAP_UNNEST_OPTIONS_INDEX] = pmap_unnest_options_internal,
	[PMAP_FOOTPRINT_SUSPEND_INDEX] = pmap_footprint_suspend_internal,
	[PMAP_CPU_DATA_INIT_INDEX] = pmap_cpu_data_init_internal,
	[PMAP_RELEASE_PAGES_TO_KERNEL_INDEX] = pmap_release_ppl_pages_to_kernel_internal,
	[PMAP_SET_VM_MAP_CS_ENFORCED_INDEX] = pmap_set_vm_map_cs_enforced_internal,
	[PMAP_SET_JIT_ENTITLED_INDEX] = pmap_set_jit_entitled_internal,
	[PMAP_IS_TRUST_CACHE_LOADED_INDEX] = pmap_is_trust_cache_loaded_internal,
	[PMAP_LOOKUP_IN_STATIC_TRUST_CACHE_INDEX] = pmap_lookup_in_static_trust_cache_internal,
	[PMAP_LOOKUP_IN_LOADED_TRUST_CACHES_INDEX] = pmap_lookup_in_loaded_trust_caches_internal,
	[PMAP_SET_COMPILATION_SERVICE_CDHASH_INDEX] = pmap_set_compilation_service_cdhash_internal,
	[PMAP_MATCH_COMPILATION_SERVICE_CDHASH_INDEX] = pmap_match_compilation_service_cdhash_internal,
	[PMAP_SET_LOCAL_SIGNING_PUBLIC_KEY_INDEX] = pmap_set_local_signing_public_key_internal,
	[PMAP_UNRESTRICT_LOCAL_SIGNING_INDEX] = pmap_unrestrict_local_signing_internal,
	[PMAP_TRIM_INDEX] = pmap_trim_internal,
	[PMAP_LEDGER_VERIFY_SIZE_INDEX] = pmap_ledger_verify_size_internal,
	[PMAP_LEDGER_ALLOC_INDEX] = pmap_ledger_alloc_internal,
	[PMAP_LEDGER_FREE_INDEX] = pmap_ledger_free_internal,
#if HAS_APPLE_PAC
	[PMAP_SIGN_USER_PTR] = pmap_sign_user_ptr_internal,
	[PMAP_AUTH_USER_PTR] = pmap_auth_user_ptr_internal,
#endif /* HAS_APPLE_PAC */
#if __ARM_RANGE_TLBI__
	[PHYS_ATTRIBUTE_CLEAR_RANGE_INDEX] = phys_attribute_clear_range_internal,
#endif /* __ARM_RANGE_TLBI__ */
#if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
	[PMAP_DISABLE_USER_JOP_INDEX] = pmap_disable_user_jop_internal,
#endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
	[PMAP_NOP_INDEX] = pmap_nop_internal,

#if DEVELOPMENT || DEBUG
	[PMAP_TEST_TEXT_CORRUPTION_INDEX] = pmap_test_text_corruption_internal,
#endif /* DEVELOPMENT || DEBUG */
};
#endif

#if XNU_MONITOR
/**
 * A convenience function for setting protections on a single physical
 * aperture or static region mapping without invalidating the TLB.
 *
 * @note This function does not perform any TLB invalidations. That must be done
 *       separately to be able to safely use the updated mapping.
 *
 * @note This function understands the difference between the VM page size and
 *       the kernel page size and will update multiple PTEs if the sizes differ.
 *       In other words, enough PTEs will always get updated to change the
 *       permissions on a PAGE_SIZE amount of memory.
 *
 * @note The PVH lock for the physical page represented by this mapping must
 *       already be locked.
 *
 * @note This function assumes the caller has already verified that the PTE
 *       pointer does indeed point to a physical aperture or static region page
 *       table. Please validate your inputs before passing it along to this
 *       function.
 *
 * @param ptep Pointer to the physical aperture or static region page table to
 *             update with a new XPRR index.
 * @param expected_perm The XPRR index that is expected to already exist at the
 *                      current mapping. If the current index doesn't match this
 *                      then the system will panic.
 * @param new_perm The new XPRR index to update the mapping with.
 */
MARK_AS_PMAP_TEXT static void
pmap_set_pte_xprr_perm(
	pt_entry_t * const ptep,
	unsigned int expected_perm,
	unsigned int new_perm)
{
	assert(ptep != NULL);

	pt_entry_t spte = *ptep;
	pvh_assert_locked(pa_index(pte_to_pa(spte)));

	if (__improbable((new_perm > XPRR_MAX_PERM) || (expected_perm > XPRR_MAX_PERM))) {
		panic_plain("%s: invalid XPRR index, ptep=%p, new_perm=%u, expected_perm=%u",
		    __func__, ptep, new_perm, expected_perm);
	}

	/**
	 * The PTE involved should be valid, should not have the hint bit set, and
	 * should have the expected XPRR index.
	 */
	if (__improbable((spte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_FAULT)) {
		panic_plain("%s: physical aperture or static region PTE is invalid, "
		    "ptep=%p, spte=%#llx, new_perm=%u, expected_perm=%u",
		    __func__, ptep, spte, new_perm, expected_perm);
	}

	if (__improbable(spte & ARM_PTE_HINT_MASK)) {
		panic_plain("%s: physical aperture or static region PTE has hint bit "
		    "set, ptep=%p, spte=0x%llx, new_perm=%u, expected_perm=%u",
		    __func__, ptep, spte, new_perm, expected_perm);
	}

	if (__improbable(pte_to_xprr_perm(spte) != expected_perm)) {
		panic("%s: perm=%llu does not match expected_perm, spte=0x%llx, "
		    "ptep=%p, new_perm=%u, expected_perm=%u",
		    __func__, pte_to_xprr_perm(spte), spte, ptep, new_perm, expected_perm);
	}

	pt_entry_t template = spte;
	template &= ~ARM_PTE_XPRR_MASK;
	template |= xprr_perm_to_pte(new_perm);

	write_pte_strong(ptep, template);
}

/**
 * Update the protections on a single physical aperture mapping and invalidate
 * the TLB so the mapping can be used.
 *
 * @note The PVH lock for the physical page must already be locked.
 *
 * @param pai The physical address index of the page whose physical aperture
 *            mapping will be updated with new permissions.
 * @param expected_perm The XPRR index that is expected to already exist at the
 *                      current mapping. If the current index doesn't match this
 *                      then the system will panic.
 * @param new_perm The new XPRR index to update the mapping with.
 */
MARK_AS_PMAP_TEXT void
pmap_set_xprr_perm(
	unsigned int pai,
	unsigned int expected_perm,
	unsigned int new_perm)
{
	pvh_assert_locked(pai);

	const vm_offset_t kva = phystokv(vm_first_phys + (pmap_paddr_t)ptoa(pai));
	pt_entry_t * const ptep = pmap_pte(kernel_pmap, kva);

	pmap_set_pte_xprr_perm(ptep, expected_perm, new_perm);

	native_pt_ops.flush_tlb_region_async(kva, PAGE_SIZE, kernel_pmap, true);
	sync_tlb_flush();
}

/**
 * Update the protections on a range of physical aperture or static region
 * mappings and invalidate the TLB so the mappings can be used.
 *
 * @note Static region mappings can only be updated before machine_lockdown().
 *       Physical aperture mappings can be updated at any time.
 *
 * @param start The starting virtual address of the static region or physical
 *              aperture range whose permissions will be updated.
 * @param end The final (inclusive) virtual address of the static region or
 *            physical aperture range whose permissions will be updated.
 * @param expected_perm The XPRR index that is expected to already exist at the
 *                      current mappings. If the current indices don't match
 *                      this then the system will panic.
 * @param new_perm The new XPRR index to update the mappings with.
 */
MARK_AS_PMAP_TEXT static void
pmap_set_range_xprr_perm(
	vm_address_t start,
	vm_address_t end,
	unsigned int expected_perm,
	unsigned int new_perm)
{
#if (__ARM_VMSA__ == 7)
#error This function is not supported on older ARM hardware.
#endif /* (__ARM_VMSA__ == 7) */

	/*
	 * Validate our arguments; any invalid argument will be grounds for a panic.
	 */
	if (__improbable((start | end) & ARM_PGMASK)) {
		panic_plain("%s: start or end not page aligned, "
		    "start=%p, end=%p, new_perm=%u, expected_perm=%u",
		    __func__, (void *)start, (void *)end, new_perm, expected_perm);
	}

	if (__improbable(start > end)) {
		panic("%s: start > end, start=%p, end=%p, new_perm=%u, expected_perm=%u",
		    __func__, (void *)start, (void *)end, new_perm, expected_perm);
	}

	const bool in_physmap = (start >= physmap_base) && (end < physmap_end);
	const bool in_static = (start >= gVirtBase) && (end < static_memory_end);

	if (__improbable(!(in_physmap || in_static))) {
		panic_plain("%s: address not in static region or physical aperture, "
		    "start=%p, end=%p, new_perm=%u, expected_perm=%u",
		    __func__, (void *)start, (void *)end, new_perm, expected_perm);
	}

	if (__improbable((new_perm > XPRR_MAX_PERM) || (expected_perm > XPRR_MAX_PERM))) {
		panic_plain("%s: invalid XPRR index, "
		    "start=%p, end=%p, new_perm=%u, expected_perm=%u",
		    __func__, (void *)start, (void *)end, new_perm, expected_perm);
	}

	/*
	 * Walk over the PTEs for the given range, and set the protections on those
	 * PTEs. Each iteration of this loop will update all of the leaf PTEs within
	 * one twig entry (whichever twig entry currently maps "va").
	 */
	vm_address_t va = start;
	while (va < end) {
		/**
		 * Get the last VA that the twig entry for "va" maps. All of the leaf
		 * PTEs from va to tte_va_end will have their permissions updated.
		 */
		vm_address_t tte_va_end =
		    (va + pt_attr_twig_size(native_pt_attr)) & ~pt_attr_twig_offmask(native_pt_attr);

		if (tte_va_end > end) {
			tte_va_end = end;
		}

		tt_entry_t *ttep = pmap_tte(kernel_pmap, va);

		if (ttep == NULL) {
			panic_plain("%s: physical aperture or static region tte is NULL, "
			    "start=%p, end=%p, new_perm=%u, expected_perm=%u",
			    __func__, (void *)start, (void *)end, new_perm, expected_perm);
		}

		tt_entry_t tte = *ttep;

		if ((tte & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE) {
			panic_plain("%s: tte=0x%llx is not a table type entry, "
			    "start=%p, end=%p, new_perm=%u, expected_perm=%u", __func__,
			    tte, (void *)start, (void *)end, new_perm, expected_perm);
		}

		/* Walk over the given L3 page table page and update the PTEs. */
		pt_entry_t * const ptep = (pt_entry_t *)ttetokv(tte);
		pt_entry_t * const begin_ptep = &ptep[pte_index(native_pt_attr, va)];
		const uint64_t num_ptes = (tte_va_end - va) >> pt_attr_leaf_shift(native_pt_attr);
		pt_entry_t * const end_ptep = begin_ptep + num_ptes;

		/**
		 * The current PTE pointer is incremented by the page ratio (ratio of
		 * VM page size to kernel hardware page size) because one call to
		 * pmap_set_pte_xprr_perm() will update all PTE entries required to map
		 * a PAGE_SIZE worth of hardware pages.
		 */
		for (pt_entry_t *cur_ptep = begin_ptep; cur_ptep < end_ptep;
		    cur_ptep += PAGE_RATIO, va += PAGE_SIZE) {
			unsigned int pai = pa_index(pte_to_pa(*cur_ptep));
			pvh_lock(pai);
			pmap_set_pte_xprr_perm(cur_ptep, expected_perm, new_perm);
			pvh_unlock(pai);
		}

		va = tte_va_end;
	}

	PMAP_UPDATE_TLBS(kernel_pmap, start, end, false, true);
}

#endif /* XNU_MONITOR */

static inline void
PMAP_ZINFO_PALLOC(
	pmap_t pmap, int bytes)
{
	pmap_ledger_credit(pmap, task_ledgers.tkm_private, bytes);
}

static inline void
PMAP_ZINFO_PFREE(
	pmap_t pmap,
	int bytes)
{
	pmap_ledger_debit(pmap, task_ledgers.tkm_private, bytes);
}

void
pmap_tt_ledger_credit(
	pmap_t pmap,
	vm_size_t size)
{
	if (pmap != kernel_pmap) {
		pmap_ledger_credit(pmap, task_ledgers.phys_footprint, size);
		pmap_ledger_credit(pmap, task_ledgers.page_table, size);
	}
}

void
pmap_tt_ledger_debit(
	pmap_t pmap,
	vm_size_t size)
{
	if (pmap != kernel_pmap) {
		pmap_ledger_debit(pmap, task_ledgers.phys_footprint, size);
		pmap_ledger_debit(pmap, task_ledgers.page_table, size);
	}
}

static inline void
pmap_update_plru(uint16_t asid_index)
{
	if (__probable(pmap_asid_plru)) {
		unsigned plru_index = asid_index >> 6;
		if (__improbable(os_atomic_andnot(&asid_plru_bitmap[plru_index], (1ULL << (asid_index & 63)), relaxed) == 0)) {
			asid_plru_generation[plru_index] = ++asid_plru_gencount;
			asid_plru_bitmap[plru_index] = ((plru_index == (MAX_HW_ASIDS >> 6)) ? ~(1ULL << 63) : UINT64_MAX);
		}
	}
}
1540
1541 static bool
alloc_asid(pmap_t pmap)1542 alloc_asid(pmap_t pmap)
1543 {
1544 int vasid = -1;
1545 uint16_t hw_asid;
1546
1547 pmap_simple_lock(&asid_lock);
1548
1549 if (__probable(pmap_asid_plru)) {
1550 unsigned plru_index = 0;
1551 uint64_t lowest_gen = asid_plru_generation[0];
1552 uint64_t lowest_gen_bitmap = asid_plru_bitmap[0];
1553 for (unsigned i = 1; i < (sizeof(asid_plru_generation) / sizeof(asid_plru_generation[0])); ++i) {
1554 if (asid_plru_generation[i] < lowest_gen) {
1555 plru_index = i;
1556 lowest_gen = asid_plru_generation[i];
1557 lowest_gen_bitmap = asid_plru_bitmap[i];
1558 }
1559 }
1560
1561 for (; plru_index < BITMAP_LEN(pmap_max_asids); plru_index += ((MAX_HW_ASIDS + 1) >> 6)) {
1562 uint64_t temp_plru = lowest_gen_bitmap & asid_bitmap[plru_index];
1563 if (temp_plru) {
1564 vasid = (plru_index << 6) + lsb_first(temp_plru);
1565 #if DEVELOPMENT || DEBUG
1566 ++pmap_asid_hits;
1567 #endif
1568 break;
1569 }
1570 }
1571 }
1572 if (__improbable(vasid < 0)) {
1573 // bitmap_first() returns highest-order bits first, but a 0-based scheme works
1574 // slightly better with the collision detection scheme used by pmap_switch_internal().
1575 vasid = bitmap_lsb_first(&asid_bitmap[0], pmap_max_asids);
1576 #if DEVELOPMENT || DEBUG
1577 ++pmap_asid_misses;
1578 #endif
1579 }
1580 if (__improbable(vasid < 0)) {
1581 pmap_simple_unlock(&asid_lock);
1582 return false;
1583 }
1584 assert((uint32_t)vasid < pmap_max_asids);
1585 assert(bitmap_test(&asid_bitmap[0], (unsigned int)vasid));
1586 bitmap_clear(&asid_bitmap[0], (unsigned int)vasid);
1587 pmap_simple_unlock(&asid_lock);
1588 hw_asid = (uint16_t)(vasid % asid_chunk_size);
1589 pmap->sw_asid = (uint8_t)(vasid / asid_chunk_size);
1590 if (__improbable(hw_asid == MAX_HW_ASIDS)) {
1591 /* If we took a PLRU "miss" and ended up with a hardware ASID we can't actually support,
1592 * reassign to a reserved VASID. */
1593 assert(pmap->sw_asid < UINT8_MAX);
1594 pmap->sw_asid = UINT8_MAX;
1595 /* Allocate from the high end of the hardware ASID range to reduce the likelihood of
1596 * aliasing with vital system processes, which are likely to have lower ASIDs. */
1597 hw_asid = MAX_HW_ASIDS - 1 - (uint16_t)(vasid / asid_chunk_size);
1598 assert(hw_asid < MAX_HW_ASIDS);
1599 }
1600 pmap_update_plru(hw_asid);
1601 hw_asid += 1; // Account for ASID 0, which is reserved for the kernel
1602 #if __ARM_KERNEL_PROTECT__
1603 hw_asid <<= 1; // We're really handing out 2 hardware ASIDs, one for EL0 and one for EL1 access
1604 #endif
1605 pmap->hw_asid = hw_asid;
1606 return true;
1607 }
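
/*
 * Worked example (illustrative only, assuming MAX_HW_ASIDS == 255 and
 * pmap_asid_plru enabled, so asid_chunk_size == 256):
 *
 *   vasid 600: hw_asid = 600 % 256 = 88, sw_asid = 600 / 256 = 2; after
 *              the +1 for the reserved kernel ASID 0, the TTBR uses 89.
 *   vasid 255: hw_asid = 255 == MAX_HW_ASIDS, an unsupportable value, so
 *              the pmap moves to the reserved sw_asid UINT8_MAX and gets
 *              hw_asid = 255 - 1 - 0 = 254 (255 after the +1).
 *
 * free_asid() below inverts exactly this mapping to return the vasid to
 * asid_bitmap.
 */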
1608
1609 static void
1610 free_asid(pmap_t pmap)
1611 {
1612 unsigned int vasid;
1613 uint16_t hw_asid = os_atomic_xchg(&pmap->hw_asid, 0, relaxed);
1614 if (__improbable(hw_asid == 0)) {
1615 return;
1616 }
1617
1618 #if __ARM_KERNEL_PROTECT__
1619 hw_asid >>= 1;
1620 #endif
1621 hw_asid -= 1;
1622
1623 if (__improbable(pmap->sw_asid == UINT8_MAX)) {
1624 vasid = ((MAX_HW_ASIDS - 1 - hw_asid) * asid_chunk_size) + MAX_HW_ASIDS;
1625 } else {
1626 vasid = ((unsigned int)pmap->sw_asid * asid_chunk_size) + hw_asid;
1627 }
1628
1629 if (__probable(pmap_asid_plru)) {
1630 os_atomic_or(&asid_plru_bitmap[hw_asid >> 6], (1ULL << (hw_asid & 63)), relaxed);
1631 }
1632 pmap_simple_lock(&asid_lock);
1633 assert(!bitmap_test(&asid_bitmap[0], vasid));
1634 bitmap_set(&asid_bitmap[0], vasid);
1635 pmap_simple_unlock(&asid_lock);
1636 }
1637
1638
1639 boolean_t
1640 pmap_valid_address(
1641 pmap_paddr_t addr)
1642 {
1643 return pa_valid(addr);
1644 }
1645
1646
1647
1648
1649
1650
1651 /*
1652 * Map memory at initialization. The physical addresses being
1653 * mapped are not managed and are never unmapped.
1654 *
1655 * For now, the VM is already on; we only need to map the
1656 * specified memory.
1657 */
1658 vm_map_address_t
1659 pmap_map(
1660 vm_map_address_t virt,
1661 vm_offset_t start,
1662 vm_offset_t end,
1663 vm_prot_t prot,
1664 unsigned int flags)
1665 {
1666 kern_return_t kr;
1667 vm_size_t ps;
1668
1669 ps = PAGE_SIZE;
1670 while (start < end) {
1671 kr = pmap_enter(kernel_pmap, virt, (ppnum_t)atop(start),
1672 prot, VM_PROT_NONE, flags, FALSE);
1673
1674 if (kr != KERN_SUCCESS) {
1675 panic("%s: failed pmap_enter, "
1676 "virt=%p, start_addr=%p, end_addr=%p, prot=%#x, flags=%#x",
1677 __FUNCTION__,
1678 (void *) virt, (void *) start, (void *) end, prot, flags);
1679 }
1680
1681 virt += ps;
1682 start += ps;
1683 }
1684 return virt;
1685 }
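
/*
 * Usage sketch (hypothetical addresses, not from the original source):
 * map two pages of unmanaged physical memory at a fixed kernel VA during
 * bootstrap and receive the first unmapped VA back:
 *
 *   vm_map_address_t next_va = pmap_map(va, pa, pa + 2 * PAGE_SIZE,
 *       VM_PROT_READ | VM_PROT_WRITE, 0);
 *   // next_va == va + 2 * PAGE_SIZE; pmap_map() panics on pmap_enter()
 *   // failure instead of returning an error.
 */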
1686
1687 vm_map_address_t
1688 pmap_map_bd_with_options(
1689 vm_map_address_t virt,
1690 vm_offset_t start,
1691 vm_offset_t end,
1692 vm_prot_t prot,
1693 int32_t options)
1694 {
1695 pt_entry_t tmplate;
1696 pt_entry_t *ptep;
1697 vm_map_address_t vaddr;
1698 vm_offset_t paddr;
1699 pt_entry_t mem_attr;
1700
1701 switch (options & PMAP_MAP_BD_MASK) {
1702 case PMAP_MAP_BD_WCOMB:
1703 mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB);
1704 #if (__ARM_VMSA__ > 7)
1705 mem_attr |= ARM_PTE_SH(SH_OUTER_MEMORY);
1706 #else
1707 mem_attr |= ARM_PTE_SH;
1708 #endif
1709 break;
1710 case PMAP_MAP_BD_POSTED:
1711 mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED);
1712 break;
1713 case PMAP_MAP_BD_POSTED_REORDERED:
1714 mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED_REORDERED);
1715 break;
1716 case PMAP_MAP_BD_POSTED_COMBINED_REORDERED:
1717 mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED);
1718 break;
1719 default:
1720 mem_attr = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE);
1721 break;
1722 }
1723
1724 tmplate = pa_to_pte(start) | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA) |
1725 mem_attr | ARM_PTE_TYPE | ARM_PTE_NX | ARM_PTE_PNX | ARM_PTE_AF;
1726 #if __ARM_KERNEL_PROTECT__
1727 tmplate |= ARM_PTE_NG;
1728 #endif /* __ARM_KERNEL_PROTECT__ */
1729
1730 vaddr = virt;
1731 paddr = start;
1732 while (paddr < end) {
1733 ptep = pmap_pte(kernel_pmap, vaddr);
1734 if (ptep == PT_ENTRY_NULL) {
1735 panic("%s: no PTE for vaddr=%p, "
1736 "virt=%p, start=%p, end=%p, prot=0x%x, options=0x%x",
1737 __FUNCTION__, (void*)vaddr,
1738 (void*)virt, (void*)start, (void*)end, prot, options);
1739 }
1740
1741 assert(!ARM_PTE_IS_COMPRESSED(*ptep, ptep));
1742 write_pte_strong(ptep, tmplate);
1743
1744 pte_increment_pa(tmplate);
1745 vaddr += PAGE_SIZE;
1746 paddr += PAGE_SIZE;
1747 }
1748
1749 if (end >= start) {
1750 flush_mmu_tlb_region(virt, (unsigned)(end - start));
1751 }
1752
1753 return vaddr;
1754 }
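
/*
 * Usage sketch (hypothetical addresses): map one page of device registers
 * write-combined. The options argument only selects the memory attribute
 * index; anything not matched by the switch above falls back to
 * CACHE_ATTRINDX_DISABLE (device memory).
 *
 *   vm_map_address_t end_va = pmap_map_bd_with_options(va, regs_pa,
 *       regs_pa + PAGE_SIZE, VM_PROT_READ | VM_PROT_WRITE,
 *       PMAP_MAP_BD_WCOMB);
 */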
1755
1756 /*
1757 * Back-door routine for mapping kernel VM at initialization.
1758 * Useful for mapping memory outside the range
1759 * [vm_first_phys, vm_last_phys] (i.e., devices).
1760 * Otherwise like pmap_map.
1761 */
1762 vm_map_address_t
1763 pmap_map_bd(
1764 vm_map_address_t virt,
1765 vm_offset_t start,
1766 vm_offset_t end,
1767 vm_prot_t prot)
1768 {
1769 pt_entry_t tmplate;
1770 pt_entry_t *ptep;
1771 vm_map_address_t vaddr;
1772 vm_offset_t paddr;
1773
1774 /* not cacheable and not buffered */
1775 tmplate = pa_to_pte(start)
1776 | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX
1777 | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA)
1778 | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE);
1779 #if __ARM_KERNEL_PROTECT__
1780 tmplate |= ARM_PTE_NG;
1781 #endif /* __ARM_KERNEL_PROTECT__ */
1782
1783 vaddr = virt;
1784 paddr = start;
1785 while (paddr < end) {
1786 ptep = pmap_pte(kernel_pmap, vaddr);
1787 if (ptep == PT_ENTRY_NULL) {
1788 panic("pmap_map_bd");
1789 }
1790 assert(!ARM_PTE_IS_COMPRESSED(*ptep, ptep));
1791 write_pte_strong(ptep, tmplate);
1792
1793 pte_increment_pa(tmplate);
1794 vaddr += PAGE_SIZE;
1795 paddr += PAGE_SIZE;
1796 }
1797
1798 if (end >= start) {
1799 flush_mmu_tlb_region(virt, (unsigned)(end - start));
1800 }
1801
1802 return vaddr;
1803 }
1804
1805 /*
1806 * Back-door routine for mapping kernel VM at initialization.
1807 * Useful for mapping specific physical addresses in early
1808 * boot (i.e., before kernel_map is initialized).
1809 *
1810 * Maps are in the VM_HIGH_KERNEL_WINDOW area.
1811 */
1812
1813 vm_map_address_t
1814 pmap_map_high_window_bd(
1815 vm_offset_t pa_start,
1816 vm_size_t len,
1817 vm_prot_t prot)
1818 {
1819 pt_entry_t *ptep, pte;
1820 #if (__ARM_VMSA__ == 7)
1821 vm_map_address_t va_start = VM_HIGH_KERNEL_WINDOW;
1822 vm_map_address_t va_max = VM_MAX_KERNEL_ADDRESS;
1823 #else
1824 vm_map_address_t va_start = VREGION1_START;
1825 vm_map_address_t va_max = VREGION1_START + VREGION1_SIZE;
1826 #endif
1827 vm_map_address_t va_end;
1828 vm_map_address_t va;
1829 vm_size_t offset;
1830
1831 offset = pa_start & PAGE_MASK;
1832 pa_start -= offset;
1833 len += offset;
1834
1835 if (len > (va_max - va_start)) {
1836 panic("%s: area too large, "
1837 "pa_start=%p, len=%p, prot=0x%x",
1838 __FUNCTION__,
1839 (void*)pa_start, (void*)len, prot);
1840 }
1841
1842 scan:
1843 for (; va_start < va_max; va_start += PAGE_SIZE) {
1844 ptep = pmap_pte(kernel_pmap, va_start);
1845 assert(!ARM_PTE_IS_COMPRESSED(*ptep, ptep));
1846 if (*ptep == ARM_PTE_TYPE_FAULT) {
1847 break;
1848 }
1849 }
1850 if (va_start >= va_max) {
1851 panic("%s: insufficient pages, "
1852 "pa_start=%p, len=%p, prot=0x%x",
1853 __FUNCTION__,
1854 (void*)pa_start, (void*)len, prot);
1855 }
1856
1857 for (va_end = va_start + PAGE_SIZE; va_end < va_start + len; va_end += PAGE_SIZE) {
1858 ptep = pmap_pte(kernel_pmap, va_end);
1859 assert(!ARM_PTE_IS_COMPRESSED(*ptep, ptep));
1860 if (*ptep != ARM_PTE_TYPE_FAULT) {
1861 va_start = va_end + PAGE_SIZE;
1862 goto scan;
1863 }
1864 }
1865
1866 for (va = va_start; va < va_end; va += PAGE_SIZE, pa_start += PAGE_SIZE) {
1867 ptep = pmap_pte(kernel_pmap, va);
1868 pte = pa_to_pte(pa_start)
1869 | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX
1870 | ARM_PTE_AP((prot & VM_PROT_WRITE) ? AP_RWNA : AP_RONA)
1871 | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
1872 #if (__ARM_VMSA__ > 7)
1873 pte |= ARM_PTE_SH(SH_OUTER_MEMORY);
1874 #else
1875 pte |= ARM_PTE_SH;
1876 #endif
1877 #if __ARM_KERNEL_PROTECT__
1878 pte |= ARM_PTE_NG;
1879 #endif /* __ARM_KERNEL_PROTECT__ */
1880 write_pte_strong(ptep, pte);
1881 }
1882 PMAP_UPDATE_TLBS(kernel_pmap, va_start, va_start + len, false, true);
1883 #if KASAN
1884 kasan_notify_address(va_start, len);
1885 #endif
1886 return va_start;
1887 }
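
/*
 * Usage sketch (hypothetical values): the scan above is a first-fit search
 * for a run of invalid PTEs large enough for len bytes. Note that pa_start
 * is truncated to a page boundary and the page-aligned VA is returned, so
 * a caller passing an unaligned pa_start must add the sub-page offset back:
 *
 *   vm_map_address_t va = pmap_map_high_window_bd(pa, size,
 *       VM_PROT_READ | VM_PROT_WRITE) + (pa & PAGE_MASK);
 */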
1888
1889 static uint32_t
1890 pmap_compute_max_asids(void)
1891 {
1892 DTEntry entry;
1893 void const *prop = NULL;
1894 uint32_t max_asids;
1895 int err;
1896 unsigned int prop_size;
1897
1898 err = SecureDTLookupEntry(NULL, "/defaults", &entry);
1899 assert(err == kSuccess);
1900
1901 if (kSuccess != SecureDTGetProperty(entry, "pmap-max-asids", &prop, &prop_size)) {
1902 /* TODO: consider allowing maxproc limits to be scaled earlier so that
1903 * we can choose a more flexible default value here. */
1904 return MAX_ASIDS;
1905 }
1906
1907 if (prop_size != sizeof(max_asids)) {
1908 panic("pmap-max-asids property is not a 32-bit integer");
1909 }
1910
1911 max_asids = *((uint32_t const *)prop);
1912 /* Round up to the nearest 64 to make things a bit easier for the Pseudo-LRU allocator. */
1913 max_asids = (max_asids + 63) & ~63UL;
1914
1915 if (((max_asids + MAX_HW_ASIDS) / (MAX_HW_ASIDS + 1)) > MIN(MAX_HW_ASIDS, UINT8_MAX)) {
1916 /* currently capped by size of pmap->sw_asid */
1917 panic("pmap-max-asids too large");
1918 }
1919 if (max_asids == 0) {
1920 panic("pmap-max-asids cannot be zero");
1921 }
1922 return max_asids;
1923 }
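
/*
 * Worked example (illustrative): a pmap-max-asids property of 1000 rounds
 * up to (1000 + 63) & ~63 = 1024 virtual ASIDs. Assuming MAX_HW_ASIDS ==
 * 255, the guard above then checks (1024 + 255) / 256 = 4 <= MIN(255, 255),
 * i.e. that the number of 256-entry ASID chunks still fits in the 8-bit
 * pmap->sw_asid field.
 */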
1924
1925 #if __arm64__
1926 /*
1927 * pmap_get_arm64_prot
1928 *
1929 * return effective armv8 VMSA block protections including
1930 * table AP/PXN/XN overrides of a pmap entry
1931 *
1932 */
1933
1934 uint64_t
1935 pmap_get_arm64_prot(
1936 pmap_t pmap,
1937 vm_offset_t addr)
1938 {
1939 tt_entry_t tte = 0;
1940 unsigned int level = 0;
1941 uint64_t tte_type = 0;
1942 uint64_t effective_prot_bits = 0;
1943 uint64_t aggregate_tte = 0;
1944 uint64_t table_ap_bits = 0, table_xn = 0, table_pxn = 0;
1945 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
1946
1947 for (level = pt_attr->pta_root_level; level <= pt_attr->pta_max_level; level++) {
1948 tte = *pmap_ttne(pmap, level, addr);
1949
1950 if (!(tte & ARM_TTE_VALID)) {
1951 return 0;
1952 }
1953
1954 tte_type = tte & ARM_TTE_TYPE_MASK;
1955
1956 if ((tte_type == ARM_TTE_TYPE_BLOCK) ||
1957 (level == pt_attr->pta_max_level)) {
1958 /* Block or page mapping; both have the same protection bit layout. */
1959 break;
1960 } else if (tte_type == ARM_TTE_TYPE_TABLE) {
1961 /* All of the table bits we care about are overrides, so just OR them together. */
1962 aggregate_tte |= tte;
1963 }
1964 }
1965
1966 table_ap_bits = ((aggregate_tte >> ARM_TTE_TABLE_APSHIFT) & AP_MASK);
1967 table_xn = (aggregate_tte & ARM_TTE_TABLE_XN);
1968 table_pxn = (aggregate_tte & ARM_TTE_TABLE_PXN);
1969
1970 /* Start with the PTE bits. */
1971 effective_prot_bits = tte & (ARM_PTE_APMASK | ARM_PTE_NX | ARM_PTE_PNX);
1972
1973 /* Table AP bits mask out block/page AP bits */
1974 effective_prot_bits &= ~(ARM_PTE_AP(table_ap_bits));
1975
1976 /* XN/PXN bits can be OR'd in. */
1977 effective_prot_bits |= (table_xn ? ARM_PTE_NX : 0);
1978 effective_prot_bits |= (table_pxn ? ARM_PTE_PNX : 0);
1979
1980 return effective_prot_bits;
1981 }
1982 #endif /* __arm64__ */
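
/*
 * Decoding sketch (illustrative, not from the original source): the value
 * returned by pmap_get_arm64_prot() is in PTE format, so callers test it
 * with the PTE bit macros:
 *
 *   uint64_t prot = pmap_get_arm64_prot(pmap, addr);
 *   bool user_nx = (prot & ARM_PTE_NX) != 0;
 *   bool kern_nx = (prot & ARM_PTE_PNX) != 0;
 */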
1983
1984 static void
1985 pmap_set_srd_fusing(void)
1986 {
1987 DTEntry entry;
1988 uint32_t const *prop = NULL;
1989 int err;
1990 unsigned int prop_size = 0;
1991
1992 err = SecureDTLookupEntry(NULL, "/chosen", &entry);
1993 if (err != kSuccess) {
1994 panic("PMAP: no chosen DT node");
1995 }
1996
1997 if (kSuccess == SecureDTGetProperty(entry, "research-enabled", (const void**)&prop, &prop_size)) {
1998 if (prop_size == sizeof(uint32_t)) {
1999 srd_fused = *prop;
2000 }
2001 }
2002
2003 #if DEVELOPMENT || DEBUG
2004 PE_parse_boot_argn("srd_fusing", &srd_fused, sizeof(srd_fused));
2005 #endif
2006 }
2007
2008 /*
2009 * Bootstrap the system enough to run with virtual memory.
2010 *
2011 * The early VM initialization code has already allocated
2012 * the first CPU's translation table and made entries for
2013 * all the one-to-one mappings to be found there.
2014 *
2015 * We must set up the kernel pmap structures and the
2016 * physical-to-virtual translation lookup tables for the
2017 * physical memory to be managed (between avail_start and
2018 * avail_end).
2019 *
2020 * Map the kernel's code and data, and allocate the system page table.
2021 * Page_size must already be set.
2022 *
2023 * Parameters:
2024 * first_avail first available physical page -
2025 * after kernel page tables
2026 * avail_start PA of first managed physical page
2027 * avail_end PA of last managed physical page
2028 */
2029
2030 void
2031 pmap_bootstrap(
2032 vm_offset_t vstart)
2033 {
2034 vm_map_offset_t maxoffset;
2035
2036 lck_grp_init(&pmap_lck_grp, "pmap", LCK_GRP_ATTR_NULL);
2037
2038 pmap_set_srd_fusing();
2039
2040 #if XNU_MONITOR
2041
2042 #if DEVELOPMENT || DEBUG
2043 PE_parse_boot_argn("-unsafe_kernel_text", &pmap_ppl_disable, sizeof(pmap_ppl_disable));
2044 #endif
2045
2046 #if CONFIG_CSR_FROM_DT
2047 if (csr_unsafe_kernel_text) {
2048 pmap_ppl_disable = true;
2049 }
2050 #endif /* CONFIG_CSR_FROM_DT */
2051
2052 #endif /* XNU_MONITOR */
2053
2054 #if DEVELOPMENT || DEBUG
2055 if (PE_parse_boot_argn("pmap_trace", &pmap_trace_mask, sizeof(pmap_trace_mask))) {
2056 kprintf("Kernel traces for pmap operations enabled\n");
2057 }
2058 #endif
2059
2060 /*
2061 * Initialize the kernel pmap.
2062 */
2063 pmap_stamp = 1;
2064 #if ARM_PARAMETERIZED_PMAP
2065 kernel_pmap->pmap_pt_attr = native_pt_attr;
2066 #endif /* ARM_PARAMETERIZED_PMAP */
2067 #if HAS_APPLE_PAC
2068 kernel_pmap->disable_jop = 0;
2069 #endif /* HAS_APPLE_PAC */
2070 kernel_pmap->tte = cpu_tte;
2071 kernel_pmap->ttep = cpu_ttep;
2072 #if (__ARM_VMSA__ > 7)
2073 kernel_pmap->min = UINT64_MAX - (1ULL << (64 - T1SZ_BOOT)) + 1;
2074 #else
2075 kernel_pmap->min = VM_MIN_KERNEL_AND_KEXT_ADDRESS;
2076 #endif
2077 kernel_pmap->max = UINTPTR_MAX;
2078 os_atomic_init(&kernel_pmap->ref_count, 1);
2079 #if XNU_MONITOR
2080 os_atomic_init(&kernel_pmap->nested_count, 0);
2081 #endif
2082 kernel_pmap->gc_status = 0;
2083 kernel_pmap->nx_enabled = TRUE;
2084 #ifdef __arm64__
2085 kernel_pmap->is_64bit = TRUE;
2086 #else
2087 kernel_pmap->is_64bit = FALSE;
2088 #endif
2089 kernel_pmap->stamp = os_atomic_inc(&pmap_stamp, relaxed);
2090
2095 kernel_pmap->nested_region_addr = 0x0ULL;
2096 kernel_pmap->nested_region_size = 0x0ULL;
2097 kernel_pmap->nested_region_asid_bitmap = NULL;
2098 kernel_pmap->nested_region_asid_bitmap_size = 0x0UL;
2099 kernel_pmap->type = PMAP_TYPE_KERNEL;
2100
2101 #if (__ARM_VMSA__ == 7)
2102 kernel_pmap->tte_index_max = 4 * (ARM_PGBYTES / sizeof(tt_entry_t));
2103 #endif
2104 kernel_pmap->hw_asid = 0;
2105 kernel_pmap->sw_asid = 0;
2106
2107 pmap_lock_init(kernel_pmap);
2108
2109 pmap_max_asids = pmap_compute_max_asids();
2110 pmap_asid_plru = (pmap_max_asids > MAX_HW_ASIDS);
2111 PE_parse_boot_argn("pmap_asid_plru", &pmap_asid_plru, sizeof(pmap_asid_plru));
2112 /* Align the range of available hardware ASIDs to a multiple of 64 to enable the
2113 * masking used by the PLRU scheme. This means we must handle the case in which
2114 * the returned hardware ASID is MAX_HW_ASIDS, which we do in alloc_asid() and free_asid(). */
2115 _Static_assert(sizeof(asid_plru_bitmap[0]) == sizeof(uint64_t), "bitmap_t is not a 64-bit integer");
2116 _Static_assert(((MAX_HW_ASIDS + 1) % 64) == 0, "MAX_HW_ASIDS + 1 is not divisible by 64");
2117 asid_chunk_size = (pmap_asid_plru ? (MAX_HW_ASIDS + 1) : MAX_HW_ASIDS);
2118
2119 const vm_size_t asid_table_size = sizeof(*asid_bitmap) * BITMAP_LEN(pmap_max_asids);
2120
2121 /**
2122 * Bootstrap the core pmap data structures (e.g., pv_head_table,
2123 * pp_attr_table, etc). This function will use `avail_start` to allocate
2124 * space for these data structures.
2125 */
2126 pmap_data_bootstrap();
2127
2128 /**
2129 * Don't make any assumptions about the alignment of avail_start before this
2130 * point (i.e., pmap_data_bootstrap() performs allocations).
2131 */
2132 avail_start = PMAP_ALIGN(avail_start, __alignof(bitmap_t));
2133
2134 const pmap_paddr_t pmap_struct_start = avail_start;
2135
2136 asid_bitmap = (bitmap_t*)phystokv(avail_start);
2137 avail_start = round_page(avail_start + asid_table_size);
2138
2139 memset((char *)phystokv(pmap_struct_start), 0, avail_start - pmap_struct_start);
2140
2141 vm_first_phys = gPhysBase;
2142 vm_last_phys = trunc_page(avail_end);
2143
2144 queue_init(&map_pmap_list);
2145 queue_enter(&map_pmap_list, kernel_pmap, pmap_t, pmaps);
2146 free_page_size_tt_list = TT_FREE_ENTRY_NULL;
2147 free_page_size_tt_count = 0;
2148 free_page_size_tt_max = 0;
2149 free_two_page_size_tt_list = TT_FREE_ENTRY_NULL;
2150 free_two_page_size_tt_count = 0;
2151 free_two_page_size_tt_max = 0;
2152 free_tt_list = TT_FREE_ENTRY_NULL;
2153 free_tt_count = 0;
2154 free_tt_max = 0;
2155
2156 virtual_space_start = vstart;
2157 virtual_space_end = VM_MAX_KERNEL_ADDRESS;
2158
2159 bitmap_full(&asid_bitmap[0], pmap_max_asids);
2160 bitmap_full(&asid_plru_bitmap[0], MAX_HW_ASIDS);
2161 // Clear the highest-order bit, which corresponds to MAX_HW_ASIDS + 1
2162 asid_plru_bitmap[MAX_HW_ASIDS >> 6] = ~(1ULL << 63);
2163
2164
2165
2166 if (PE_parse_boot_argn("arm_maxoffset", &maxoffset, sizeof(maxoffset))) {
2167 maxoffset = trunc_page(maxoffset);
2168 if ((maxoffset >= pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_MIN))
2169 && (maxoffset <= pmap_max_offset(FALSE, ARM_PMAP_MAX_OFFSET_MAX))) {
2170 arm_pmap_max_offset_default = maxoffset;
2171 }
2172 }
2173 #if defined(__arm64__)
2174 if (PE_parse_boot_argn("arm64_maxoffset", &maxoffset, sizeof(maxoffset))) {
2175 maxoffset = trunc_page(maxoffset);
2176 if ((maxoffset >= pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_MIN))
2177 && (maxoffset <= pmap_max_offset(TRUE, ARM_PMAP_MAX_OFFSET_MAX))) {
2178 arm64_pmap_max_offset_default = maxoffset;
2179 }
2180 }
2181 #endif
2182
2183 PE_parse_boot_argn("pmap_panic_dev_wimg_on_managed", &pmap_panic_dev_wimg_on_managed, sizeof(pmap_panic_dev_wimg_on_managed));
2184
2185
2186 #if MACH_ASSERT
2187 PE_parse_boot_argn("vm_footprint_suspend_allowed",
2188 &vm_footprint_suspend_allowed,
2189 sizeof(vm_footprint_suspend_allowed));
2190 #endif /* MACH_ASSERT */
2191
2192 #if KASAN
2193 /* Shadow the CPU copy windows, as they fall outside of the physical aperture */
2194 kasan_map_shadow(CPUWINDOWS_BASE, CPUWINDOWS_TOP - CPUWINDOWS_BASE, true);
2195 #endif /* KASAN */
2196
2197 /**
2198 * Ensure that avail_start is always left on a page boundary. The calling
2199 * code might not perform any alignment before allocating page tables so
2200 * this is important.
2201 */
2202 avail_start = round_page(avail_start);
2203 }
2204
2205 #if XNU_MONITOR
2206
2207 static inline void
2208 pa_set_range_monitor(pmap_paddr_t start_pa, pmap_paddr_t end_pa)
2209 {
2210 pmap_paddr_t cur_pa;
2211 for (cur_pa = start_pa; cur_pa < end_pa; cur_pa += ARM_PGBYTES) {
2212 assert(pa_valid(cur_pa));
2213 ppattr_pa_set_monitor(cur_pa);
2214 }
2215 }
2216
2217 void
2218 pa_set_range_xprr_perm(pmap_paddr_t start_pa,
2219 pmap_paddr_t end_pa,
2220 unsigned int expected_perm,
2221 unsigned int new_perm)
2222 {
2223 vm_offset_t start_va = phystokv(start_pa);
2224 vm_offset_t end_va = start_va + (end_pa - start_pa);
2225
2226 pa_set_range_monitor(start_pa, end_pa);
2227 pmap_set_range_xprr_perm(start_va, end_va, expected_perm, new_perm);
2228 }
2229
2230 static void
2231 pmap_lockdown_kc(void)
2232 {
2233 extern vm_offset_t vm_kernelcache_base;
2234 extern vm_offset_t vm_kernelcache_top;
2235 pmap_paddr_t start_pa = kvtophys_nofail(vm_kernelcache_base);
2236 pmap_paddr_t end_pa = start_pa + (vm_kernelcache_top - vm_kernelcache_base);
2237 pmap_paddr_t cur_pa = start_pa;
2238 vm_offset_t cur_va = vm_kernelcache_base;
2239 while (cur_pa < end_pa) {
2240 vm_size_t range_size = end_pa - cur_pa;
2241 vm_offset_t ptov_va = phystokv_range(cur_pa, &range_size);
2242 if (ptov_va != cur_va) {
2243 /*
2244 * If the physical address maps back to a virtual address that is non-linear
2245 * w.r.t. the kernelcache, that means it corresponds to memory that will be
2246 * reclaimed by the OS and should therefore not be locked down.
2247 */
2248 cur_pa += range_size;
2249 cur_va += range_size;
2250 continue;
2251 }
2252 unsigned int pai = pa_index(cur_pa);
2253 pv_entry_t **pv_h = pai_to_pvh(pai);
2254
2255 vm_offset_t pvh_flags = pvh_get_flags(pv_h);
2256
2257 if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN_MASK)) {
2258 panic("pai %d already locked down", pai);
2259 }
2260 pvh_set_flags(pv_h, pvh_flags | PVH_FLAG_LOCKDOWN_KC);
2261 cur_pa += ARM_PGBYTES;
2262 cur_va += ARM_PGBYTES;
2263 }
2264 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
2265 extern uint64_t ctrr_ro_test;
2266 extern uint64_t ctrr_nx_test;
2267 pmap_paddr_t exclude_pages[] = {kvtophys_nofail((vm_offset_t)&ctrr_ro_test), kvtophys_nofail((vm_offset_t)&ctrr_nx_test)};
2268 for (unsigned i = 0; i < (sizeof(exclude_pages) / sizeof(exclude_pages[0])); ++i) {
2269 pv_entry_t **pv_h = pai_to_pvh(pa_index(exclude_pages[i]));
2270 pvh_set_flags(pv_h, pvh_get_flags(pv_h) & ~PVH_FLAG_LOCKDOWN_KC);
2271 }
2272 #endif
2273 }
2274
2275 void
2276 pmap_static_allocations_done(void)
2277 {
2278 pmap_paddr_t monitor_start_pa;
2279 pmap_paddr_t monitor_end_pa;
2280
2281 /*
2282 * Protect the bootstrap (V=P and V->P) page tables.
2283 *
2284 * These bootstrap allocations will be used primarily for page tables.
2285 * If we wish to secure the page tables, we need to start by marking
2286 * these bootstrap allocations as pages that we want to protect.
2287 */
2288 monitor_start_pa = kvtophys_nofail((vm_offset_t)&bootstrap_pagetables);
2289 monitor_end_pa = monitor_start_pa + BOOTSTRAP_TABLE_SIZE;
2290
2291 /* The bootstrap page tables are mapped RW at bootstrap. */
2292 pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_KERN_RO_PERM);
2293
2294 /*
2295 * We use avail_start as a pointer to the first address that has not
2296 * been reserved for bootstrap, so we know which pages to give to the
2297 * virtual memory layer.
2298 */
2299 monitor_start_pa = BootArgs->topOfKernelData;
2300 monitor_end_pa = avail_start;
2301
2302 /* The other bootstrap allocations are mapped RW at bootstrap. */
2303 pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM);
2304
2305 /*
2306 * The RO page tables are mapped RW in arm_vm_init() and later restricted
2307 * to RO in arm_vm_prot_finalize(), which is called after this function.
2308 * Here we only need to mark the underlying physical pages as PPL-owned to ensure
2309 * they can't be allocated for other uses. We don't need a special xPRR
2310 * protection index, as there is no PPL_RO index, and these pages are ultimately
2311 * protected by KTRR/CTRR. Furthermore, use of PPL_RW for these pages would
2312 * expose us to a functional issue on H11 devices where CTRR shifts the APRR
2313 * lookup table index to USER_XO before APRR is applied, leading the hardware
2314 * to believe we are dealing with a user XO page upon performing a translation.
2315 */
2316 monitor_start_pa = kvtophys_nofail((vm_offset_t)&ropagetable_begin);
2317 monitor_end_pa = monitor_start_pa + ((vm_offset_t)&ropagetable_end - (vm_offset_t)&ropagetable_begin);
2318 pa_set_range_monitor(monitor_start_pa, monitor_end_pa);
2319
2320 monitor_start_pa = kvtophys_nofail(segPPLDATAB);
2321 monitor_end_pa = monitor_start_pa + segSizePPLDATA;
2322
2323 /* PPL data is RW for the PPL, RO for the kernel. */
2324 pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RW_PERM, XPRR_PPL_RW_PERM);
2325
2326 monitor_start_pa = kvtophys_nofail(segPPLTEXTB);
2327 monitor_end_pa = monitor_start_pa + segSizePPLTEXT;
2328
2329 /* PPL text is RX for the PPL, RO for the kernel. */
2330 pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RX_PERM, XPRR_PPL_RX_PERM);
2331
2332
2333 /*
2334 * In order to support DTrace, the save areas for the PPL must be
2335 * writable. This is due to the fact that DTrace will try to update
2336 * register state.
2337 */
2338 if (pmap_ppl_disable) {
2339 vm_offset_t monitor_start_va = phystokv(ppl_cpu_save_area_start);
2340 vm_offset_t monitor_end_va = monitor_start_va + (ppl_cpu_save_area_end - ppl_cpu_save_area_start);
2341
2342 pmap_set_range_xprr_perm(monitor_start_va, monitor_end_va, XPRR_PPL_RW_PERM, XPRR_KERN_RW_PERM);
2343 }
2344
2345
2346 if (segSizePPLDATACONST > 0) {
2347 monitor_start_pa = kvtophys_nofail(segPPLDATACONSTB);
2348 monitor_end_pa = monitor_start_pa + segSizePPLDATACONST;
2349
2350 pa_set_range_xprr_perm(monitor_start_pa, monitor_end_pa, XPRR_KERN_RO_PERM, XPRR_KERN_RO_PERM);
2351 }
2352
2353 /*
2354 * Mark the original physical aperture mapping for the PPL stack pages RO as an additional security
2355 * precaution. The real RW mappings are at a different location with guard pages.
2356 */
2357 pa_set_range_xprr_perm(pmap_stacks_start_pa, pmap_stacks_end_pa, XPRR_PPL_RW_PERM, XPRR_KERN_RO_PERM);
2358
2359 /* Prevent remapping of the kernelcache */
2360 pmap_lockdown_kc();
2361 }
2362
2363 void
2364 pmap_lockdown_ppl(void)
2365 {
2366 /* Mark the PPL as being locked down. */
2367
2368 #error "XPRR configuration error"
2369 }
2370 #endif /* XNU_MONITOR */
2371
2372 void
2373 pmap_virtual_space(
2374 vm_offset_t *startp,
2375 vm_offset_t *endp
2376 )
2377 {
2378 *startp = virtual_space_start;
2379 *endp = virtual_space_end;
2380 }
2381
2382
2383 boolean_t
2384 pmap_virtual_region(
2385 unsigned int region_select,
2386 vm_map_offset_t *startp,
2387 vm_map_size_t *size
2388 )
2389 {
2390 boolean_t ret = FALSE;
2391 #if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
2392 if (region_select == 0) {
2393 /*
2394 * In this config, the bootstrap mappings should occupy their own L2
2395 * TTs, as they should be immutable after boot. Having the associated
2396 * TTEs and PTEs in their own pages allows us to lock down those pages,
2397 * while allowing the rest of the kernel address range to be remapped.
2398 */
2399 #if (__ARM_VMSA__ > 7)
2400 *startp = LOW_GLOBAL_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK;
2401 #else
2402 #error Unsupported configuration
2403 #endif
2404 #if defined(ARM_LARGE_MEMORY)
2405 *size = ((KERNEL_PMAP_HEAP_RANGE_START - *startp) & ~PAGE_MASK);
2406 #else
2407 *size = ((VM_MAX_KERNEL_ADDRESS - *startp) & ~PAGE_MASK);
2408 #endif
2409 ret = TRUE;
2410 }
2411
2412 #if defined(ARM_LARGE_MEMORY)
2413 if (region_select == 1) {
2414 *startp = VREGION1_START;
2415 *size = VREGION1_SIZE;
2416 ret = TRUE;
2417 }
2418 #endif
2419 #else /* !(defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)) */
2420 #if defined(ARM_LARGE_MEMORY)
2421 /* For large memory systems with no KTRR/CTRR such as virtual machines */
2422 #if (__ARM_VMSA__ > 7)
2423 *startp = LOW_GLOBAL_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK;
2424 #else
2425 #error Unsupported configuration
2426 #endif
2427 if (region_select == 0) {
2428 *size = ((KERNEL_PMAP_HEAP_RANGE_START - *startp) & ~PAGE_MASK);
2429 ret = TRUE;
2430 }
2431 #else /* !defined(ARM_LARGE_MEMORY) */
2432 #if (__ARM_VMSA__ > 7)
2433 unsigned long low_global_vr_mask = 0;
2434 vm_map_size_t low_global_vr_size = 0;
2435 #endif
2436
2437 if (region_select == 0) {
2438 #if (__ARM_VMSA__ == 7)
2439 *startp = gVirtBase & 0xFFC00000;
2440 *size = ((virtual_space_start - (gVirtBase & 0xFFC00000)) + ~0xFFC00000) & 0xFFC00000;
2441 #else
2442 /* Round to avoid overlapping with the V=P area; round to at least the L2 block size. */
2443 if (!TEST_PAGE_SIZE_4K) {
2444 *startp = gVirtBase & 0xFFFFFFFFFE000000;
2445 *size = ((virtual_space_start - (gVirtBase & 0xFFFFFFFFFE000000)) + ~0xFFFFFFFFFE000000) & 0xFFFFFFFFFE000000;
2446 } else {
2447 *startp = gVirtBase & 0xFFFFFFFFFF800000;
2448 *size = ((virtual_space_start - (gVirtBase & 0xFFFFFFFFFF800000)) + ~0xFFFFFFFFFF800000) & 0xFFFFFFFFFF800000;
2449 }
2450 #endif
2451 ret = TRUE;
2452 }
2453 if (region_select == 1) {
2454 *startp = VREGION1_START;
2455 *size = VREGION1_SIZE;
2456 ret = TRUE;
2457 }
2458 #if (__ARM_VMSA__ > 7)
2459 /* We need to reserve a range that is at least the size of an L2 block mapping for the low globals */
2460 if (!TEST_PAGE_SIZE_4K) {
2461 low_global_vr_mask = 0xFFFFFFFFFE000000;
2462 low_global_vr_size = 0x2000000;
2463 } else {
2464 low_global_vr_mask = 0xFFFFFFFFFF800000;
2465 low_global_vr_size = 0x800000;
2466 }
2467
2468 if (((gVirtBase & low_global_vr_mask) != LOW_GLOBAL_BASE_ADDRESS) && (region_select == 2)) {
2469 *startp = LOW_GLOBAL_BASE_ADDRESS;
2470 *size = low_global_vr_size;
2471 ret = TRUE;
2472 }
2473
2474 if (region_select == 3) {
2475 /* In this config, we allow the bootstrap mappings to occupy the same
2476 * page table pages as the heap.
2477 */
2478 *startp = VM_MIN_KERNEL_ADDRESS;
2479 *size = LOW_GLOBAL_BASE_ADDRESS - *startp;
2480 ret = TRUE;
2481 }
2482 #endif
2483 #endif /* defined(ARM_LARGE_MEMORY) */
2484 #endif /* defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) */
2485 return ret;
2486 }
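
/*
 * Usage sketch (illustrative): callers seed the VM with every region
 * reported TRUE. A FALSE return for one selector does not imply FALSE for
 * higher selectors (region 2 above depends on gVirtBase), so a fixed range
 * of selectors is probed:
 *
 *   vm_map_offset_t start;
 *   vm_map_size_t size;
 *   for (unsigned int sel = 0; sel < 4; sel++) {
 *       if (pmap_virtual_region(sel, &start, &size)) {
 *           // hand [start, start + size) to the VM
 *       }
 *   }
 */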
2487
2488 /*
2489 * Routines to track and allocate physical pages during early boot.
2490 * On most systems, that memory runs from first_avail through to avail_end
2491 * with no gaps.
2492 *
2493 * However if the system supports ECC and bad_ram_pages_count > 0, we
2494 * need to be careful and skip those pages.
2495 */
2496 static unsigned int avail_page_count = 0;
2497 static bool need_ram_ranges_init = true;
2498
2499 #if defined(__arm64__)
2500 pmap_paddr_t *bad_ram_pages = NULL;
2501 unsigned int bad_ram_pages_count = 0;
2502
2503 /*
2504 * We use this sub-range of bad_ram_pages for pmap_next_page()
2505 */
2506 static pmap_paddr_t *skip_pages;
2507 static unsigned int skip_pages_count = 0;
2508
2509 #define MAX_BAD_RAM_PAGE_COUNT 64
2510 static pmap_paddr_t bad_ram_pages_arr[MAX_BAD_RAM_PAGE_COUNT];
2511
2512 /*
2513 * XXX - temporary code to get the bad pages array from boot-args.
2514 * expects a comma separated list of offsets from the start
2515 * of physical memory to be considered bad.
2516 *
2517 * HERE JOE -- will eventually be replaced by data provided by iboot
2518 */
2519 static void
2520 parse_bad_ram_pages_boot_arg(void)
2521 {
2522 char buf[256] = {0};
2523 char *s = buf;
2524 char *end;
2525 int count = 0;
2526 pmap_paddr_t num;
2527 extern uint64_t strtouq(const char *, char **, int);
2528
2529 if (!PE_parse_boot_arg_str("bad_ram_pages", buf, sizeof(buf))) {
2530 goto done;
2531 }
2532
2533 while (*s && count < MAX_BAD_RAM_PAGE_COUNT) {
2534 num = (pmap_paddr_t)strtouq(s, &end, 0);
2535 if (num == 0) {
2536 break;
2537 }
2538 num &= ~PAGE_MASK;
2539
2540 bad_ram_pages_arr[count++] = gDramBase + num;
2541
2542 if (*end != ',') {
2543 break;
2544 }
2545
2546 s = end + 1;
2547 }
2548
2549 done:
2550 bad_ram_pages = bad_ram_pages_arr;
2551 bad_ram_pages_count = count;
2552 }
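
/*
 * Example boot-arg (hypothetical offsets): page offsets from the start of
 * DRAM, comma separated; strtouq() with base 0 accepts decimal or 0x hex,
 * and parsing stops at the first zero value:
 *
 *   bad_ram_pages=0x4000,0x1f8000,0x2000000
 */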
2553
2554 /*
2555 * Comparison routine for qsort of array of physical addresses.
2556 */
2557 static int
2558 pmap_paddr_cmp(void *a, void *b)
2559 {
2560 pmap_paddr_t *x = a;
2561 pmap_paddr_t *y = b;
2562 if (*x < *y) {
2563 return -1;
2564 }
2565 return *x > *y;
2566 }
2567 #endif /* defined(__arm64__) */
2568
2569 /*
2570 * Look up ppn in the sorted bad_ram_pages array.
2571 */
2572 bool
2573 pmap_is_bad_ram(__unused ppnum_t ppn)
2574 {
2575 #if defined(__arm64__)
2576 pmap_paddr_t pa = ptoa(ppn);
2577 int low = 0;
2578 int high = bad_ram_pages_count - 1;
2579 int mid;
2580
2581 while (low <= high) {
2582 mid = (low + high) / 2;
2583 if (bad_ram_pages[mid] < pa) {
2584 low = mid + 1;
2585 } else if (bad_ram_pages[mid] > pa) {
2586 high = mid - 1;
2587 } else {
2588 return true;
2589 }
2590 }
2591 #endif /* defined(__arm64__) */
2592 return false;
2593 }
2594
2595 /*
2596 * Initialize the count of available pages. If we have bad_ram_pages, then sort the list of them.
2597 * No lock needed here, as this code runs while kernel boot-up is still single-threaded.
2598 */
2599 static void
2600 initialize_ram_ranges(void)
2601 {
2602 pmap_paddr_t first = first_avail;
2603 pmap_paddr_t end = avail_end;
2604
2605 assert(first <= end);
2606 assert(first == (first & ~PAGE_MASK));
2607 assert(end == (end & ~PAGE_MASK));
2608 avail_page_count = atop(end - first);
2609
2610 #if defined(__arm64__)
2611 /*
2612 * XXX Temporary code for testing, until there is iboot support
2613 *
2614 * Parse a list of known bad pages from a boot-args.
2615 */
2616 parse_bad_ram_pages_boot_arg();
2617
2618 /*
2619 * Sort and filter the bad pages list and adjust avail_page_count.
2620 */
2621 if (bad_ram_pages_count != 0) {
2622 qsort(bad_ram_pages, bad_ram_pages_count, sizeof(*bad_ram_pages), (cmpfunc_t)pmap_paddr_cmp);
2623 skip_pages = bad_ram_pages;
2624 skip_pages_count = bad_ram_pages_count;
2625
2626 /* ignore any pages before first */
2627 while (skip_pages_count > 0 && skip_pages[0] < first) {
2628 --skip_pages_count;
2629 ++skip_pages;
2630 }
2631
2632 /* ignore any pages at or after end */
2633 while (skip_pages_count > 0 && skip_pages[skip_pages_count - 1] >= end) {
2634 --skip_pages_count;
2635 }
2636
2637 avail_page_count -= skip_pages_count;
2638 }
2639 #endif /* defined(__arm64__) */
2640 need_ram_ranges_init = false;
2641 }
2642
2643 unsigned int
2644 pmap_free_pages(
2645 void)
2646 {
2647 if (need_ram_ranges_init) {
2648 initialize_ram_ranges();
2649 }
2650 return avail_page_count;
2651 }
2652
2653 unsigned int
2654 pmap_free_pages_span(
2655 void)
2656 {
2657 if (need_ram_ranges_init) {
2658 initialize_ram_ranges();
2659 }
2660 return (unsigned int)atop(avail_end - first_avail);
2661 }
2662
2663
2664 boolean_t
2665 pmap_next_page_hi(
2666 ppnum_t * pnum,
2667 __unused boolean_t might_free)
2668 {
2669 return pmap_next_page(pnum);
2670 }
2671
2672
2673 boolean_t
2674 pmap_next_page(
2675 ppnum_t *pnum)
2676 {
2677 if (need_ram_ranges_init) {
2678 initialize_ram_ranges();
2679 }
2680
2681 #if defined(__arm64__)
2682 /*
2683 * Skip over any known bad pages.
2684 */
2685 while (skip_pages_count > 0 && first_avail == skip_pages[0]) {
2686 first_avail += PAGE_SIZE;
2687 ++skip_pages;
2688 --skip_pages_count;
2689 }
2690 #endif /* defined(__arm64__) */
2691
2692 if (first_avail != avail_end) {
2693 *pnum = (ppnum_t)atop(first_avail);
2694 first_avail += PAGE_SIZE;
2695 assert(avail_page_count > 0);
2696 --avail_page_count;
2697 return TRUE;
2698 }
2699 assert(avail_page_count == 0);
2700 return FALSE;
2701 }
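
/*
 * Usage sketch (illustrative): early page stealing drains this iterator
 * until it returns FALSE, with known-bad pages skipped transparently:
 *
 *   ppnum_t pn;
 *   while (pmap_next_page(&pn)) {
 *       // add page pn to the VM free list
 *   }
 */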
2702
2703 void
2704 pmap_retire_page(
2705 __unused ppnum_t pnum)
2706 {
2707 /* XXX Justin TBD - mark the page as unusable in pmap data structures */
2708 }
2709
2710
2711 /*
2712 * Initialize the pmap module.
2713 * Called by vm_init, to initialize any structures that the pmap
2714 * system needs to map virtual memory.
2715 */
2716 void
2717 pmap_init(
2718 void)
2719 {
2720 /*
2721 * Protect page zero in the kernel map.
2722 * (can be overruled by permanent translation
2723 * table entries at page zero - see arm_vm_init).
2724 */
2725 vm_protect(kernel_map, 0, PAGE_SIZE, TRUE, VM_PROT_NONE);
2726
2727 pmap_initialized = TRUE;
2728
2729 /*
2730 * Create the zone of physical maps
2731 * and the physical-to-virtual entries.
2732 */
2733 pmap_zone = zone_create_ext("pmap", sizeof(struct pmap),
2734 ZC_ZFREE_CLEARMEM, ZONE_ID_PMAP, NULL);
2735
2736
2737 /*
2738 * Initialize the pmap object (for tracking the vm_page_t
2739 * structures for pages we allocate to be page tables in
2740 * pmap_expand()).
2741 */
2742 _vm_object_allocate(mem_size, pmap_object);
2743 pmap_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
2744
2745 /*
2746 * The values of [hard_]maxproc may have been scaled, make sure
2747 * they are still less than the value of pmap_max_asids.
2748 */
2749 if ((uint32_t)maxproc > pmap_max_asids) {
2750 maxproc = pmap_max_asids;
2751 }
2752 if ((uint32_t)hard_maxproc > pmap_max_asids) {
2753 hard_maxproc = pmap_max_asids;
2754 }
2755 }
2756
2757 /**
2758 * Verify that a given physical page contains no mappings (outside of the
2759 * default physical aperture mapping).
2760 *
2761 * @param ppnum Physical page number to check there are no mappings to.
2762 *
2763 * @return True if there are no mappings, false otherwise or if the page is not
2764 * kernel-managed.
2765 */
2766 bool
2767 pmap_verify_free(ppnum_t ppnum)
2768 {
2769 const pmap_paddr_t pa = ptoa(ppnum);
2770
2771 assert(pa != vm_page_fictitious_addr);
2772
2773 /* Only mappings to kernel-managed physical memory are tracked. */
2774 if (!pa_valid(pa)) {
2775 return false;
2776 }
2777
2778 const unsigned int pai = pa_index(pa);
2779 pv_entry_t **pvh = pai_to_pvh(pai);
2780
2781 return pvh_test_type(pvh, PVH_TYPE_NULL);
2782 }
2783
2784 #if MACH_ASSERT
2785 /**
2786 * Verify that a given physical page contains no mappings (outside of the
2787 * default physical aperture mapping) and if it does, then panic.
2788 *
2789 * @note It's recommended to use pmap_verify_free() directly when operating in
2790 * the PPL since the PVH lock isn't getting grabbed here (due to this code
2791 * normally being called from outside of the PPL, and the pv_head_table
2792 * can't be modified outside of the PPL).
2793 *
2794 * @param ppnum Physical page number to check there are no mappings to.
2795 */
2796 void
2797 pmap_assert_free(ppnum_t ppnum)
2798 {
2799 const pmap_paddr_t pa = ptoa(ppnum);
2800
2801 /* Only mappings to kernel-managed physical memory are tracked. */
2802 if (__probable(!pa_valid(pa) || pmap_verify_free(ppnum))) {
2803 return;
2804 }
2805
2806 const unsigned int pai = pa_index(pa);
2807 pv_entry_t **pvh = pai_to_pvh(pai);
2808
2809 /**
2810 * This function is always called from outside of the PPL. Because of this,
2811 * the PVH entry can't be locked. This function is generally only called
2812 * before the VM reclaims a physical page and shouldn't be creating new
2813 * mappings. Even if a new mapping is created while parsing the hierarchy,
2814 * the worst case is that the system will panic in another way, and we were
2815 * already about to panic anyway.
2816 */
2817
2818 /**
2819 * Since pmap_verify_free() returned false, that means there is at least one
2820 * mapping left. Let's get some extra info on the first mapping we find to
2821 * dump in the panic string (the common case is that there is one spare
2822 * mapping that was never unmapped).
2823 */
2824 pt_entry_t *first_ptep = PT_ENTRY_NULL;
2825
2826 if (pvh_test_type(pvh, PVH_TYPE_PTEP)) {
2827 first_ptep = pvh_ptep(pvh);
2828 } else if (pvh_test_type(pvh, PVH_TYPE_PVEP)) {
2829 pv_entry_t *pvep = pvh_pve_list(pvh);
2830
2831 /* Each PVE can contain multiple PTEs. Let's find the first one. */
2832 for (int pve_ptep_idx = 0; pve_ptep_idx < PTE_PER_PVE; pve_ptep_idx++) {
2833 first_ptep = pve_get_ptep(pvep, pve_ptep_idx);
2834 if (first_ptep != PT_ENTRY_NULL) {
2835 break;
2836 }
2837 }
2838
2839 /* The PVE should have at least one valid PTE. */
2840 assert(first_ptep != PT_ENTRY_NULL);
2841 } else if (pvh_test_type(pvh, PVH_TYPE_PTDP)) {
2842 panic("%s: Physical page is being used as a page table at PVH %p (pai: %d)",
2843 __func__, pvh, pai);
2844 } else {
2845 /**
2846 * The mapping disappeared between here and the pmap_verify_free() call.
2847 * The only way that can happen is if the VM was racing this call with
2848 * a call that unmaps PTEs. Operations on this page should not be
2849 * occurring at the same time as this check, and unfortunately we can't
2850 * lock the PVH entry to prevent it, so just panic instead.
2851 */
2852 panic("%s: Mapping was detected but is now gone. Is the VM racing this "
2853 "call with an operation that unmaps PTEs? PVH %p (pai: %d)",
2854 __func__, pvh, pai);
2855 }
2856
2857 /* Panic with a unique string identifying the first bad mapping and owner. */
2858 {
2859 /* First PTE is mapped by the main CPUs. */
2860 pmap_t pmap = ptep_get_pmap(first_ptep);
2861 const char *type = (pmap == kernel_pmap) ? "Kernel" : "User";
2862
2863 panic("%s: Found at least one mapping to %#llx. First PTEP (%p) is a "
2864 "%s CPU mapping (pmap: %p)",
2865 __func__, (uint64_t)pa, first_ptep, type, pmap);
2866 }
2867 }
2868 #endif
2869
2870
2871 static vm_size_t
2872 pmap_root_alloc_size(pmap_t pmap)
2873 {
2874 #if (__ARM_VMSA__ > 7)
2875 #pragma unused(pmap)
2876 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
2877 unsigned int root_level = pt_attr_root_level(pt_attr);
2878 return ((pt_attr_ln_index_mask(pt_attr, root_level) >> pt_attr_ln_shift(pt_attr, root_level)) + 1) * sizeof(tt_entry_t);
2879 #else
2880 (void)pmap;
2881 return PMAP_ROOT_ALLOC_SIZE;
2882 #endif
2883 }
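
/*
 * Worked example (illustrative): for a hypothetical geometry whose root
 * level resolves 11 bits of VA, the root table needs
 * ((index_mask >> shift) + 1) == 2048 entries, i.e. 2048 *
 * sizeof(tt_entry_t) = 16KB. On __ARM64_PMAP_SUBPAGE_L1__ configurations
 * the result can be smaller than a page, which is why pmap_tt1_allocate()
 * supports sub-page allocations.
 */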
2884
2885 /*
2886 * Create and return a physical map.
2887 *
2888 * If the size specified for the map
2889 * is zero, the map is an actual physical
2890 * map, and may be referenced by the
2891 * hardware.
2892 *
2893 * If the size specified is non-zero,
2894 * the map will be used in software only, and
2895 * is bounded by that size.
2896 */
2897 MARK_AS_PMAP_TEXT pmap_t
2898 pmap_create_options_internal(
2899 ledger_t ledger,
2900 vm_map_size_t size,
2901 unsigned int flags,
2902 kern_return_t *kr)
2903 {
2904 unsigned i;
2905 unsigned tte_index_max;
2906 pmap_t p;
2907 bool is_64bit = flags & PMAP_CREATE_64BIT;
2908 #if defined(HAS_APPLE_PAC)
2909 bool disable_jop = flags & PMAP_CREATE_DISABLE_JOP;
2910 #endif /* defined(HAS_APPLE_PAC) */
2911 kern_return_t local_kr = KERN_SUCCESS;
2912
2913 /*
2914 * A software use-only map doesn't even need a pmap.
2915 */
2916 if (size != 0) {
2917 return PMAP_NULL;
2918 }
2919
2920 if (0 != (flags & ~PMAP_CREATE_KNOWN_FLAGS)) {
2921 return PMAP_NULL;
2922 }
2923
2924 #if XNU_MONITOR
2925 if ((local_kr = pmap_alloc_pmap(&p)) != KERN_SUCCESS) {
2926 goto pmap_create_fail;
2927 }
2928
2929 assert(p != PMAP_NULL);
2930
2931 if (ledger) {
2932 pmap_ledger_validate(ledger);
2933 pmap_ledger_retain(ledger);
2934 }
2935 #else
2936 /*
2937 * Allocate a pmap struct from the pmap_zone. Then allocate
2938 * the translation table of the right size for the pmap.
2939 */
2940 if ((p = (pmap_t) zalloc(pmap_zone)) == PMAP_NULL) {
2941 local_kr = KERN_RESOURCE_SHORTAGE;
2942 goto pmap_create_fail;
2943 }
2944 #endif
2945
2946 p->ledger = ledger;
2947
2948
2949 p->pmap_vm_map_cs_enforced = false;
2950
2951 p->min = 0;
2955
2956 #if defined(HAS_APPLE_PAC)
2957 p->disable_jop = disable_jop;
2958 #endif /* defined(HAS_APPLE_PAC) */
2959
2960 p->nested_region_true_start = 0;
2961 p->nested_region_true_end = ~0;
2962
2963 p->gc_status = 0;
2964 p->stamp = os_atomic_inc(&pmap_stamp, relaxed);
2965 p->nx_enabled = true;
2966 p->is_64bit = is_64bit;
2967 p->nested_pmap = PMAP_NULL;
2968 p->type = PMAP_TYPE_USER;
2969
2970 #if ARM_PARAMETERIZED_PMAP
2971 /* Default to the native pt_attr */
2972 p->pmap_pt_attr = native_pt_attr;
2973 #endif /* ARM_PARAMETERIZED_PMAP */
2974 #if __ARM_MIXED_PAGE_SIZE__
2975 if (flags & PMAP_CREATE_FORCE_4K_PAGES) {
2976 p->pmap_pt_attr = &pmap_pt_attr_4k;
2977 }
2978 #endif /* __ARM_MIXED_PAGE_SIZE__ */
2979 p->max = pmap_user_va_size(p);
2980
2981 if (!pmap_get_pt_ops(p)->alloc_id(p)) {
2982 local_kr = KERN_NO_SPACE;
2983 goto id_alloc_fail;
2984 }
2985
2986 pmap_lock_init(p);
2987
2988 p->tt_entry_free = (tt_entry_t *)0;
2989 tte_index_max = ((unsigned)pmap_root_alloc_size(p) / sizeof(tt_entry_t));
2990
2991 #if (__ARM_VMSA__ == 7)
2992 p->tte_index_max = tte_index_max;
2993 #endif
2994
2995 #if XNU_MONITOR
2996 p->tte = pmap_tt1_allocate(p, pmap_root_alloc_size(p), PMAP_TT_ALLOCATE_NOWAIT);
2997 #else
2998 p->tte = pmap_tt1_allocate(p, pmap_root_alloc_size(p), 0);
2999 #endif
3000 if (!(p->tte)) {
3001 local_kr = KERN_RESOURCE_SHORTAGE;
3002 goto tt1_alloc_fail;
3003 }
3004
3005 p->ttep = ml_static_vtop((vm_offset_t)p->tte);
3006 PMAP_TRACE(4, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(p), VM_KERNEL_ADDRHIDE(p->min), VM_KERNEL_ADDRHIDE(p->max), p->ttep);
3007
3008 /* nullify the translation table */
3009 for (i = 0; i < tte_index_max; i++) {
3010 p->tte[i] = ARM_TTE_TYPE_FAULT;
3011 }
3012
3013 FLUSH_PTE();
3014
3015 /*
3016 * initialize the rest of the structure
3017 */
3018 p->nested_region_addr = 0x0ULL;
3019 p->nested_region_size = 0x0ULL;
3020 p->nested_region_asid_bitmap = NULL;
3021 p->nested_region_asid_bitmap_size = 0x0UL;
3022
3023 p->nested_has_no_bounds_ref = false;
3024 p->nested_no_bounds_refcnt = 0;
3025 p->nested_bounds_set = false;
3026
3027
3028 #if MACH_ASSERT
3029 p->pmap_stats_assert = TRUE;
3030 p->pmap_pid = 0;
3031 strlcpy(p->pmap_procname, "<nil>", sizeof(p->pmap_procname));
3032 #endif /* MACH_ASSERT */
3033 #if DEVELOPMENT || DEBUG
3034 p->footprint_was_suspended = FALSE;
3035 #endif /* DEVELOPMENT || DEBUG */
3036
3037 #if XNU_MONITOR
3038 os_atomic_init(&p->nested_count, 0);
3039 assert(os_atomic_load(&p->ref_count, relaxed) == 0);
3040 /* Ensure prior updates to the new pmap are visible before the non-zero ref_count is visible */
3041 os_atomic_thread_fence(release);
3042 #endif
3043 os_atomic_init(&p->ref_count, 1);
3044 pmap_simple_lock(&pmaps_lock);
3045 queue_enter(&map_pmap_list, p, pmap_t, pmaps);
3046 pmap_simple_unlock(&pmaps_lock);
3047
3048 return p;
3049
3050 tt1_alloc_fail:
3051 pmap_get_pt_ops(p)->free_id(p);
3052 id_alloc_fail:
3053 #if XNU_MONITOR
3054 pmap_free_pmap(p);
3055
3056 if (ledger) {
3057 pmap_ledger_release(ledger);
3058 }
3059 #else
3060 zfree(pmap_zone, p);
3061 #endif
3062 pmap_create_fail:
3063 #if XNU_MONITOR
3064 pmap_pin_kernel_pages((vm_offset_t)kr, sizeof(*kr));
3065 #endif
3066 *kr = local_kr;
3067 #if XNU_MONITOR
3068 pmap_unpin_kernel_pages((vm_offset_t)kr, sizeof(*kr));
3069 #endif
3070 return PMAP_NULL;
3071 }
3072
3073 pmap_t
3074 pmap_create_options(
3075 ledger_t ledger,
3076 vm_map_size_t size,
3077 unsigned int flags)
3078 {
3079 pmap_t pmap;
3080 kern_return_t kr = KERN_SUCCESS;
3081
3082 PMAP_TRACE(1, PMAP_CODE(PMAP__CREATE) | DBG_FUNC_START, size, flags);
3083
3084 ledger_reference(ledger);
3085
3086 #if XNU_MONITOR
3087 for (;;) {
3088 pmap = pmap_create_options_ppl(ledger, size, flags, &kr);
3089 if (kr != KERN_RESOURCE_SHORTAGE) {
3090 break;
3091 }
3092 assert(pmap == PMAP_NULL);
3093 pmap_alloc_page_for_ppl(0);
3094 kr = KERN_SUCCESS;
3095 }
3096 #else
3097 pmap = pmap_create_options_internal(ledger, size, flags, &kr);
3098 #endif
3099
3100 if (pmap == PMAP_NULL) {
3101 ledger_dereference(ledger);
3102 }
3103
3104 PMAP_TRACE(1, PMAP_CODE(PMAP__CREATE) | DBG_FUNC_END, VM_KERNEL_ADDRHIDE(pmap), PMAP_VASID(pmap), pmap->hw_asid);
3105
3106 return pmap;
3107 }
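
/*
 * Usage sketch (illustrative): create a 64-bit user pmap against a task
 * ledger and release it when the address space is torn down:
 *
 *   pmap_t p = pmap_create_options(ledger, 0, PMAP_CREATE_64BIT);
 *   if (p == PMAP_NULL) {
 *       // no ASID or memory was available
 *   }
 *   ...
 *   pmap_destroy(p);  // drops the reference taken at creation
 */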
3108
3109 #if XNU_MONITOR
3110 /*
3111 * This symbol remains in place when the PPL is enabled so that the dispatch
3112 * table does not change from development to release configurations.
3113 */
3114 #endif
3115 #if MACH_ASSERT || XNU_MONITOR
3116 MARK_AS_PMAP_TEXT void
3117 pmap_set_process_internal(
3118 __unused pmap_t pmap,
3119 __unused int pid,
3120 __unused char *procname)
3121 {
3122 #if MACH_ASSERT
3123 if (pmap == NULL) {
3124 return;
3125 }
3126
3127 validate_pmap_mutable(pmap);
3128
3129 pmap->pmap_pid = pid;
3130 strlcpy(pmap->pmap_procname, procname, sizeof(pmap->pmap_procname));
3131 if (pmap_ledgers_panic_leeway) {
3132 /*
3133 * XXX FBDP
3134 * Some processes somehow trigger some issues that make
3135 * the pmap stats and ledgers go off track, causing
3136 * some assertion failures and ledger panics.
3137 * Turn off the sanity checks if we allow some ledger leeway
3138 * because of that. We'll still do a final check in
3139 * pmap_check_ledgers() for discrepancies larger than the
3140 * allowed leeway after the address space has been fully
3141 * cleaned up.
3142 */
3143 pmap->pmap_stats_assert = FALSE;
3144 ledger_disable_panic_on_negative(pmap->ledger,
3145 task_ledgers.phys_footprint);
3146 ledger_disable_panic_on_negative(pmap->ledger,
3147 task_ledgers.internal);
3148 ledger_disable_panic_on_negative(pmap->ledger,
3149 task_ledgers.internal_compressed);
3150 ledger_disable_panic_on_negative(pmap->ledger,
3151 task_ledgers.iokit_mapped);
3152 ledger_disable_panic_on_negative(pmap->ledger,
3153 task_ledgers.alternate_accounting);
3154 ledger_disable_panic_on_negative(pmap->ledger,
3155 task_ledgers.alternate_accounting_compressed);
3156 }
3157 #endif /* MACH_ASSERT */
3158 }
3159 #endif /* MACH_ASSERT || XNU_MONITOR */
3160
3161 #if MACH_ASSERT
3162 void
3163 pmap_set_process(
3164 pmap_t pmap,
3165 int pid,
3166 char *procname)
3167 {
3168 #if XNU_MONITOR
3169 pmap_set_process_ppl(pmap, pid, procname);
3170 #else
3171 pmap_set_process_internal(pmap, pid, procname);
3172 #endif
3173 }
3174 #endif /* MACH_ASSERT */
3175
3176 #if (__ARM_VMSA__ > 7)
3177 /*
3178 * pmap_deallocate_all_leaf_tts:
3179 *
3180 * Recursive function for deallocating all leaf TTEs. Walks the given TT,
3181 * removing and deallocating all TTEs.
3182 */
3183 MARK_AS_PMAP_TEXT static void
3184 pmap_deallocate_all_leaf_tts(pmap_t pmap, tt_entry_t * first_ttep, unsigned level)
3185 {
3186 tt_entry_t tte = ARM_TTE_EMPTY;
3187 tt_entry_t * ttep = NULL;
3188 tt_entry_t * last_ttep = NULL;
3189
3190 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
3191
3192 assert(level < pt_attr_leaf_level(pt_attr));
3193
3194 last_ttep = &first_ttep[ttn_index(pt_attr, ~0, level)];
3195
3196 for (ttep = first_ttep; ttep <= last_ttep; ttep++) {
3197 tte = *ttep;
3198
3199 if (!(tte & ARM_TTE_VALID)) {
3200 continue;
3201 }
3202
3203 if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
3204 panic("%s: found block mapping, ttep=%p, tte=%p, "
3205 "pmap=%p, first_ttep=%p, level=%u",
3206 __FUNCTION__, ttep, (void *)tte,
3207 pmap, first_ttep, level);
3208 }
3209
3210 /* Must be valid, type table */
3211 if (level < pt_attr_twig_level(pt_attr)) {
3212 /* If we haven't reached the twig level, recurse to the next level. */
3213 pmap_deallocate_all_leaf_tts(pmap, (tt_entry_t *)phystokv((tte) & ARM_TTE_TABLE_MASK), level + 1);
3214 }
3215
3216 /* Remove the TTE. */
3217 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3218 pmap_tte_deallocate(pmap, 0, 0, false, ttep, level);
3219 }
3220 }
3221 #endif /* (__ARM_VMSA__ > 7) */
3222
3223 /*
3224 * We maintain stats and ledgers so that a task's physical footprint is:
3225 * phys_footprint = ((internal - alternate_accounting)
3226 * + (internal_compressed - alternate_accounting_compressed)
3227 * + iokit_mapped
3228 * + purgeable_nonvolatile
3229 * + purgeable_nonvolatile_compressed
3230 * + page_table)
3231 * where "alternate_accounting" includes "iokit" and "purgeable" memory.
3232 */
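
/*
 * Worked example (hypothetical page counts): with internal = 100,
 * alternate_accounting = 10, internal_compressed = 20,
 * alternate_accounting_compressed = 5, iokit_mapped = 8,
 * purgeable_nonvolatile = 4, purgeable_nonvolatile_compressed = 2 and
 * page_table = 6, the formula above gives
 * phys_footprint = (100 - 10) + (20 - 5) + 8 + 4 + 2 + 6 = 125 pages.
 */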
3233
3234 /*
3235 * Retire the given physical map from service.
3236 * Should only be called if the map contains
3237 * no valid mappings.
3238 */
3239 MARK_AS_PMAP_TEXT void
3240 pmap_destroy_internal(
3241 pmap_t pmap)
3242 {
3243 if (pmap == PMAP_NULL) {
3244 return;
3245 }
3246
3247 validate_pmap(pmap);
3248
3249 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
3250
3251 int32_t ref_count = os_atomic_dec(&pmap->ref_count, relaxed);
3252 if (ref_count > 0) {
3253 return;
3254 } else if (__improbable(ref_count < 0)) {
3255 panic("pmap %p: refcount underflow", pmap);
3256 } else if (__improbable(pmap == kernel_pmap)) {
3257 panic("pmap %p: attempt to destroy kernel pmap", pmap);
3258 } else if (__improbable(pmap->type == PMAP_TYPE_COMMPAGE)) {
3259 panic("pmap %p: attempt to destroy commpage pmap", pmap);
3260 }
3261
3262 #if XNU_MONITOR
3263 /*
3264 * Issue a store-load barrier to ensure the checks of nested_count and the per-CPU
3265 * pmaps below will not be speculated ahead of the decrement of ref_count above.
3266 * That ensures that if the pmap is currently in use elsewhere, this path will
3267 * either observe it in use and panic, or PMAP_VALIDATE_MUTABLE will observe a
3268 * ref_count of 0 and panic.
3269 */
3270 os_atomic_thread_fence(seq_cst);
3271 if (__improbable(os_atomic_load(&pmap->nested_count, relaxed) != 0)) {
3272 panic("pmap %p: attempt to destroy while nested", pmap);
3273 }
3274 const int max_cpu = ml_get_max_cpu_number();
3275 for (unsigned int i = 0; i <= max_cpu; ++i) {
3276 const pmap_cpu_data_t *cpu_data = pmap_get_remote_cpu_data(i);
3277 if (cpu_data == NULL) {
3278 continue;
3279 }
3280 if (__improbable(os_atomic_load(&cpu_data->inflight_pmap, relaxed) == pmap)) {
3281 panic("pmap %p: attempting to destroy while in-flight on cpu %llu", pmap, (uint64_t)i);
3282 } else if (__improbable(os_atomic_load(&cpu_data->active_pmap, relaxed) == pmap)) {
3283 panic("pmap %p: attempting to destroy while active on cpu %llu", pmap, (uint64_t)i);
3284 }
3285 }
3286 #endif
3287 #if (__ARM_VMSA__ > 7)
3288 pmap_unmap_sharedpage(pmap);
3289 #endif /* (__ARM_VMSA__ > 7) */
3290
3291 pmap_simple_lock(&pmaps_lock);
3292 #if !XNU_MONITOR
3293 while (pmap->gc_status & PMAP_GC_INFLIGHT) {
3294 pmap->gc_status |= PMAP_GC_WAIT;
3295 assert_wait((event_t) &pmap->gc_status, THREAD_UNINT);
3296 pmap_simple_unlock(&pmaps_lock);
3297 (void) thread_block(THREAD_CONTINUE_NULL);
3298 pmap_simple_lock(&pmaps_lock);
3299 }
3300 #endif /* !XNU_MONITOR */
3301 queue_remove(&map_pmap_list, pmap, pmap_t, pmaps);
3302 pmap_simple_unlock(&pmaps_lock);
3303
3304 pmap_trim_self(pmap);
3305
3306 /*
3307 * Free the memory maps, then the
3308 * pmap structure.
3309 */
3310 #if (__ARM_VMSA__ == 7)
3311 unsigned int i = 0;
3312 pt_entry_t *ttep;
3313
3314 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3315 for (i = 0; i < pmap->tte_index_max; i++) {
3316 ttep = &pmap->tte[i];
3317 if ((*ttep & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
3318 pmap_tte_deallocate(pmap, 0, 0, false, ttep, PMAP_TT_L1_LEVEL);
3319 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3320 }
3321 }
3322 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
3323 #else /* (__ARM_VMSA__ == 7) */
3324 pmap_deallocate_all_leaf_tts(pmap, pmap->tte, pt_attr_root_level(pt_attr));
3325 #endif /* (__ARM_VMSA__ == 7) */
3326
3327
3328
3329 if (pmap->tte) {
3330 #if (__ARM_VMSA__ == 7)
3331 pmap_tt1_deallocate(pmap, pmap->tte, pmap->tte_index_max * sizeof(tt_entry_t), 0);
3332 pmap->tte_index_max = 0;
3333 #else /* (__ARM_VMSA__ == 7) */
3334 pmap_tt1_deallocate(pmap, pmap->tte, pmap_root_alloc_size(pmap), 0);
3335 #endif /* (__ARM_VMSA__ == 7) */
3336 pmap->tte = (tt_entry_t *) NULL;
3337 pmap->ttep = 0;
3338 }
3339
3340 assert((tt_free_entry_t*)pmap->tt_entry_free == NULL);
3341
3342 if (__improbable(pmap->type == PMAP_TYPE_NESTED)) {
3343 pmap_get_pt_ops(pmap)->flush_tlb_region_async(pmap->nested_region_addr, pmap->nested_region_size, pmap, false);
3344 sync_tlb_flush();
3345 } else {
3346 pmap_get_pt_ops(pmap)->flush_tlb_async(pmap);
3347 sync_tlb_flush();
3348 /* return its asid to the pool */
3349 pmap_get_pt_ops(pmap)->free_id(pmap);
3350 if (pmap->nested_pmap != NULL) {
3351 #if XNU_MONITOR
3352 os_atomic_dec(&pmap->nested_pmap->nested_count, relaxed);
3353 #endif
3354 /* release the reference we hold on the nested pmap */
3355 pmap_destroy_internal(pmap->nested_pmap);
3356 }
3357 }
3358
3359 pmap_check_ledgers(pmap);
3360
3361 if (pmap->nested_region_asid_bitmap) {
3362 #if XNU_MONITOR
3363 pmap_pages_free(kvtophys_nofail((vm_offset_t)(pmap->nested_region_asid_bitmap)), PAGE_SIZE);
3364 #else
3365 kfree_data(pmap->nested_region_asid_bitmap,
3366 pmap->nested_region_asid_bitmap_size * sizeof(unsigned int));
3367 #endif
3368 }
3369
3370 #if XNU_MONITOR
3371 if (pmap->ledger) {
3372 pmap_ledger_release(pmap->ledger);
3373 }
3374
3375 pmap_lock_destroy(pmap);
3376 pmap_free_pmap(pmap);
3377 #else
3378 pmap_lock_destroy(pmap);
3379 zfree(pmap_zone, pmap);
3380 #endif
3381 }
3382
3383 void
3384 pmap_destroy(
3385 pmap_t pmap)
3386 {
3387 PMAP_TRACE(1, PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(pmap), PMAP_VASID(pmap), pmap->hw_asid);
3388
3389 ledger_t ledger = pmap->ledger;
3390
3391 #if XNU_MONITOR
3392 pmap_destroy_ppl(pmap);
3393
3394 pmap_ledger_check_balance(pmap);
3395 #else
3396 pmap_destroy_internal(pmap);
3397 #endif
3398
3399 ledger_dereference(ledger);
3400
3401 PMAP_TRACE(1, PMAP_CODE(PMAP__DESTROY) | DBG_FUNC_END);
3402 }
3403
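/*
 * Illustrative lifecycle sketch (hypothetical caller, not verbatim xnu code;
 * "ledger" is assumed to be a valid ledger_t):
 *
 *	pmap_t p = pmap_create_options(ledger, 0, 0);	// ref_count == 1
 *	pmap_reference(p);				// ref_count == 2
 *	pmap_destroy(p);				// drops to 1; pmap survives
 *	pmap_destroy(p);				// drops to 0; tables, ASID and ledger released
 */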
3404
3405 /*
3406 * Add a reference to the specified pmap.
3407 */
3408 MARK_AS_PMAP_TEXT void
3409 pmap_reference_internal(
3410 pmap_t pmap)
3411 {
3412 if (pmap != PMAP_NULL) {
3413 validate_pmap_mutable(pmap);
3414 os_atomic_inc(&pmap->ref_count, relaxed);
3415 }
3416 }
3417
3418 void
3419 pmap_reference(
3420 pmap_t pmap)
3421 {
3422 #if XNU_MONITOR
3423 pmap_reference_ppl(pmap);
3424 #else
3425 pmap_reference_internal(pmap);
3426 #endif
3427 }
3428
3429 static tt_entry_t *
3430 pmap_tt1_allocate(
3431 pmap_t pmap,
3432 vm_size_t size,
3433 unsigned option)
3434 {
3435 tt_entry_t *tt1 = NULL;
3436 tt_free_entry_t *tt1_free;
3437 pmap_paddr_t pa;
3438 vm_address_t va;
3439 vm_address_t va_end;
3440 kern_return_t ret;
3441
3442 if ((size < PAGE_SIZE) && (size != PMAP_ROOT_ALLOC_SIZE)) {
3443 size = PAGE_SIZE;
3444 }
3445
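/*
 * Fast path: try to satisfy the request from the global free lists of
 * previously released root tables, keyed by request size (sub-page,
 * one page, or two pages).
 */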
3446 pmap_simple_lock(&tt1_lock);
3447 if ((size == PAGE_SIZE) && (free_page_size_tt_count != 0)) {
3448 free_page_size_tt_count--;
3449 tt1 = (tt_entry_t *)free_page_size_tt_list;
3450 free_page_size_tt_list = ((tt_free_entry_t *)tt1)->next;
3451 } else if ((size == 2 * PAGE_SIZE) && (free_two_page_size_tt_count != 0)) {
3452 free_two_page_size_tt_count--;
3453 tt1 = (tt_entry_t *)free_two_page_size_tt_list;
3454 free_two_page_size_tt_list = ((tt_free_entry_t *)tt1)->next;
3455 } else if ((size < PAGE_SIZE) && (free_tt_count != 0)) {
3456 free_tt_count--;
3457 tt1 = (tt_entry_t *)free_tt_list;
3458 free_tt_list = (tt_free_entry_t *)((tt_free_entry_t *)tt1)->next;
3459 }
3460
3461 pmap_simple_unlock(&tt1_lock);
3462
3463 if (tt1 != NULL) {
3464 pmap_tt_ledger_credit(pmap, size);
3465 return (tt_entry_t *)tt1;
3466 }
3467
3468 ret = pmap_pages_alloc_zeroed(&pa, (unsigned)((size < PAGE_SIZE)? PAGE_SIZE : size), ((option & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0));
3469
3470 if (ret == KERN_RESOURCE_SHORTAGE) {
3471 return (tt_entry_t *)0;
3472 }
3473
3474 #if XNU_MONITOR
3475 assert(pa);
3476 #endif
3477
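/*
 * Sub-page root tables: the chunk at offset 0 of the new page is returned
 * to the caller below; the remaining (PAGE_SIZE / size) - 1 chunks are
 * chained together here and donated to the global free list for later
 * allocations.
 */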
3478 if (size < PAGE_SIZE) {
3479 va = phystokv(pa) + size;
3480 tt_free_entry_t *local_free_list = (tt_free_entry_t*)va;
3481 tt_free_entry_t *next_free = NULL;
3482 for (va_end = phystokv(pa) + PAGE_SIZE; va < va_end; va = va + size) {
3483 tt1_free = (tt_free_entry_t *)va;
3484 tt1_free->next = next_free;
3485 next_free = tt1_free;
3486 }
3487 pmap_simple_lock(&tt1_lock);
3488 local_free_list->next = free_tt_list;
3489 free_tt_list = next_free;
3490 free_tt_count += ((PAGE_SIZE / size) - 1);
3491 if (free_tt_count > free_tt_max) {
3492 free_tt_max = free_tt_count;
3493 }
3494 pmap_simple_unlock(&tt1_lock);
3495 }
3496
3497 /* Always report root allocations in units of PMAP_ROOT_ALLOC_SIZE, which can be obtained via the sysctl arm_pt_root_size.
3498 * Depending on the device, this can vary between 512 bytes and 16KB. */
3499 OSAddAtomic((uint32_t)(size / PMAP_ROOT_ALLOC_SIZE), (pmap == kernel_pmap ? &inuse_kernel_tteroot_count : &inuse_user_tteroot_count));
3500 OSAddAtomic64(size / PMAP_ROOT_ALLOC_SIZE, &alloc_tteroot_count);
3501 pmap_tt_ledger_credit(pmap, size);
3502
3503 return (tt_entry_t *) phystokv(pa);
3504 }
3505
3506 static void
3507 pmap_tt1_deallocate(
3508 pmap_t pmap,
3509 tt_entry_t *tt,
3510 vm_size_t size,
3511 unsigned option)
3512 {
3513 tt_free_entry_t *tt_entry;
3514
3515 if ((size < PAGE_SIZE) && (size != PMAP_ROOT_ALLOC_SIZE)) {
3516 size = PAGE_SIZE;
3517 }
3518
3519 tt_entry = (tt_free_entry_t *)tt;
3520 assert(not_in_kdp);
3521 pmap_simple_lock(&tt1_lock);
3522
3523 if (size < PAGE_SIZE) {
3524 free_tt_count++;
3525 if (free_tt_count > free_tt_max) {
3526 free_tt_max = free_tt_count;
3527 }
3528 tt_entry->next = free_tt_list;
3529 free_tt_list = tt_entry;
3530 }
3531
3532 if (size == PAGE_SIZE) {
3533 free_page_size_tt_count++;
3534 if (free_page_size_tt_count > free_page_size_tt_max) {
3535 free_page_size_tt_max = free_page_size_tt_count;
3536 }
3537 tt_entry->next = free_page_size_tt_list;
3538 free_page_size_tt_list = tt_entry;
3539 }
3540
3541 if (size == 2 * PAGE_SIZE) {
3542 free_two_page_size_tt_count++;
3543 if (free_two_page_size_tt_count > free_two_page_size_tt_max) {
3544 free_two_page_size_tt_max = free_two_page_size_tt_count;
3545 }
3546 tt_entry->next = free_two_page_size_tt_list;
3547 free_two_page_size_tt_list = tt_entry;
3548 }
3549
3550 if (option & PMAP_TT_DEALLOCATE_NOBLOCK) {
3551 pmap_simple_unlock(&tt1_lock);
3552 pmap_tt_ledger_debit(pmap, size);
3553 return;
3554 }
3555
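/*
 * Trim the page-sized and two-page free lists back below their caps,
 * dropping tt1_lock around each pmap_pages_free() call and re-taking it
 * before examining the list again.
 */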
3556 while (free_page_size_tt_count > FREE_PAGE_SIZE_TT_MAX) {
3557 free_page_size_tt_count--;
3558 tt = (tt_entry_t *)free_page_size_tt_list;
3559 free_page_size_tt_list = ((tt_free_entry_t *)tt)->next;
3560
3561 pmap_simple_unlock(&tt1_lock);
3562
3563 pmap_pages_free(ml_static_vtop((vm_offset_t)tt), PAGE_SIZE);
3564
3565 OSAddAtomic(-(int32_t)(PAGE_SIZE / PMAP_ROOT_ALLOC_SIZE), (pmap == kernel_pmap ? &inuse_kernel_tteroot_count : &inuse_user_tteroot_count));
3566
3567 pmap_simple_lock(&tt1_lock);
3568 }
3569
3570 while (free_two_page_size_tt_count > FREE_TWO_PAGE_SIZE_TT_MAX) {
3571 free_two_page_size_tt_count--;
3572 tt = (tt_entry_t *)free_two_page_size_tt_list;
3573 free_two_page_size_tt_list = ((tt_free_entry_t *)tt)->next;
3574
3575 pmap_simple_unlock(&tt1_lock);
3576
3577 pmap_pages_free(ml_static_vtop((vm_offset_t)tt), 2 * PAGE_SIZE);
3578
3579 OSAddAtomic(-2 * (int32_t)(PAGE_SIZE / PMAP_ROOT_ALLOC_SIZE), (pmap == kernel_pmap ? &inuse_kernel_tteroot_count : &inuse_user_tteroot_count));
3580
3581 pmap_simple_lock(&tt1_lock);
3582 }
3583 pmap_simple_unlock(&tt1_lock);
3584 pmap_tt_ledger_debit(pmap, size);
3585 }
3586
3587 MARK_AS_PMAP_TEXT static kern_return_t
3588 pmap_tt_allocate(
3589 pmap_t pmap,
3590 tt_entry_t **ttp,
3591 unsigned int level,
3592 unsigned int options)
3593 {
3594 pmap_paddr_t pa;
3595 *ttp = NULL;
3596
3597 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3598 if ((tt_free_entry_t *)pmap->tt_entry_free != NULL) {
3599 tt_free_entry_t *tt_free_cur, *tt_free_next;
3600
3601 tt_free_cur = ((tt_free_entry_t *)pmap->tt_entry_free);
3602 tt_free_next = tt_free_cur->next;
3603 tt_free_cur->next = NULL;
3604 *ttp = (tt_entry_t *)tt_free_cur;
3605 pmap->tt_entry_free = (tt_entry_t *)tt_free_next;
3606 }
3607 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
3608
3609 if (*ttp == NULL) {
3610 pt_desc_t *ptdp;
3611
3612 /*
3613 * Allocate a VM page to hold this level's page table entries.
3614 */
3615 while (pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) {
3616 if (options & PMAP_OPTIONS_NOWAIT) {
3617 return KERN_RESOURCE_SHORTAGE;
3618 }
3619 VM_PAGE_WAIT();
3620 }
3621
3622 while ((ptdp = ptd_alloc(pmap)) == NULL) {
3623 if (options & PMAP_OPTIONS_NOWAIT) {
3624 pmap_pages_free(pa, PAGE_SIZE);
3625 return KERN_RESOURCE_SHORTAGE;
3626 }
3627 VM_PAGE_WAIT();
3628 }
3629
3630 if (level < pt_attr_leaf_level(pmap_get_pt_attr(pmap))) {
3631 OSAddAtomic64(1, &alloc_ttepages_count);
3632 OSAddAtomic(1, (pmap == kernel_pmap ? &inuse_kernel_ttepages_count : &inuse_user_ttepages_count));
3633 } else {
3634 OSAddAtomic64(1, &alloc_ptepages_count);
3635 OSAddAtomic(1, (pmap == kernel_pmap ? &inuse_kernel_ptepages_count : &inuse_user_ptepages_count));
3636 }
3637
3638 pmap_tt_ledger_credit(pmap, PAGE_SIZE);
3639
3640 PMAP_ZINFO_PALLOC(pmap, PAGE_SIZE);
3641
3642 pvh_update_head_unlocked(pai_to_pvh(pa_index(pa)), ptdp, PVH_TYPE_PTDP);
3643
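/*
 * If the kernel page size is larger than this pmap's page-table size
 * (e.g. a 4K-page pmap on a 16K-page kernel), split the remainder of the
 * kernel page into additional table-sized chunks and chain them onto the
 * per-pmap tt_entry_free list for future allocations.
 */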
3644 uint64_t pmap_page_size = pt_attr_page_size(pmap_get_pt_attr(pmap));
3645 if (PAGE_SIZE > pmap_page_size) {
3646 vm_address_t va;
3647 vm_address_t va_end;
3648
3649 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3650
3651 for (va_end = phystokv(pa) + PAGE_SIZE, va = phystokv(pa) + pmap_page_size; va < va_end; va = va + pmap_page_size) {
3652 ((tt_free_entry_t *)va)->next = (tt_free_entry_t *)pmap->tt_entry_free;
3653 pmap->tt_entry_free = (tt_entry_t *)va;
3654 }
3655 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
3656 }
3657
3658 *ttp = (tt_entry_t *)phystokv(pa);
3659 }
3660
3661 #if XNU_MONITOR
3662 assert(*ttp);
3663 #endif
3664
3665 return KERN_SUCCESS;
3666 }
3667
3668
3669 static void
3670 pmap_tt_deallocate(
3671 pmap_t pmap,
3672 tt_entry_t *ttp,
3673 unsigned int level)
3674 {
3675 pt_desc_t *ptdp;
3676 ptd_info_t *ptd_info;
3677 unsigned pt_acc_cnt;
3678 unsigned i;
3679 vm_offset_t free_page = 0;
3680 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
3681 unsigned max_pt_index = PAGE_SIZE / pt_attr_page_size(pt_attr);
3682
3683 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
3684
3685 ptdp = ptep_get_ptd(ttp);
3686 ptd_info = ptd_get_info(ptdp, ttp);
3687
3688 ptdp->va[ptd_get_index(ptdp, ttp)] = (vm_offset_t)-1;
3689
3690 if ((level < pt_attr_leaf_level(pt_attr)) && (ptd_info->refcnt == PT_DESC_REFCOUNT)) {
3691 ptd_info->refcnt = 0;
3692 }
3693
3694 if (ptd_info->refcnt != 0) {
3695 panic("pmap_tt_deallocate(): ptdp %p, count %d", ptdp, ptd_info->refcnt);
3696 }
3697
3698 ptd_info->refcnt = 0;
3699
3700 for (i = 0, pt_acc_cnt = 0; i < max_pt_index; i++) {
3701 pt_acc_cnt += ptdp->ptd_info[i].refcnt;
3702 }
3703
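/*
 * No table on this kernel page holds any references. Walk the per-pmap
 * free list to count how many of the page's other chunks are already
 * free: if every chunk is accounted for, unlink them all and free the
 * backing page; otherwise just push this chunk onto the free list.
 */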
3704 if (pt_acc_cnt == 0) {
3705 tt_free_entry_t *tt_free_list = (tt_free_entry_t *)&pmap->tt_entry_free;
3706 unsigned pt_free_entry_cnt = 1;
3707
3708 while (pt_free_entry_cnt < max_pt_index && tt_free_list) {
3709 tt_free_entry_t *tt_free_list_next;
3710
3711 tt_free_list_next = tt_free_list->next;
3712 if ((((vm_offset_t)tt_free_list_next) - ((vm_offset_t)ttp & ~PAGE_MASK)) < PAGE_SIZE) {
3713 pt_free_entry_cnt++;
3714 }
3715 tt_free_list = tt_free_list_next;
3716 }
3717 if (pt_free_entry_cnt == max_pt_index) {
3718 tt_free_entry_t *tt_free_list_cur;
3719
3720 free_page = (vm_offset_t)ttp & ~PAGE_MASK;
3721 tt_free_list = (tt_free_entry_t *)&pmap->tt_entry_free;
3722 tt_free_list_cur = (tt_free_entry_t *)&pmap->tt_entry_free;
3723
3724 while (tt_free_list_cur) {
3725 tt_free_entry_t *tt_free_list_next;
3726
3727 tt_free_list_next = tt_free_list_cur->next;
3728 if ((((vm_offset_t)tt_free_list_next) - free_page) < PAGE_SIZE) {
3729 tt_free_list->next = tt_free_list_next->next;
3730 } else {
3731 tt_free_list = tt_free_list_next;
3732 }
3733 tt_free_list_cur = tt_free_list_next;
3734 }
3735 } else {
3736 ((tt_free_entry_t *)ttp)->next = (tt_free_entry_t *)pmap->tt_entry_free;
3737 pmap->tt_entry_free = ttp;
3738 }
3739 } else {
3740 ((tt_free_entry_t *)ttp)->next = (tt_free_entry_t *)pmap->tt_entry_free;
3741 pmap->tt_entry_free = ttp;
3742 }
3743
3744 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
3745
3746 if (free_page != 0) {
3747 ptd_deallocate(ptep_get_ptd((pt_entry_t*)free_page));
3748 *(pt_desc_t **)pai_to_pvh(pa_index(ml_static_vtop(free_page))) = NULL;
3749 pmap_pages_free(ml_static_vtop(free_page), PAGE_SIZE);
3750 if (level < pt_attr_leaf_level(pt_attr)) {
3751 OSAddAtomic(-1, (pmap == kernel_pmap ? &inuse_kernel_ttepages_count : &inuse_user_ttepages_count));
3752 } else {
3753 OSAddAtomic(-1, (pmap == kernel_pmap ? &inuse_kernel_ptepages_count : &inuse_user_ptepages_count));
3754 }
3755 PMAP_ZINFO_PFREE(pmap, PAGE_SIZE);
3756 pmap_tt_ledger_debit(pmap, PAGE_SIZE);
3757 }
3758 }
3759
3760 /**
3761 * Safely clear out a translation table entry.
3762 *
3763 * @note If the TTE to clear out points to a leaf table, then that leaf table
3764 * must have a refcnt of zero before the TTE can be removed.
3765 * @note This function expects to be called with pmap locked exclusive, and will
3766 * return with pmap unlocked.
3767 *
3768 * @param pmap The pmap containing the page table whose TTE is being removed.
3769 * @param va_start Beginning of the VA range mapped by the table being removed, for TLB maintenance
3770 * @param va_end Non-inclusive end of the VA range mapped by the table being removed, for TLB maintenance
3771 * @param need_strong_sync Indicates whether strong DSB should be used to synchronize TLB maintenance
3772 * @param ttep Pointer to the TTE that should be cleared out.
3773 * @param level The level of the page table that contains the TTE to be removed.
3774 */
3775 static void
3776 pmap_tte_remove(
3777 pmap_t pmap,
3778 vm_offset_t va_start,
3779 vm_offset_t va_end,
3780 bool need_strong_sync,
3781 tt_entry_t *ttep,
3782 unsigned int level)
3783 {
3784 pmap_assert_locked(pmap, PMAP_LOCK_EXCLUSIVE);
3785
3786 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
3787 const tt_entry_t tte = *ttep;
3788
3789 if (__improbable(tte == ARM_TTE_EMPTY)) {
3790 panic("%s: L%d TTE is already empty. Potential double unmap or memory "
3791 "stomper? pmap=%p ttep=%p", __func__, level, pmap, ttep);
3792 }
3793
3794 #if (__ARM_VMSA__ == 7)
3795 {
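/*
 * On armv7, a single 4K page backs four 1K L2 tables, so the pmap manages
 * L1 entries in aligned groups of four; clear the whole 16-byte-aligned
 * group of TTEs that maps this 4MB region.
 */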
3796 tt_entry_t *ttep_4M = (tt_entry_t *) ((vm_offset_t)ttep & 0xFFFFFFF0);
3797 unsigned i;
3798
3799 for (i = 0; i < 4; i++, ttep_4M++) {
3800 *ttep_4M = (tt_entry_t) 0;
3801 }
3802 FLUSH_PTE_STRONG();
3803 }
3804 #else
3805 *ttep = (tt_entry_t) 0;
3806 FLUSH_PTE_STRONG();
3807 #endif /* (__ARM_VMSA__ == 7) */
3808 // If given a VA range, we're being asked to flush the TLB before the table in ttep is freed.
3809 if (va_end > va_start) {
3810 #if (__ARM_VMSA__ == 7)
3811 // Ensure intermediate translations are flushed for each 1MB block
3812 flush_mmu_tlb_entry_async((va_start & ~ARM_TT_L1_PT_OFFMASK) | (pmap->hw_asid & 0xff));
3813 flush_mmu_tlb_entry_async(((va_start & ~ARM_TT_L1_PT_OFFMASK) + ARM_TT_L1_SIZE) | (pmap->hw_asid & 0xff));
3814 flush_mmu_tlb_entry_async(((va_start & ~ARM_TT_L1_PT_OFFMASK) + 2 * ARM_TT_L1_SIZE) | (pmap->hw_asid & 0xff));
3815 flush_mmu_tlb_entry_async(((va_start & ~ARM_TT_L1_PT_OFFMASK) + 3 * ARM_TT_L1_SIZE) | (pmap->hw_asid & 0xff));
3816 #endif
3817 PMAP_UPDATE_TLBS(pmap, va_start, va_end, need_strong_sync, false);
3818 }
3819
3820 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
3821
3822 /**
3823 * Remember, the passed in "level" parameter refers to the level above the
3824 * table that's getting removed (e.g., removing an L2 TTE will unmap an L3
3825 * page table).
3826 */
3827 const bool remove_leaf_table = (level == pt_attr_twig_level(pt_attr));
3828
3829 /**
3830 * Non-leaf pagetables don't track active references in the PTD and instead
3831 * use a sentinel refcount. If we're removing a leaf pagetable, we'll load
3832 * the real refcount below.
3833 */
3834 unsigned short refcnt = PT_DESC_REFCOUNT;
3835
3836 /*
3837 * It's possible that a concurrent pmap_disconnect() operation may need to reference
3838 * a PTE on the pagetable page to be removed. A full disconnect() may have cleared
3839 * one or more PTEs on this page but not yet dropped the refcount, which would cause
3840 * us to panic in this function on a non-zero refcount. Moreover, it's possible for
3841 * a disconnect-to-compress operation to set the compressed marker on a PTE, and
3842 * for pmap_remove_range_options() to concurrently observe that marker, clear it, and
3843 * drop the pagetable refcount accordingly, without taking any PVH locks that could
3844 * synchronize it against the disconnect operation. If that removal caused the
3845 * refcount to reach zero, the pagetable page could be freed before the disconnect
3846 * operation is finished using the relevant pagetable descriptor.
3847 * Address these cases by waiting until all CPUs have been observed to not be
3848 * executing pmap_disconnect().
3849 */
3850 if (remove_leaf_table) {
3851 bitmap_t active_disconnects[BITMAP_LEN(MAX_CPUS)];
3852 const int max_cpu = ml_get_max_cpu_number();
3853 bitmap_full(&active_disconnects[0], max_cpu + 1);
3854 bool inflight_disconnect;
3855
3856 /*
3857 * Ensure the ensuing load of per-CPU inflight_disconnect is not speculated
3858 * ahead of any prior PTE load which may have observed the effect of a
3859 * concurrent disconnect operation. An acquire fence is required for this;
3860 * a load-acquire operation is insufficient.
3861 */
3862 os_atomic_thread_fence(acquire);
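/*
 * Wait for every CPU to be observed with inflight_disconnect clear. The
 * load-exclusive arms this CPU's exclusive monitor, so WFE will wake when
 * the remote CPU stores to the flag (or on any other event), avoiding a
 * pure spin loop.
 */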
3863 do {
3864 inflight_disconnect = false;
3865 for (int i = bitmap_first(&active_disconnects[0], max_cpu + 1);
3866 i >= 0;
3867 i = bitmap_next(&active_disconnects[0], i)) {
3868 const pmap_cpu_data_t *cpu_data = pmap_get_remote_cpu_data(i);
3869 if (cpu_data == NULL) {
3870 continue;
3871 }
3872 if (os_atomic_load_exclusive(&cpu_data->inflight_disconnect, relaxed)) {
3873 __builtin_arm_wfe();
3874 inflight_disconnect = true;
3875 continue;
3876 }
3877 os_atomic_clear_exclusive();
3878 bitmap_clear(&active_disconnects[0], (unsigned int)i);
3879 }
3880 } while (inflight_disconnect);
3881 /* Ensure the refcount is observed after any observation of inflight_disconnect */
3882 os_atomic_thread_fence(acquire);
3883 refcnt = os_atomic_load(&(ptep_get_info((pt_entry_t*)ttetokv(tte))->refcnt), relaxed);
3884 }
3885
3886 #if MACH_ASSERT
3887 /**
3888 * On internal devices, always do the page table consistency check
3889 * regardless of page table level or the actual refcnt value.
3890 */
3891 {
3892 #else /* MACH_ASSERT */
3893 /**
3894 * Only perform the page table consistency check when deleting leaf page
3895 * tables and it seems like there might be valid/compressed mappings
3896 * leftover.
3897 */
3898 if (__improbable(remove_leaf_table && refcnt != 0)) {
3899 #endif /* MACH_ASSERT */
3900
3901 /**
3902 * There are multiple problems that can manifest as a non-zero refcnt:
3903 * 1. A bug in the refcnt management logic.
3904 * 2. A memory stomper or hardware failure.
3905 * 3. The VM forgetting to unmap all of the valid mappings in an address
3906 * space before destroying a pmap.
3907 *
3908 * By looping over the page table and determining how many valid or
3909 * compressed entries there actually are, we can narrow down which of
3910 * these three cases is causing this panic. If the expected refcnt
3911 * (valid + compressed) and the actual refcnt don't match then the
3912 * problem is probably either a memory corruption issue (if the
3913 * non-empty entries don't match valid+compressed, that could also be a
3914 * sign of corruption) or a refcnt management bug. Otherwise, there
3915 * actually are leftover mappings and the higher layers of xnu are
3916 * probably at fault.
3917 */
3918 const uint64_t pmap_page_size = pt_attr_page_size(pt_attr);
3919 pt_entry_t *bpte = ((pt_entry_t *) (ttetokv(tte) & ~(pmap_page_size - 1)));
3920
3921 pt_entry_t *ptep = bpte;
3922 unsigned short non_empty = 0, valid = 0, comp = 0;
3923 for (unsigned int i = 0; i < (pmap_page_size / sizeof(*ptep)); i++, ptep++) {
3924 /* Keep track of all non-empty entries to detect memory corruption. */
3925 if (__improbable(*ptep != ARM_PTE_EMPTY)) {
3926 non_empty++;
3927 }
3928
3929 if (__improbable(ARM_PTE_IS_COMPRESSED(*ptep, ptep))) {
3930 comp++;
3931 } else if (__improbable((*ptep & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE)) {
3932 valid++;
3933 }
3934 }
3935
3936 #if MACH_ASSERT
3937 /**
3938 * On internal machines, panic whenever a page table getting deleted has
3939 * leftover mappings (valid or otherwise) or a leaf page table has a
3940 * non-zero refcnt.
3941 */
3942 if (__improbable((non_empty != 0) || (remove_leaf_table && refcnt != 0))) {
3943 #else /* MACH_ASSERT */
3944 /* We already know the leaf page-table has a non-zero refcnt, so panic. */
3945 {
3946 #endif /* MACH_ASSERT */
3947 panic("%s: Found inconsistent state in soon to be deleted L%d table: %d valid, "
3948 "%d compressed, %d non-empty, refcnt=%d, L%d tte=%#llx, pmap=%p, bpte=%p", __func__,
3949 level + 1, valid, comp, non_empty, refcnt, level, (uint64_t)tte, pmap, bpte);
3950 }
3951 }
3952 }
3953
3954 /**
3955 * Given a pointer to an entry within a `level` page table, delete the
3956 * page table at `level` + 1 that is represented by that entry. For instance,
3957 * to delete an unused L3 table, `ttep` would be a pointer to the L2 entry that
3958 * contains the PA of the L3 table, and `level` would be "2".
3959 *
3960 * @note If the table getting deallocated is a leaf table, then that leaf table
3961 * must have a refcnt of zero before getting deallocated. All other levels
3962 * must have a refcnt of PT_DESC_REFCOUNT in their page table descriptor.
3963 * @note This function expects to be called with pmap locked exclusive and will
3964 * return with pmap unlocked.
3965 *
3966 * @param pmap The pmap that owns the page table to be deallocated.
3967 * @param va_start Beginning of the VA range mapped by the table being removed, for TLB maintenance
3968 * @param va_end Non-inclusive end of the VA range mapped by the table being removed, for TLB maintenance
3969 * @param need_strong_sync Indicates whether strong DSB should be used to synchronize TLB maintenance
3970 * @param ttep Pointer to the `level` TTE to remove.
3971 * @param level The level of the table that contains an entry pointing to the
3972 * table to be removed. The deallocated page table will be a
3973 * `level` + 1 table (so if `level` is 2, then an L3 table will be
3974 * deleted).
3975 */
3976 void
3977 pmap_tte_deallocate(
3978 pmap_t pmap,
3979 vm_offset_t va_start,
3980 vm_offset_t va_end,
3981 bool need_strong_sync,
3982 tt_entry_t *ttep,
3983 unsigned int level)
3984 {
3985 pmap_paddr_t pa;
3986 tt_entry_t tte;
3987
3988 pmap_assert_locked(pmap, PMAP_LOCK_EXCLUSIVE);
3989
3990 tte = *ttep;
3991
3992 if (tte_get_ptd(tte)->pmap != pmap) {
3993 panic("%s: Passed in pmap doesn't own the page table to be deleted ptd=%p ptd->pmap=%p pmap=%p",
3994 __func__, tte_get_ptd(tte), tte_get_ptd(tte)->pmap, pmap);
3995 }
3996
3997 assertf((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE, "%s: invalid TTE %p (0x%llx)",
3998 __func__, ttep, (unsigned long long)tte);
3999 uint64_t pmap_page_size = pt_attr_page_size(pmap_get_pt_attr(pmap));
4000
4001 /* pmap_tte_remove() will drop the pmap lock */
4002 pmap_tte_remove(pmap, va_start, va_end, need_strong_sync, ttep, level);
4003
4004 /* Clear any page offset: we mean to free the whole page, but armv7 TTEs may only be
4005 * aligned on 1K boundaries. We clear the surrounding "chunk" of 4 TTEs above. */
4006 pa = tte_to_pa(tte) & ~(pmap_page_size - 1);
4007 pmap_tt_deallocate(pmap, (tt_entry_t *) phystokv(pa), level + 1);
4008 }
4009
4010 /*
4011 * Remove a range of hardware page-table entries.
4012 * The entries given are the first (inclusive)
4013 * and last (exclusive) entries for the VM pages.
4014 * The virtual address is the va for the first pte.
4015 *
4016 * The pmap must be locked.
4017 * If the pmap is not the kernel pmap, the range must lie
4018 * entirely within one pte-page. This is NOT checked.
4019 * Assumes that the pte-page exists.
4020 *
4021 * Returns the number of PTEs changed
4022 */
4023 MARK_AS_PMAP_TEXT static int
4024 pmap_remove_range(
4025 pmap_t pmap,
4026 vm_map_address_t va,
4027 pt_entry_t *bpte,
4028 pt_entry_t *epte)
4029 {
4030 bool need_strong_sync = false;
4031 int num_changed = pmap_remove_range_options(pmap, va, bpte, epte, NULL,
4032 &need_strong_sync, PMAP_OPTIONS_REMOVE);
4033 if (num_changed > 0) {
4034 PMAP_UPDATE_TLBS(pmap, va,
4035 va + (pt_attr_page_size(pmap_get_pt_attr(pmap)) * (epte - bpte)), need_strong_sync, true);
4036 }
4037 return num_changed;
4038 }
4039
4040
4041 #ifdef PVH_FLAG_EXEC
4042
4043 /*
4044 * Update the access protection bits of the physical aperture mapping for a page.
4045 * This is useful, for example, in guaranteeing that a verified executable page
4046 * has no writable mappings anywhere in the system, including the physical
4047 * aperture. flush_tlb_async can be set to true to avoid unnecessary TLB
4048 * synchronization overhead in cases where the call to this function is
4049 * guaranteed to be followed by other TLB operations.
4050 */
4051 void
4052 pmap_set_ptov_ap(unsigned int pai __unused, unsigned int ap __unused, boolean_t flush_tlb_async __unused)
4053 {
4054 #if __ARM_PTE_PHYSMAP__
4055 pvh_assert_locked(pai);
4056 vm_offset_t kva = phystokv(vm_first_phys + (pmap_paddr_t)ptoa(pai));
4057 pt_entry_t *pte_p = pmap_pte(kernel_pmap, kva);
4058
4059 pt_entry_t tmplate = *pte_p;
4060 if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(ap)) {
4061 return;
4062 }
4063 tmplate = (tmplate & ~ARM_PTE_APMASK) | ARM_PTE_AP(ap);
4064 #if (__ARM_VMSA__ > 7)
4065 if (tmplate & ARM_PTE_HINT_MASK) {
4066 panic("%s: physical aperture PTE %p has hint bit set, va=%p, pte=0x%llx",
4067 __func__, pte_p, (void *)kva, tmplate);
4068 }
4069 #endif
4070 write_pte_strong(pte_p, tmplate);
4071 flush_mmu_tlb_region_asid_async(kva, PAGE_SIZE, kernel_pmap, true);
4072 if (!flush_tlb_async) {
4073 sync_tlb_flush();
4074 }
4075 #endif
4076 }
4077
4078 #endif /* defined(PVH_FLAG_EXEC) */
4079
4080 MARK_AS_PMAP_TEXT int
4081 pmap_remove_range_options(
4082 pmap_t pmap,
4083 vm_map_address_t va,
4084 pt_entry_t *bpte,
4085 pt_entry_t *epte,
4086 vm_map_address_t *eva,
4087 bool *need_strong_sync __unused,
4088 int options)
4089 {
4090 pt_entry_t *cpte;
4091 size_t npages = 0;
4092 int num_removed, num_unwired;
4093 int num_pte_changed;
4094 unsigned int pai = 0;
4095 pmap_paddr_t pa;
4096 int num_external, num_internal, num_reusable;
4097 int num_alt_internal;
4098 uint64_t num_compressed, num_alt_compressed;
4099 int16_t refcnt = 0;
4100
4101 pmap_assert_locked(pmap, PMAP_LOCK_EXCLUSIVE);
4102
4103 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
4104 uint64_t pmap_page_size = PAGE_RATIO * pt_attr_page_size(pt_attr);
4105
4106 if (__improbable((uintptr_t)epte > (((uintptr_t)bpte + pmap_page_size) & ~(pmap_page_size - 1)))) {
4107 panic("%s: PTE range [%p, %p) in pmap %p crosses page table boundary", __func__, bpte, epte, pmap);
4108 }
4109
4110 num_removed = 0;
4111 num_unwired = 0;
4112 num_pte_changed = 0;
4113 num_external = 0;
4114 num_internal = 0;
4115 num_reusable = 0;
4116 num_compressed = 0;
4117 num_alt_internal = 0;
4118 num_alt_compressed = 0;
4119
4120 #if XNU_MONITOR
4121 bool ro_va = false;
4122 if (__improbable((pmap == kernel_pmap) && (eva != NULL) && zone_spans_ro_va(va, *eva))) {
4123 ro_va = true;
4124 }
4125 #endif
4126 for (cpte = bpte; cpte < epte;
4127 cpte += PAGE_RATIO, va += pmap_page_size) {
4128 pt_entry_t spte;
4129 boolean_t managed = FALSE;
4130
4131 /*
4132 * Check for pending preemption on every iteration: the PV list may be arbitrarily long,
4133 * so we need to be as aggressive as possible in checking for preemption when we can.
4134 */
4135 if (__improbable((eva != NULL) && npages++ && pmap_pending_preemption())) {
4136 *eva = va;
4137 break;
4138 }
4139
4140 spte = *((volatile pt_entry_t*)cpte);
4141
4142 while (!managed) {
4143 if (pmap != kernel_pmap &&
4144 (options & PMAP_OPTIONS_REMOVE) &&
4145 (ARM_PTE_IS_COMPRESSED(spte, cpte))) {
4146 /*
4147 * "pmap" must be locked at this point,
4148 * so this should not race with another
4149 * pmap_remove_range() or pmap_enter().
4150 */
4151
4152 /* one less "compressed"... */
4153 num_compressed++;
4154 if (spte & ARM_PTE_COMPRESSED_ALT) {
4155 /* ... but it used to be "ALTACCT" */
4156 num_alt_compressed++;
4157 }
4158
4159 /* clear marker */
4160 write_pte_fast(cpte, ARM_PTE_TYPE_FAULT);
4161 /*
4162 * "refcnt" also accounts for
4163 * our "compressed" markers,
4164 * so let's update it here.
4165 */
4166 --refcnt;
4167 spte = *((volatile pt_entry_t*)cpte);
4168 }
4169 /*
4170 * It may be possible for the pte to transition from managed
4171 * to unmanaged in this timeframe; for now, elide the assert.
4172 * We should break out as a consequence of checking pa_valid.
4173 */
4174 //assert(!ARM_PTE_IS_COMPRESSED(spte));
4175 pa = pte_to_pa(spte);
4176 if (!pa_valid(pa)) {
4177 #if XNU_MONITOR
4178 unsigned int cacheattr = pmap_cache_attributes((ppnum_t)atop(pa));
4179 #endif
4180 #if XNU_MONITOR
4181 if (__improbable((cacheattr & PP_ATTR_MONITOR) &&
4182 (pte_to_xprr_perm(spte) != XPRR_KERN_RO_PERM) && !pmap_ppl_disable)) {
4183 panic("%s: attempt to remove mapping of writable PPL-protected I/O address 0x%llx",
4184 __func__, (uint64_t)pa);
4185 }
4186 #endif
4187 break;
4188 }
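/*
 * Lock the PV head for this physical page, then re-read the PTE to make
 * sure the mapping didn't change or disappear while we were waiting for
 * the lock; if it did, retry against the new physical address.
 */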
4189 pai = pa_index(pa);
4190 pvh_lock(pai);
4191 spte = *((volatile pt_entry_t*)cpte);
4192 pa = pte_to_pa(spte);
4193 if (pai == pa_index(pa)) {
4194 managed = TRUE;
4195 break; // Leave pai locked as we will unlock it after we free the PV entry
4196 }
4197 pvh_unlock(pai);
4198 }
4199
4200 if (ARM_PTE_IS_COMPRESSED(*cpte, cpte)) {
4201 /*
4202 * There used to be a valid mapping here but it
4203 * has already been removed when the page was
4204 * sent to the VM compressor, so nothing left to
4205 * remove now...
4206 */
4207 continue;
4208 }
4209
4210 /* remove the translation, do not flush the TLB */
4211 if (*cpte != ARM_PTE_TYPE_FAULT) {
4212 assertf(!ARM_PTE_IS_COMPRESSED(*cpte, cpte), "unexpected compressed pte %p (=0x%llx)", cpte, (uint64_t)*cpte);
4213 assertf((*cpte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", cpte, (uint64_t)*cpte);
4214 #if MACH_ASSERT
4215 if (managed && (pmap != kernel_pmap) && (ptep_get_va(cpte) != va)) {
4216 panic("pmap_remove_range_options(): VA mismatch: cpte=%p ptd=%p pte=0x%llx va=0x%llx, cpte va=0x%llx",
4217 cpte, ptep_get_ptd(cpte), (uint64_t)*cpte, (uint64_t)va, (uint64_t)ptep_get_va(cpte));
4218 }
4219 #endif
4220 write_pte_fast(cpte, ARM_PTE_TYPE_FAULT);
4221 num_pte_changed++;
4222 }
4223
4224 if ((spte != ARM_PTE_TYPE_FAULT) &&
4225 (pmap != kernel_pmap)) {
4226 assertf(!ARM_PTE_IS_COMPRESSED(spte, cpte), "unexpected compressed pte %p (=0x%llx)", cpte, (uint64_t)spte);
4227 assertf((spte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", cpte, (uint64_t)spte);
4228 --refcnt;
4229 }
4230
4231 if (pte_is_wired(spte)) {
4232 pte_set_wired(pmap, cpte, 0);
4233 num_unwired++;
4234 }
4235 /*
4236 * if not managed, we're done
4237 */
4238 if (!managed) {
4239 continue;
4240 }
4241
4242 #if XNU_MONITOR
4243 if (__improbable(ro_va)) {
4244 pmap_ppl_unlockdown_page_locked(pai, PVH_FLAG_LOCKDOWN_RO, true);
4245 }
4246 #endif
4247
4248 /*
4249 * find and remove the mapping from the chain for this
4250 * physical address.
4251 */
4252 bool is_internal, is_altacct;
4253 pmap_remove_pv(pmap, cpte, pai, true, &is_internal, &is_altacct);
4254
4255 if (is_altacct) {
4256 assert(is_internal);
4257 num_internal++;
4258 num_alt_internal++;
4259 if (!pvh_test_type(pai_to_pvh(pai), PVH_TYPE_PTEP)) {
4260 ppattr_clear_altacct(pai);
4261 ppattr_clear_internal(pai);
4262 }
4263 } else if (is_internal) {
4264 if (ppattr_test_reusable(pai)) {
4265 num_reusable++;
4266 } else {
4267 num_internal++;
4268 }
4269 if (!pvh_test_type(pai_to_pvh(pai), PVH_TYPE_PTEP)) {
4270 ppattr_clear_internal(pai);
4271 }
4272 } else {
4273 num_external++;
4274 }
4275 pvh_unlock(pai);
4276 num_removed++;
4277 }
4278
4279 /*
4280 * Update the counts
4281 */
4282 pmap_ledger_debit(pmap, task_ledgers.phys_mem, num_removed * pmap_page_size);
4283
4284 if (pmap != kernel_pmap) {
4285 if ((refcnt != 0) && (OSAddAtomic16(refcnt, (SInt16 *) &(ptep_get_info(bpte)->refcnt)) <= 0)) {
4286 panic("pmap_remove_range_options: over-release of ptdp %p for pte [%p, %p)", ptep_get_ptd(bpte), bpte, epte);
4287 }
4288
4289 /* update ledgers */
4290 pmap_ledger_debit(pmap, task_ledgers.external, (num_external) * pmap_page_size);
4291 pmap_ledger_debit(pmap, task_ledgers.reusable, (num_reusable) * pmap_page_size);
4292 pmap_ledger_debit(pmap, task_ledgers.wired_mem, (num_unwired) * pmap_page_size);
4293 pmap_ledger_debit(pmap, task_ledgers.internal, (num_internal) * pmap_page_size);
4294 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, (num_alt_internal) * pmap_page_size);
4295 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, (num_alt_compressed) * pmap_page_size);
4296 pmap_ledger_debit(pmap, task_ledgers.internal_compressed, (num_compressed) * pmap_page_size);
4297 /* make needed adjustments to phys_footprint */
4298 pmap_ledger_debit(pmap, task_ledgers.phys_footprint,
4299 ((num_internal -
4300 num_alt_internal) +
4301 (num_compressed -
4302 num_alt_compressed)) * pmap_page_size);
4303 }
4304
4305 /* flush the ptable entries we have written */
4306 if (num_pte_changed > 0) {
4307 FLUSH_PTE_STRONG();
4308 }
4309
4310 return num_pte_changed;
4311 }
4312
4313
4314 /*
4315 * Remove the given range of addresses
4316 * from the specified map.
4317 *
4318 * It is assumed that the start and end are properly
4319 * rounded to the hardware page size.
4320 */
4321 void
4322 pmap_remove(
4323 pmap_t pmap,
4324 vm_map_address_t start,
4325 vm_map_address_t end)
4326 {
4327 pmap_remove_options(pmap, start, end, PMAP_OPTIONS_REMOVE);
4328 }
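/*
 * Illustrative usage (hypothetical pmap and addresses; start and end must
 * already be rounded to the hardware page size, per the note above):
 *
 *	pmap_remove(user_pmap, 0x100000000ULL, 0x100400000ULL);
 */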
4329
4330 MARK_AS_PMAP_TEXT vm_map_address_t
4331 pmap_remove_options_internal(
4332 pmap_t pmap,
4333 vm_map_address_t start,
4334 vm_map_address_t end,
4335 int options)
4336 {
4337 vm_map_address_t eva = end;
4338 pt_entry_t *bpte, *epte;
4339 pt_entry_t *pte_p;
4340 tt_entry_t *tte_p;
4341 int remove_count = 0;
4342 bool need_strong_sync = false;
4343 bool unlock = true;
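/*
 * eva tracks how far the removal actually progressed: pmap_remove_range_options()
 * may stop early at a preemption point and update it, and we return it so
 * pmap_remove_options() can resume from the first unprocessed VA.
 */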
4344
4345 if (__improbable(end < start)) {
4346 panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end);
4347 }
4348 if (__improbable(pmap->type == PMAP_TYPE_COMMPAGE)) {
4349 panic("%s: attempt to remove mappings from commpage pmap %p", __func__, pmap);
4350 }
4351
4352 validate_pmap_mutable(pmap);
4353
4354 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
4355
4356 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
4357
4358 tte_p = pmap_tte(pmap, start);
4359
4360 if (tte_p == (tt_entry_t *) NULL) {
4361 goto done;
4362 }
4363
4364 if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
4365 pte_p = (pt_entry_t *) ttetokv(*tte_p);
4366 bpte = &pte_p[pte_index(pt_attr, start)];
4367 epte = bpte + ((end - start) >> pt_attr_leaf_shift(pt_attr));
4368
4369 /*
4370 * This check is really intended to ensure that mappings in a nested pmap can't be removed
4371 * through a top-level user pmap, although it's also a useful sanity check for other pmap types.
4372 * Note that kernel page tables may not have PTDs, so we can't use the check there.
4373 */
4374 if (__improbable((pmap->type != PMAP_TYPE_KERNEL) && (ptep_get_pmap(bpte) != pmap))) {
4375 panic("%s: attempt to remove mappings owned by pmap %p through pmap %p, starting at pte %p",
4376 __func__, ptep_get_pmap(bpte), pmap, bpte);
4377 }
4378
4379 remove_count = pmap_remove_range_options(pmap, start, bpte, epte, &eva,
4380 &need_strong_sync, options);
4381
4382 if ((pmap->type == PMAP_TYPE_USER) && (ptep_get_info(pte_p)->refcnt == 0)) {
4383 pmap_tte_deallocate(pmap, start, eva, need_strong_sync, tte_p, pt_attr_twig_level(pt_attr));
4384 remove_count = 0; // pmap_tte_deallocate has flushed the TLB for us
4385 unlock = false; // pmap_tte_deallocate() has dropped the lock
4386 }
4387 }
4388
4389 done:
4390 if (unlock) {
4391 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
4392 }
4393
4394 if (remove_count > 0) {
4395 PMAP_UPDATE_TLBS(pmap, start, eva, need_strong_sync, true);
4396 }
4397 return eva;
4398 }
4399
4400 void
4401 pmap_remove_options(
4402 pmap_t pmap,
4403 vm_map_address_t start,
4404 vm_map_address_t end,
4405 int options)
4406 {
4407 vm_map_address_t va;
4408
4409 if (pmap == PMAP_NULL) {
4410 return;
4411 }
4412
4413 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
4414
4415 PMAP_TRACE(2, PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_START,
4416 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start),
4417 VM_KERNEL_ADDRHIDE(end));
4418
4419 #if MACH_ASSERT
4420 if ((start | end) & pt_attr_leaf_offmask(pt_attr)) {
4421 panic("pmap_remove_options() pmap %p start 0x%llx end 0x%llx",
4422 pmap, (uint64_t)start, (uint64_t)end);
4423 }
4424 if ((end < start) || (start < pmap->min) || (end > pmap->max)) {
4425 panic("pmap_remove_options(): invalid address range, pmap=%p, start=0x%llx, end=0x%llx",
4426 pmap, (uint64_t)start, (uint64_t)end);
4427 }
4428 #endif
4429
4430 /*
4431 * We allow single-page requests to execute non-preemptibly,
4432 * as it doesn't make sense to sample AST_URGENT for a single-page
4433 * operation, and there are a couple of special use cases that
4434 * require a non-preemptible single-page operation.
4435 */
4436 if ((end - start) > (pt_attr_page_size(pt_attr) * PAGE_RATIO)) {
4437 pmap_verify_preemptible();
4438 }
4439
4440 /*
4441 * Remove the mappings and flush the TLB one twig-table chunk at a time
4442 */
4443 va = start;
4444 while (va < end) {
4445 vm_map_address_t l;
4446
4447 l = ((va + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr));
4448 if (l > end) {
4449 l = end;
4450 }
4451
4452 #if XNU_MONITOR
4453 va = pmap_remove_options_ppl(pmap, va, l, options);
4454
4455 pmap_ledger_check_balance(pmap);
4456 #else
4457 va = pmap_remove_options_internal(pmap, va, l, options);
4458 #endif
4459 }
4460
4461 PMAP_TRACE(2, PMAP_CODE(PMAP__REMOVE) | DBG_FUNC_END);
4462 }
4463
4464
4465 /*
4466 * Remove phys addr if mapped in specified map
4467 */
4468 void
4469 pmap_remove_some_phys(
4470 __unused pmap_t map,
4471 __unused ppnum_t pn)
4472 {
4473 /* Implement to support working set code */
4474 }
4475
4476 /*
4477 * Implementation of PMAP_SWITCH_USER that Mach VM uses to
4478 * switch a thread onto a new vm_map.
4479 */
4480 void
4481 pmap_switch_user(thread_t thread, vm_map_t new_map)
4482 {
4483 pmap_t new_pmap = new_map->pmap;
4484
4485
4486 thread->map = new_map;
4487 pmap_set_pmap(new_pmap, thread);
4488
4489 }
4490
4491 void
4492 pmap_set_pmap(
4493 pmap_t pmap,
4494 #if !__ARM_USER_PROTECT__
4495 __unused
4496 #endif
4497 thread_t thread)
4498 {
4499 pmap_switch(pmap);
4500 #if __ARM_USER_PROTECT__
4501 thread->machine.uptw_ttb = ((unsigned int) pmap->ttep) | TTBR_SETUP;
4502 thread->machine.asid = pmap->hw_asid;
4503 #endif
4504 }
4505
4506 static void
4507 pmap_flush_core_tlb_asid_async(pmap_t pmap)
4508 {
4509 #if (__ARM_VMSA__ == 7)
4510 flush_core_tlb_asid_async(pmap->hw_asid);
4511 #else
4512 flush_core_tlb_asid_async(((uint64_t) pmap->hw_asid) << TLBI_ASID_SHIFT);
4513 #endif
4514 }
4515
4516 static inline bool
4517 pmap_user_ttb_is_clear(void)
4518 {
4519 #if (__ARM_VMSA__ > 7)
4520 return get_mmu_ttb() == (invalid_ttep & TTBR_BADDR_MASK);
4521 #else
4522 return get_mmu_ttb() == kernel_pmap->ttep;
4523 #endif
4524 }
4525
4526 MARK_AS_PMAP_TEXT void
4527 pmap_switch_internal(
4528 pmap_t pmap)
4529 {
4530 pmap_cpu_data_t *cpu_data_ptr = pmap_get_cpu_data();
4531 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
4532 #if XNU_MONITOR
4533 os_atomic_store(&cpu_data_ptr->active_pmap, pmap, relaxed);
4534 #endif
4535 validate_pmap_mutable(pmap);
4536 uint16_t asid_index = pmap->hw_asid;
4537 bool do_asid_flush = false;
4538 bool do_commpage_flush = false;
4539
4540 if (__improbable((asid_index == 0) && (pmap != kernel_pmap))) {
4541 panic("%s: attempt to activate pmap %p with invalid ASID", __func__, pmap);
4542 }
4543 #if __ARM_KERNEL_PROTECT__
4544 asid_index >>= 1;
4545 #endif
4546
4547 pmap_t last_nested_pmap = cpu_data_ptr->cpu_nested_pmap;
4548 #if (__ARM_VMSA__ > 7)
4549 __unused const pt_attr_t *last_nested_pmap_attr = cpu_data_ptr->cpu_nested_pmap_attr;
4550 __unused vm_map_address_t last_nested_region_addr = cpu_data_ptr->cpu_nested_region_addr;
4551 __unused vm_map_offset_t last_nested_region_size = cpu_data_ptr->cpu_nested_region_size;
4552 #endif
4553 bool do_shared_region_flush = ((pmap != kernel_pmap) && (last_nested_pmap != NULL) && (pmap->nested_pmap != last_nested_pmap));
4554 bool break_before_make = do_shared_region_flush;
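/*
 * break_before_make forces TTBR0 onto the invalid page table (see the
 * pmap_clear_user_ttb_internal() call below) before any TLB maintenance or
 * the final switch, so that stale entries cannot be concurrently refilled
 * while we flush.
 */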
4555
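/*
 * When software ASIDs are in play (more logical ASIDs than hardware ones),
 * hw_asid 0 is reserved as invalid for user pmaps (see the panic above),
 * so the per-CPU cpu_sw_asids table is indexed by hw_asid - 1.
 */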
4556 if ((pmap_max_asids > MAX_HW_ASIDS) && (asid_index > 0)) {
4557 asid_index -= 1;
4558 pmap_update_plru(asid_index);
4559
4560 /* Paranoia. */
4561 assert(asid_index < (sizeof(cpu_data_ptr->cpu_sw_asids) / sizeof(*cpu_data_ptr->cpu_sw_asids)));
4562
4563 /* Extract the "virtual" bits of the ASIDs (which could cause us to alias). */
4564 uint8_t new_sw_asid = pmap->sw_asid;
4565 uint8_t last_sw_asid = cpu_data_ptr->cpu_sw_asids[asid_index];
4566
4567 if (new_sw_asid != last_sw_asid) {
4568 /*
4569 * If the virtual ASID of the new pmap does not match the virtual ASID
4570 * last seen on this CPU for the physical ASID (that was a mouthful),
4571 * then this switch runs the risk of aliasing. We need to flush the
4572 * TLB for this physical ASID in this case.
4573 */
4574 cpu_data_ptr->cpu_sw_asids[asid_index] = new_sw_asid;
4575 do_asid_flush = true;
4576 break_before_make = true;
4577 }
4578 }
4579
4580 #if __ARM_MIXED_PAGE_SIZE__
4581 if (pt_attr->pta_tcr_value != get_tcr()) {
4582 break_before_make = true;
4583 }
4584 #endif
4585 #if __ARM_MIXED_PAGE_SIZE__
4586 /*
4587 * For mixed page size configurations, we need to flush the global commpage mappings from
4588 * the TLB when transitioning between address spaces with different page sizes. Otherwise
4589 * it's possible for a TLB fill against the incoming commpage to produce a TLB entry
4590 * which partially overlaps a TLB entry from the outgoing commpage, leading to a TLB
4591 * conflict abort or other unpredictable behavior.
4592 */
4593 if (pt_attr_leaf_shift(pt_attr) != cpu_data_ptr->commpage_page_shift) {
4594 do_commpage_flush = true;
4595 }
4596 if (do_commpage_flush) {
4597 break_before_make = true;
4598 }
4599 #endif
4600 if (__improbable(break_before_make && !pmap_user_ttb_is_clear())) {
4601 PMAP_TRACE(1, PMAP_CODE(PMAP__CLEAR_USER_TTB), VM_KERNEL_ADDRHIDE(pmap), PMAP_VASID(pmap), pmap->hw_asid);
4602 pmap_clear_user_ttb_internal();
4603 }
4604
4605 /* If we're switching to a different nested pmap (i.e. shared region), we'll need
4606 * to flush the userspace mappings for that region. Those mappings are global
4607 * and will not be protected by the ASID. It should also be cheaper to flush the
4608 * entire local TLB rather than to do a broadcast MMU flush by VA region. */
4609 if (__improbable(do_shared_region_flush)) {
4610 #if __ARM_RANGE_TLBI__
4611 uint64_t page_shift_prev = pt_attr_leaf_shift(last_nested_pmap_attr);
4612 vm_map_offset_t npages_prev = last_nested_region_size >> page_shift_prev;
4613
4614 /* NOTE: here we flush the global TLB entries for the previous nested region only.
4615 * There may still be non-global entries that overlap with the incoming pmap's
4616 * nested region. On Apple SoCs at least, this is acceptable. Those non-global entries
4617 * must necessarily belong to a different ASID than the incoming pmap, or they would
4618 * be flushed in the do_asid_flush case below. This will prevent them from conflicting
4619 * with the incoming pmap's nested region. However, the ARMv8 ARM is not crystal clear
4620 * on whether such a global/inactive-nonglobal overlap is acceptable, so we may need
4621 * to consider additional invalidation here in the future. */
4622 if (npages_prev <= ARM64_TLB_RANGE_PAGES) {
4623 flush_core_tlb_allrange_async(generate_rtlbi_param((ppnum_t)npages_prev, 0, last_nested_region_addr, page_shift_prev));
4624 } else {
4625 do_asid_flush = false;
4626 flush_core_tlb_async();
4627 }
4628 #else
4629 do_asid_flush = false;
4630 flush_core_tlb_async();
4631 #endif // __ARM_RANGE_TLBI__
4632 }
4633
4634 #if __ARM_MIXED_PAGE_SIZE__
4635 if (__improbable(do_commpage_flush)) {
4636 const uint64_t commpage_shift = cpu_data_ptr->commpage_page_shift;
4637 const uint64_t rtlbi_param = generate_rtlbi_param((ppnum_t)_COMM_PAGE64_NESTING_SIZE >> commpage_shift,
4638 0, _COMM_PAGE64_NESTING_START, commpage_shift);
4639 flush_core_tlb_allrange_async(rtlbi_param);
4640 }
4641 #endif
4642 if (__improbable(do_asid_flush)) {
4643 pmap_flush_core_tlb_asid_async(pmap);
4644 #if DEVELOPMENT || DEBUG
4645 os_atomic_inc(&pmap_asid_flushes, relaxed);
4646 #endif
4647 }
4648 if (__improbable(do_asid_flush || do_shared_region_flush || do_commpage_flush)) {
4649 sync_tlb_flush_local();
4650 }
4651
4652 pmap_switch_user_ttb(pmap, cpu_data_ptr);
4653 }
4654
4655 void
4656 pmap_switch(
4657 pmap_t pmap)
4658 {
4659 PMAP_TRACE(1, PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(pmap), PMAP_VASID(pmap), pmap->hw_asid);
4660 #if XNU_MONITOR
4661 pmap_switch_ppl(pmap);
4662 #else
4663 pmap_switch_internal(pmap);
4664 #endif
4665 PMAP_TRACE(1, PMAP_CODE(PMAP__SWITCH) | DBG_FUNC_END);
4666 }
4667
4668 void
4669 pmap_page_protect(
4670 ppnum_t ppnum,
4671 vm_prot_t prot)
4672 {
4673 pmap_page_protect_options(ppnum, prot, 0, NULL);
4674 }
4675
4676 /*
4677 * Routine: pmap_page_protect_options
4678 *
4679 * Function:
4680 * Lower the permission for all mappings to a given
4681 * page.
4682 */
4683 MARK_AS_PMAP_TEXT static void
4684 pmap_page_protect_options_with_flush_range(
4685 ppnum_t ppnum,
4686 vm_prot_t prot,
4687 unsigned int options,
4688 pmap_tlb_flush_range_t *flush_range)
4689 {
4690 pmap_paddr_t phys = ptoa(ppnum);
4691 pv_entry_t **pv_h;
4692 pv_entry_t *pve_p, *orig_pve_p;
4693 pv_entry_t *pveh_p;
4694 pv_entry_t *pvet_p;
4695 pt_entry_t *pte_p, *orig_pte_p;
4696 pv_entry_t *new_pve_p;
4697 pt_entry_t *new_pte_p;
4698 vm_offset_t pvh_flags;
4699 unsigned int pai;
4700 bool remove;
4701 bool set_NX;
4702 unsigned int pvh_cnt = 0;
4703 unsigned int pass1_updated = 0;
4704 unsigned int pass2_updated = 0;
4705
4706 assert(ppnum != vm_page_fictitious_addr);
4707
4708 /* Only work with managed pages. */
4709 if (!pa_valid(phys)) {
4710 return;
4711 }
4712
4713 /*
4714 * Determine the new protection.
4715 */
4716 switch (prot) {
4717 case VM_PROT_ALL:
4718 return; /* nothing to do */
4719 case VM_PROT_READ:
4720 case VM_PROT_READ | VM_PROT_EXECUTE:
4721 remove = false;
4722 break;
4723 default:
4724 /* PPL security model requires that we flush TLBs before we exit if the page may be recycled. */
4725 options = options & ~PMAP_OPTIONS_NOFLUSH;
4726 remove = true;
4727 break;
4728 }
4729
4730 pmap_cpu_data_t *pmap_cpu_data = NULL;
4731 if (remove) {
4732 #if !XNU_MONITOR
4733 mp_disable_preemption();
4734 #endif
4735 pmap_cpu_data = pmap_get_cpu_data();
4736 os_atomic_store(&pmap_cpu_data->inflight_disconnect, true, relaxed);
4737 /*
4738 * Ensure the store to inflight_disconnect will be observed before any of the
4739 * ensuing PTE/refcount stores in this function. This flag is used to avoid
4740 * a race in which the VM may clear a pmap's mappings and destroy the pmap on
4741 * another CPU, in between this function's clearing a PTE and dropping the
4742 * corresponding pagetable refcount. That can lead to a panic if the
4743 * destroying thread observes a non-zero refcount. For this we need a store-
4744 * store barrier; a store-release operation would not be sufficient.
4745 */
4746 os_atomic_thread_fence(release);
4747 }
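/*
 * The update proceeds in two passes over the PV list: pass 1 rewrites the
 * PTEs and adjusts ledgers/attributes under the PVH lock, and pass 2 issues
 * any required TLB invalidation and (on remove) unlinks the CPU mappings,
 * preserving IOMMU entries.
 */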
4748
4749 pai = pa_index(phys);
4750 pvh_lock(pai);
4751 pv_h = pai_to_pvh(pai);
4752 pvh_flags = pvh_get_flags(pv_h);
4753
4754 #if XNU_MONITOR
4755 if (__improbable(remove && (pvh_flags & PVH_FLAG_LOCKDOWN_MASK))) {
4756 panic("%d is locked down (%#llx), cannot remove", pai, (uint64_t)pvh_get_flags(pv_h));
4757 }
4758 if (__improbable(ppattr_pa_test_monitor(phys))) {
4759 panic("%s: PA 0x%llx belongs to PPL.", __func__, (uint64_t)phys);
4760 }
4761 #endif
4762
4763 orig_pte_p = pte_p = PT_ENTRY_NULL;
4764 orig_pve_p = pve_p = PV_ENTRY_NULL;
4765 pveh_p = PV_ENTRY_NULL;
4766 pvet_p = PV_ENTRY_NULL;
4767 new_pve_p = PV_ENTRY_NULL;
4768 new_pte_p = PT_ENTRY_NULL;
4769
4770
4771 if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) {
4772 orig_pte_p = pte_p = pvh_ptep(pv_h);
4773 } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) {
4774 orig_pve_p = pve_p = pvh_pve_list(pv_h);
4775 pveh_p = pve_p;
4776 } else if (__improbable(!pvh_test_type(pv_h, PVH_TYPE_NULL))) {
4777 panic("%s: invalid PV head 0x%llx for PA 0x%llx", __func__, (uint64_t)(*pv_h), (uint64_t)phys);
4778 }
4779
4780 /* Pass 1: Update all CPU PTEs and accounting info as necessary */
4781 int pve_ptep_idx = 0;
4782
4783 /*
4784 * issue_tlbi is used to indicate that this function will need to issue at least one TLB
4785 * invalidation during pass 2. tlb_flush_needed only indicates that PTE permissions have
4786 * changed and that a TLB flush will be needed *at some point*, so we'll need to call
4787 * FLUSH_PTE_STRONG() to synchronize prior PTE updates. In the case of a flush_range
4788 * operation, TLB invalidation may be handled by the caller so it's possible for
4789 * tlb_flush_needed to be true while issue_tlbi is false.
4790 */
4791 bool issue_tlbi = false;
4792 bool tlb_flush_needed = false;
4793 const bool compress = (options & PMAP_OPTIONS_COMPRESSOR);
4794 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
4795 pt_entry_t tmplate = ARM_PTE_TYPE_FAULT;
4796 bool update = false;
4797
4798 if (pve_p != PV_ENTRY_NULL) {
4799 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
4800 if (pte_p == PT_ENTRY_NULL) {
4801 goto protect_skip_pve_pass1;
4802 }
4803 }
4804
4805 #ifdef PVH_FLAG_IOMMU
4806 if (pvh_ptep_is_iommu(pte_p)) {
4807 #if XNU_MONITOR
4808 if (__improbable(pvh_flags & PVH_FLAG_LOCKDOWN_MASK)) {
4809 panic("pmap_page_protect: ppnum 0x%x locked down, cannot be owned by iommu %p, pve_p=%p",
4810 ppnum, ptep_get_iommu(pte_p), pve_p);
4811 }
4812 #endif
4813 if (remove && (options & PMAP_OPTIONS_COMPRESSOR)) {
4814 panic("pmap_page_protect: attempt to compress ppnum 0x%x owned by iommu %p, pve_p=%p",
4815 ppnum, ptep_get_iommu(pte_p), pve_p);
4816 }
4817 goto protect_skip_pve_pass1;
4818 }
4819 #endif
4820 const pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
4821 const pmap_t pmap = ptdp->pmap;
4822 const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
4823
4824 if (__improbable((pmap == NULL) || (atop(pte_to_pa(*pte_p)) != ppnum))) {
4825 #if MACH_ASSERT
4826 if ((pmap != NULL) && (pve_p != PV_ENTRY_NULL) && (kern_feature_override(KF_PMAPV_OVRD) == FALSE)) {
4827 /* Temporarily set PTEP to NULL so that the logic below doesn't pick it up as duplicate. */
4828 pt_entry_t *temp_ptep = pve_get_ptep(pve_p, pve_ptep_idx);
4829 pve_set_ptep(pve_p, pve_ptep_idx, PT_ENTRY_NULL);
4830
4831 pv_entry_t *check_pvep = pve_p;
4832
4833 do {
4834 if (pve_find_ptep_index(check_pvep, pte_p) != -1) {
4835 panic_plain("%s: duplicate pve entry ptep=%p pmap=%p, pvh=%p, "
4836 "pvep=%p, pai=0x%x", __func__, pte_p, pmap, pv_h, pve_p, pai);
4837 }
4838 } while ((check_pvep = pve_next(check_pvep)) != PV_ENTRY_NULL);
4839
4840 /* Restore previous PTEP value. */
4841 pve_set_ptep(pve_p, pve_ptep_idx, temp_ptep);
4842 }
4843 #endif
4844 panic("pmap_page_protect: bad pve entry pte_p=%p pmap=%p prot=%d options=%u, pv_h=%p, pveh_p=%p, pve_p=%p, pte=0x%llx, va=0x%llx ppnum: 0x%x",
4845 pte_p, pmap, prot, options, pv_h, pveh_p, pve_p, (uint64_t)*pte_p, (uint64_t)va, ppnum);
4846 }
4847
4848 #if DEVELOPMENT || DEBUG
4849 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
4850 #else
4851 if ((prot & VM_PROT_EXECUTE))
4852 #endif
4853 {
4854 set_NX = false;
4855 } else {
4856 set_NX = true;
4857 }
4858
4859 /* Remove the mapping if new protection is NONE */
4860 if (remove) {
4861 const bool is_internal = ppattr_pve_is_internal(pai, pve_p, pve_ptep_idx);
4862 const bool is_altacct = ppattr_pve_is_altacct(pai, pve_p, pve_ptep_idx);
4863 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
4864 pt_entry_t spte = *pte_p;
4865
4866 if (pte_is_wired(spte)) {
4867 pte_set_wired(pmap, pte_p, 0);
4868 spte = *pte_p;
4869 if (pmap != kernel_pmap) {
4870 pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4871 }
4872 }
4873
4874 assertf(atop(pte_to_pa(spte)) == ppnum, "unexpected value 0x%llx for pte %p mapping ppnum 0x%x",
4875 (uint64_t)spte, pte_p, ppnum);
4876
4877 if (compress && is_internal && (pmap != kernel_pmap)) {
4878 assert(!ARM_PTE_IS_COMPRESSED(*pte_p, pte_p));
4879 /* mark this PTE as having been "compressed" */
4880 tmplate = ARM_PTE_COMPRESSED;
4881 if (is_altacct) {
4882 tmplate |= ARM_PTE_COMPRESSED_ALT;
4883 }
4884 } else {
4885 tmplate = ARM_PTE_TYPE_FAULT;
4886 }
4887
4888 assert(spte != tmplate);
4889 write_pte_fast(pte_p, tmplate);
4890 update = true;
4891 ++pass1_updated;
4892
4893 pmap_ledger_debit(pmap, task_ledgers.phys_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4894
4895 if (pmap != kernel_pmap) {
4896 if (ppattr_test_reusable(pai) &&
4897 is_internal &&
4898 !is_altacct) {
4899 pmap_ledger_debit(pmap, task_ledgers.reusable, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4900 } else if (!is_internal) {
4901 pmap_ledger_debit(pmap, task_ledgers.external, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4902 }
4903
4904 if (is_altacct) {
4905 assert(is_internal);
4906 pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4907 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4908 if (options & PMAP_OPTIONS_COMPRESSOR) {
4909 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4910 pmap_ledger_credit(pmap, task_ledgers.alternate_accounting_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4911 }
4912 ppattr_pve_clr_internal(pai, pve_p, pve_ptep_idx);
4913 ppattr_pve_clr_altacct(pai, pve_p, pve_ptep_idx);
4914 } else if (ppattr_test_reusable(pai)) {
4915 assert(is_internal);
4916 if (options & PMAP_OPTIONS_COMPRESSOR) {
4917 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4918 /* was not in footprint, but is now */
4919 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4920 }
4921 ppattr_pve_clr_internal(pai, pve_p, pve_ptep_idx);
4922 } else if (is_internal) {
4923 pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4924
4925 /*
4926 * Update all stats related to physical footprint, which only
4927 * deals with internal pages.
4928 */
4929 if (options & PMAP_OPTIONS_COMPRESSOR) {
4930 /*
4931 * This removal is only being done so we can send this page to
4932 * the compressor; therefore it mustn't affect total task footprint.
4933 */
4934 pmap_ledger_credit(pmap, task_ledgers.internal_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4935 } else {
4936 /*
4937 * This internal page isn't going to the compressor, so adjust stats to keep
4938 * phys_footprint up to date.
4939 */
4940 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
4941 }
4942 ppattr_pve_clr_internal(pai, pve_p, pve_ptep_idx);
4943 } else {
4944 /* external page: no impact on ledgers */
4945 }
4946 }
4947 assert((pve_p == PV_ENTRY_NULL) || !pve_get_altacct(pve_p, pve_ptep_idx));
4948 } else {
4949 pt_entry_t spte = *pte_p;
4950 const pt_attr_t *const pt_attr = pmap_get_pt_attr(pmap);
4951
4952 if (pmap == kernel_pmap) {
4953 tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA));
4954 } else {
4955 tmplate = ((spte & ~ARM_PTE_APMASK) | pt_attr_leaf_ro(pt_attr));
4956 }
4957
4958 /*
4959 * While the naive implementation of this would serve to add execute
4960 * permission, this is not how the VM uses this interface, or how
4961 * x86_64 implements it. So ignore requests to add execute permissions.
4962 */
4963 if (set_NX) {
4964 tmplate |= pt_attr_leaf_xn(pt_attr);
4965 }
4966
4967
4968 assert(spte != ARM_PTE_TYPE_FAULT);
4969 assert(!ARM_PTE_IS_COMPRESSED(spte, pte_p));
4970
4971 if (spte != tmplate) {
4972 /*
4973 * Mark the PTE so that we'll know this mapping requires a TLB flush in pass 2.
4974 * This allows us to avoid unnecessary flushing e.g. for COW aliases that didn't
4975 * require permission updates. We use the ARM_PTE_WRITEABLE bit as that bit
4976 * should always be cleared by this function.
4977 */
4978 pte_set_was_writeable(tmplate, true);
4979 write_pte_fast(pte_p, tmplate);
4980 update = true;
4981 ++pass1_updated;
4982 } else if (pte_was_writeable(tmplate)) {
4983 /*
4984 * We didn't change any of the relevant permission bits in the PTE, so we don't need
4985 * to flush the TLB, but we do want to clear the "was_writeable" flag. When revoking
4986 * write access to a page, this function should always at least clear that flag for
4987 * all PTEs, as the VM is effectively requesting that subsequent write accesses to
4988 * these mappings go through vm_fault(). We therefore don't want those accesses to
4989 * be handled through arm_fast_fault().
4990 */
4991 pte_set_was_writeable(tmplate, false);
4992 write_pte_fast(pte_p, tmplate);
4993 }
4994 }
4995
4996 if (!issue_tlbi && update && !(options & PMAP_OPTIONS_NOFLUSH)) {
4997 tlb_flush_needed = true;
4998 if (remove || !flush_range || (flush_range->ptfr_pmap != pmap) ||
4999 (va >= flush_range->ptfr_end) || (va < flush_range->ptfr_start)) {
5000 issue_tlbi = true;
5001 }
5002 }
5003 protect_skip_pve_pass1:
5004 pte_p = PT_ENTRY_NULL;
5005 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
5006 pve_ptep_idx = 0;
5007 pve_p = pve_next(pve_p);
5008 }
5009 }
5010
5011 if (tlb_flush_needed) {
5012 FLUSH_PTE_STRONG();
5013 }
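/*
* Shape of this function (a sketch; the details live in the passes
* themselves):
*
*   pass 1: downgrade or mark each PTE, recording which ones changed
*   FLUSH_PTE_STRONG();    <- publish the PTE updates before any TLBI
*   pass 2: issue per-VA TLB invalidates, unlink removed CPU mappings
*   sync_tlb_flush();      <- wait for the invalidates to complete
*
* Batching the PTE stores in pass 1 lets a single barrier cover all of
* them instead of synchronizing per mapping.
*/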
5014
5015 if (!remove && !issue_tlbi) {
5016 goto protect_finish;
5017 }
5018
5019 /* Pass 2: Invalidate TLBs and update the list to remove CPU mappings */
5020 pv_entry_t **pve_pp = pv_h;
5021 pve_p = orig_pve_p;
5022 pte_p = orig_pte_p;
5023 pve_ptep_idx = 0;
5024
5025 /*
5026 * We need to keep track of whether a particular PVE list contains IOMMU
5027 * mappings when removing entries, because we should only remove CPU
5028 * mappings. If a PVE list contains at least one IOMMU mapping, we keep
5029 * it around.
5030 */
5031 bool iommu_mapping_in_pve = false;
5032 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
5033 if (pve_p != PV_ENTRY_NULL) {
5034 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
5035 if (pte_p == PT_ENTRY_NULL) {
5036 goto protect_skip_pve_pass2;
5037 }
5038 }
5039
5040 #ifdef PVH_FLAG_IOMMU
5041 if (pvh_ptep_is_iommu(pte_p)) {
5042 iommu_mapping_in_pve = true;
5043 if (remove && (pve_p == PV_ENTRY_NULL)) {
5044 /*
5045 * We've found an IOMMU entry and it's the only entry in the PV list.
5046 * We don't discard IOMMU entries, so simply set up the new PV list to
5047 * contain the single IOMMU PTE and exit the loop.
5048 */
5049 new_pte_p = pte_p;
5050 break;
5051 }
5052 goto protect_skip_pve_pass2;
5053 }
5054 #endif
5055 pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
5056 const pmap_t pmap = ptdp->pmap;
5057 const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
5058
5059 if (remove) {
5060 if (!compress && (pmap != kernel_pmap)) {
5061 /*
5062 * We must wait to decrement the refcount until we're completely finished using the PTE
5063 * on this path. Otherwise, if we happened to drop the refcount to zero, a concurrent
5064 * pmap_remove() call might observe the zero refcount and free the pagetable out from
5065 * under us.
5066 */
5067 if (OSAddAtomic16(-1, (SInt16 *) &(ptd_get_info(ptdp, pte_p)->refcnt)) <= 0) {
5068 panic("pmap_page_protect_options(): over-release of ptdp %p for pte %p", ptep_get_ptd(pte_p), pte_p);
5069 }
5070 }
5071 /* Remove this CPU mapping from PVE list. */
5072 if (pve_p != PV_ENTRY_NULL) {
5073 pve_set_ptep(pve_p, pve_ptep_idx, PT_ENTRY_NULL);
5074 }
5075 } else {
5076 pt_entry_t spte = *pte_p;
5077 if (pte_was_writeable(spte)) {
5078 pte_set_was_writeable(spte, false);
5079 write_pte_fast(pte_p, spte);
5080 } else {
5081 goto protect_skip_pve_pass2;
5082 }
5083 }
5084 ++pass2_updated;
5085 if (remove || !flush_range || (flush_range->ptfr_pmap != pmap) ||
5086 (va >= flush_range->ptfr_end) || (va < flush_range->ptfr_start)) {
5087 pmap_get_pt_ops(pmap)->flush_tlb_region_async(va,
5088 pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap, true);
5089 }
5090
5091 protect_skip_pve_pass2:
5092 pte_p = PT_ENTRY_NULL;
5093 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
5094 pve_ptep_idx = 0;
5095
5096 if (remove) {
5097 /**
5098 * If there are any IOMMU mappings in the PVE list, preserve
5099 * those mappings in a new PVE list (new_pve_p) which will later
5100 * become the new PVH entry. Keep track of the CPU mappings in
5101 * pveh_p/pvet_p so they can be deallocated later.
5102 */
5103 if (iommu_mapping_in_pve) {
5104 iommu_mapping_in_pve = false;
5105 pv_entry_t *temp_pve_p = pve_next(pve_p);
5106 pve_remove(pv_h, pve_pp, pve_p);
5107 pveh_p = pvh_pve_list(pv_h);
5108 pve_p->pve_next = new_pve_p;
5109 new_pve_p = pve_p;
5110 pve_p = temp_pve_p;
5111 continue;
5112 } else {
5113 pvet_p = pve_p;
5114 pvh_cnt++;
5115 }
5116 }
5117
5118 pve_pp = pve_next_ptr(pve_p);
5119 pve_p = pve_next(pve_p);
5120 iommu_mapping_in_pve = false;
5121 }
5122 }
5123
5124 protect_finish:
5125
5126 #ifdef PVH_FLAG_EXEC
5127 if (remove && (pvh_get_flags(pv_h) & PVH_FLAG_EXEC)) {
5128 pmap_set_ptov_ap(pai, AP_RWNA, tlb_flush_needed);
5129 }
5130 #endif
5131 if (__improbable(pass1_updated != pass2_updated)) {
5132 panic("%s: first pass (%u) and second pass (%u) disagree on updated mappings",
5133 __func__, pass1_updated, pass2_updated);
5134 }
5135 /* if we removed a bunch of entries, take care of them now */
5136 if (remove) {
5137 if (new_pve_p != PV_ENTRY_NULL) {
5138 pvh_update_head(pv_h, new_pve_p, PVH_TYPE_PVEP);
5139 pvh_set_flags(pv_h, pvh_flags);
5140 } else if (new_pte_p != PT_ENTRY_NULL) {
5141 pvh_update_head(pv_h, new_pte_p, PVH_TYPE_PTEP);
5142 pvh_set_flags(pv_h, pvh_flags);
5143 } else {
5144 pvh_update_head(pv_h, PV_ENTRY_NULL, PVH_TYPE_NULL);
5145 }
5146 }
5147
5148 if (flush_range && tlb_flush_needed) {
5149 if (!remove) {
5150 flush_range->ptfr_flush_needed = true;
5151 tlb_flush_needed = false;
5152 }
5153 }
5154
5155 /*
5156 * If we removed PV entries, ensure prior TLB flushes are complete before we drop the PVH
5157 * lock to allow the backing pages to be repurposed. This is a security precaution, aimed
5158 * primarily at XNU_MONITOR configurations, to reduce the likelihood of an attacker causing
5159 * a page to be repurposed while it is still live in the TLBs.
5160 */
5161 if (remove && tlb_flush_needed) {
5162 sync_tlb_flush();
5163 }
5164
5165 pvh_unlock(pai);
5166
5167 if (remove) {
5168 os_atomic_store(&pmap_cpu_data->inflight_disconnect, false, release);
5169 #if !XNU_MONITOR
5170 mp_enable_preemption();
5171 #endif
5172 }
5173
5174 if (!remove && tlb_flush_needed) {
5175 sync_tlb_flush();
5176 }
5177
5178 if (remove && (pvet_p != PV_ENTRY_NULL)) {
5179 pv_list_free(pveh_p, pvet_p, pvh_cnt);
5180 }
5181 }
5182
5183 MARK_AS_PMAP_TEXT void
5184 pmap_page_protect_options_internal(
5185 ppnum_t ppnum,
5186 vm_prot_t prot,
5187 unsigned int options,
5188 void *arg)
5189 {
5190 if (arg != NULL) {
5191 /*
5192 * If the argument is non-NULL, the VM layer is conveying its intention that the TLBs should
5193 * ultimately be flushed. The nature of ARM TLB maintenance is such that we can flush the
5194 * TLBs much more precisely if we do so inline with the pagetable updates, and the PPL
5195 * security model requires that we not exit the PPL without performing required TLB flushes anyway.
5196 * In that case, force the flush to take place.
5197 */
5198 options &= ~PMAP_OPTIONS_NOFLUSH;
5199 }
5200 pmap_page_protect_options_with_flush_range(ppnum, prot, options, NULL);
5201 }
5202
5203 void
5204 pmap_page_protect_options(
5205 ppnum_t ppnum,
5206 vm_prot_t prot,
5207 unsigned int options,
5208 void *arg)
5209 {
5210 pmap_paddr_t phys = ptoa(ppnum);
5211
5212 assert(ppnum != vm_page_fictitious_addr);
5213
5214 /* Only work with managed pages. */
5215 if (!pa_valid(phys)) {
5216 return;
5217 }
5218
5219 /*
5220 * Determine the new protection.
5221 */
5222 if (prot == VM_PROT_ALL) {
5223 return; /* nothing to do */
5224 }
5225
5226 PMAP_TRACE(2, PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_START, ppnum, prot);
5227
5228 #if XNU_MONITOR
5229 pmap_page_protect_options_ppl(ppnum, prot, options, arg);
5230 #else
5231 pmap_page_protect_options_internal(ppnum, prot, options, arg);
5232 #endif
5233
5234 PMAP_TRACE(2, PMAP_CODE(PMAP__PAGE_PROTECT) | DBG_FUNC_END);
5235 }
5236
5237
5238 #if __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX)
5239 MARK_AS_PMAP_TEXT void
5240 pmap_disable_user_jop_internal(pmap_t pmap)
5241 {
5242 if (pmap == kernel_pmap) {
5243 panic("%s: called with kernel_pmap", __func__);
5244 }
5245 validate_pmap_mutable(pmap);
5246 pmap->disable_jop = true;
5247 }
5248
5249 void
5250 pmap_disable_user_jop(pmap_t pmap)
5251 {
5252 #if XNU_MONITOR
5253 pmap_disable_user_jop_ppl(pmap);
5254 #else
5255 pmap_disable_user_jop_internal(pmap);
5256 #endif
5257 }
5258 #endif /* __has_feature(ptrauth_calls) && defined(XNU_TARGET_OS_OSX) */
5259
5260 /*
5261 * Indicates whether the pmap layer enforces any additional restrictions on the
5262 * given set of protections.
5263 */
5264 bool
5265 pmap_has_prot_policy(__unused pmap_t pmap, __unused bool translated_allow_execute, __unused vm_prot_t prot)
5266 {
5267 return false;
5268 }
5269
5270 /*
5271 * Set the physical protection on the
5272 * specified range of this map as requested.
5273 * VERY IMPORTANT: Will not increase permissions.
5274 * VERY IMPORTANT: Only pmap_enter() is allowed to grant permissions.
5275 */
5276 void
5277 pmap_protect(
5278 pmap_t pmap,
5279 vm_map_address_t b,
5280 vm_map_address_t e,
5281 vm_prot_t prot)
5282 {
5283 pmap_protect_options(pmap, b, e, prot, 0, NULL);
5284 }
5285
5286 MARK_AS_PMAP_TEXT vm_map_address_t
5287 pmap_protect_options_internal(
5288 pmap_t pmap,
5289 vm_map_address_t start,
5290 vm_map_address_t end,
5291 vm_prot_t prot,
5292 unsigned int options,
5293 __unused void *args)
5294 {
5295 tt_entry_t *tte_p;
5296 pt_entry_t *bpte_p, *epte_p;
5297 pt_entry_t *pte_p;
5298 boolean_t set_NX = TRUE;
5299 #if (__ARM_VMSA__ > 7)
5300 boolean_t set_XO = FALSE;
5301 #endif
5302 boolean_t should_have_removed = FALSE;
5303 bool need_strong_sync = false;
5304
5305 /* Validate the pmap input before accessing its data. */
5306 validate_pmap_mutable(pmap);
5307
5308 const pt_attr_t *const pt_attr = pmap_get_pt_attr(pmap);
5309
5310 if (__improbable((end < start) || (end > ((start + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr))))) {
5311 panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end);
5312 }
5313
5314 #if DEVELOPMENT || DEBUG
5315 if (options & PMAP_OPTIONS_PROTECT_IMMEDIATE) {
5316 if ((prot & VM_PROT_ALL) == VM_PROT_NONE) {
5317 should_have_removed = TRUE;
5318 }
5319 } else
5320 #endif
5321 {
5322 /* Determine the new protection. */
5323 switch (prot) {
5324 #if (__ARM_VMSA__ > 7)
5325 case VM_PROT_EXECUTE:
5326 set_XO = TRUE;
5327 OS_FALLTHROUGH;
5328 #endif
5329 case VM_PROT_READ:
5330 case VM_PROT_READ | VM_PROT_EXECUTE:
5331 break;
5332 case VM_PROT_READ | VM_PROT_WRITE:
5333 case VM_PROT_ALL:
5334 return end; /* nothing to do */
5335 default:
5336 should_have_removed = TRUE;
5337 }
5338 }
5339
5340 if (should_have_removed) {
5341 panic("%s: should have been a remove operation, "
5342 "pmap=%p, start=%p, end=%p, prot=%#x, options=%#x, args=%p",
5343 __FUNCTION__,
5344 pmap, (void *)start, (void *)end, prot, options, args);
5345 }
5346
5347 #if DEVELOPMENT || DEBUG
5348 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
5349 #else
5350 if ((prot & VM_PROT_EXECUTE))
5351 #endif
5352 {
5353 set_NX = FALSE;
5354 } else {
5355 set_NX = TRUE;
5356 }
5357
5358 const uint64_t pmap_page_size = PAGE_RATIO * pt_attr_page_size(pt_attr);
5359 vm_map_address_t va = start;
5360 unsigned int npages = 0;
5361
5362 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
5363
5364 tte_p = pmap_tte(pmap, start);
5365
5366 if ((tte_p != (tt_entry_t *) NULL) && (*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
5367 bpte_p = (pt_entry_t *) ttetokv(*tte_p);
5368 bpte_p = &bpte_p[pte_index(pt_attr, start)];
5369 epte_p = bpte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr));
5371
5372 for (pte_p = bpte_p;
5373 pte_p < epte_p;
5374 pte_p += PAGE_RATIO, va += pmap_page_size) {
5375 ++npages;
5376 if (__improbable(!(npages % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
5377 pmap_pending_preemption())) {
5378 break;
5379 }
5380 pt_entry_t spte;
5381 #if DEVELOPMENT || DEBUG
5382 boolean_t force_write = FALSE;
5383 #endif
5384
5385 spte = *((volatile pt_entry_t*)pte_p);
5386
5387 if ((spte == ARM_PTE_TYPE_FAULT) ||
5388 ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
5389 continue;
5390 }
5391
5392 pmap_paddr_t pa;
5393 unsigned int pai = 0;
5394 boolean_t managed = FALSE;
5395
5396 while (!managed) {
5397 /*
5398 * It may be possible for the pte to transition from managed
5399 * to unmanaged in this timeframe; for now, elide the assert.
5400 * We should break out as a consequence of checking pa_valid.
5401 */
5402 // assert(!ARM_PTE_IS_COMPRESSED(spte));
5403 pa = pte_to_pa(spte);
5404 if (!pa_valid(pa)) {
5405 break;
5406 }
5407 pai = pa_index(pa);
5408 pvh_lock(pai);
5409 spte = *((volatile pt_entry_t*)pte_p);
5410 pa = pte_to_pa(spte);
5411 if (pai == pa_index(pa)) {
5412 managed = TRUE;
5413 break; // Leave the PVH locked; we will unlock it after updating the PTE
5414 }
5415 pvh_unlock(pai);
5416 }
5417
5418 if ((spte == ARM_PTE_TYPE_FAULT) ||
5419 ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
5420 continue;
5421 }
5422
5423 pt_entry_t tmplate;
5424
5425 if (pmap == kernel_pmap) {
5426 #if DEVELOPMENT || DEBUG
5427 if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) && (prot & VM_PROT_WRITE)) {
5428 force_write = TRUE;
5429 tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RWNA));
5430 } else
5431 #endif
5432 {
5433 tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA));
5434 }
5435 } else {
5436 #if DEVELOPMENT || DEBUG
5437 if ((options & PMAP_OPTIONS_PROTECT_IMMEDIATE) && (prot & VM_PROT_WRITE)) {
5438 assert(pmap->type != PMAP_TYPE_NESTED);
5439 force_write = TRUE;
5440 tmplate = ((spte & ~ARM_PTE_APMASK) | pt_attr_leaf_rw(pt_attr));
5441 } else
5442 #endif
5443 {
5444 tmplate = ((spte & ~ARM_PTE_APMASK) | pt_attr_leaf_ro(pt_attr));
5445 }
5446 }
5447
5448 /*
5449 * XXX Removing "NX" would
5450 * grant "execute" access
5451 * immediately, bypassing any
5452 * checks VM might want to do
5453 * in its soft fault path.
5454 * pmap_protect() and co. are
5455 * not allowed to increase
5456 * access permissions.
5457 */
5458 if (set_NX) {
5459 tmplate |= pt_attr_leaf_xn(pt_attr);
5460 } else {
5461 #if (__ARM_VMSA__ > 7)
5462 if (pmap == kernel_pmap) {
5463 /* do NOT clear "PNX"! */
5464 tmplate |= ARM_PTE_NX;
5465 } else {
5466 /* do NOT clear "NX"! */
5467 tmplate |= pt_attr_leaf_x(pt_attr);
5468 if (set_XO) {
5469 tmplate &= ~ARM_PTE_APMASK;
5470 tmplate |= pt_attr_leaf_rona(pt_attr);
5471 }
5472 }
5473 #endif
5474 }
5475
5476 #if DEVELOPMENT || DEBUG
5477 if (force_write) {
5478 /*
5479 * TODO: Run CS/Monitor checks here.
5480 */
5481 if (managed) {
5482 /*
5483 * We are marking the page as writable,
5484 * so we consider it to be modified and
5485 * referenced.
5486 */
5487 ppattr_pa_set_bits(pa, PP_ATTR_REFERENCED | PP_ATTR_MODIFIED);
5488 tmplate |= ARM_PTE_AF;
5489
5490 if (ppattr_test_reffault(pai)) {
5491 ppattr_clear_reffault(pai);
5492 }
5493
5494 if (ppattr_test_modfault(pai)) {
5495 ppattr_clear_modfault(pai);
5496 }
5497 }
5498 } else if (options & PMAP_OPTIONS_PROTECT_IMMEDIATE) {
5499 /*
5500 * An immediate request for anything other than
5501 * write should still mark the page as
5502 * referenced if managed.
5503 */
5504 if (managed) {
5505 ppattr_pa_set_bits(pa, PP_ATTR_REFERENCED);
5506 tmplate |= ARM_PTE_AF;
5507
5508 if (ppattr_test_reffault(pai)) {
5509 ppattr_clear_reffault(pai);
5510 }
5511 }
5512 }
5513 #endif
5514
5515 /* We do not expect writes to this entry to be handled via arm_fast_fault(). */
5516 pte_set_was_writeable(tmplate, false);
5517
5518 write_pte_fast(pte_p, tmplate);
5519
5520 if (managed) {
5521 pvh_assert_locked(pai);
5522 pvh_unlock(pai);
5523 }
5524 }
5525 FLUSH_PTE_STRONG();
5526 PMAP_UPDATE_TLBS(pmap, start, va, need_strong_sync, true);
5527 } else {
5528 va = end;
5529 }
5530
5531 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
5532 return va;
5533 }
5534
5535 void
5536 pmap_protect_options(
5537 pmap_t pmap,
5538 vm_map_address_t b,
5539 vm_map_address_t e,
5540 vm_prot_t prot,
5541 unsigned int options,
5542 __unused void *args)
5543 {
5544 vm_map_address_t l, beg;
5545
5546 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
5547
5548 if ((b | e) & pt_attr_leaf_offmask(pt_attr)) {
5549 panic("pmap_protect_options() pmap %p start 0x%llx end 0x%llx",
5550 pmap, (uint64_t)b, (uint64_t)e);
5551 }
5552
5553 /*
5554 * We allow single-page requests to execute non-preemptibly,
5555 * as it doesn't make sense to sample AST_URGENT for a single-page
5556 * operation, and there are a couple of special use cases that
5557 * require a non-preemptible single-page operation.
5558 */
5559 if ((e - b) > (pt_attr_page_size(pt_attr) * PAGE_RATIO)) {
5560 pmap_verify_preemptible();
5561 }
5562
5563 #if DEVELOPMENT || DEBUG
5564 if (options & PMAP_OPTIONS_PROTECT_IMMEDIATE) {
5565 if ((prot & VM_PROT_ALL) == VM_PROT_NONE) {
5566 pmap_remove_options(pmap, b, e, options);
5567 return;
5568 }
5569 } else
5570 #endif
5571 {
5572 /* Determine the new protection. */
5573 switch (prot) {
5574 case VM_PROT_EXECUTE:
5575 case VM_PROT_READ:
5576 case VM_PROT_READ | VM_PROT_EXECUTE:
5577 break;
5578 case VM_PROT_READ | VM_PROT_WRITE:
5579 case VM_PROT_ALL:
5580 return; /* nothing to do */
5581 default:
5582 pmap_remove_options(pmap, b, e, options);
5583 return;
5584 }
5585 }
5586
5587 PMAP_TRACE(2, PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_START,
5588 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(b),
5589 VM_KERNEL_ADDRHIDE(e));
5590
5591 beg = b;
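/*
* Worked example (illustrative, assuming a 4K page geometry in which a
* twig (L2) entry covers 0x200000 bytes): for b = 0x1ff000 and
* e = 0x401000, the loop below invokes the internal routine on
* [0x1ff000, 0x200000), [0x200000, 0x400000) and [0x400000, 0x401000),
* so no single call crosses a twig boundary and the range check in
* pmap_protect_options_internal() is satisfied.
*/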
5592
5593 while (beg < e) {
5594 l = ((beg + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr));
5595
5596 if (l > e) {
5597 l = e;
5598 }
5599
5600 #if XNU_MONITOR
5601 beg = pmap_protect_options_ppl(pmap, beg, l, prot, options, args);
5602 #else
5603 beg = pmap_protect_options_internal(pmap, beg, l, prot, options, args);
5604 #endif
5605 }
5606
5607 PMAP_TRACE(2, PMAP_CODE(PMAP__PROTECT) | DBG_FUNC_END);
5608 }
5609
5610 /**
5611 * Inserts an arbitrary number of physical pages ("block") into a pmap.
5612 *
5613 * @param pmap pmap to insert the pages into.
5614 * @param va virtual address to map the pages into.
5615 * @param pa page number of the first physical page to map.
5616 * @param size block size, in number of pages.
5617 * @param prot mapping protection attributes.
5618 * @param attr flags to pass to pmap_enter().
5619 * @param flags extra options, forwarded to pmap_map_block_addr().
5620 * @return KERN_SUCCESS.
5621 */
5622 kern_return_t
5623 pmap_map_block(
5624 pmap_t pmap,
5625 addr64_t va,
5626 ppnum_t pa,
5627 uint32_t size,
5628 vm_prot_t prot,
5629 int attr,
5630 unsigned int flags)
5631 {
5632 return pmap_map_block_addr(pmap, va, ((pmap_paddr_t)pa) << PAGE_SHIFT, size, prot, attr, flags);
5633 }
5634
5635 /**
5636 * Inserts an arbitrary number of physical pages ("block") into a pmap.
5637 * As opposed to pmap_map_block(), this function takes
5638 * a physical address as an input and operates using the
5639 * page size associated with the input pmap.
5640 *
5641 * @param pmap pmap to insert the pages into.
5642 * @param va virtual address to map the pages into.
5643 * @param pa physical address of the first physical page to map.
5644 * @param size block size, in number of pages.
5645 * @param prot mapping protection attributes.
5646 * @param attr flags to pass to pmap_enter().
5647 * @param flags extra options; used here only in panic diagnostics.
5648 * @return KERN_SUCCESS.
5649 */
5650 kern_return_t
5651 pmap_map_block_addr(
5652 pmap_t pmap,
5653 addr64_t va,
5654 pmap_paddr_t pa,
5655 uint32_t size,
5656 vm_prot_t prot,
5657 int attr,
5658 unsigned int flags)
5659 {
5660 #if __ARM_MIXED_PAGE_SIZE__
5661 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
5662 const uint64_t pmap_page_size = pt_attr_page_size(pt_attr);
5663 #else
5664 const uint64_t pmap_page_size = PAGE_SIZE;
5665 #endif
5666
5667 for (ppnum_t page = 0; page < size; page++) {
5668 if (pmap_enter_addr(pmap, va, pa, prot, VM_PROT_NONE, attr, TRUE) != KERN_SUCCESS) {
5669 panic("%s: failed pmap_enter_addr, "
5670 "pmap=%p, va=%#llx, pa=%llu, size=%u, prot=%#x, flags=%#x",
5671 __FUNCTION__,
5672 pmap, va, (uint64_t)pa, size, prot, flags);
5673 }
5674
5675 va += pmap_page_size;
5676 pa += pmap_page_size;
5677 }
5678
5679 return KERN_SUCCESS;
5680 }
5681
5682 kern_return_t
5683 pmap_enter_addr(
5684 pmap_t pmap,
5685 vm_map_address_t v,
5686 pmap_paddr_t pa,
5687 vm_prot_t prot,
5688 vm_prot_t fault_type,
5689 unsigned int flags,
5690 boolean_t wired)
5691 {
5692 return pmap_enter_options_addr(pmap, v, pa, prot, fault_type, flags, wired, 0, NULL);
5693 }
5694
5695 /*
5696 * Insert the given physical page (p) at
5697 * the specified virtual address (v) in the
5698 * target physical map with the protection requested.
5699 *
5700 * If specified, the page will be wired down, meaning
5701 * that the related pte can not be reclaimed.
5702 *
5703 * NB: This is the only routine which MAY NOT lazy-evaluate
5704 * or lose information. That is, this routine must actually
5705 * insert this page into the given map eventually (i.e., it must make
5706 * forward progress).
5707 */
5708 kern_return_t
5709 pmap_enter(
5710 pmap_t pmap,
5711 vm_map_address_t v,
5712 ppnum_t pn,
5713 vm_prot_t prot,
5714 vm_prot_t fault_type,
5715 unsigned int flags,
5716 boolean_t wired)
5717 {
5718 return pmap_enter_addr(pmap, v, ((pmap_paddr_t)pn) << PAGE_SHIFT, prot, fault_type, flags, wired);
5719 }
5720
5721 /*
5722 * Attempt to commit the pte.
5723 * Succeeds iff able to change *pte_p from old_pte to new_pte.
5724 * Performs no page table or accounting writes on failures.
5725 */
5726 static inline bool
5727 pmap_enter_pte(pmap_t pmap, pt_entry_t *pte_p, pt_entry_t *old_pte, pt_entry_t new_pte, vm_map_address_t v)
5728 {
5729 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
5730 bool success = false, changed_wiring = false;
5731
5732 __unreachable_ok_push
5733 if (TEST_PAGE_RATIO_4) {
5734 /*
5735 * 16K virtual pages w/ 4K hw pages.
5736 * We actually need to update 4 PTEs here, which can't easily be done atomically.
5737 * As a result we require the exclusive pmap lock.
5738 */
5739 pmap_assert_locked(pmap, PMAP_LOCK_EXCLUSIVE);
5740 *old_pte = *pte_p;
5741 if (*old_pte == new_pte) {
5742 /* Another thread completed this operation. Nothing to do here. */
5743 success = true;
5744 } else if (pa_valid(pte_to_pa(new_pte)) && pte_to_pa(*old_pte) != pte_to_pa(new_pte) &&
5745 (*old_pte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE) {
5746 /* pte has been modified by another thread and we hold the wrong PVH lock. Retry. */
5747 success = false;
5748 } else {
5749 write_pte_fast(pte_p, new_pte);
5750 success = true;
5751 }
5752 } else {
5753 success = os_atomic_cmpxchgv(pte_p, *old_pte, new_pte, old_pte, acq_rel);
5754 }
5755 __unreachable_ok_pop
5756
5757 if (success && *old_pte != new_pte) {
5758 if ((*old_pte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE) {
5759 FLUSH_PTE_STRONG();
5760 PMAP_UPDATE_TLBS(pmap, v, v + (pt_attr_page_size(pt_attr) * PAGE_RATIO), false, true);
5761 } else {
5762 FLUSH_PTE();
5763 __builtin_arm_isb(ISB_SY);
5764 }
5765 changed_wiring = ARM_PTE_IS_COMPRESSED(*old_pte, pte_p) ?
5766 (new_pte & ARM_PTE_WIRED) != 0 :
5767 (new_pte & ARM_PTE_WIRED) != (*old_pte & ARM_PTE_WIRED);
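/*
* Note: a compressed marker has no meaningful wired bit, so when one is
* being replaced the wiring delta is simply whether the new PTE is wired.
*/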
5768
5769 if (pmap != kernel_pmap && changed_wiring) {
5770 SInt16 *ptd_wiredcnt_ptr = (SInt16 *)&(ptep_get_info(pte_p)->wiredcnt);
5771 if (new_pte & ARM_PTE_WIRED) {
5772 OSAddAtomic16(1, ptd_wiredcnt_ptr);
5773 pmap_ledger_credit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
5774 } else {
5775 OSAddAtomic16(-1, ptd_wiredcnt_ptr);
5776 pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
5777 }
5778 }
5779
5780 PMAP_TRACE(4 + pt_attr_leaf_level(pt_attr), PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap),
5781 VM_KERNEL_ADDRHIDE(v), VM_KERNEL_ADDRHIDE(v + (pt_attr_page_size(pt_attr) * PAGE_RATIO)), new_pte);
5782 }
5783 return success;
5784 }
5785
5786 MARK_AS_PMAP_TEXT static pt_entry_t
5787 wimg_to_pte(unsigned int wimg, __unused pmap_paddr_t pa)
5788 {
5789 pt_entry_t pte;
5790
5791 switch (wimg & (VM_WIMG_MASK)) {
5792 case VM_WIMG_IO:
5793 // Map DRAM addresses with VM_WIMG_IO as Device-GRE instead of
5794 // Device-nGnRnE. On H14+, accesses to them can be reordered by the
5795 // AP (application processor), while preserving the security benefits
5796 // of using a device mapping against side-channel attacks. On pre-H14
5797 // platforms, the accesses will still be strongly ordered.
5798 if (is_dram_addr(pa)) {
5799 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED);
5800 } else {
5801 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE);
5802 }
5803 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5804 break;
5805 case VM_WIMG_RT:
5806 #if HAS_UCNORMAL_MEM
5807 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB);
5808 #else
5809 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DISABLE);
5810 #endif
5811 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5812 break;
5813 case VM_WIMG_POSTED:
5814 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED);
5815 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5816 break;
5817 case VM_WIMG_POSTED_REORDERED:
5818 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED_REORDERED);
5819 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5820 break;
5821 case VM_WIMG_POSTED_COMBINED_REORDERED:
5822 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_POSTED_COMBINED_REORDERED);
5823 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5824 break;
5825 case VM_WIMG_WCOMB:
5826 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITECOMB);
5827 pte |= ARM_PTE_NX | ARM_PTE_PNX;
5828 break;
5829 case VM_WIMG_WTHRU:
5830 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITETHRU);
5831 #if (__ARM_VMSA__ > 7)
5832 pte |= ARM_PTE_SH(SH_OUTER_MEMORY);
5833 #else
5834 pte |= ARM_PTE_SH;
5835 #endif
5836 break;
5837 case VM_WIMG_COPYBACK:
5838 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK);
5839 #if (__ARM_VMSA__ > 7)
5840 pte |= ARM_PTE_SH(SH_OUTER_MEMORY);
5841 #else
5842 pte |= ARM_PTE_SH;
5843 #endif
5844 break;
5845 case VM_WIMG_INNERWBACK:
5846 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_INNERWRITEBACK);
5847 #if (__ARM_VMSA__ > 7)
5848 pte |= ARM_PTE_SH(SH_INNER_MEMORY);
5849 #else
5850 pte |= ARM_PTE_SH;
5851 #endif
5852 break;
5853 default:
5854 pte = ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
5855 #if (__ARM_VMSA__ > 7)
5856 pte |= ARM_PTE_SH(SH_OUTER_MEMORY);
5857 #else
5858 pte |= ARM_PTE_SH;
5859 #endif
5860 }
5861
5862 return pte;
5863 }
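/*
* Callers generally reach this through pmap_get_pt_ops(pmap)->wimg_to_pte
* (see pmap_enter_options_internal()), presumably so alternative
* page-table configurations can supply their own encodings.
*/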
5864
5865
5866 /*
5867 * Construct a PTE (and the physical page attributes) for the given virtual to
5868 * physical mapping.
5869 *
5870 * This function has no side effects, which makes it safe to call while
5871 * attempting a pmap_enter transaction.
5872 */
5873 MARK_AS_PMAP_TEXT static pt_entry_t
5874 pmap_construct_pte(
5875 const pmap_t pmap,
5876 vm_map_address_t va,
5877 pmap_paddr_t pa,
5878 vm_prot_t prot,
5879 vm_prot_t fault_type,
5880 boolean_t wired,
5881 const pt_attr_t* const pt_attr,
5882 uint16_t *pp_attr_bits /* OUTPUT */
5883 )
5884 {
5885 bool set_NX = false, set_XO = false;
5886 pt_entry_t pte = pa_to_pte(pa) | ARM_PTE_TYPE;
5887 assert(pp_attr_bits != NULL);
5888 *pp_attr_bits = 0;
5889
5890 if (wired) {
5891 pte |= ARM_PTE_WIRED;
5892 }
5893
5894 #if DEVELOPMENT || DEBUG
5895 if ((prot & VM_PROT_EXECUTE) || !nx_enabled || !pmap->nx_enabled)
5896 #else
5897 if ((prot & VM_PROT_EXECUTE))
5898 #endif
5899 {
5900 set_NX = false;
5901 } else {
5902 set_NX = true;
5903 }
5904
5905 #if (__ARM_VMSA__ > 7)
5906 if (prot == VM_PROT_EXECUTE) {
5907 set_XO = true;
5908 }
5909 #endif
5910
5911 if (set_NX) {
5912 pte |= pt_attr_leaf_xn(pt_attr);
5913 } else {
5914 #if (__ARM_VMSA__ > 7)
5915 if (pmap == kernel_pmap) {
5916 pte |= ARM_PTE_NX;
5917 } else {
5918 pte |= pt_attr_leaf_x(pt_attr);
5919 }
5920 #endif
5921 }
5922
5923 if (pmap == kernel_pmap) {
5924 #if __ARM_KERNEL_PROTECT__
5925 pte |= ARM_PTE_NG;
5926 #endif /* __ARM_KERNEL_PROTECT__ */
5927 if (prot & VM_PROT_WRITE) {
5928 pte |= ARM_PTE_AP(AP_RWNA);
5929 *pp_attr_bits |= PP_ATTR_MODIFIED | PP_ATTR_REFERENCED;
5930 } else {
5931 pte |= ARM_PTE_AP(AP_RONA);
5932 *pp_attr_bits |= PP_ATTR_REFERENCED;
5933 }
5934 #if (__ARM_VMSA__ == 7)
5935 if ((_COMM_PAGE_BASE_ADDRESS <= va) && (va < _COMM_PAGE_BASE_ADDRESS + _COMM_PAGE_AREA_LENGTH)) {
5936 pte = (pte & ~(ARM_PTE_APMASK)) | ARM_PTE_AP(AP_RORO);
5937 }
5938 #endif
5939 } else {
5940 if (pmap->type != PMAP_TYPE_NESTED) {
5941 pte |= ARM_PTE_NG;
5942 } else if ((pmap->nested_region_asid_bitmap)
5943 && (va >= pmap->nested_region_addr)
5944 && (va < (pmap->nested_region_addr + pmap->nested_region_size))) {
5945 unsigned int index = (unsigned int)((va - pmap->nested_region_addr) >> pt_attr_twig_shift(pt_attr));
5946
5947 if ((pmap->nested_region_asid_bitmap)
5948 && testbit(index, (int *)pmap->nested_region_asid_bitmap)) {
5949 pte |= ARM_PTE_NG;
5950 }
5951 }
5952 if (prot & VM_PROT_WRITE) {
5953 assert(pmap->type != PMAP_TYPE_NESTED);
5954 if (pa_valid(pa) && (!ppattr_pa_test_bits(pa, PP_ATTR_MODIFIED))) {
5955 if (fault_type & VM_PROT_WRITE) {
5956 if (set_XO) {
5957 pte |= pt_attr_leaf_rwna(pt_attr);
5958 } else {
5959 pte |= pt_attr_leaf_rw(pt_attr);
5960 }
5961 *pp_attr_bits |= PP_ATTR_REFERENCED | PP_ATTR_MODIFIED;
5962 } else {
5963 if (set_XO) {
5964 pte |= pt_attr_leaf_rona(pt_attr);
5965 } else {
5966 pte |= pt_attr_leaf_ro(pt_attr);
5967 }
5968 /*
5969 * Mark the page as MODFAULT so that a subsequent write
5970 * may be handled through arm_fast_fault().
5971 */
5972 *pp_attr_bits |= PP_ATTR_REFERENCED | PP_ATTR_MODFAULT;
5973 pte_set_was_writeable(pte, true);
5974 }
5975 } else {
5976 if (set_XO) {
5977 pte |= pt_attr_leaf_rwna(pt_attr);
5978 } else {
5979 pte |= pt_attr_leaf_rw(pt_attr);
5980 }
5981 *pp_attr_bits |= PP_ATTR_REFERENCED;
5982 }
5983 } else {
5984 if (set_XO) {
5985 pte |= pt_attr_leaf_rona(pt_attr);
5986 } else {
5987 pte |= pt_attr_leaf_ro(pt_attr);
5988 }
5989 *pp_attr_bits |= PP_ATTR_REFERENCED;
5990 }
5991 }
5992
5993 pte |= ARM_PTE_AF;
5994 return pte;
5995 }
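/*
* Worked example (illustrative): a wired, writable kernel mapping of a
* managed page with prot == VM_PROT_READ | VM_PROT_WRITE yields roughly
*
*   pa_to_pte(pa) | ARM_PTE_TYPE | ARM_PTE_WIRED | ARM_PTE_AP(AP_RWNA)
*     | pt_attr_leaf_xn(pt_attr) | ARM_PTE_AF
*
* with *pp_attr_bits == (PP_ATTR_MODIFIED | PP_ATTR_REFERENCED); cache
* attributes are deliberately left for the caller to fill in.
*/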
5996
5997 MARK_AS_PMAP_TEXT kern_return_t
5998 pmap_enter_options_internal(
5999 pmap_t pmap,
6000 vm_map_address_t v,
6001 pmap_paddr_t pa,
6002 vm_prot_t prot,
6003 vm_prot_t fault_type,
6004 unsigned int flags,
6005 boolean_t wired,
6006 unsigned int options)
6007 {
6008 ppnum_t pn = (ppnum_t)atop(pa);
6009 pt_entry_t pte;
6010 pt_entry_t spte;
6011 pt_entry_t *pte_p;
6012 bool refcnt_updated;
6013 bool wiredcnt_updated;
6014 bool ro_va = false;
6015 unsigned int wimg_bits;
6016 bool committed = false, drop_refcnt = false, had_valid_mapping = false, skip_footprint_debit = false;
6017 pmap_lock_mode_t lock_mode = PMAP_LOCK_SHARED;
6018 kern_return_t kr = KERN_SUCCESS;
6019 uint16_t pp_attr_bits;
6020 volatile uint16_t *refcnt;
6021 volatile uint16_t *wiredcnt;
6022 pv_free_list_t *local_pv_free;
6023
6024 validate_pmap_mutable(pmap);
6025
6026 #if XNU_MONITOR
6027 if (__improbable((options & PMAP_OPTIONS_NOWAIT) == 0)) {
6028 panic("pmap_enter_options() called without PMAP_OPTIONS_NOWAIT set");
6029 }
6030 #endif
6031
6032 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
6033
6034 if ((v) & pt_attr_leaf_offmask(pt_attr)) {
6035 panic("pmap_enter_options() pmap %p v 0x%llx",
6036 pmap, (uint64_t)v);
6037 }
6038
6039 if ((pa) & pt_attr_leaf_offmask(pt_attr)) {
6040 panic("pmap_enter_options() pmap %p pa 0x%llx",
6041 pmap, (uint64_t)pa);
6042 }
6043
6044 /* The PA should not extend beyond the architected physical address space */
6045 pa &= ARM_PTE_PAGE_MASK;
6046
6047 if ((prot & VM_PROT_EXECUTE) && (pmap == kernel_pmap)) {
6048 #if defined(KERNEL_INTEGRITY_CTRR) && defined(CONFIG_XNUPOST)
6049 extern vm_offset_t ctrr_test_page;
6050 if (__probable(v != ctrr_test_page))
6051 #endif
6052 panic("pmap_enter_options(): attempt to add executable mapping to kernel_pmap");
6053 }
6054 if (__improbable((pmap == kernel_pmap) && zone_spans_ro_va(v, v + pt_attr_page_size(pt_attr)))) {
6055 if (__improbable(prot != VM_PROT_READ)) {
6056 panic("%s: attempt to map RO zone VA 0x%llx with prot 0x%x",
6057 __func__, (unsigned long long)v, prot);
6058 }
6059 ro_va = true;
6060 }
6061 assert(pn != vm_page_fictitious_addr);
6062
6063 refcnt_updated = false;
6064 wiredcnt_updated = false;
6065
6066 if ((prot & VM_PROT_EXECUTE) || TEST_PAGE_RATIO_4) {
6067 /*
6068 * We need to take the lock exclusive here because of SPLAY_FIND in pmap_cs_enforce.
6069 * The TEST_PAGE_RATIO_4 case also requires it: pmap_enter_pte() must then update four PTEs non-atomically.
6070 * See rdar://problem/59655632 for thoughts on synchronization and the splay tree.
6071 */
6072 lock_mode = PMAP_LOCK_EXCLUSIVE;
6073 }
6074 pmap_lock(pmap, lock_mode);
6075
6076 /*
6077 * Expand pmap to include this pte. Assume that
6078 * pmap is always expanded to include enough hardware
6079 * pages to map one VM page.
6080 */
6081 while ((pte_p = pmap_pte(pmap, v)) == PT_ENTRY_NULL) {
6082 /* Must unlock to expand the pmap. */
6083 pmap_unlock(pmap, lock_mode);
6084
6085 kr = pmap_expand(pmap, v, options, pt_attr_leaf_level(pt_attr));
6086
6087 if (kr != KERN_SUCCESS) {
6088 return kr;
6089 }
6090
6091 pmap_lock(pmap, lock_mode);
6092 }
6093
6094 if (options & PMAP_OPTIONS_NOENTER) {
6095 pmap_unlock(pmap, lock_mode);
6096 return KERN_SUCCESS;
6097 }
6098
6099 /*
6100 * Since we may not hold the pmap lock exclusive, updating the pte is
6101 * done via a cmpxchg loop.
6102 * We need to be careful about modifying non-local data structures before committing
6103 * the new pte since we may need to re-do the transaction.
6104 */
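/*
* The overall retry pattern, roughly:
*
*   spte = os_atomic_load(pte_p, relaxed);
*   while (!committed) {
*       ... build 'pte' and take locks/refcounts based on 'spte' ...
*       committed = pmap_enter_pte(pmap, pte_p, &spte, pte, v);
*       // on failure, 'spte' holds the fresh value; loop and redo
*   }
*
* Hence every side effect taken before the commit must be either
* idempotent or explicitly rolled back on retry.
*/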
6105 spte = os_atomic_load(pte_p, relaxed);
6106 while (!committed) {
6107 refcnt = NULL;
6108 wiredcnt = NULL;
6109 pv_alloc_return_t pv_status = PV_ALLOC_SUCCESS;
6110 had_valid_mapping = (spte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE;
6111
6112 if (pmap != kernel_pmap) {
6113 ptd_info_t *ptd_info = ptep_get_info(pte_p);
6114 refcnt = &ptd_info->refcnt;
6115 wiredcnt = &ptd_info->wiredcnt;
6116 /*
6117 * This check is really intended to ensure that mappings in a nested pmap can't be inserted
6118 * through a top-level user pmap, which would allow a non-global mapping to be inserted into a shared
6119 * region pmap and leveraged into a TLB-based write gadget (rdar://91504354).
6120 * It's also a useful sanity check for other pmap types, but note that kernel page tables may not
6121 * have PTDs, so we can't use the check there.
6122 */
6123 if (__improbable(ptep_get_pmap(pte_p) != pmap)) {
6124 panic("%s: attempt to enter mapping at pte %p owned by pmap %p through pmap %p",
6125 __func__, pte_p, ptep_get_pmap(pte_p), pmap);
6126 }
6127 /*
6128 * Bump the wired count to keep the PTE page from being reclaimed. We need this because
6129 * we may drop the PVH and pmap locks later in pmap_enter() if we need to allocate
6130 * or acquire the pmap lock exclusive.
6131 */
6132 if (!wiredcnt_updated) {
6133 OSAddAtomic16(1, (volatile int16_t*)wiredcnt);
6134 wiredcnt_updated = true;
6135 }
6136 if (!refcnt_updated) {
6137 OSAddAtomic16(1, (volatile int16_t*)refcnt);
6138 refcnt_updated = true;
6139 drop_refcnt = true;
6140 }
6141 }
6142
6143 if (had_valid_mapping && (pte_to_pa(spte) != pa)) {
6144 /*
6145 * There is already a mapping here & it's for a different physical page.
6146 * First remove that mapping.
6147 *
6148 * This requires that we take the pmap lock exclusive in order to call pmap_remove_range.
6149 */
6150 if (lock_mode == PMAP_LOCK_SHARED) {
6151 if (pmap_lock_shared_to_exclusive(pmap)) {
6152 lock_mode = PMAP_LOCK_EXCLUSIVE;
6153 } else {
6154 /*
6155 * We failed to upgrade to an exclusive lock.
6156 * As a result we no longer hold the lock at all,
6157 * so we need to re-acquire it and restart the transaction.
6158 */
6159 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
6160 lock_mode = PMAP_LOCK_EXCLUSIVE;
6161 /* pmap might have changed after we dropped the lock. Try again. */
6162 spte = os_atomic_load(pte_p, relaxed);
6163 continue;
6164 }
6165 }
6166 pmap_remove_range(pmap, v, pte_p, pte_p + PAGE_RATIO);
6167 spte = ARM_PTE_TYPE_FAULT;
6168 assert(os_atomic_load(pte_p, acquire) == ARM_PTE_TYPE_FAULT);
6169 }
6170
6171 pte = pmap_construct_pte(pmap, v, pa, prot, fault_type, wired, pt_attr, &pp_attr_bits);
6172
6173 if (pa_valid(pa)) {
6174 unsigned int pai;
6175 boolean_t is_altacct = FALSE, is_internal = FALSE, is_reusable = FALSE, is_external = FALSE;
6176
6180 pai = pa_index(pa);
6181
6182 pvh_lock(pai);
6183
6184 /*
6185 * Make sure that the current per-cpu PV free list has
6186 * enough entries (2 in the worst-case scenario) to handle the enter_pv
6187 * if the transaction succeeds. We're either in the
6188 * PPL (which can't be preempted) or we've explicitly disabled preemptions.
6189 * Note that we can still be interrupted, but a primary
6190 * interrupt handler can never enter the pmap.
6191 */
6192 #if !XNU_MONITOR
6193 assert(get_preemption_level() > 0);
6194 #endif
6195 local_pv_free = &pmap_get_cpu_data()->pv_free;
6196 pv_entry_t **pv_h = pai_to_pvh(pai);
6197 const bool allocation_required = !pvh_test_type(pv_h, PVH_TYPE_NULL) &&
6198 !(pvh_test_type(pv_h, PVH_TYPE_PTEP) && pvh_ptep(pv_h) == pte_p);
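/*
* Two is the worst case because a PVH currently holding a bare PTE
* pointer (PVH_TYPE_PTEP) must be converted to a list: one PVE for the
* existing mapping plus one for the mapping being entered. (This reading
* follows from the allocation_required test above.)
*/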
6199
6200 if (__improbable(allocation_required && (local_pv_free->count < 2))) {
6201 pv_entry_t *new_pve_p[2] = {PV_ENTRY_NULL};
6202 int new_allocated_pves = 0;
6203
6204 while (new_allocated_pves < 2) {
6205 local_pv_free = &pmap_get_cpu_data()->pv_free;
6206 pv_status = pv_alloc(pmap, pai, lock_mode, &new_pve_p[new_allocated_pves]);
6207 if (pv_status == PV_ALLOC_FAIL) {
6208 break;
6209 } else if (pv_status == PV_ALLOC_RETRY) {
6210 /*
6211 * In the case that pv_alloc() had to grab a new page of PVEs,
6212 * it will have dropped the pmap lock while doing so.
6213 * On non-PPL devices, dropping the lock re-enables preemption so we may
6214 * be on a different CPU now.
6215 */
6216 local_pv_free = &pmap_get_cpu_data()->pv_free;
6217 } else {
6218 /* If we've gotten this far then a node should've been allocated. */
6219 assert(new_pve_p[new_allocated_pves] != PV_ENTRY_NULL);
6220
6221 new_allocated_pves++;
6222 }
6223 }
6224
6225 for (int i = 0; i < new_allocated_pves; i++) {
6226 pv_free(new_pve_p[i]);
6227 }
6228 }
6229
6230 if (pv_status == PV_ALLOC_FAIL) {
6231 pvh_unlock(pai);
6232 kr = KERN_RESOURCE_SHORTAGE;
6233 break;
6234 } else if (pv_status == PV_ALLOC_RETRY) {
6235 pvh_unlock(pai);
6236 /* We dropped the pmap and PVH locks to allocate. Retry transaction. */
6237 spte = os_atomic_load(pte_p, relaxed);
6238 continue;
6239 }
6240
6241 if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) {
6242 wimg_bits = (flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT));
6243 } else {
6244 wimg_bits = pmap_cache_attributes(pn);
6245 }
6246
6247 /* We may be retrying this operation after dropping the PVH lock.
6248 * Cache attributes for the physical page may have changed while the lock
6249 * was dropped, so clear any cache attributes we may have previously set
6250 * in the PTE template. */
6251 pte &= ~(ARM_PTE_ATTRINDXMASK | ARM_PTE_SHMASK);
6252 pte |= pmap_get_pt_ops(pmap)->wimg_to_pte(wimg_bits, pa);
6253
6254 #if XNU_MONITOR
6255 /* The regular old kernel is not allowed to remap PPL pages. */
6256 if (__improbable(ppattr_pa_test_monitor(pa))) {
6257 panic("%s: page belongs to PPL, "
6258 "pmap=%p, v=0x%llx, pa=%p, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x",
6259 __FUNCTION__,
6260 pmap, v, (void*)pa, prot, fault_type, flags, wired, options);
6261 }
6262
6263 if (__improbable(pvh_get_flags(pai_to_pvh(pai)) & PVH_FLAG_LOCKDOWN_MASK)) {
6264 panic("%s: page locked down, "
6265 "pmap=%p, v=0x%llx, pa=%p, prot=0x%x, fault_type=0x%x, flags=0x%x, wired=%u, options=0x%x",
6266 __FUNCTION__,
6267 pmap, v, (void *)pa, prot, fault_type, flags, wired, options);
6268 }
6269 #endif
6270
6271
6272 committed = pmap_enter_pte(pmap, pte_p, &spte, pte, v);
6273 if (!committed) {
6274 pvh_unlock(pai);
6275 continue;
6276 }
6277 had_valid_mapping = (spte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE;
6278 /* End of transaction. Commit pv changes, pa bits, and memory accounting. */
6279
6280 assert(!had_valid_mapping || (pte_to_pa(spte) == pa));
6281 /*
6282 * If there was already a valid pte here then we reuse its reference
6283 * on the ptd and drop the one that we took above.
6284 */
6285 drop_refcnt = had_valid_mapping;
6286
6287 if (!had_valid_mapping) {
6288 pv_entry_t *new_pve_p = PV_ENTRY_NULL;
6289 int pve_ptep_idx = 0;
6290 pv_status = pmap_enter_pv(pmap, pte_p, pai, options, lock_mode, &new_pve_p, &pve_ptep_idx);
6291 /* We did all the allocations up top. So this shouldn't be able to fail. */
6292 if (pv_status != PV_ALLOC_SUCCESS) {
6293 panic("%s: unexpected pmap_enter_pv ret code: %d. new_pve_p=%p pmap=%p",
6294 __func__, pv_status, new_pve_p, pmap);
6295 }
6296
6297 if (pmap != kernel_pmap) {
6298 if (options & PMAP_OPTIONS_INTERNAL) {
6299 ppattr_pve_set_internal(pai, new_pve_p, pve_ptep_idx);
6300 if ((options & PMAP_OPTIONS_ALT_ACCT) ||
6301 PMAP_FOOTPRINT_SUSPENDED(pmap)) {
6302 /*
6303 * Make a note to ourselves that this
6304 * mapping is using alternative
6305 * accounting. We'll need this in order
6306 * to know which ledger to debit when
6307 * the mapping is removed.
6308 *
6309 * The altacct bit must be set while
6310 * the pv head is locked. Defer the
6311 * ledger accounting until after we've
6312 * dropped the lock.
6313 */
6314 ppattr_pve_set_altacct(pai, new_pve_p, pve_ptep_idx);
6315 is_altacct = TRUE;
6316 }
6317 }
6318 if (ppattr_test_reusable(pai) &&
6319 !is_altacct) {
6320 is_reusable = TRUE;
6321 } else if (options & PMAP_OPTIONS_INTERNAL) {
6322 is_internal = TRUE;
6323 } else {
6324 is_external = TRUE;
6325 }
6326 }
6327 }
6328
6329 pvh_unlock(pai);
6330
6331 if (pp_attr_bits != 0) {
6332 ppattr_pa_set_bits(pa, pp_attr_bits);
6333 }
6334
6335 if (!had_valid_mapping && (pmap != kernel_pmap)) {
6336 pmap_ledger_credit(pmap, task_ledgers.phys_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6337
6338 if (is_internal) {
6339 /*
6340 * Make corresponding adjustments to
6341 * phys_footprint statistics.
6342 */
6343 pmap_ledger_credit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6344 if (is_altacct) {
6345 /*
6346 * If this page is internal and
6347 * in an IOKit region, credit
6348 * the task's total count of
6349 * dirty, internal IOKit pages.
6350 * It should *not* count towards
6351 * the task's total physical
6352 * memory footprint, because
6353 * this entire region was
6354 * already billed to the task
6355 * at the time the mapping was
6356 * created.
6357 *
6358 * Put another way, this is
6359 * internal++ and
6360 * alternate_accounting++, so
6361 * net effect on phys_footprint
6362 * is 0. That means: don't
6363 * touch phys_footprint here.
6364 */
6365 pmap_ledger_credit(pmap, task_ledgers.alternate_accounting, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6366 } else {
6367 if (ARM_PTE_IS_COMPRESSED(spte, pte_p) && !(spte & ARM_PTE_COMPRESSED_ALT)) {
6368 /* Replacing a compressed page (with internal accounting). No change to phys_footprint. */
6369 skip_footprint_debit = true;
6370 } else {
6371 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6372 }
6373 }
6374 }
6375 if (is_reusable) {
6376 pmap_ledger_credit(pmap, task_ledgers.reusable, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6377 } else if (is_external) {
6378 pmap_ledger_credit(pmap, task_ledgers.external, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6379 }
6380 }
6381 } else {
6382 if (prot & VM_PROT_EXECUTE) {
6383 kr = KERN_FAILURE;
6384 break;
6385 }
6386
6387 wimg_bits = pmap_cache_attributes(pn);
6388 if ((flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT))) {
6389 wimg_bits = (wimg_bits & (~VM_WIMG_MASK)) | (flags & (VM_WIMG_MASK | VM_WIMG_USE_DEFAULT));
6390 }
6391
6392 pte |= pmap_get_pt_ops(pmap)->wimg_to_pte(wimg_bits, pa);
6393
6394 #if XNU_MONITOR
6395 if ((wimg_bits & PP_ATTR_MONITOR) && !pmap_ppl_disable) {
6396 uint64_t xprr_perm = pte_to_xprr_perm(pte);
6397 switch (xprr_perm) {
6398 case XPRR_KERN_RO_PERM:
6399 break;
6400 case XPRR_KERN_RW_PERM:
6401 pte &= ~ARM_PTE_XPRR_MASK;
6402 pte |= xprr_perm_to_pte(XPRR_PPL_RW_PERM);
6403 break;
6404 default:
6405 panic("Unsupported xPRR perm %llu for pte 0x%llx", xprr_perm, (uint64_t)pte);
6406 }
6407 }
6408 #endif
6409 committed = pmap_enter_pte(pmap, pte_p, &spte, pte, v);
6410 if (committed) {
6411 had_valid_mapping = (spte & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE;
6412 assert(!had_valid_mapping || (pte_to_pa(spte) == pa));
6413
6414 /**
6415 * If there was already a valid pte here then we reuse its
6416 * reference on the ptd and drop the one that we took above.
6417 */
6418 drop_refcnt = had_valid_mapping;
6419 }
6420 }
6421 if (committed) {
6422 if (ARM_PTE_IS_COMPRESSED(spte, pte_p)) {
6423 assert(pmap != kernel_pmap);
6424
6425 /* One less "compressed" */
6426 pmap_ledger_debit(pmap, task_ledgers.internal_compressed,
6427 pt_attr_page_size(pt_attr) * PAGE_RATIO);
6428
6429 if (spte & ARM_PTE_COMPRESSED_ALT) {
6430 pmap_ledger_debit(pmap, task_ledgers.alternate_accounting_compressed, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6431 } else if (!skip_footprint_debit) {
6432 /* Was part of the footprint */
6433 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6434 }
6435 /* The old entry held a reference so drop the extra one that we took above. */
6436 drop_refcnt = true;
6437 }
6438 }
6439 }
6440
6441 if (drop_refcnt && refcnt != NULL) {
6442 assert(refcnt_updated);
6443 if (OSAddAtomic16(-1, (volatile int16_t*)refcnt) <= 0) {
6444 panic("pmap_enter(): over-release of ptdp %p for pte %p", ptep_get_ptd(pte_p), pte_p);
6445 }
6446 }
6447
6448 if (wiredcnt_updated && (OSAddAtomic16(-1, (volatile int16_t*)wiredcnt) <= 0)) {
6449 panic("pmap_enter(): over-unwire of ptdp %p for pte %p", ptep_get_ptd(pte_p), pte_p);
6450 }
6451
6452 pmap_unlock(pmap, lock_mode);
6453
6454 if (__improbable(ro_va && kr == KERN_SUCCESS)) {
6455 pmap_phys_write_disable(v);
6456 }
6457
6458 return kr;
6459 }
6460
6461 kern_return_t
6462 pmap_enter_options_addr(
6463 pmap_t pmap,
6464 vm_map_address_t v,
6465 pmap_paddr_t pa,
6466 vm_prot_t prot,
6467 vm_prot_t fault_type,
6468 unsigned int flags,
6469 boolean_t wired,
6470 unsigned int options,
6471 __unused void *arg)
6472 {
6473 kern_return_t kr = KERN_FAILURE;
6474
6475
6476 PMAP_TRACE(2, PMAP_CODE(PMAP__ENTER) | DBG_FUNC_START,
6477 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v), pa, prot);
6478
6479
6480 #if XNU_MONITOR
6481 /*
6482 * If NOWAIT was not requested, loop until the enter does not
6483 * fail due to lack of resources.
6484 */
6485 while ((kr = pmap_enter_options_ppl(pmap, v, pa, prot, fault_type, flags, wired, options | PMAP_OPTIONS_NOWAIT)) == KERN_RESOURCE_SHORTAGE) {
6486 pmap_alloc_page_for_ppl((options & PMAP_OPTIONS_NOWAIT) ? PMAP_PAGES_ALLOCATE_NOWAIT : 0);
6487 if (options & PMAP_OPTIONS_NOWAIT) {
6488 break;
6489 }
6490 }
6491
6492 pmap_ledger_check_balance(pmap);
6493 #else
6494 kr = pmap_enter_options_internal(pmap, v, pa, prot, fault_type, flags, wired, options);
6495 #endif
6496
6497 PMAP_TRACE(2, PMAP_CODE(PMAP__ENTER) | DBG_FUNC_END, kr);
6498
6499 return kr;
6500 }
6501
6502 kern_return_t
6503 pmap_enter_options(
6504 pmap_t pmap,
6505 vm_map_address_t v,
6506 ppnum_t pn,
6507 vm_prot_t prot,
6508 vm_prot_t fault_type,
6509 unsigned int flags,
6510 boolean_t wired,
6511 unsigned int options,
6512 __unused void *arg)
6513 {
6514 return pmap_enter_options_addr(pmap, v, ((pmap_paddr_t)pn) << PAGE_SHIFT, prot, fault_type, flags, wired, options, arg);
6515 }
6516
6517 /*
6518 * Routine: pmap_change_wiring
6519 * Function: Change the wiring attribute for a map/virtual-address
6520 * pair.
6521 * In/out conditions:
6522 * The mapping must already exist in the pmap.
6523 */
6524 MARK_AS_PMAP_TEXT void
6525 pmap_change_wiring_internal(
6526 pmap_t pmap,
6527 vm_map_address_t v,
6528 boolean_t wired)
6529 {
6530 pt_entry_t *pte_p;
6531 pmap_paddr_t pa;
6532
6533 validate_pmap_mutable(pmap);
6534
6535 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
6536
6537 const pt_attr_t * pt_attr = pmap_get_pt_attr(pmap);
6538
6539 pte_p = pmap_pte(pmap, v);
6540 if (pte_p == PT_ENTRY_NULL) {
6541 if (!wired) {
6542 /*
6543 * The PTE may have already been cleared by a disconnect/remove operation, and the L3 table
6544 * may have been freed by a remove operation.
6545 */
6546 goto pmap_change_wiring_return;
6547 } else {
6548 panic("%s: Attempt to wire nonexistent PTE for pmap %p", __func__, pmap);
6549 }
6550 }
6551 /*
6552 * Use volatile loads to prevent the compiler from collapsing references to 'pa' back to loads of pte_p
6553 * until we've grabbed the final PVH lock; PTE contents may change during this time.
6554 */
6555 pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
6556
6557 while (pa_valid(pa)) {
6558 pmap_paddr_t new_pa;
6559
6560 pvh_lock(pa_index(pa));
6561 new_pa = pte_to_pa(*((volatile pt_entry_t*)pte_p));
6562
6563 if (pa == new_pa) {
6564 break;
6565 }
6566
6567 pvh_unlock(pa_index(pa));
6568 pa = new_pa;
6569 }
6570
6571 /* PTE checks must be performed after acquiring the PVH lock (if applicable for the PA) */
6572 if ((*pte_p == ARM_PTE_EMPTY) || (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p))) {
6573 if (!wired) {
6574 /* PTE cleared by prior remove/disconnect operation */
6575 goto pmap_change_wiring_cleanup;
6576 } else {
6577 panic("%s: Attempt to wire empty/compressed PTE %p (=0x%llx) for pmap %p",
6578 __func__, pte_p, (uint64_t)*pte_p, pmap);
6579 }
6580 }
6581
6582 assertf((*pte_p & ARM_PTE_TYPE_VALID) == ARM_PTE_TYPE, "invalid pte %p (=0x%llx)", pte_p, (uint64_t)*pte_p);
6583 if (wired != pte_is_wired(*pte_p)) {
6584 pte_set_wired(pmap, pte_p, wired);
6585 if (pmap != kernel_pmap) {
6586 if (wired) {
6587 pmap_ledger_credit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6588 } else {
6589 pmap_ledger_debit(pmap, task_ledgers.wired_mem, pt_attr_page_size(pt_attr) * PAGE_RATIO);
6590 }
6591 }
6592 }
6593
6594 pmap_change_wiring_cleanup:
6595 if (pa_valid(pa)) {
6596 pvh_unlock(pa_index(pa));
6597 }
6598
6599 pmap_change_wiring_return:
6600 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
6601 }
6602
6603 void
6604 pmap_change_wiring(
6605 pmap_t pmap,
6606 vm_map_address_t v,
6607 boolean_t wired)
6608 {
6609 #if XNU_MONITOR
6610 pmap_change_wiring_ppl(pmap, v, wired);
6611
6612 pmap_ledger_check_balance(pmap);
6613 #else
6614 pmap_change_wiring_internal(pmap, v, wired);
6615 #endif
6616 }
6617
6618 MARK_AS_PMAP_TEXT pmap_paddr_t
6619 pmap_find_pa_internal(
6620 pmap_t pmap,
6621 addr64_t va)
6622 {
6623 pmap_paddr_t pa = 0;
6624
6625 validate_pmap(pmap);
6626
6627 if (pmap != kernel_pmap) {
6628 pmap_lock(pmap, PMAP_LOCK_SHARED);
6629 }
6630
6631 pa = pmap_vtophys(pmap, va);
6632
6633 if (pmap != kernel_pmap) {
6634 pmap_unlock(pmap, PMAP_LOCK_SHARED);
6635 }
6636
6637 return pa;
6638 }
6639
6640 pmap_paddr_t
6641 pmap_find_pa_nofault(pmap_t pmap, addr64_t va)
6642 {
6643 pmap_paddr_t pa = 0;
6644
6645 if (pmap == kernel_pmap) {
6646 pa = mmu_kvtop(va);
6647 } else if ((current_thread()->map) && (pmap == vm_map_pmap(current_thread()->map))) {
6648 /*
6649 * Note that this doesn't account for PAN: mmu_uvtop() may return a valid
6650 * translation even if PAN would prevent kernel access through the translation.
6651 * It's therefore assumed the UVA will be accessed in a PAN-disabled context.
6652 */
6653 pa = mmu_uvtop(va);
6654 }
6655 return pa;
6656 }
6657
6658 pmap_paddr_t
6659 pmap_find_pa(
6660 pmap_t pmap,
6661 addr64_t va)
6662 {
6663 pmap_paddr_t pa = pmap_find_pa_nofault(pmap, va);
6664
6665 if (pa != 0) {
6666 return pa;
6667 }
6668
6669 if (not_in_kdp) {
6670 #if XNU_MONITOR
6671 return pmap_find_pa_ppl(pmap, va);
6672 #else
6673 return pmap_find_pa_internal(pmap, va);
6674 #endif
6675 } else {
6676 return pmap_vtophys(pmap, va);
6677 }
6678 }
6679
6680 ppnum_t
6681 pmap_find_phys_nofault(
6682 pmap_t pmap,
6683 addr64_t va)
6684 {
6685 ppnum_t ppn;
6686 ppn = atop(pmap_find_pa_nofault(pmap, va));
6687 return ppn;
6688 }
6689
6690 ppnum_t
6691 pmap_find_phys(
6692 pmap_t pmap,
6693 addr64_t va)
6694 {
6695 ppnum_t ppn;
6696 ppn = atop(pmap_find_pa(pmap, va));
6697 return ppn;
6698 }
6699
6700 /**
6701 * Translate a kernel virtual address into a physical address.
6702 *
6703 * @param va The kernel virtual address to translate. Does not work on user
6704 * virtual addresses.
6705 *
6706 * @return The physical address if the translation was successful, or zero if
6707 * no valid mappings were found for the given virtual address.
6708 */
6709 pmap_paddr_t
6710 kvtophys(vm_offset_t va)
6711 {
6712 /**
6713 * Attempt to do the translation first in hardware using the AT (address
6714 * translation) instruction. This will attempt to use the MMU to do the
6715 * translation for us.
6716 */
6717 pmap_paddr_t pa = mmu_kvtop(va);
6718
6719 if (pa) {
6720 return pa;
6721 }
6722
6723 /* If the MMU can't find the mapping, then manually walk the page tables. */
6724 return pmap_vtophys(kernel_pmap, va);
6725 }
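/*
* Typical usage (illustrative):
*
*   pmap_paddr_t pa = kvtophys((vm_offset_t)ptr);
*   if (pa == 0) {
*       // no valid mapping found
*   }
*
* Zero doubles as the failure sentinel, so the result is ambiguous for a
* genuine mapping of physical page 0; kvtophys_nofail() below panics
* rather than return an invalid or non-kernel-managed result.
*/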
6726
6727 /**
6728 * Variant of kvtophys that can't fail. If no mapping is found or the mapping
6729 * points to a non-kernel-managed physical page, then this call will panic().
6730 *
6731 * @note The output of this function is guaranteed to be a kernel-managed
6732 * physical page, which means it's safe to pass the output directly to
6733 * pa_index() to create a physical address index for various pmap data
6734 * structures.
6735 *
6736 * @param va The kernel virtual address to translate. Does not work on user
6737 * virtual addresses.
6738 *
6739 * @return The translated physical address for the given virtual address.
6740 */
6741 pmap_paddr_t
6742 kvtophys_nofail(vm_offset_t va)
6743 {
6744 pmap_paddr_t pa = kvtophys(va);
6745
6746 if (!pa_valid(pa)) {
6747 panic("%s: Invalid or non-kernel-managed physical page returned, "
6748 "pa: %#llx, va: %p", __func__, (uint64_t)pa, (void *)va);
6749 }
6750
6751 return pa;
6752 }
6753
6754 pmap_paddr_t
6755 pmap_vtophys(
6756 pmap_t pmap,
6757 addr64_t va)
6758 {
6759 if ((va < pmap->min) || (va >= pmap->max)) {
6760 return 0;
6761 }
6762
6763 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
6764
6765 #if (__ARM_VMSA__ == 7)
6766 tt_entry_t *tte_p, tte;
6767 pt_entry_t *pte_p;
6768 pmap_paddr_t pa;
6769
6770 tte_p = pmap_tte(pmap, va);
6771 if (tte_p == (tt_entry_t *) NULL) {
6772 return (pmap_paddr_t) 0;
6773 }
6774
6775 tte = *tte_p;
6776 if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
6777 pte_p = (pt_entry_t *) ttetokv(tte) + pte_index(pt_attr, va);
6778 pa = pte_to_pa(*pte_p) | (va & ARM_PGMASK);
6780 #if DEVELOPMENT || DEBUG
6781 if (atop(pa) != 0 &&
6782 ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) {
6783 panic("pmap_vtophys(%p,0x%llx): compressed pte_p=%p 0x%llx with ppn=0x%x",
6784 pmap, va, pte_p, (uint64_t) (*pte_p), atop(pa));
6785 }
6786 #endif /* DEVELOPMENT || DEBUG */
6787 } else if ((tte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_BLOCK) {
6788 if ((tte & ARM_TTE_BLOCK_SUPER) == ARM_TTE_BLOCK_SUPER) {
6789 pa = suptte_to_pa(tte) | (va & ARM_TT_L1_SUPER_OFFMASK);
6790 } else {
6791 pa = sectte_to_pa(tte) | (va & ARM_TT_L1_BLOCK_OFFMASK);
6792 }
6793 } else {
6794 pa = 0;
6795 }
6796 #else
6797 tt_entry_t * ttp = NULL;
6798 tt_entry_t * ttep = NULL;
6799 tt_entry_t tte = ARM_TTE_EMPTY;
6800 pmap_paddr_t pa = 0;
6801 unsigned int cur_level;
6802
6803 ttp = pmap->tte;
6804
6805 for (cur_level = pt_attr_root_level(pt_attr); cur_level <= pt_attr_leaf_level(pt_attr); cur_level++) {
6806 ttep = &ttp[ttn_index(pt_attr, va, cur_level)];
6807
6808 tte = *ttep;
6809
6810 const uint64_t valid_mask = pt_attr->pta_level_info[cur_level].valid_mask;
6811 const uint64_t type_mask = pt_attr->pta_level_info[cur_level].type_mask;
6812 const uint64_t type_block = pt_attr->pta_level_info[cur_level].type_block;
6813 const uint64_t offmask = pt_attr->pta_level_info[cur_level].offmask;
6814
6815 if ((tte & valid_mask) != valid_mask) {
6816 return (pmap_paddr_t) 0;
6817 }
6818
6819 /* This detects both leaf entries and intermediate block mappings. */
6820 if ((tte & type_mask) == type_block) {
6821 pa = ((tte & ARM_TTE_PA_MASK & ~offmask) | (va & offmask));
6822 break;
6823 }
6824
6825 ttp = (tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
6826 }
6827 #endif
6828
6829 return pa;
6830 }
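/*
* Note on the table walk above: at each level the loop either descends
* through a table entry or terminates at a block/page entry, composing the
* result as (tte & ARM_TTE_PA_MASK & ~offmask) | (va & offmask). Because
* offmask shrinks at every level, the same expression covers large block
* mappings and leaf pages alike.
*/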
6831
6832 /*
6833 * pmap_init_pte_page - Initialize a page table page.
6834 */
6835 MARK_AS_PMAP_TEXT void
6836 pmap_init_pte_page(
6837 pmap_t pmap,
6838 pt_entry_t *pte_p,
6839 vm_offset_t va,
6840 unsigned int ttlevel,
6841 boolean_t alloc_ptd)
6842 {
6843 pt_desc_t *ptdp = NULL;
6844 pv_entry_t **pvh = pai_to_pvh(pa_index(ml_static_vtop((vm_offset_t)pte_p)));
6845
6846 if (pvh_test_type(pvh, PVH_TYPE_NULL)) {
6847 if (alloc_ptd) {
6848 /*
6849 * This path should only be invoked from arm_vm_init. If we are emulating 16KB pages
6850 * on 4KB hardware, we may already have allocated a page table descriptor for a
6851 * bootstrap request, so we check for an existing PTD here.
6852 */
6853 ptdp = ptd_alloc(pmap);
6854 if (ptdp == NULL) {
6855 panic("%s: unable to allocate PTD", __func__);
6856 }
6857 pvh_update_head_unlocked(pvh, ptdp, PVH_TYPE_PTDP);
6858 } else {
6859 panic("pmap_init_pte_page(): pte_p %p", pte_p);
6860 }
6861 } else if (pvh_test_type(pvh, PVH_TYPE_PTDP)) {
6862 ptdp = pvh_ptd(pvh);
6863 } else {
6864 panic("pmap_init_pte_page(): invalid PVH type for pte_p %p", pte_p);
6865 }
6866
	// The barrier below ensures that prior updates to the page are visible to the
	// page table walker (PTW) before the page is linked into the previous level.
6869 __builtin_arm_dmb(DMB_ISHST);
6870 ptd_info_init(ptdp, pmap, va, ttlevel, pte_p);
6871 }
6872
6873 /*
6874 * Routine: pmap_expand
6875 *
6876 * Expands a pmap to be able to map the specified virtual address.
6877 *
6878 * Allocates new memory for the default (COARSE) translation table
6879 * entry, initializes all the pte entries to ARM_PTE_TYPE_FAULT and
6880 * also allocates space for the corresponding pv entries.
6881 *
6882 * Nothing should be locked.
6883 */
6884 MARK_AS_PMAP_TEXT static kern_return_t
6885 pmap_expand(
6886 pmap_t pmap,
6887 vm_map_address_t v,
6888 unsigned int options,
6889 unsigned int level)
6890 {
6891 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
6892
6893 if (__improbable((v < pmap->min) || (v >= pmap->max))) {
6894 return KERN_INVALID_ADDRESS;
6895 }
6896 #if (__ARM_VMSA__ == 7)
6897 vm_offset_t pa;
6898 tt_entry_t *tte_p;
6899 tt_entry_t *tt_p;
6900 unsigned int i;
6901
6902 #if DEVELOPMENT || DEBUG
6903 /*
6904 * We no longer support root level expansion; panic in case something
6905 * still attempts to trigger it.
6906 */
6907 i = tte_index(pt_attr, v);
6908
6909 if (i >= pmap->tte_index_max) {
6910 panic("%s: index out of range, index=%u, max=%u, "
6911 "pmap=%p, addr=%p, options=%u, level=%u",
6912 __func__, i, pmap->tte_index_max,
6913 pmap, (void *)v, options, level);
6914 }
6915 #endif /* DEVELOPMENT || DEBUG */
6916
6917 if (level == 1) {
6918 return KERN_SUCCESS;
6919 }
6920
6921 {
6922 tt_entry_t *tte_next_p;
6923
6924 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
6925 pa = 0;
6926 if (pmap_pte(pmap, v) != PT_ENTRY_NULL) {
6927 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
6928 return KERN_SUCCESS;
6929 }
6930 tte_p = &pmap->tte[ttenum(v & ~ARM_TT_L1_PT_OFFMASK)];
6931 for (i = 0, tte_next_p = tte_p; i < 4; i++) {
6932 if (tte_to_pa(*tte_next_p)) {
6933 pa = tte_to_pa(*tte_next_p);
6934 break;
6935 }
6936 tte_next_p++;
6937 }
6938 pa = pa & ~PAGE_MASK;
6939 if (pa) {
6940 tte_p = &pmap->tte[ttenum(v)];
6941 *tte_p = pa_to_tte(pa) | (((v >> ARM_TT_L1_SHIFT) & 0x3) << 10) | ARM_TTE_TYPE_TABLE;
6942 FLUSH_PTE();
6943 PMAP_TRACE(5, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~ARM_TT_L1_OFFMASK),
6944 VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_OFFMASK) + ARM_TT_L1_SIZE), *tte_p);
6945 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
6946 return KERN_SUCCESS;
6947 }
6948 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
6949 }
6950 v = v & ~ARM_TT_L1_PT_OFFMASK;
6951
6952
6953 while (pmap_pte(pmap, v) == PT_ENTRY_NULL) {
6954 /*
6955 * Allocate a VM page for the level 2 page table entries.
6956 */
6957 while (pmap_tt_allocate(pmap, &tt_p, PMAP_TT_L2_LEVEL, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) {
6958 if (options & PMAP_OPTIONS_NOWAIT) {
6959 return KERN_RESOURCE_SHORTAGE;
6960 }
6961 VM_PAGE_WAIT();
6962 }
6963
6964 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
6965 /*
6966 * See if someone else expanded us first
6967 */
6968 if (pmap_pte(pmap, v) == PT_ENTRY_NULL) {
6969 tt_entry_t *tte_next_p;
6970
6971 pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, PMAP_TT_L2_LEVEL, FALSE);
6972 pa = kvtophys_nofail((vm_offset_t)tt_p);
6973 tte_p = &pmap->tte[ttenum(v)];
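			/*
			 * ARMv7 hardware L2 tables are 1KB, so a single 4KB page holds four of
			 * them; point the four consecutive L1 entries spanning this 4MB region
			 * at successive 1KB sub-tables within the newly allocated page.
			 */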
6974 for (i = 0, tte_next_p = tte_p; i < 4; i++) {
6975 *tte_next_p = pa_to_tte(pa) | ARM_TTE_TYPE_TABLE;
6976 PMAP_TRACE(5, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + (i * ARM_TT_L1_SIZE)),
6977 VM_KERNEL_ADDRHIDE((v & ~ARM_TT_L1_PT_OFFMASK) + ((i + 1) * ARM_TT_L1_SIZE)), *tte_p);
6978 tte_next_p++;
6979 pa = pa + 0x400;
6980 }
6981 FLUSH_PTE();
6982
6983 pa = 0x0ULL;
6984 tt_p = (tt_entry_t *)NULL;
6985 }
6986 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
6987 if (tt_p != (tt_entry_t *)NULL) {
6988 pmap_tt_deallocate(pmap, tt_p, PMAP_TT_L2_LEVEL);
6989 tt_p = (tt_entry_t *)NULL;
6990 }
6991 }
6992 return KERN_SUCCESS;
6993 #else
6994 pmap_paddr_t pa;
6995 unsigned int ttlevel = pt_attr_root_level(pt_attr);
6996 tt_entry_t *tte_p;
6997 tt_entry_t *tt_p;
6998
6999 pa = 0x0ULL;
7000 tt_p = (tt_entry_t *)NULL;
7001
7002 for (; ttlevel < level; ttlevel++) {
7003 pmap_lock(pmap, PMAP_LOCK_SHARED);
7004
7005 if (pmap_ttne(pmap, ttlevel + 1, v) == PT_ENTRY_NULL) {
7006 pmap_unlock(pmap, PMAP_LOCK_SHARED);
7007 while (pmap_tt_allocate(pmap, &tt_p, ttlevel + 1, ((options & PMAP_TT_ALLOCATE_NOWAIT)? PMAP_PAGES_ALLOCATE_NOWAIT : 0)) != KERN_SUCCESS) {
7008 if (options & PMAP_OPTIONS_NOWAIT) {
7009 return KERN_RESOURCE_SHORTAGE;
7010 }
7011 #if XNU_MONITOR
7012 panic("%s: failed to allocate tt, "
7013 "pmap=%p, v=%p, options=0x%x, level=%u",
7014 __FUNCTION__,
7015 pmap, (void *)v, options, level);
7016 #else
7017 VM_PAGE_WAIT();
7018 #endif
7019 }
7020 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
7021 if ((pmap_ttne(pmap, ttlevel + 1, v) == PT_ENTRY_NULL)) {
7022 pmap_init_pte_page(pmap, (pt_entry_t *) tt_p, v, ttlevel + 1, FALSE);
7023 pa = kvtophys_nofail((vm_offset_t)tt_p);
7024 tte_p = pmap_ttne(pmap, ttlevel, v);
7025 *tte_p = (pa & ARM_TTE_TABLE_MASK) | ARM_TTE_TYPE_TABLE | ARM_TTE_VALID;
7026 PMAP_TRACE(4 + ttlevel, PMAP_CODE(PMAP__TTE), VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(v & ~pt_attr_ln_offmask(pt_attr, ttlevel)),
7027 VM_KERNEL_ADDRHIDE((v & ~pt_attr_ln_offmask(pt_attr, ttlevel)) + pt_attr_ln_size(pt_attr, ttlevel)), *tte_p);
7028 pa = 0x0ULL;
7029 tt_p = (tt_entry_t *)NULL;
7030 }
7031 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
7032 } else {
7033 pmap_unlock(pmap, PMAP_LOCK_SHARED);
7034 }
7035
7036 if (tt_p != (tt_entry_t *)NULL) {
7037 pmap_tt_deallocate(pmap, tt_p, ttlevel + 1);
7038 tt_p = (tt_entry_t *)NULL;
7039 }
7040 }
7041
7042 return KERN_SUCCESS;
7043 #endif
7044 }
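
/*
 * Illustrative caller pattern for pmap_expand() (a sketch, not an actual call
 * site): mapping paths invoke it before installing a PTE and propagate any
 * failure, honoring PMAP_OPTIONS_NOWAIT for non-blocking callers.
 *
 *	kern_return_t kr = pmap_expand(pmap, v, options, level);
 *	if (kr != KERN_SUCCESS) {
 *		return kr;	// KERN_INVALID_ADDRESS, or KERN_RESOURCE_SHORTAGE with NOWAIT
 *	}
 */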
7045
7046 /*
7047 * Routine: pmap_collect
7048 * Function:
7049 * Garbage collects the physical map system for
7050 * pages which are no longer used.
 *	Success need not be guaranteed -- that is, some
 *	pages which are no longer referenced may be left
 *	in place while others are collected.
7054 */
7055 void
7056 pmap_collect(pmap_t pmap)
7057 {
7058 if (pmap == PMAP_NULL) {
7059 return;
7060 }
7061
7062 #if 0
7063 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
7064 if ((pmap->nested == FALSE) && (pmap != kernel_pmap)) {
7065 /* TODO: Scan for vm page assigned to top level page tables with no reference */
7066 }
7067 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
7068 #endif
7069
7070 return;
7071 }
7072
7073 /*
7074 * Routine: pmap_gc
7075 * Function:
7076 * Pmap garbage collection
7077 * Called by the pageout daemon when pages are scarce.
7078 *
7079 */
7080 void
7081 pmap_gc(
7082 void)
7083 {
7084 #if XNU_MONITOR
7085 /*
7086 * We cannot invoke the scheduler from the PPL, so for now we elide the
7087 * GC logic if the PPL is enabled.
7088 */
7089 #endif
7090 #if !XNU_MONITOR
7091 pmap_t pmap, pmap_next;
7092 boolean_t gc_wait;
7093
7094 if (pmap_gc_allowed &&
7095 (pmap_gc_allowed_by_time_throttle ||
7096 pmap_gc_forced)) {
7097 pmap_gc_forced = FALSE;
7098 pmap_gc_allowed_by_time_throttle = FALSE;
7099 pmap_simple_lock(&pmaps_lock);
7100 pmap = CAST_DOWN_EXPLICIT(pmap_t, queue_first(&map_pmap_list));
7101 while (!queue_end(&map_pmap_list, (queue_entry_t)pmap)) {
7102 if (!(pmap->gc_status & PMAP_GC_INFLIGHT)) {
7103 pmap->gc_status |= PMAP_GC_INFLIGHT;
7104 }
7105 pmap_simple_unlock(&pmaps_lock);
7106
7107 pmap_collect(pmap);
7108
7109 pmap_simple_lock(&pmaps_lock);
7110 gc_wait = (pmap->gc_status & PMAP_GC_WAIT);
7111 pmap->gc_status &= ~(PMAP_GC_INFLIGHT | PMAP_GC_WAIT);
7112 pmap_next = CAST_DOWN_EXPLICIT(pmap_t, queue_next(&pmap->pmaps));
7113 if (gc_wait) {
7114 if (!queue_end(&map_pmap_list, (queue_entry_t)pmap_next)) {
7115 pmap_next->gc_status |= PMAP_GC_INFLIGHT;
7116 }
7117 pmap_simple_unlock(&pmaps_lock);
7118 thread_wakeup((event_t) &pmap->gc_status);
7119 pmap_simple_lock(&pmaps_lock);
7120 }
7121 pmap = pmap_next;
7122 }
7123 pmap_simple_unlock(&pmaps_lock);
7124 }
7125 #endif
7126 }
7127
7128 /*
7129 * By default, don't attempt pmap GC more frequently
 * than once per minute.
7131 */
7132
7133 void
7134 compute_pmap_gc_throttle(
7135 void *arg __unused)
7136 {
7137 pmap_gc_allowed_by_time_throttle = TRUE;
7138 }
7139
7140 /*
 * pmap_attribute_cache_sync(ppnum_t pp, vm_size_t size, ...)
 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page.
7145 */
7146
7147 kern_return_t
7148 pmap_attribute_cache_sync(
7149 ppnum_t pp,
7150 vm_size_t size,
7151 __unused vm_machine_attribute_t attribute,
7152 __unused vm_machine_attribute_val_t * value)
7153 {
7154 if (size > PAGE_SIZE) {
7155 panic("pmap_attribute_cache_sync size: 0x%llx", (uint64_t)size);
7156 } else {
7157 cache_sync_page(pp);
7158 }
7159
7160 return KERN_SUCCESS;
7161 }
7162
7163 /*
7164 * pmap_sync_page_data_phys(ppnum_t pp)
7165 *
 * Invalidates all of the instruction cache on a physical page and
 * pushes any dirty data from the data cache for the same physical page.
7168 */
7169 void
7170 pmap_sync_page_data_phys(
7171 ppnum_t pp)
7172 {
7173 cache_sync_page(pp);
7174 }
7175
7176 /*
7177 * pmap_sync_page_attributes_phys(ppnum_t pp)
7178 *
7179 * Write back and invalidate all cachelines on a physical page.
7180 */
7181 void
7182 pmap_sync_page_attributes_phys(
7183 ppnum_t pp)
7184 {
7185 flush_dcache((vm_offset_t) (pp << PAGE_SHIFT), PAGE_SIZE, TRUE);
7186 }
7187
7188 #if CONFIG_COREDUMP
7189 /* temporary workaround */
7190 boolean_t
7191 coredumpok(
7192 vm_map_t map,
7193 mach_vm_offset_t va)
7194 {
7195 pt_entry_t *pte_p;
7196 pt_entry_t spte;
7197
7198 pte_p = pmap_pte(map->pmap, va);
7199 if (0 == pte_p) {
7200 return FALSE;
7201 }
7202 spte = *pte_p;
7203 return (spte & ARM_PTE_ATTRINDXMASK) == ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT);
7204 }
7205 #endif
7206
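/*
 * fillPage: fill the given physical page with a repeating 32-bit pattern,
 * accessed through its physical aperture mapping.
 */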
7207 void
7208 fillPage(
7209 ppnum_t pn,
7210 unsigned int fill)
7211 {
7212 unsigned int *addr;
7213 int count;
7214
7215 addr = (unsigned int *) phystokv(ptoa(pn));
7216 count = PAGE_SIZE / sizeof(unsigned int);
7217 while (count--) {
7218 *addr++ = fill;
7219 }
7220 }
7221
7222 extern void mapping_set_mod(ppnum_t pn);
7223
7224 void
7225 mapping_set_mod(
7226 ppnum_t pn)
7227 {
7228 pmap_set_modify(pn);
7229 }
7230
7231 extern void mapping_set_ref(ppnum_t pn);
7232
7233 void
7234 mapping_set_ref(
7235 ppnum_t pn)
7236 {
7237 pmap_set_reference(pn);
7238 }
7239
7240 /*
7241 * Clear specified attribute bits.
7242 *
7243 * Try to force an arm_fast_fault() for all mappings of
7244 * the page - to force attributes to be set again at fault time.
7245 * If the forcing succeeds, clear the cached bits at the head.
7246 * Otherwise, something must have been wired, so leave the cached
7247 * attributes alone.
7248 */
7249 MARK_AS_PMAP_TEXT static void
7250 phys_attribute_clear_with_flush_range(
7251 ppnum_t pn,
7252 unsigned int bits,
7253 int options,
7254 void *arg,
7255 pmap_tlb_flush_range_t *flush_range)
7256 {
7257 pmap_paddr_t pa = ptoa(pn);
7258 vm_prot_t allow_mode = VM_PROT_ALL;
7259
7260 #if XNU_MONITOR
7261 if (__improbable(bits & PP_ATTR_PPL_OWNED_BITS)) {
7262 panic("%s: illegal request, "
7263 "pn=%u, bits=%#x, options=%#x, arg=%p, flush_range=%p",
7264 __FUNCTION__,
7265 pn, bits, options, arg, flush_range);
7266 }
7267 #endif
7268 if ((arg != NULL) || (flush_range != NULL)) {
7269 options = options & ~PMAP_OPTIONS_NOFLUSH;
7270 }
7271
7272 if (__improbable((bits & PP_ATTR_MODIFIED) &&
7273 (options & PMAP_OPTIONS_NOFLUSH))) {
7274 panic("phys_attribute_clear(0x%x,0x%x,0x%x,%p,%p): "
7275 "should not clear 'modified' without flushing TLBs\n",
7276 pn, bits, options, arg, flush_range);
7277 }
7278
7279 assert(pn != vm_page_fictitious_addr);
7280
7281 if (options & PMAP_OPTIONS_CLEAR_WRITE) {
7282 assert(bits == PP_ATTR_MODIFIED);
7283
7284 pmap_page_protect_options_with_flush_range(pn, (VM_PROT_ALL & ~VM_PROT_WRITE), options, flush_range);
7285 /*
7286 * We short circuit this case; it should not need to
7287 * invoke arm_force_fast_fault, so just clear the modified bit.
7288 * pmap_page_protect has taken care of resetting
7289 * the state so that we'll see the next write as a fault to
7290 * the VM (i.e. we don't want a fast fault).
7291 */
7292 ppattr_pa_clear_bits(pa, (pp_attr_t)bits);
7293 return;
7294 }
7295 if (bits & PP_ATTR_REFERENCED) {
7296 allow_mode &= ~(VM_PROT_READ | VM_PROT_EXECUTE);
7297 }
7298 if (bits & PP_ATTR_MODIFIED) {
7299 allow_mode &= ~VM_PROT_WRITE;
7300 }
7301
7302 if (bits == PP_ATTR_NOENCRYPT) {
7303 /*
7304 * We short circuit this case; it should not need to
7305 * invoke arm_force_fast_fault, so just clear and
7306 * return. On ARM, this bit is just a debugging aid.
7307 */
7308 ppattr_pa_clear_bits(pa, (pp_attr_t)bits);
7309 return;
7310 }
7311
7312 if (arm_force_fast_fault_with_flush_range(pn, allow_mode, options, flush_range)) {
7313 ppattr_pa_clear_bits(pa, (pp_attr_t)bits);
7314 }
7315 }
7316
7317 MARK_AS_PMAP_TEXT void
7318 phys_attribute_clear_internal(
7319 ppnum_t pn,
7320 unsigned int bits,
7321 int options,
7322 void *arg)
7323 {
7324 phys_attribute_clear_with_flush_range(pn, bits, options, arg, NULL);
7325 }
7326
7327 #if __ARM_RANGE_TLBI__
7328 MARK_AS_PMAP_TEXT static vm_map_address_t
7329 phys_attribute_clear_twig_internal(
7330 pmap_t pmap,
7331 vm_map_address_t start,
7332 vm_map_address_t end,
7333 unsigned int bits,
7334 unsigned int options,
7335 pmap_tlb_flush_range_t *flush_range)
7336 {
7337 pmap_assert_locked(pmap, PMAP_LOCK_SHARED);
7338 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
7339 assert(end >= start);
7340 assert((end - start) <= pt_attr_twig_size(pt_attr));
7341 const uint64_t pmap_page_size = pt_attr_page_size(pt_attr);
7342 vm_map_address_t va = start;
7343 pt_entry_t *pte_p, *start_pte_p, *end_pte_p, *curr_pte_p;
7344 tt_entry_t *tte_p;
7345 tte_p = pmap_tte(pmap, start);
7346 unsigned int npages = 0;
7347
7348 if (tte_p == (tt_entry_t *) NULL) {
7349 return end;
7350 }
7351
7352 if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
7353 pte_p = (pt_entry_t *) ttetokv(*tte_p);
7354
7355 start_pte_p = &pte_p[pte_index(pt_attr, start)];
7356 end_pte_p = start_pte_p + ((end - start) >> pt_attr_leaf_shift(pt_attr));
7357 assert(end_pte_p >= start_pte_p);
7358 for (curr_pte_p = start_pte_p; curr_pte_p < end_pte_p; curr_pte_p++, va += pmap_page_size) {
7359 if (__improbable(npages++ && pmap_pending_preemption())) {
7360 return va;
7361 }
7362 pmap_paddr_t pa = pte_to_pa(*((volatile pt_entry_t*)curr_pte_p));
7363 if (pa_valid(pa)) {
7364 ppnum_t pn = (ppnum_t) atop(pa);
7365 phys_attribute_clear_with_flush_range(pn, bits, options, NULL, flush_range);
7366 }
7367 }
7368 }
7369 return end;
7370 }
7371
7372 MARK_AS_PMAP_TEXT vm_map_address_t
7373 phys_attribute_clear_range_internal(
7374 pmap_t pmap,
7375 vm_map_address_t start,
7376 vm_map_address_t end,
7377 unsigned int bits,
7378 unsigned int options)
7379 {
7380 if (__improbable(end < start)) {
7381 panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end);
7382 }
7383 validate_pmap_mutable(pmap);
7384
7385 vm_map_address_t va = start;
7386 pmap_tlb_flush_range_t flush_range = {
7387 .ptfr_pmap = pmap,
7388 .ptfr_start = start,
7389 .ptfr_end = end,
7390 .ptfr_flush_needed = false
7391 };
7392
7393 pmap_lock(pmap, PMAP_LOCK_SHARED);
7394 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
7395
7396 while (va < end) {
7397 vm_map_address_t curr_end;
7398
7399 curr_end = ((va + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr));
7400 if (curr_end > end) {
7401 curr_end = end;
7402 }
7403
7404 va = phys_attribute_clear_twig_internal(pmap, va, curr_end, bits, options, &flush_range);
7405 if ((va < curr_end) || pmap_pending_preemption()) {
7406 break;
7407 }
7408 }
7409 pmap_unlock(pmap, PMAP_LOCK_SHARED);
7410 if (flush_range.ptfr_flush_needed) {
7411 flush_range.ptfr_end = va;
7412 pmap_get_pt_ops(pmap)->flush_tlb_region_async(
7413 flush_range.ptfr_start,
7414 flush_range.ptfr_end - flush_range.ptfr_start,
7415 flush_range.ptfr_pmap,
7416 true);
7417 sync_tlb_flush();
7418 }
7419 return va;
7420 }
7421
7422 static void
7423 phys_attribute_clear_range(
7424 pmap_t pmap,
7425 vm_map_address_t start,
7426 vm_map_address_t end,
7427 unsigned int bits,
7428 unsigned int options)
7429 {
7430 /*
7431 * We allow single-page requests to execute non-preemptibly,
7432 * as it doesn't make sense to sample AST_URGENT for a single-page
7433 * operation, and there are a couple of special use cases that
7434 * require a non-preemptible single-page operation.
7435 */
7436 if ((end - start) > (pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO)) {
7437 pmap_verify_preemptible();
7438 }
7439
7440 PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR_RANGE) | DBG_FUNC_START, bits);
7441
7442 while (start < end) {
7443 #if XNU_MONITOR
7444 start = phys_attribute_clear_range_ppl(pmap, start, end, bits, options);
7445 #else
7446 start = phys_attribute_clear_range_internal(pmap, start, end, bits, options);
7447 #endif
7448 }
7449
7450 PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR_RANGE) | DBG_FUNC_END);
7451 }
7452 #endif /* __ARM_RANGE_TLBI__ */
7453
7454 static void
7455 phys_attribute_clear(
7456 ppnum_t pn,
7457 unsigned int bits,
7458 int options,
7459 void *arg)
7460 {
7461 /*
7462 * Do we really want this tracepoint? It will be extremely chatty.
7463 * Also, should we have a corresponding trace point for the set path?
7464 */
7465 PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_START, pn, bits);
7466
7467 #if XNU_MONITOR
7468 phys_attribute_clear_ppl(pn, bits, options, arg);
7469 #else
7470 phys_attribute_clear_internal(pn, bits, options, arg);
7471 #endif
7472
7473 PMAP_TRACE(3, PMAP_CODE(PMAP__ATTRIBUTE_CLEAR) | DBG_FUNC_END);
7474 }
7475
7476 /*
7477 * Set specified attribute bits.
7478 *
7479 * Set cached value in the pv head because we have
7480 * no per-mapping hardware support for referenced and
7481 * modify bits.
7482 */
7483 MARK_AS_PMAP_TEXT void
7484 phys_attribute_set_internal(
7485 ppnum_t pn,
7486 unsigned int bits)
7487 {
7488 pmap_paddr_t pa = ptoa(pn);
7489 assert(pn != vm_page_fictitious_addr);
7490
7491 #if XNU_MONITOR
7492 if (bits & PP_ATTR_PPL_OWNED_BITS) {
7493 panic("%s: illegal request, "
7494 "pn=%u, bits=%#x",
7495 __FUNCTION__,
7496 pn, bits);
7497 }
7498 #endif
7499
7500 ppattr_pa_set_bits(pa, (uint16_t)bits);
7501
7502 return;
7503 }
7504
7505 static void
7506 phys_attribute_set(
7507 ppnum_t pn,
7508 unsigned int bits)
7509 {
7510 #if XNU_MONITOR
7511 phys_attribute_set_ppl(pn, bits);
7512 #else
7513 phys_attribute_set_internal(pn, bits);
7514 #endif
7515 }
7516
7517
7518 /*
7519 * Check specified attribute bits.
7520 *
 * Use the software-cached bits (since there is no HW support).
7522 */
7523 static boolean_t
7524 phys_attribute_test(
7525 ppnum_t pn,
7526 unsigned int bits)
7527 {
7528 pmap_paddr_t pa = ptoa(pn);
7529 assert(pn != vm_page_fictitious_addr);
7530 return ppattr_pa_test_bits(pa, (pp_attr_t)bits);
7531 }
7532
7533
7534 /*
7535 * Set the modify/reference bits on the specified physical page.
7536 */
7537 void
7538 pmap_set_modify(ppnum_t pn)
7539 {
7540 phys_attribute_set(pn, PP_ATTR_MODIFIED);
7541 }
7542
7543
7544 /*
7545 * Clear the modify bits on the specified physical page.
7546 */
7547 void
7548 pmap_clear_modify(
7549 ppnum_t pn)
7550 {
7551 phys_attribute_clear(pn, PP_ATTR_MODIFIED, 0, NULL);
7552 }
7553
7554
7555 /*
7556 * pmap_is_modified:
7557 *
7558 * Return whether or not the specified physical page is modified
7559 * by any physical maps.
7560 */
7561 boolean_t
7562 pmap_is_modified(
7563 ppnum_t pn)
7564 {
7565 return phys_attribute_test(pn, PP_ATTR_MODIFIED);
7566 }
7567
7568
7569 /*
7570 * Set the reference bit on the specified physical page.
7571 */
7572 static void
7573 pmap_set_reference(
7574 ppnum_t pn)
7575 {
7576 phys_attribute_set(pn, PP_ATTR_REFERENCED);
7577 }
7578
7579 /*
7580 * Clear the reference bits on the specified physical page.
7581 */
7582 void
7583 pmap_clear_reference(
7584 ppnum_t pn)
7585 {
7586 phys_attribute_clear(pn, PP_ATTR_REFERENCED, 0, NULL);
7587 }
7588
7589
7590 /*
7591 * pmap_is_referenced:
7592 *
7593 * Return whether or not the specified physical page is referenced
7594 * by any physical maps.
7595 */
7596 boolean_t
7597 pmap_is_referenced(
7598 ppnum_t pn)
7599 {
7600 return phys_attribute_test(pn, PP_ATTR_REFERENCED);
7601 }
7602
7603 /*
7604 * pmap_get_refmod(phys)
7605 * returns the referenced and modified bits of the specified
7606 * physical page.
7607 */
7608 unsigned int
7609 pmap_get_refmod(
7610 ppnum_t pn)
7611 {
7612 return ((phys_attribute_test(pn, PP_ATTR_MODIFIED)) ? VM_MEM_MODIFIED : 0)
7613 | ((phys_attribute_test(pn, PP_ATTR_REFERENCED)) ? VM_MEM_REFERENCED : 0);
7614 }
7615
7616 static inline unsigned int
7617 pmap_clear_refmod_mask_to_modified_bits(const unsigned int mask)
7618 {
7619 return ((mask & VM_MEM_MODIFIED) ? PP_ATTR_MODIFIED : 0) |
7620 ((mask & VM_MEM_REFERENCED) ? PP_ATTR_REFERENCED : 0);
7621 }
7622
7623 /*
7624 * pmap_clear_refmod(phys, mask)
7625 * clears the referenced and modified bits as specified by the mask
7626 * of the specified physical page.
7627 */
7628 void
7629 pmap_clear_refmod_options(
7630 ppnum_t pn,
7631 unsigned int mask,
7632 unsigned int options,
7633 void *arg)
7634 {
7635 unsigned int bits;
7636
7637 bits = pmap_clear_refmod_mask_to_modified_bits(mask);
7638 phys_attribute_clear(pn, bits, options, arg);
7639 }
7640
7641 /*
7642 * Perform pmap_clear_refmod_options on a virtual address range.
7643 * The operation will be performed in bulk & tlb flushes will be coalesced
7644 * if possible.
7645 *
7646 * Returns true if the operation is supported on this platform.
7647 * If this function returns false, the operation is not supported and
7648 * nothing has been modified in the pmap.
7649 */
7650 bool
7651 pmap_clear_refmod_range_options(
7652 pmap_t pmap __unused,
7653 vm_map_address_t start __unused,
7654 vm_map_address_t end __unused,
7655 unsigned int mask __unused,
7656 unsigned int options __unused)
7657 {
7658 #if __ARM_RANGE_TLBI__
7659 unsigned int bits;
7660 bits = pmap_clear_refmod_mask_to_modified_bits(mask);
7661 phys_attribute_clear_range(pmap, start, end, bits, options);
7662 return true;
7663 #else /* __ARM_RANGE_TLBI__ */
7664 #pragma unused(pmap, start, end, mask, options)
	/*
	 * This operation allows the VM to bulk-modify refmod bits on a virtually
	 * contiguous range of addresses. This is a large performance improvement
	 * on platforms that support ranged TLBI instructions; on older platforms
	 * we can only flush per page or flush the entire ASID, so we currently
	 * support this operation only on platforms with ranged TLBI instructions.
	 * On other platforms, we require that the VM modify the bits on a
	 * per-page basis.
	 */
7674 return false;
7675 #endif /* __ARM_RANGE_TLBI__ */
7676 }
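
/*
 * Illustrative fallback sketch (hypothetical, for exposition): a caller of the
 * ranged form is expected to degrade to per-page calls when it returns false.
 *
 *	if (!pmap_clear_refmod_range_options(pmap, start, end, mask, options)) {
 *		// for each page pn mapped in [start, end):
 *		//	pmap_clear_refmod_options(pn, mask, options, NULL);
 *	}
 */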
7677
7678 void
7679 pmap_clear_refmod(
7680 ppnum_t pn,
7681 unsigned int mask)
7682 {
7683 pmap_clear_refmod_options(pn, mask, 0, NULL);
7684 }
7685
7686 unsigned int
7687 pmap_disconnect_options(
7688 ppnum_t pn,
7689 unsigned int options,
7690 void *arg)
7691 {
7692 if ((options & PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED)) {
7693 /*
7694 * On ARM, the "modified" bit is managed by software, so
7695 * we know up-front if the physical page is "modified",
7696 * without having to scan all the PTEs pointing to it.
		 * The caller should have made the VM page "busy" so no one
7698 * should be able to establish any new mapping and "modify"
7699 * the page behind us.
7700 */
7701 if (pmap_is_modified(pn)) {
7702 /*
7703 * The page has been modified and will be sent to
7704 * the VM compressor.
7705 */
7706 options |= PMAP_OPTIONS_COMPRESSOR;
7707 } else {
7708 /*
7709 * The page hasn't been modified and will be freed
7710 * instead of compressed.
7711 */
7712 }
7713 }
7714
7715 /* disconnect the page */
7716 pmap_page_protect_options(pn, 0, options, arg);
7717
7718 /* return ref/chg status */
7719 return pmap_get_refmod(pn);
7720 }
7721
7722 /*
7723 * Routine:
7724 * pmap_disconnect
7725 *
7726 * Function:
7727 * Disconnect all mappings for this page and return reference and change status
7728 * in generic format.
7729 *
7730 */
7731 unsigned int
7732 pmap_disconnect(
7733 ppnum_t pn)
7734 {
7735 pmap_page_protect(pn, 0); /* disconnect the page */
7736 return pmap_get_refmod(pn); /* return ref/chg status */
7737 }
7738
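/*
 * Return TRUE if any page in the inclusive range [first, last] may fall within
 * the kernel-managed physical range [vm_first_phys, vm_last_phys).
 */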
7739 boolean_t
7740 pmap_has_managed_page(ppnum_t first, ppnum_t last)
7741 {
7742 if (ptoa(first) >= vm_last_phys) {
7743 return FALSE;
7744 }
7745 if (ptoa(last) < vm_first_phys) {
7746 return FALSE;
7747 }
7748
7749 return TRUE;
7750 }
7751
7752 /*
7753 * The state maintained by the noencrypt functions is used as a
7754 * debugging aid on ARM. This incurs some overhead on the part
7755 * of the caller. A special case check in phys_attribute_clear
7756 * (the most expensive path) currently minimizes this overhead,
7757 * but stubbing these functions out on RELEASE kernels yields
7758 * further wins.
7759 */
7760 boolean_t
7761 pmap_is_noencrypt(
7762 ppnum_t pn)
7763 {
7764 #if DEVELOPMENT || DEBUG
7765 boolean_t result = FALSE;
7766
7767 if (!pa_valid(ptoa(pn))) {
7768 return FALSE;
7769 }
7770
7771 result = (phys_attribute_test(pn, PP_ATTR_NOENCRYPT));
7772
7773 return result;
7774 #else
7775 #pragma unused(pn)
7776 return FALSE;
7777 #endif
7778 }
7779
7780 void
7781 pmap_set_noencrypt(
7782 ppnum_t pn)
7783 {
7784 #if DEVELOPMENT || DEBUG
7785 if (!pa_valid(ptoa(pn))) {
7786 return;
7787 }
7788
7789 phys_attribute_set(pn, PP_ATTR_NOENCRYPT);
7790 #else
7791 #pragma unused(pn)
7792 #endif
7793 }
7794
7795 void
7796 pmap_clear_noencrypt(
7797 ppnum_t pn)
7798 {
7799 #if DEVELOPMENT || DEBUG
7800 if (!pa_valid(ptoa(pn))) {
7801 return;
7802 }
7803
7804 phys_attribute_clear(pn, PP_ATTR_NOENCRYPT, 0, NULL);
7805 #else
7806 #pragma unused(pn)
7807 #endif
7808 }
7809
7810 #if XNU_MONITOR
7811 boolean_t
7812 pmap_is_monitor(ppnum_t pn)
7813 {
7814 assert(pa_valid(ptoa(pn)));
7815 return phys_attribute_test(pn, PP_ATTR_MONITOR);
7816 }
7817 #endif
7818
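/*
 * pmap_lock_phys_page()/pmap_unlock_phys_page(): take or drop the PV head lock
 * for a managed page. Non-managed pages (and all pages when the PPL owns the
 * PV locks) fall back to the global phys_backup_lock.
 */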
7819 void
7820 pmap_lock_phys_page(ppnum_t pn)
7821 {
7822 #if !XNU_MONITOR
7823 unsigned int pai;
7824 pmap_paddr_t phys = ptoa(pn);
7825
7826 if (pa_valid(phys)) {
7827 pai = pa_index(phys);
7828 pvh_lock(pai);
7829 } else
7830 #else
7831 (void)pn;
7832 #endif
7833 { simple_lock(&phys_backup_lock, LCK_GRP_NULL);}
7834 }
7835
7836
7837 void
7838 pmap_unlock_phys_page(ppnum_t pn)
7839 {
7840 #if !XNU_MONITOR
7841 unsigned int pai;
7842 pmap_paddr_t phys = ptoa(pn);
7843
7844 if (pa_valid(phys)) {
7845 pai = pa_index(phys);
7846 pvh_unlock(pai);
7847 } else
7848 #else
7849 (void)pn;
7850 #endif
7851 { simple_unlock(&phys_backup_lock);}
7852 }
7853
7854 MARK_AS_PMAP_TEXT static void
7855 pmap_switch_user_ttb(pmap_t pmap, pmap_cpu_data_t *cpu_data_ptr)
7856 {
7857 #if (__ARM_VMSA__ == 7)
7858 cpu_data_ptr->cpu_user_pmap = pmap;
7859 cpu_data_ptr->cpu_user_pmap_stamp = pmap->stamp;
7860 if (pmap != kernel_pmap) {
7861 cpu_data_ptr->cpu_nested_pmap = pmap->nested_pmap;
7862 }
7863
7864 #if MACH_ASSERT && __ARM_USER_PROTECT__
7865 {
7866 unsigned int ttbr0_val, ttbr1_val;
7867 __asm__ volatile ("mrc p15,0,%0,c2,c0,0\n" : "=r"(ttbr0_val));
7868 __asm__ volatile ("mrc p15,0,%0,c2,c0,1\n" : "=r"(ttbr1_val));
		if (ttbr0_val != ttbr1_val) {
			panic("Mismatched ttbr0 %08X and ttbr1 %08X", ttbr0_val, ttbr1_val);
		}
7872 if (pmap->ttep & 0x1000) {
7873 panic("Misaligned ttbr0 %08X", pmap->ttep);
7874 }
7875 }
7876 #endif
7877 #if !__ARM_USER_PROTECT__
7878 set_mmu_ttb(pmap->ttep);
7879 set_context_id(pmap->hw_asid);
7880 #endif
7881
7882 #else /* (__ARM_VMSA__ == 7) */
7883
7884 if (pmap != kernel_pmap) {
7885 cpu_data_ptr->cpu_nested_pmap = pmap->nested_pmap;
7886 cpu_data_ptr->cpu_nested_pmap_attr = (cpu_data_ptr->cpu_nested_pmap == NULL) ?
7887 NULL : pmap_get_pt_attr(cpu_data_ptr->cpu_nested_pmap);
7888 cpu_data_ptr->cpu_nested_region_addr = pmap->nested_region_addr;
7889 cpu_data_ptr->cpu_nested_region_size = pmap->nested_region_size;
7890 #if __ARM_MIXED_PAGE_SIZE__
7891 cpu_data_ptr->commpage_page_shift = pt_attr_leaf_shift(pmap_get_pt_attr(pmap));
7892 #endif
7893 }
7894
7895
7896 #if __ARM_MIXED_PAGE_SIZE__
7897 if ((pmap != kernel_pmap) && (pmap_get_pt_attr(pmap)->pta_tcr_value != get_tcr())) {
7898 set_tcr(pmap_get_pt_attr(pmap)->pta_tcr_value);
7899 }
7900 #endif /* __ARM_MIXED_PAGE_SIZE__ */
7901
7902
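	/*
	 * Program TTBR0 with the pmap's root table, tagging it with the pmap's
	 * hardware ASID so that TLB entries belonging to other address spaces
	 * need not be flushed on the switch.
	 */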
7903 if (pmap != kernel_pmap) {
7904 set_mmu_ttb((pmap->ttep & TTBR_BADDR_MASK) | (((uint64_t)pmap->hw_asid) << TTBR_ASID_SHIFT));
7905 } else if (!pmap_user_ttb_is_clear()) {
7906 pmap_clear_user_ttb_internal();
7907 }
7908 #endif /* (__ARM_VMSA__ == 7) */
7909 }
7910
7911 MARK_AS_PMAP_TEXT void
7912 pmap_clear_user_ttb_internal(void)
7913 {
7914 #if (__ARM_VMSA__ > 7)
7915 set_mmu_ttb(invalid_ttep & TTBR_BADDR_MASK);
7916 #else
7917 set_mmu_ttb(kernel_pmap->ttep);
7918 #endif
7919 }
7920
7921 void
7922 pmap_clear_user_ttb(void)
7923 {
7924 PMAP_TRACE(3, PMAP_CODE(PMAP__CLEAR_USER_TTB) | DBG_FUNC_START, NULL, 0, 0);
7925 #if XNU_MONITOR
7926 pmap_clear_user_ttb_ppl();
7927 #else
7928 pmap_clear_user_ttb_internal();
7929 #endif
7930 PMAP_TRACE(3, PMAP_CODE(PMAP__CLEAR_USER_TTB) | DBG_FUNC_END);
7931 }
7932
7933
7934 #if defined(__arm64__)
7935 /*
7936 * Marker for use in multi-pass fast-fault PV list processing.
7937 * ARM_PTE_COMPRESSED should never otherwise be set on PTEs processed by
7938 * these functions, as compressed PTEs should never be present in PV lists.
7939 * Note that this only holds true for arm64; for arm32 we don't have enough
7940 * SW bits in the PTE, so the same bit does double-duty as the COMPRESSED
7941 * and WRITEABLE marker depending on whether the PTE is valid.
7942 */
7943 #define ARM_PTE_FF_MARKER ARM_PTE_COMPRESSED
7944 _Static_assert(ARM_PTE_COMPRESSED != ARM_PTE_WRITEABLE, "compressed bit aliases writeable");
7945 _Static_assert(ARM_PTE_COMPRESSED != ARM_PTE_WIRED, "compressed bit aliases wired");
7946 #endif
7947
7948
7949 MARK_AS_PMAP_TEXT static boolean_t
7950 arm_force_fast_fault_with_flush_range(
7951 ppnum_t ppnum,
7952 vm_prot_t allow_mode,
7953 int options,
7954 pmap_tlb_flush_range_t *flush_range)
7955 {
7956 pmap_paddr_t phys = ptoa(ppnum);
7957 pv_entry_t *pve_p;
7958 pt_entry_t *pte_p;
7959 unsigned int pai;
7960 unsigned int pass1_updated = 0;
7961 unsigned int pass2_updated = 0;
7962 boolean_t result;
7963 pv_entry_t **pv_h;
7964 bool is_reusable;
7965 bool ref_fault;
7966 bool mod_fault;
7967 bool clear_write_fault = false;
7968 bool ref_aliases_mod = false;
7969 bool mustsynch = ((options & PMAP_OPTIONS_FF_LOCKED) == 0);
7970
7971 assert(ppnum != vm_page_fictitious_addr);
7972
7973 if (!pa_valid(phys)) {
7974 return FALSE; /* Not a managed page. */
7975 }
7976
7977 result = TRUE;
7978 ref_fault = false;
7979 mod_fault = false;
7980 pai = pa_index(phys);
7981 if (__probable(mustsynch)) {
7982 pvh_lock(pai);
7983 }
7984 pv_h = pai_to_pvh(pai);
7985
7986 #if XNU_MONITOR
7987 if (__improbable(ppattr_pa_test_monitor(phys))) {
7988 panic("%s: PA 0x%llx belongs to PPL.", __func__, (uint64_t)phys);
7989 }
7990 #endif
7991 pte_p = PT_ENTRY_NULL;
7992 pve_p = PV_ENTRY_NULL;
7993 if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) {
7994 pte_p = pvh_ptep(pv_h);
7995 } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) {
7996 pve_p = pvh_pve_list(pv_h);
7997 } else if (__improbable(!pvh_test_type(pv_h, PVH_TYPE_NULL))) {
7998 panic("%s: invalid PV head 0x%llx for PA 0x%llx", __func__, (uint64_t)(*pv_h), (uint64_t)phys);
7999 }
8000
8001 is_reusable = ppattr_test_reusable(pai);
8002
8003 /*
8004 * issue_tlbi is used to indicate that this function will need to issue at least one TLB
8005 * invalidation during pass 2. tlb_flush_needed only indicates that PTE permissions have
8006 * changed and that a TLB flush will be needed *at some point*, so we'll need to call
8007 * FLUSH_PTE_STRONG() to synchronize prior PTE updates. In the case of a flush_range
8008 * operation, TLB invalidation may be handled by the caller so it's possible for
8009 * tlb_flush_needed to be true while issue_tlbi is false.
8010 */
8011 bool issue_tlbi = false;
8012 bool tlb_flush_needed = false;
8013
8014 pv_entry_t *orig_pve_p = pve_p;
8015 pt_entry_t *orig_pte_p = pte_p;
8016 int pve_ptep_idx = 0;
8017
8018 /*
8019 * Pass 1: Make any necessary PTE updates, marking PTEs that will require
8020 * TLB invalidation in pass 2.
8021 */
8022 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
8023 pt_entry_t spte;
8024 pt_entry_t tmplate;
8025
8026 if (pve_p != PV_ENTRY_NULL) {
8027 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
8028 if (pte_p == PT_ENTRY_NULL) {
8029 goto fff_skip_pve_pass1;
8030 }
8031 }
8032
8033 #ifdef PVH_FLAG_IOMMU
8034 if (pvh_ptep_is_iommu(pte_p)) {
8035 goto fff_skip_pve_pass1;
8036 }
8037 #endif
8038 if (*pte_p == ARM_PTE_EMPTY) {
8039 panic("pte is empty: pte_p=%p ppnum=0x%x", pte_p, ppnum);
8040 }
8041 if (ARM_PTE_IS_COMPRESSED(*pte_p, pte_p)) {
8042 panic("pte is COMPRESSED: pte_p=%p ppnum=0x%x", pte_p, ppnum);
8043 }
8044
8045 const pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
8046 const pmap_t pmap = ptdp->pmap;
8047 const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
8048 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
8049
8050 assert(va >= pmap->min && va < pmap->max);
8051
8052 /* update pmap stats and ledgers */
8053 const bool is_internal = ppattr_pve_is_internal(pai, pve_p, pve_ptep_idx);
8054 const bool is_altacct = ppattr_pve_is_altacct(pai, pve_p, pve_ptep_idx);
8055 if (is_altacct) {
8056 /*
8057 * We do not track "reusable" status for
8058 * "alternate accounting" mappings.
8059 */
8060 } else if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) &&
8061 is_reusable &&
8062 is_internal &&
8063 pmap != kernel_pmap) {
8064 /* one less "reusable" */
8065 pmap_ledger_debit(pmap, task_ledgers.reusable, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8066 /* one more "internal" */
8067 pmap_ledger_credit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8068 pmap_ledger_credit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8069
8070 /*
8071 * Since the page is being marked non-reusable, we assume that it will be
8072 * modified soon. Avoid the cost of another trap to handle the fast
8073 * fault when we next write to this page.
8074 */
8075 clear_write_fault = true;
8076 } else if ((options & PMAP_OPTIONS_SET_REUSABLE) &&
8077 !is_reusable &&
8078 is_internal &&
8079 pmap != kernel_pmap) {
8080 /* one more "reusable" */
8081 pmap_ledger_credit(pmap, task_ledgers.reusable, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8082 pmap_ledger_debit(pmap, task_ledgers.internal, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8083 pmap_ledger_debit(pmap, task_ledgers.phys_footprint, pt_attr_page_size(pt_attr) * PAGE_RATIO);
8084 }
8085
8086 bool wiredskip = pte_is_wired(*pte_p) &&
8087 ((options & PMAP_OPTIONS_FF_WIRED) == 0);
8088
8089 if (wiredskip) {
8090 result = FALSE;
8091 goto fff_skip_pve_pass1;
8092 }
8093
8094 spte = *pte_p;
8095 tmplate = spte;
8096
8097 if ((allow_mode & VM_PROT_READ) != VM_PROT_READ) {
8098 /* read protection sets the pte to fault */
8099 tmplate = tmplate & ~ARM_PTE_AF;
8100 ref_fault = true;
8101 }
8102 if ((allow_mode & VM_PROT_WRITE) != VM_PROT_WRITE) {
8103 /* take away write permission if set */
8104 if (pmap == kernel_pmap) {
8105 if ((tmplate & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RWNA)) {
8106 tmplate = ((tmplate & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RONA));
8107 pte_set_was_writeable(tmplate, true);
8108 mod_fault = true;
8109 }
8110 } else {
8111 if ((tmplate & ARM_PTE_APMASK) == pt_attr_leaf_rw(pt_attr)) {
8112 tmplate = ((tmplate & ~ARM_PTE_APMASK) | pt_attr_leaf_ro(pt_attr));
8113 pte_set_was_writeable(tmplate, true);
8114 mod_fault = true;
8115 }
8116 }
8117 }
8118
8119 #if MACH_ASSERT && XNU_MONITOR
8120 if (is_pte_xprr_protected(pmap, spte)) {
8121 if (pte_to_xprr_perm(spte) != pte_to_xprr_perm(tmplate)) {
8122 panic("%s: attempted to mutate an xPRR mapping pte_p=%p, pmap=%p, pv_h=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx, "
8123 "ppnum=0x%x, options=0x%x, allow_mode=0x%x",
8124 __FUNCTION__, pte_p, pmap, pv_h, pve_p, (unsigned long long)spte, (unsigned long long)tmplate, (unsigned long long)va,
8125 ppnum, options, allow_mode);
8126 }
8127 }
8128 #endif /* MACH_ASSERT && XNU_MONITOR */
8129
8130 if (result && (tmplate != spte)) {
8131 if ((spte & (~ARM_PTE_WRITEABLE)) != (tmplate & (~ARM_PTE_WRITEABLE)) &&
8132 !(options & PMAP_OPTIONS_NOFLUSH)) {
8133 tlb_flush_needed = true;
8134 if (!flush_range || (flush_range->ptfr_pmap != pmap) ||
8135 va >= flush_range->ptfr_end || va < flush_range->ptfr_start) {
8136 #ifdef ARM_PTE_FF_MARKER
8137 assert(!(spte & ARM_PTE_FF_MARKER));
8138 tmplate |= ARM_PTE_FF_MARKER;
8139 ++pass1_updated;
8140 #endif
8141 issue_tlbi = true;
8142 }
8143 }
8144 write_pte_fast(pte_p, tmplate);
8145 }
8146
8147 fff_skip_pve_pass1:
8148 pte_p = PT_ENTRY_NULL;
8149 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
8150 pve_ptep_idx = 0;
8151 pve_p = pve_next(pve_p);
8152 }
8153 }
8154
8155 if (tlb_flush_needed) {
8156 FLUSH_PTE_STRONG();
8157 }
8158
8159 if (!issue_tlbi) {
8160 goto fff_finish;
8161 }
8162
8163 /* Pass 2: Issue any required TLB invalidations */
8164 pve_p = orig_pve_p;
8165 pte_p = orig_pte_p;
8166 pve_ptep_idx = 0;
8167
8168 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
8169 if (pve_p != PV_ENTRY_NULL) {
8170 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
8171 if (pte_p == PT_ENTRY_NULL) {
8172 goto fff_skip_pve_pass2;
8173 }
8174 }
8175
8176 #ifdef PVH_FLAG_IOMMU
8177 if (pvh_ptep_is_iommu(pte_p)) {
8178 goto fff_skip_pve_pass2;
8179 }
8180 #endif
8181
8182 #ifdef ARM_PTE_FF_MARKER
8183 pt_entry_t spte = *pte_p;
8184
8185 if (!(spte & ARM_PTE_FF_MARKER)) {
8186 goto fff_skip_pve_pass2;
8187 } else {
8188 spte &= (~ARM_PTE_FF_MARKER);
8189 /* No need to synchronize with the TLB flush; we're changing a SW-managed bit */
8190 write_pte_fast(pte_p, spte);
8191 ++pass2_updated;
8192 }
8193 #endif
8194 const pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
8195 const pmap_t pmap = ptdp->pmap;
8196 const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
8197
8198 if (!flush_range || (flush_range->ptfr_pmap != pmap) ||
8199 (va >= flush_range->ptfr_end) || (va < flush_range->ptfr_start)) {
8200 pmap_get_pt_ops(pmap)->flush_tlb_region_async(va,
8201 pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap, true);
8202 }
8203
8204 fff_skip_pve_pass2:
8205 pte_p = PT_ENTRY_NULL;
8206 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
8207 pve_ptep_idx = 0;
8208 pve_p = pve_next(pve_p);
8209 }
8210 }
8211
8212 fff_finish:
8213 if (__improbable(pass1_updated != pass2_updated)) {
8214 panic("%s: first pass (%u) and second pass (%u) disagree on updated mappings",
8215 __func__, pass1_updated, pass2_updated);
8216 }
8217
8218 /*
8219 * If we are using the same approach for ref and mod
8220 * faults on this PTE, do not clear the write fault;
8221 * this would cause both ref and mod to be set on the
8222 * page again, and prevent us from taking ANY read/write
8223 * fault on the mapping.
8224 */
8225 if (clear_write_fault && !ref_aliases_mod) {
8226 arm_clear_fast_fault(ppnum, VM_PROT_WRITE, PT_ENTRY_NULL);
8227 }
8228 if (tlb_flush_needed) {
8229 if (flush_range) {
8230 /* Delayed flush. Signal to the caller that the flush is needed. */
8231 flush_range->ptfr_flush_needed = true;
8232 } else {
8233 sync_tlb_flush();
8234 }
8235 }
8236
8237 /* update global "reusable" status for this page */
8238 if ((options & PMAP_OPTIONS_CLEAR_REUSABLE) && is_reusable) {
8239 ppattr_clear_reusable(pai);
8240 } else if ((options & PMAP_OPTIONS_SET_REUSABLE) && !is_reusable) {
8241 ppattr_set_reusable(pai);
8242 }
8243
8244 if (mod_fault) {
8245 ppattr_set_modfault(pai);
8246 }
8247 if (ref_fault) {
8248 ppattr_set_reffault(pai);
8249 }
8250 if (__probable(mustsynch)) {
8251 pvh_unlock(pai);
8252 }
8253 return result;
8254 }
8255
8256 MARK_AS_PMAP_TEXT boolean_t
8257 arm_force_fast_fault_internal(
8258 ppnum_t ppnum,
8259 vm_prot_t allow_mode,
8260 int options)
8261 {
8262 if (__improbable((options & (PMAP_OPTIONS_FF_LOCKED | PMAP_OPTIONS_NOFLUSH)) != 0)) {
8263 panic("arm_force_fast_fault(0x%x, 0x%x, 0x%x): invalid options", ppnum, allow_mode, options);
8264 }
8265 return arm_force_fast_fault_with_flush_range(ppnum, allow_mode, options, NULL);
8266 }
8267
8268 /*
8269 * Routine: arm_force_fast_fault
8270 *
8271 * Function:
8272 * Force all mappings for this page to fault according
8273 * to the access modes allowed, so we can gather ref/modify
8274 * bits again.
8275 */
8276
8277 boolean_t
8278 arm_force_fast_fault(
8279 ppnum_t ppnum,
8280 vm_prot_t allow_mode,
8281 int options,
8282 __unused void *arg)
8283 {
8284 pmap_paddr_t phys = ptoa(ppnum);
8285
8286 assert(ppnum != vm_page_fictitious_addr);
8287
8288 if (!pa_valid(phys)) {
8289 return FALSE; /* Not a managed page. */
8290 }
8291
8292 #if XNU_MONITOR
8293 return arm_force_fast_fault_ppl(ppnum, allow_mode, options);
8294 #else
8295 return arm_force_fast_fault_internal(ppnum, allow_mode, options);
8296 #endif
8297 }
8298
8299 /*
8300 * Routine: arm_clear_fast_fault
8301 *
8302 * Function:
 *	Clear the pending force-fault state for all mappings of this page based
 *	on the observed fault type, and update the ref/modify bits.
8305 */
8306 MARK_AS_PMAP_TEXT static boolean_t
8307 arm_clear_fast_fault(
8308 ppnum_t ppnum,
8309 vm_prot_t fault_type,
8310 pt_entry_t *pte_p)
8311 {
8312 pmap_paddr_t pa = ptoa(ppnum);
8313 pv_entry_t *pve_p;
8314 unsigned int pai;
8315 boolean_t result;
8316 bool tlb_flush_needed = false;
8317 pv_entry_t **pv_h;
8318 unsigned int npve = 0;
8319 unsigned int pass1_updated = 0;
8320 unsigned int pass2_updated = 0;
8321
8322 assert(ppnum != vm_page_fictitious_addr);
8323
8324 if (!pa_valid(pa)) {
8325 return FALSE; /* Not a managed page. */
8326 }
8327
8328 result = FALSE;
8329 pai = pa_index(pa);
8330 pvh_assert_locked(pai);
8331 pv_h = pai_to_pvh(pai);
8332
8333 pve_p = PV_ENTRY_NULL;
8334 if (pte_p == PT_ENTRY_NULL) {
8335 if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) {
8336 pte_p = pvh_ptep(pv_h);
8337 } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) {
8338 pve_p = pvh_pve_list(pv_h);
8339 } else if (__improbable(!pvh_test_type(pv_h, PVH_TYPE_NULL))) {
8340 panic("%s: invalid PV head 0x%llx for PA 0x%llx", __func__, (uint64_t)(*pv_h), (uint64_t)pa);
8341 }
8342 }
8343
8344 pv_entry_t *orig_pve_p = pve_p;
8345 pt_entry_t *orig_pte_p = pte_p;
8346 int pve_ptep_idx = 0;
8347
8348 /*
8349 * Pass 1: Make any necessary PTE updates, marking PTEs that will require
8350 * TLB invalidation in pass 2.
8351 */
8352 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
8353 pt_entry_t spte;
8354 pt_entry_t tmplate;
8355
8356 if (pve_p != PV_ENTRY_NULL) {
8357 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
8358 if (pte_p == PT_ENTRY_NULL) {
8359 goto cff_skip_pve_pass1;
8360 }
8361 }
8362
8363 #ifdef PVH_FLAG_IOMMU
8364 if (pvh_ptep_is_iommu(pte_p)) {
8365 goto cff_skip_pve_pass1;
8366 }
8367 #endif
8368 if (*pte_p == ARM_PTE_EMPTY) {
8369 panic("pte is empty: pte_p=%p ppnum=0x%x", pte_p, ppnum);
8370 }
8371
8372 const pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
8373 const pmap_t pmap = ptdp->pmap;
8374 __assert_only const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
8375
8376 assert(va >= pmap->min && va < pmap->max);
8377
8378 spte = *pte_p;
8379 tmplate = spte;
8380
8381 if ((fault_type & VM_PROT_WRITE) && (pte_was_writeable(spte))) {
8382 {
8383 if (pmap == kernel_pmap) {
8384 tmplate = ((spte & ~ARM_PTE_APMASK) | ARM_PTE_AP(AP_RWNA));
8385 } else {
8386 assert(pmap->type != PMAP_TYPE_NESTED);
8387 tmplate = ((spte & ~ARM_PTE_APMASK) | pt_attr_leaf_rw(pmap_get_pt_attr(pmap)));
8388 }
8389 }
8390
8391 tmplate |= ARM_PTE_AF;
8392
8393 pte_set_was_writeable(tmplate, false);
8394 ppattr_pa_set_bits(pa, PP_ATTR_REFERENCED | PP_ATTR_MODIFIED);
8395 } else if ((fault_type & VM_PROT_READ) && ((spte & ARM_PTE_AF) != ARM_PTE_AF)) {
8396 tmplate = spte | ARM_PTE_AF;
8397
8398 {
8399 ppattr_pa_set_bits(pa, PP_ATTR_REFERENCED);
8400 }
8401 }
8402
8403 #if MACH_ASSERT && XNU_MONITOR
8404 if (is_pte_xprr_protected(pmap, spte)) {
8405 if (pte_to_xprr_perm(spte) != pte_to_xprr_perm(tmplate)) {
8406 panic("%s: attempted to mutate an xPRR mapping pte_p=%p, pmap=%p, pv_h=%p, pve_p=%p, pte=0x%llx, tmplate=0x%llx, va=0x%llx, "
8407 "ppnum=0x%x, fault_type=0x%x",
8408 __FUNCTION__, pte_p, pmap, pv_h, pve_p, (unsigned long long)spte, (unsigned long long)tmplate, (unsigned long long)va,
8409 ppnum, fault_type);
8410 }
8411 }
8412 #endif /* MACH_ASSERT && XNU_MONITOR */
8413
8414 assert(spte != ARM_PTE_TYPE_FAULT);
8415 if (spte != tmplate) {
8416 if ((spte & (~ARM_PTE_WRITEABLE)) != (tmplate & (~ARM_PTE_WRITEABLE))) {
8417 #ifdef ARM_PTE_FF_MARKER
8418 assert(!(spte & ARM_PTE_FF_MARKER));
8419 tmplate |= ARM_PTE_FF_MARKER;
8420 ++pass1_updated;
8421 #endif
8422 tlb_flush_needed = true;
8423 }
8424 write_pte_fast(pte_p, tmplate);
8425 result = TRUE;
8426 }
8427
8428 cff_skip_pve_pass1:
8429 pte_p = PT_ENTRY_NULL;
8430 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
8431 pve_ptep_idx = 0;
8432 pve_p = pve_next(pve_p);
8433 ++npve;
8434 if (__improbable(npve == PMAP_MAX_PV_LIST_CHUNK_SIZE)) {
8435 break;
8436 }
8437 }
8438 }
8439
8440 if (!tlb_flush_needed) {
8441 goto cff_finish;
8442 }
8443
8444 FLUSH_PTE_STRONG();
8445
8446 /* Pass 2: Issue any required TLB invalidations */
8447 pve_p = orig_pve_p;
8448 pte_p = orig_pte_p;
8449 pve_ptep_idx = 0;
8450 npve = 0;
8451
8452 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
8453 if (pve_p != PV_ENTRY_NULL) {
8454 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
8455 if (pte_p == PT_ENTRY_NULL) {
8456 goto cff_skip_pve_pass2;
8457 }
8458 }
8459
8460 #ifdef PVH_FLAG_IOMMU
8461 if (pvh_ptep_is_iommu(pte_p)) {
8462 goto cff_skip_pve_pass2;
8463 }
8464 #endif
8465
8466 #ifdef ARM_PTE_FF_MARKER
8467 pt_entry_t spte = *pte_p;
8468
8469 if (!(spte & ARM_PTE_FF_MARKER)) {
8470 goto cff_skip_pve_pass2;
8471 } else {
8472 spte &= (~ARM_PTE_FF_MARKER);
8473 /* No need to synchronize with the TLB flush; we're changing a SW-managed bit */
8474 write_pte_fast(pte_p, spte);
8475 ++pass2_updated;
8476 }
8477 #endif
8478 const pt_desc_t * const ptdp = ptep_get_ptd(pte_p);
8479 const pmap_t pmap = ptdp->pmap;
8480 const vm_map_address_t va = ptd_get_va(ptdp, pte_p);
8481
8482 pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap, true);
8483
8484 cff_skip_pve_pass2:
8485 pte_p = PT_ENTRY_NULL;
8486 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
8487 pve_ptep_idx = 0;
8488 pve_p = pve_next(pve_p);
8489 ++npve;
8490 if (__improbable(npve == PMAP_MAX_PV_LIST_CHUNK_SIZE)) {
8491 break;
8492 }
8493 }
8494 }
8495
8496 cff_finish:
8497 if (__improbable(pass1_updated != pass2_updated)) {
8498 panic("%s: first pass (%u) and second pass (%u) disagree on updated mappings",
8499 __func__, pass1_updated, pass2_updated);
8500 }
8501 if (tlb_flush_needed) {
8502 sync_tlb_flush();
8503 }
8504 return result;
8505 }
8506
8507 /*
8508 * Determine if the fault was induced by software tracking of
8509 * modify/reference bits. If so, re-enable the mapping (and set
8510 * the appropriate bits).
8511 *
8512 * Returns KERN_SUCCESS if the fault was induced and was
8513 * successfully handled.
8514 *
8515 * Returns KERN_FAILURE if the fault was not induced and
8516 * the function was unable to deal with it.
8517 *
 * Returns KERN_PROTECTION_FAILURE if the pmap layer explicitly
8519 * disallows this type of access.
8520 */
8521 MARK_AS_PMAP_TEXT kern_return_t
8522 arm_fast_fault_internal(
8523 pmap_t pmap,
8524 vm_map_address_t va,
8525 vm_prot_t fault_type,
8526 __unused bool was_af_fault,
8527 __unused bool from_user)
8528 {
8529 kern_return_t result = KERN_FAILURE;
8530 pt_entry_t *ptep;
8531 pt_entry_t spte = ARM_PTE_TYPE_FAULT;
8532 unsigned int pai;
8533 pmap_paddr_t pa;
8534 validate_pmap_mutable(pmap);
8535
8536 pmap_lock(pmap, PMAP_LOCK_SHARED);
8537
8538 /*
8539 * If the entry doesn't exist, is completely invalid, or is already
8540 * valid, we can't fix it here.
8541 */
8542
8543 const uint64_t pmap_page_size = pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO;
8544 ptep = pmap_pte(pmap, va & ~(pmap_page_size - 1));
8545 if (ptep != PT_ENTRY_NULL) {
8546 while (true) {
8547 spte = *((volatile pt_entry_t*)ptep);
8548
8549 pa = pte_to_pa(spte);
8550
8551 if ((spte == ARM_PTE_TYPE_FAULT) ||
8552 ARM_PTE_IS_COMPRESSED(spte, ptep)) {
8553 pmap_unlock(pmap, PMAP_LOCK_SHARED);
8554 return result;
8555 }
8556
8557 if (!pa_valid(pa)) {
8558 pmap_unlock(pmap, PMAP_LOCK_SHARED);
8559 #if XNU_MONITOR
8560 if (pmap_cache_attributes((ppnum_t)atop(pa)) & PP_ATTR_MONITOR) {
8561 return KERN_PROTECTION_FAILURE;
8562 } else
8563 #endif
8564 return result;
8565 }
8566 pai = pa_index(pa);
8567 pvh_lock(pai);
8568 if (*ptep == spte) {
8569 /*
8570 * Double-check the spte value, as we care about the AF bit.
8571 * It's also possible that pmap_page_protect() transitioned the
8572 * PTE to compressed/empty before we grabbed the PVH lock.
8573 */
8574 break;
8575 }
8576 pvh_unlock(pai);
8577 }
8578 } else {
8579 pmap_unlock(pmap, PMAP_LOCK_SHARED);
8580 return result;
8581 }
8582
8583
8584 if ((result != KERN_SUCCESS) &&
8585 ((ppattr_test_reffault(pai)) || ((fault_type & VM_PROT_WRITE) && ppattr_test_modfault(pai)))) {
8586 /*
8587 * An attempted access will always clear ref/mod fault state, as
8588 * appropriate for the fault type. arm_clear_fast_fault will
8589 * update the associated PTEs for the page as appropriate; if
8590 * any PTEs are updated, we redrive the access. If the mapping
8591 * does not actually allow for the attempted access, the
8592 * following fault will (hopefully) fail to update any PTEs, and
8593 * thus cause arm_fast_fault to decide that it failed to handle
8594 * the fault.
8595 */
8596 if (ppattr_test_reffault(pai)) {
8597 ppattr_clear_reffault(pai);
8598 }
8599 if ((fault_type & VM_PROT_WRITE) && ppattr_test_modfault(pai)) {
8600 ppattr_clear_modfault(pai);
8601 }
8602
8603 if (arm_clear_fast_fault((ppnum_t)atop(pa), fault_type, PT_ENTRY_NULL)) {
8604 /*
			 * Should this preserve KERN_PROTECTION_FAILURE? The
			 * cost of not doing so is another fault in a case
			 * that should already result in an exception.
8608 */
8609 result = KERN_SUCCESS;
8610 }
8611 }
8612
8613 /*
8614 * If the PTE already has sufficient permissions, we can report the fault as handled.
8615 * This may happen, for example, if multiple threads trigger roughly simultaneous faults
	 * on mappings of the same page.
8617 */
8618 if ((result == KERN_FAILURE) && (spte & ARM_PTE_AF)) {
8619 uintptr_t ap_ro, ap_rw, ap_x;
8620 if (pmap == kernel_pmap) {
8621 ap_ro = ARM_PTE_AP(AP_RONA);
8622 ap_rw = ARM_PTE_AP(AP_RWNA);
8623 ap_x = ARM_PTE_NX;
8624 } else {
8625 ap_ro = pt_attr_leaf_ro(pmap_get_pt_attr(pmap));
8626 ap_rw = pt_attr_leaf_rw(pmap_get_pt_attr(pmap));
8627 ap_x = pt_attr_leaf_x(pmap_get_pt_attr(pmap));
8628 }
8629 /*
8630 * NOTE: this doesn't currently handle user-XO mappings. Depending upon the
8631 * hardware they may be xPRR-protected, in which case they'll be handled
8632 * by the is_pte_xprr_protected() case above. Additionally, the exception
8633 * handling path currently does not call arm_fast_fault() without at least
8634 * VM_PROT_READ in fault_type.
8635 */
8636 if (((spte & ARM_PTE_APMASK) == ap_rw) ||
8637 (!(fault_type & VM_PROT_WRITE) && ((spte & ARM_PTE_APMASK) == ap_ro))) {
8638 if (!(fault_type & VM_PROT_EXECUTE) || ((spte & ARM_PTE_XMASK) == ap_x)) {
8639 result = KERN_SUCCESS;
8640 }
8641 }
8642 }
8643
8644 if ((result == KERN_FAILURE) && arm_clear_fast_fault((ppnum_t)atop(pa), fault_type, ptep)) {
8645 /*
8646 * A prior arm_clear_fast_fault() operation may have returned early due to
8647 * another pending PV list operation or an excessively large PV list.
8648 * Attempt a targeted fixup of the PTE that caused the fault to avoid repeatedly
8649 * taking a fault on the same mapping.
8650 */
8651 result = KERN_SUCCESS;
8652 }
8653
8654 pvh_unlock(pai);
8655 pmap_unlock(pmap, PMAP_LOCK_SHARED);
8656 return result;
8657 }
8658
8659 kern_return_t
8660 arm_fast_fault(
8661 pmap_t pmap,
8662 vm_map_address_t va,
8663 vm_prot_t fault_type,
8664 bool was_af_fault,
8665 __unused bool from_user)
8666 {
8667 kern_return_t result = KERN_FAILURE;
8668
8669 if (va < pmap->min || va >= pmap->max) {
8670 return result;
8671 }
8672
8673 PMAP_TRACE(3, PMAP_CODE(PMAP__FAST_FAULT) | DBG_FUNC_START,
8674 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(va), fault_type,
8675 from_user);
8676
8677 #if (__ARM_VMSA__ == 7)
8678 if (pmap != kernel_pmap) {
8679 pmap_cpu_data_t *cpu_data_ptr = pmap_get_cpu_data();
8680 pmap_t cur_pmap;
8681 pmap_t cur_user_pmap;
8682
8683 cur_pmap = current_pmap();
8684 cur_user_pmap = cpu_data_ptr->cpu_user_pmap;
8685
8686 if ((cur_user_pmap == cur_pmap) && (cur_pmap == pmap)) {
8687 if (cpu_data_ptr->cpu_user_pmap_stamp != pmap->stamp) {
8688 pmap_set_pmap(pmap, current_thread());
8689 result = KERN_SUCCESS;
8690 goto done;
8691 }
8692 }
8693 }
8694 #endif
8695
8696 #if XNU_MONITOR
8697 result = arm_fast_fault_ppl(pmap, va, fault_type, was_af_fault, from_user);
8698 #else
8699 result = arm_fast_fault_internal(pmap, va, fault_type, was_af_fault, from_user);
8700 #endif
8701
8702 #if (__ARM_VMSA__ == 7)
8703 done:
8704 #endif
8705
8706 PMAP_TRACE(3, PMAP_CODE(PMAP__FAST_FAULT) | DBG_FUNC_END, result);
8707
8708 return result;
8709 }
8710
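/*
 * pmap_copy_page copies the specified (machine independent) page.
 */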
8711 void
8712 pmap_copy_page(
8713 ppnum_t psrc,
8714 ppnum_t pdst)
8715 {
8716 bcopy_phys((addr64_t) (ptoa(psrc)),
8717 (addr64_t) (ptoa(pdst)),
8718 PAGE_SIZE);
8719 }
8720
8721
/*
 * pmap_copy_part_page copies the specified regions of the given
 * (machine independent) pages.
 */
8725 void
8726 pmap_copy_part_page(
8727 ppnum_t psrc,
8728 vm_offset_t src_offset,
8729 ppnum_t pdst,
8730 vm_offset_t dst_offset,
8731 vm_size_t len)
8732 {
8733 bcopy_phys((addr64_t) (ptoa(psrc) + src_offset),
8734 (addr64_t) (ptoa(pdst) + dst_offset),
8735 len);
8736 }
8737
8738
8739 /*
8740 * pmap_zero_page zeros the specified (machine independent) page.
8741 */
8742 void
8743 pmap_zero_page(
8744 ppnum_t pn)
8745 {
8746 assert(pn != vm_page_fictitious_addr);
8747 bzero_phys((addr64_t) ptoa(pn), PAGE_SIZE);
8748 }
8749
8750 /*
8751 * pmap_zero_part_page
8752 * zeros the specified (machine independent) part of a page.
8753 */
8754 void
8755 pmap_zero_part_page(
8756 ppnum_t pn,
8757 vm_offset_t offset,
8758 vm_size_t len)
8759 {
8760 assert(pn != vm_page_fictitious_addr);
8761 assert(offset + len <= PAGE_SIZE);
8762 bzero_phys((addr64_t) (ptoa(pn) + offset), len);
8763 }
8764
8765 void
8766 pmap_map_globals(
8767 void)
8768 {
8769 pt_entry_t *ptep, pte;
8770
8771 ptep = pmap_pte(kernel_pmap, LOWGLOBAL_ALIAS);
8772 assert(ptep != PT_ENTRY_NULL);
8773 assert(*ptep == ARM_PTE_EMPTY);
8774
8775 pte = pa_to_pte(ml_static_vtop((vm_offset_t)&lowGlo)) | AP_RONA | ARM_PTE_NX | ARM_PTE_PNX | ARM_PTE_AF | ARM_PTE_TYPE;
8776 #if __ARM_KERNEL_PROTECT__
8777 pte |= ARM_PTE_NG;
8778 #endif /* __ARM_KERNEL_PROTECT__ */
8779 pte |= ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK);
8780 #if (__ARM_VMSA__ > 7)
8781 pte |= ARM_PTE_SH(SH_OUTER_MEMORY);
8782 #else
8783 pte |= ARM_PTE_SH;
8784 #endif
8785 *ptep = pte;
8786 FLUSH_PTE();
8787 PMAP_UPDATE_TLBS(kernel_pmap, LOWGLOBAL_ALIAS, LOWGLOBAL_ALIAS + PAGE_SIZE, false, true);
8788
8789 #if KASAN
8790 kasan_notify_address(LOWGLOBAL_ALIAS, PAGE_SIZE);
8791 #endif
8792 }
8793
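/*
 * Return the kernel VA of per-CPU copy window 'index' for 'cpu_num'. Each CPU
 * owns CPUWINDOWS_MAX page-sized windows starting at CPUWINDOWS_BASE, so the
 * address is CPUWINDOWS_BASE + PAGE_SIZE * (CPUWINDOWS_MAX * cpu_num + index).
 */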
8794 vm_offset_t
8795 pmap_cpu_windows_copy_addr(int cpu_num, unsigned int index)
8796 {
8797 if (__improbable(index >= CPUWINDOWS_MAX)) {
8798 panic("%s: invalid index %u", __func__, index);
8799 }
8800 return (vm_offset_t)(CPUWINDOWS_BASE + (PAGE_SIZE * ((CPUWINDOWS_MAX * cpu_num) + index)));
8801 }
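
/*
 * Worked example (illustrative, assuming CPUWINDOWS_MAX == 4): window 1 on
 * CPU 2 maps at
 *
 *   CPUWINDOWS_BASE + PAGE_SIZE * ((4 * 2) + 1) == CPUWINDOWS_BASE + 9 * PAGE_SIZE
 *
 * i.e. each CPU owns a contiguous block of CPUWINDOWS_MAX single-page windows.
 */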
8802
8803 MARK_AS_PMAP_TEXT unsigned int
8804 pmap_map_cpu_windows_copy_internal(
8805 ppnum_t pn,
8806 vm_prot_t prot,
8807 unsigned int wimg_bits)
8808 {
8809 pt_entry_t *ptep = NULL, pte;
8810 pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data();
8811 unsigned int cpu_num;
8812 unsigned int i;
8813 vm_offset_t cpu_copywindow_vaddr = 0;
8814 bool need_strong_sync = false;
8815
8816 #if XNU_MONITOR
8817 unsigned int cacheattr = (!pa_valid(ptoa(pn) & ARM_PTE_PAGE_MASK) ? pmap_cache_attributes(pn) : 0);
8818 need_strong_sync = ((cacheattr & PMAP_IO_RANGE_STRONG_SYNC) != 0);
8819 #endif
8820
8821 #if XNU_MONITOR
8822 #ifdef __ARM_COHERENT_IO__
8823 if (__improbable(pa_valid(ptoa(pn) & ARM_PTE_PAGE_MASK) && !pmap_ppl_disable)) {
8824 panic("%s: attempted to map a managed page, "
8825 "pn=%u, prot=0x%x, wimg_bits=0x%x",
8826 __FUNCTION__,
8827 pn, prot, wimg_bits);
8828 }
8829 if (__improbable((cacheattr & PP_ATTR_MONITOR) && (prot != VM_PROT_READ) && !pmap_ppl_disable)) {
8830 panic("%s: attempt to map PPL-protected I/O address 0x%llx as writable", __func__, (uint64_t)ptoa(pn));
8831 }
8832
8833 #else /* __ARM_COHERENT_IO__ */
8834 #error CPU copy windows are not properly supported with both the PPL and incoherent IO
8835 #endif /* __ARM_COHERENT_IO__ */
8836 #endif /* XNU_MONITOR */
8837 cpu_num = pmap_cpu_data->cpu_number;
8838
8839 for (i = 0; i < CPUWINDOWS_MAX; i++) {
8840 cpu_copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_num, i);
8841 ptep = pmap_pte(kernel_pmap, cpu_copywindow_vaddr);
8842 assert(!ARM_PTE_IS_COMPRESSED(*ptep, ptep));
8843 if (*ptep == ARM_PTE_TYPE_FAULT) {
8844 break;
8845 }
8846 }
8847 if (i == CPUWINDOWS_MAX) {
		panic("%s: out of free copy windows", __func__);
8849 }
8850
8851 pte = pa_to_pte(ptoa(pn)) | ARM_PTE_TYPE | ARM_PTE_AF | ARM_PTE_NX | ARM_PTE_PNX;
8852 #if __ARM_KERNEL_PROTECT__
8853 pte |= ARM_PTE_NG;
8854 #endif /* __ARM_KERNEL_PROTECT__ */
8855
8856 pte |= wimg_to_pte(wimg_bits, ptoa(pn));
8857
8858 if (prot & VM_PROT_WRITE) {
8859 pte |= ARM_PTE_AP(AP_RWNA);
8860 } else {
8861 pte |= ARM_PTE_AP(AP_RONA);
8862 }
8863
8864 write_pte_fast(ptep, pte);
	/*
	 * Invalidate the TLB. This also covers the case in which an interrupting
	 * context reuses cpu_copywindow_vaddr after the interrupted context, in
	 * pmap_unmap_cpu_windows_copy(), has cleared the PTE but not yet issued
	 * the TLB invalidate.
	 */
8869 FLUSH_PTE_STRONG();
8870 PMAP_UPDATE_TLBS(kernel_pmap, cpu_copywindow_vaddr, cpu_copywindow_vaddr + PAGE_SIZE, pmap_cpu_data->copywindow_strong_sync[i], true);
8871 pmap_cpu_data->copywindow_strong_sync[i] = need_strong_sync;
8872
8873 return i;
8874 }
8875
8876 unsigned int
8877 pmap_map_cpu_windows_copy(
8878 ppnum_t pn,
8879 vm_prot_t prot,
8880 unsigned int wimg_bits)
8881 {
8882 #if XNU_MONITOR
8883 return pmap_map_cpu_windows_copy_ppl(pn, prot, wimg_bits);
8884 #else
8885 return pmap_map_cpu_windows_copy_internal(pn, prot, wimg_bits);
8886 #endif
8887 }
8888
8889 MARK_AS_PMAP_TEXT void
8890 pmap_unmap_cpu_windows_copy_internal(
8891 unsigned int index)
8892 {
8893 pt_entry_t *ptep;
8894 unsigned int cpu_num;
8895 vm_offset_t cpu_copywindow_vaddr = 0;
8896 pmap_cpu_data_t *pmap_cpu_data = pmap_get_cpu_data();
8897
8898 cpu_num = pmap_cpu_data->cpu_number;
8899
8900 cpu_copywindow_vaddr = pmap_cpu_windows_copy_addr(cpu_num, index);
	/*
	 * Issue full-system DSB to ensure prior operations on the per-CPU window
	 * (which are likely to have been on I/O memory) are complete before
	 * tearing down the mapping.
	 */
8904 __builtin_arm_dsb(DSB_SY);
8905 ptep = pmap_pte(kernel_pmap, cpu_copywindow_vaddr);
8906 write_pte_strong(ptep, ARM_PTE_TYPE_FAULT);
8907 PMAP_UPDATE_TLBS(kernel_pmap, cpu_copywindow_vaddr, cpu_copywindow_vaddr + PAGE_SIZE, pmap_cpu_data->copywindow_strong_sync[index], true);
8908 }
8909
8910 void
8911 pmap_unmap_cpu_windows_copy(
8912 unsigned int index)
8913 {
8914 #if XNU_MONITOR
8915 return pmap_unmap_cpu_windows_copy_ppl(index);
8916 #else
8917 return pmap_unmap_cpu_windows_copy_internal(index);
8918 #endif
8919 }
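
/*
 * Typical usage sketch (illustrative only; the caller is assumed to stay on
 * the same CPU while the window is held, and 'wimg_bits' to come from
 * pmap_cache_attributes()):
 *
 *   unsigned int index = pmap_map_cpu_windows_copy(pn, VM_PROT_READ, wimg_bits);
 *   const void *src = (const void *)pmap_cpu_windows_copy_addr(cpu_number(), index);
 *   ... read the page contents through 'src' ...
 *   pmap_unmap_cpu_windows_copy(index);
 */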
8920
8921 #if XNU_MONITOR
8922
8923 MARK_AS_PMAP_TEXT void
8924 pmap_invoke_with_page(
8925 ppnum_t page_number,
8926 void *ctx,
8927 void (*callback)(void *ctx, ppnum_t page_number, const void *page))
8928 {
8929 #pragma unused(page_number, ctx, callback)
8930 }
8931
8932 /*
8933 * Loop over every pmap_io_range (I/O ranges marked as owned by
8934 * the PPL in the device tree) and conditionally call callback() on each range
8935 * that needs to be included in the hibernation image.
8936 *
8937 * @param ctx Will be passed as-is into the callback method. Use NULL if no
8938 * context is needed in the callback.
 * @param callback Callback function invoked on each range that has the
 *                 PMAP_IO_RANGE_NEEDS_HIBERNATING flag set.
8940 */
8941 MARK_AS_PMAP_TEXT void
8942 pmap_hibernate_invoke(void *ctx, void (*callback)(void *ctx, uint64_t addr, uint64_t len))
8943 {
8944 extern const pmap_io_range_t* io_attr_table;
8945 extern const unsigned int num_io_rgns;
8946 for (unsigned int i = 0; i < num_io_rgns; ++i) {
8947 if (io_attr_table[i].wimg & PMAP_IO_RANGE_NEEDS_HIBERNATING) {
8948 callback(ctx, io_attr_table[i].addr, io_attr_table[i].len);
8949 }
8950 }
8951 }
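
/*
 * Hypothetical callback sketch (illustrative): total up the bytes of PPL-owned
 * I/O memory that must be captured in the hibernation image.
 *
 *   static void
 *   count_hib_bytes(void *ctx, uint64_t addr __unused, uint64_t len)
 *   {
 *           *(uint64_t *)ctx += len;
 *   }
 *
 *   uint64_t total = 0;
 *   pmap_hibernate_invoke(&total, count_hib_bytes);
 */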
8952
8953 /**
8954 * Set the HASHED pv_head_table flag for the passed in physical page if it's a
8955 * PPL-owned page. Otherwise, do nothing.
8956 *
8957 * @param addr Physical address of the page to set the HASHED flag on.
8958 */
8959 MARK_AS_PMAP_TEXT void
8960 pmap_set_ppl_hashed_flag(const pmap_paddr_t addr)
8961 {
8962 /* Ignore non-managed kernel memory. */
8963 if (!pa_valid(addr)) {
8964 return;
8965 }
8966
8967 const unsigned int pai = pa_index(addr);
8968 if (pp_attr_table[pai] & PP_ATTR_MONITOR) {
8969 pv_entry_t **pv_h = pai_to_pvh(pai);
8970
8971 /* Mark that the PPL-owned page has been hashed into the hibernation image. */
8972 pvh_lock(pai);
8973 pvh_set_flags(pv_h, pvh_get_flags(pv_h) | PVH_FLAG_HASHED);
8974 pvh_unlock(pai);
8975 }
8976 }
8977
8978 /**
8979 * Loop through every physical page in the system and clear out the HASHED flag
8980 * on every PPL-owned page. That flag is used to keep track of which pages have
8981 * been hashed into the hibernation image during the hibernation entry process.
8982 *
8983 * The HASHED flag needs to be cleared out between hibernation cycles because the
 * pv_head_table and pp_attr_table might have been copied into the hibernation
8985 * image with the HASHED flag set on certain pages. It's important to clear the
8986 * HASHED flag to ensure that the enforcement of all PPL-owned memory being hashed
8987 * into the hibernation image can't be compromised across hibernation cycles.
8988 */
8989 MARK_AS_PMAP_TEXT void
8990 pmap_clear_ppl_hashed_flag_all(void)
8991 {
8992 const unsigned int last_index = pa_index(vm_last_phys);
8993 pv_entry_t **pv_h = NULL;
8994
	for (unsigned int pai = 0; pai < last_index; ++pai) {
8996 pv_h = pai_to_pvh(pai);
8997
		/* Test for PPL-owned pages that have the HASHED flag set in their pv_head_table entries. */
8999 if ((pvh_get_flags(pv_h) & PVH_FLAG_HASHED) &&
9000 (pp_attr_table[pai] & PP_ATTR_MONITOR)) {
9001 pvh_lock(pai);
9002 pvh_set_flags(pv_h, pvh_get_flags(pv_h) & ~PVH_FLAG_HASHED);
9003 pvh_unlock(pai);
9004 }
9005 }
9006 }
9007
9008 /**
9009 * Enforce that all PPL-owned pages were hashed into the hibernation image. The
9010 * ppl_hib driver will call this after all wired pages have been copied into the
9011 * hibernation image.
9012 */
9013 MARK_AS_PMAP_TEXT void
9014 pmap_check_ppl_hashed_flag_all(void)
9015 {
9016 const unsigned int last_index = pa_index(vm_last_phys);
9017 pv_entry_t **pv_h = NULL;
9018
	for (unsigned int pai = 0; pai < last_index; ++pai) {
9020 pv_h = pai_to_pvh(pai);
9021
9022 /**
9023 * The PMAP stacks are explicitly not saved into the image so skip checking
9024 * the pages that contain the PMAP stacks.
9025 */
9026 const bool is_pmap_stack = (pai >= pa_index(pmap_stacks_start_pa)) &&
9027 (pai < pa_index(pmap_stacks_end_pa));
9028
9029 if (!is_pmap_stack &&
9030 (pp_attr_table[pai] & PP_ATTR_MONITOR) &&
9031 !(pvh_get_flags(pv_h) & PVH_FLAG_HASHED)) {
9032 panic("Found PPL-owned page that was not hashed into the hibernation image: pai %d", pai);
9033 }
9034 }
9035 }
9036
9037 #endif /* XNU_MONITOR */
9038
9039 /*
9040 * Indicate that a pmap is intended to be used as a nested pmap
9041 * within one or more larger address spaces. This must be set
9042 * before pmap_nest() is called with this pmap as the 'subordinate'.
9043 */
9044 MARK_AS_PMAP_TEXT void
9045 pmap_set_nested_internal(
9046 pmap_t pmap)
9047 {
9048 validate_pmap_mutable(pmap);
9049 if (__improbable(pmap->type != PMAP_TYPE_USER)) {
9050 panic("%s: attempt to nest unsupported pmap %p of type 0x%hhx",
9051 __func__, pmap, pmap->type);
9052 }
9053 pmap->type = PMAP_TYPE_NESTED;
9054 pmap_get_pt_ops(pmap)->free_id(pmap);
9055 }
9056
9057 void
9058 pmap_set_nested(
9059 pmap_t pmap)
9060 {
9061 #if XNU_MONITOR
9062 pmap_set_nested_ppl(pmap);
9063 #else
9064 pmap_set_nested_internal(pmap);
9065 #endif
9066 }
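
/*
 * Ordering sketch (illustrative): shared-region setup is expected to mark the
 * subordinate pmap as nested before any nesting occurs, e.g.:
 *
 *   pmap_set_nested(subord);                    // one-time transition to PMAP_TYPE_NESTED
 *   kr = pmap_nest(grand, subord, base, size);  // now legal: subord is a nested pmap
 */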
9067
9068 /*
9069 * pmap_trim_range(pmap, start, end)
9070 *
9071 * pmap = pmap to operate on
9072 * start = start of the range
9073 * end = end of the range
9074 *
 * Attempts to deallocate TTEs for the given range within the nested region.
9076 */
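/*
 * Worked example (illustrative, assuming a 32MB leaf-table span so that
 * pt_attr_leaf_table_offmask() == 0x1ffffff): for start = 0x101000000 and
 * end = 0x10a000000 the range contracts inward to
 *
 *   adjusted_start = (0x101000000 + 0x1ffffff) & ~0x1ffffff == 0x102000000
 *   adjusted_end   =  0x10a000000              & ~0x1ffffff == 0x10a000000
 *
 * so only whole leaf tables are considered for removal.
 */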
9077 MARK_AS_PMAP_TEXT static void
9078 pmap_trim_range(
9079 pmap_t pmap,
9080 addr64_t start,
9081 addr64_t end)
9082 {
9083 addr64_t cur;
9084 addr64_t nested_region_start;
9085 addr64_t nested_region_end;
9086 addr64_t adjusted_start;
9087 addr64_t adjusted_end;
9088 addr64_t adjust_offmask;
9089 tt_entry_t * tte_p;
9090 pt_entry_t * pte_p;
9091 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
9092
9093 if (__improbable(end < start)) {
9094 panic("%s: invalid address range, "
9095 "pmap=%p, start=%p, end=%p",
9096 __func__,
9097 pmap, (void*)start, (void*)end);
9098 }
9099
9100 nested_region_start = pmap->nested_region_addr;
9101 nested_region_end = nested_region_start + pmap->nested_region_size;
9102
9103 if (__improbable((start < nested_region_start) || (end > nested_region_end))) {
9104 panic("%s: range outside nested region %p-%p, "
9105 "pmap=%p, start=%p, end=%p",
9106 __func__, (void *)nested_region_start, (void *)nested_region_end,
9107 pmap, (void*)start, (void*)end);
9108 }
9109
9110 /* Contract the range to TT page boundaries. */
9111 adjust_offmask = pt_attr_leaf_table_offmask(pt_attr);
9112 adjusted_start = ((start + adjust_offmask) & ~adjust_offmask);
9113 adjusted_end = end & ~adjust_offmask;
9114
9115 /* Iterate over the range, trying to remove TTEs. */
9116 for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += pt_attr_twig_size(pt_attr)) {
9117 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
9118
9119 tte_p = pmap_tte(pmap, cur);
9120
9121 if ((tte_p != NULL) && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) {
9122 pte_p = (pt_entry_t *) ttetokv(*tte_p);
9123
9124 /* pmap_tte_deallocate()/pmap_tte_remove() will drop the pmap lock */
9125 if ((pmap->type == PMAP_TYPE_NESTED) && (ptep_get_info(pte_p)->refcnt == 0)) {
9126 /* Deallocate for the nested map. */
9127 pmap_tte_deallocate(pmap, cur, cur + PAGE_SIZE, false, tte_p, pt_attr_twig_level(pt_attr));
9128 } else if (pmap->type == PMAP_TYPE_USER) {
9129 /**
9130 * Just remove for the parent map. If the leaf table pointed
9131 * to by the TTE being removed (owned by the nested pmap)
9132 * has any mappings, then this call will panic. This
9133 * enforces the policy that tables being trimmed must be
9134 * empty to prevent possible use-after-free attacks.
9135 */
9136 pmap_tte_remove(pmap, cur, cur + PAGE_SIZE, false, tte_p, pt_attr_twig_level(pt_attr));
9137 } else {
9138 panic("%s: Unsupported pmap type for nesting %p %d", __func__, pmap, pmap->type);
9139 }
9140 } else {
9141 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
9142 }
9143 }
9144
9145 #if (__ARM_VMSA__ > 7)
9146 /* Remove empty L2 TTs. */
9147 adjusted_start = ((start + pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL)) & ~pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL));
9148 adjusted_end = end & ~pt_attr_ln_offmask(pt_attr, PMAP_TT_L1_LEVEL);
9149
9150 for (cur = adjusted_start; (cur < adjusted_end) && (cur >= adjusted_start); cur += pt_attr_ln_size(pt_attr, PMAP_TT_L1_LEVEL)) {
9151 /* For each L1 entry in our range... */
9152 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
9153
9154 bool remove_tt1e = true;
9155 tt_entry_t * tt1e_p = pmap_tt1e(pmap, cur);
9156 tt_entry_t * tt2e_start;
9157 tt_entry_t * tt2e_end;
9158 tt_entry_t * tt2e_p;
9159 tt_entry_t tt1e;
9160
9161 if (tt1e_p == NULL) {
9162 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
9163 continue;
9164 }
9165
9166 tt1e = *tt1e_p;
9167
9168 if (tt1e == ARM_TTE_TYPE_FAULT) {
9169 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
9170 continue;
9171 }
9172
9173 tt2e_start = &((tt_entry_t*) phystokv(tt1e & ARM_TTE_TABLE_MASK))[0];
9174 tt2e_end = &tt2e_start[pt_attr_page_size(pt_attr) / sizeof(*tt2e_start)];
9175
9176 for (tt2e_p = tt2e_start; tt2e_p < tt2e_end; tt2e_p++) {
9177 if (*tt2e_p != ARM_TTE_TYPE_FAULT) {
9178 /*
9179 * If any TTEs are populated, don't remove the
9180 * L1 TT.
9181 */
9182 remove_tt1e = false;
9183 }
9184 }
9185
9186 if (remove_tt1e) {
9187 pmap_tte_deallocate(pmap, cur, cur + PAGE_SIZE, false, tt1e_p, PMAP_TT_L1_LEVEL);
9188 } else {
9189 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
9190 }
9191 }
9192 #endif /* (__ARM_VMSA__ > 7) */
9193 }
9194
9195 /*
9196 * pmap_trim_internal(grand, subord, vstart, size)
9197 *
9198 * grand = pmap subord is nested in
9199 * subord = nested pmap
9200 * vstart = start of the used range in grand
9201 * size = size of the used range
9202 *
9203 * Attempts to trim the shared region page tables down to only cover the given
9204 * range in subord and grand.
9205 */
9206 MARK_AS_PMAP_TEXT void
9207 pmap_trim_internal(
9208 pmap_t grand,
9209 pmap_t subord,
9210 addr64_t vstart,
9211 uint64_t size)
9212 {
9213 addr64_t vend;
9214 addr64_t adjust_offmask;
9215
9216 if (__improbable(os_add_overflow(vstart, size, &vend))) {
9217 panic("%s: grand addr wraps around, "
9218 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9219 __func__, grand, subord, (void*)vstart, size);
9220 }
9221
9222 validate_pmap_mutable(grand);
9223 validate_pmap(subord);
9224
9225 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand);
9226
9227 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9228
9229 if (__improbable(subord->type != PMAP_TYPE_NESTED)) {
9230 panic("%s: subord is of non-nestable type 0x%hhx, "
9231 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9232 __func__, subord->type, grand, subord, (void*)vstart, size);
9233 }
9234
9235 if (__improbable(grand->type != PMAP_TYPE_USER)) {
		panic("%s: grand is of unsupported type 0x%hhx for nesting, "
9237 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9238 __func__, grand->type, grand, subord, (void*)vstart, size);
9239 }
9240
9241 if (__improbable(grand->nested_pmap != subord)) {
9242 panic("%s: grand->nested != subord, "
9243 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9244 __func__, grand, subord, (void*)vstart, size);
9245 }
9246
9247 if (__improbable((size != 0) &&
9248 ((vstart < grand->nested_region_addr) || (vend > (grand->nested_region_addr + grand->nested_region_size))))) {
9249 panic("%s: grand range not in nested region, "
9250 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9251 __func__, grand, subord, (void*)vstart, size);
9252 }
9253
9254
9255 if (!grand->nested_has_no_bounds_ref) {
9256 assert(subord->nested_bounds_set);
9257
9258 if (!grand->nested_bounds_set) {
9259 /* Inherit the bounds from subord. */
9260 grand->nested_region_true_start = subord->nested_region_true_start;
9261 grand->nested_region_true_end = subord->nested_region_true_end;
9262 grand->nested_bounds_set = true;
9263 }
9264
9265 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9266 return;
9267 }
9268
9269 if ((!subord->nested_bounds_set) && size) {
9270 adjust_offmask = pt_attr_leaf_table_offmask(pt_attr);
9271
9272 subord->nested_region_true_start = vstart;
9273 subord->nested_region_true_end = vend;
9274 subord->nested_region_true_start &= ~adjust_offmask;
9275
9276 if (__improbable(os_add_overflow(subord->nested_region_true_end, adjust_offmask, &subord->nested_region_true_end))) {
9277 panic("%s: padded true end wraps around, "
9278 "grand=%p, subord=%p, vstart=%p, size=%#llx",
9279 __func__, grand, subord, (void*)vstart, size);
9280 }
9281
9282 subord->nested_region_true_end &= ~adjust_offmask;
9283 subord->nested_bounds_set = true;
9284 }
9285
9286 if (subord->nested_bounds_set) {
9287 /* Inherit the bounds from subord. */
9288 grand->nested_region_true_start = subord->nested_region_true_start;
9289 grand->nested_region_true_end = subord->nested_region_true_end;
9290 grand->nested_bounds_set = true;
9291
9292 /* If we know the bounds, we can trim the pmap. */
9293 grand->nested_has_no_bounds_ref = false;
9294 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9295 } else {
9296 /* Don't trim if we don't know the bounds. */
9297 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9298 return;
9299 }
9300
9301 /* Trim grand to only cover the given range. */
9302 pmap_trim_range(grand, grand->nested_region_addr, grand->nested_region_true_start);
9303 pmap_trim_range(grand, grand->nested_region_true_end, (grand->nested_region_addr + grand->nested_region_size));
9304
9305 /* Try to trim subord. */
9306 pmap_trim_subord(subord);
9307 }
9308
9309 MARK_AS_PMAP_TEXT static void
9310 pmap_trim_self(pmap_t pmap)
9311 {
9312 if (pmap->nested_has_no_bounds_ref && pmap->nested_pmap) {
9313 /* If we have a no bounds ref, we need to drop it. */
9314 pmap_lock(pmap->nested_pmap, PMAP_LOCK_SHARED);
9315 pmap->nested_has_no_bounds_ref = false;
9316 boolean_t nested_bounds_set = pmap->nested_pmap->nested_bounds_set;
9317 vm_map_offset_t nested_region_true_start = pmap->nested_pmap->nested_region_true_start;
9318 vm_map_offset_t nested_region_true_end = pmap->nested_pmap->nested_region_true_end;
9319 pmap_unlock(pmap->nested_pmap, PMAP_LOCK_SHARED);
9320
9321 if (nested_bounds_set) {
9322 pmap_trim_range(pmap, pmap->nested_region_addr, nested_region_true_start);
9323 pmap_trim_range(pmap, nested_region_true_end, (pmap->nested_region_addr + pmap->nested_region_size));
9324 }
9325 /*
9326 * Try trimming the nested pmap, in case we had the
9327 * last reference.
9328 */
9329 pmap_trim_subord(pmap->nested_pmap);
9330 }
9331 }
9332
/*
 * pmap_trim_subord(subord)
 *
 * subord = nested pmap we are attempting to trim
 *
 * Trims subord if possible
 */
9341 MARK_AS_PMAP_TEXT static void
9342 pmap_trim_subord(pmap_t subord)
9343 {
9344 bool contract_subord = false;
9345
9346 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9347
9348 subord->nested_no_bounds_refcnt--;
9349
9350 if ((subord->nested_no_bounds_refcnt == 0) && (subord->nested_bounds_set)) {
9351 /* If this was the last no bounds reference, trim subord. */
9352 contract_subord = true;
9353 }
9354
9355 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9356
9357 if (contract_subord) {
9358 pmap_trim_range(subord, subord->nested_region_addr, subord->nested_region_true_start);
9359 pmap_trim_range(subord, subord->nested_region_true_end, subord->nested_region_addr + subord->nested_region_size);
9360 }
9361 }
9362
9363 void
9364 pmap_trim(
9365 pmap_t grand,
9366 pmap_t subord,
9367 addr64_t vstart,
9368 uint64_t size)
9369 {
9370 #if XNU_MONITOR
9371 pmap_trim_ppl(grand, subord, vstart, size);
9372
9373 pmap_ledger_check_balance(grand);
9374 pmap_ledger_check_balance(subord);
9375 #else
9376 pmap_trim_internal(grand, subord, vstart, size);
9377 #endif
9378 }
9379
9380 #if HAS_APPLE_PAC
9381 void *
9382 pmap_sign_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key)
9383 {
9384 void *res = NULL;
9385 uint64_t current_intr_state = pmap_interrupts_disable();
9386
9387 uint64_t saved_jop_state = ml_enable_user_jop_key(jop_key);
9388 switch (key) {
9389 case ptrauth_key_asia:
9390 res = ptrauth_sign_unauthenticated(value, ptrauth_key_asia, discriminator);
9391 break;
9392 case ptrauth_key_asda:
9393 res = ptrauth_sign_unauthenticated(value, ptrauth_key_asda, discriminator);
9394 break;
9395 default:
9396 panic("attempt to sign user pointer without process independent key");
9397 }
9398 ml_disable_user_jop_key(jop_key, saved_jop_state);
9399
9400 pmap_interrupts_restore(current_intr_state);
9401
9402 return res;
9403 }
9404
9405 void *
9406 pmap_sign_user_ptr(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key)
9407 {
9408 return pmap_sign_user_ptr_internal(value, key, discriminator, jop_key);
9409 }
9410
9411 void *
9412 pmap_auth_user_ptr_internal(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key)
9413 {
9414 if ((key != ptrauth_key_asia) && (key != ptrauth_key_asda)) {
9415 panic("attempt to auth user pointer without process independent key");
9416 }
9417
9418 void *res = NULL;
9419 uint64_t current_intr_state = pmap_interrupts_disable();
9420
9421 uint64_t saved_jop_state = ml_enable_user_jop_key(jop_key);
9422 res = ml_auth_ptr_unchecked(value, key, discriminator);
9423 ml_disable_user_jop_key(jop_key, saved_jop_state);
9424
9425 pmap_interrupts_restore(current_intr_state);
9426
9427 return res;
9428 }
9429
9430 void *
9431 pmap_auth_user_ptr(void *value, ptrauth_key key, uint64_t discriminator, uint64_t jop_key)
9432 {
9433 return pmap_auth_user_ptr_internal(value, key, discriminator, jop_key);
9434 }
9435 #endif /* HAS_APPLE_PAC */
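
/*
 * Round-trip sketch (illustrative): a pointer signed on behalf of a user
 * process with a process-independent key can later be authenticated with the
 * same key, discriminator, and JOP key:
 *
 *   void *signed_ptr = pmap_sign_user_ptr(raw_ptr, ptrauth_key_asia, disc, jop_key);
 *   void *checked    = pmap_auth_user_ptr(signed_ptr, ptrauth_key_asia, disc, jop_key);
 *   // 'checked' equals 'raw_ptr' if the signature was valid
 */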
9436
9437 /*
9438 * Marker to indicate that a pmap_[un]nest() operation has finished operating on
9439 * the 'subordinate' pmap and has begun operating on the 'grand' pmap. This
9440 * flag is supplied in the low-order bit of the 'vrestart' param as well as the
9441 * return value, to indicate where a preempted [un]nest operation should resume.
9442 * When the return value contains the ending address of the nested region with
9443 * PMAP_NEST_GRAND in the low-order bit, the operation has completed.
9444 */
9445 #define PMAP_NEST_GRAND ((vm_map_offset_t) 0x1)
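
/*
 * Encoding sketch (illustrative): twig-aligned addresses never have bit 0 set,
 * so the phase flag and the resume address pack into a single value:
 *
 *   vm_map_offset_t cursor = resume_va | PMAP_NEST_GRAND;    // phase 2, resume at resume_va
 *   bool in_grand_phase    = (cursor & PMAP_NEST_GRAND);
 *   vm_map_offset_t va     = cursor & ~PMAP_NEST_GRAND;
 *   bool done              = (cursor == (vend | PMAP_NEST_GRAND));
 */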
9446
9447 /*
9448 * kern_return_t pmap_nest(grand, subord, vstart, size)
9449 *
9450 * grand = the pmap that we will nest subord into
9451 * subord = the pmap that goes into the grand
9452 * vstart = start of range in pmap to be inserted
9453 * size = Size of nest area (up to 16TB)
9454 *
9455 * Inserts a pmap into another. This is used to implement shared segments.
9456 *
9457 */
9458
9459 /**
9460 * Embeds a range of mappings from one pmap ('subord') into another ('grand')
9461 * by inserting the twig-level TTEs from 'subord' directly into 'grand'.
9462 * This function operates in 3 main phases:
9463 * 1. Bookkeeping to ensure tracking structures for the nested region are set up.
9464 * 2. Expansion of subord to ensure the required leaf-level page table pages for
9465 * the mapping range are present in subord.
9466 * 3. Copying of twig-level TTEs from subord to grand, such that grand ultimately
9467 * contains pointers to subord's leaf-level pagetable pages for the specified
9468 * VA range.
9469 *
9470 * This function may return early due to pending AST_URGENT preemption; if so
9471 * it will indicate the need to be re-entered.
9472 *
9473 * @param grand pmap to insert the TTEs into. Must be a user pmap.
9474 * @param subord pmap from which to extract the TTEs. Must be a nested pmap.
9475 * @param vstart twig-aligned virtual address for the beginning of the nesting range
9476 * @param size twig-aligned size of the nesting range
9477 * @param vrestart the twig-aligned starting address of the current call. May contain
9478 * PMAP_NEST_GRAND in bit 0 to indicate the operation should skip to step 3) above.
9479 * @param krp Should be initialized to KERN_SUCCESS by caller, will be set to
9480 * KERN_RESOURCE_SHORTAGE on allocation failure.
9481 *
9482 * @return the virtual address at which to restart the operation, possibly including
9483 * PMAP_NEST_GRAND to indicate the phase at which to restart. If
9484 * (vstart + size) | PMAP_NEST_GRAND is returned, the operation completed.
9485 */
9486 MARK_AS_PMAP_TEXT vm_map_offset_t
9487 pmap_nest_internal(
9488 pmap_t grand,
9489 pmap_t subord,
9490 addr64_t vstart,
9491 uint64_t size,
9492 vm_map_offset_t vrestart,
9493 kern_return_t *krp)
9494 {
9495 kern_return_t kr = KERN_FAILURE;
9496 vm_map_offset_t vaddr;
9497 tt_entry_t *stte_p;
9498 tt_entry_t *gtte_p;
9499 unsigned int nested_region_asid_bitmap_size;
9500 unsigned int* nested_region_asid_bitmap;
9501 int expand_options = 0;
9502 bool deref_subord = true;
9503
9504 addr64_t vend;
9505 if (__improbable(os_add_overflow(vstart, size, &vend))) {
9506 panic("%s: %p grand addr wraps around: 0x%llx + 0x%llx", __func__, grand, vstart, size);
9507 }
9508 if (__improbable(((vrestart & ~PMAP_NEST_GRAND) > vend) ||
9509 ((vrestart & ~PMAP_NEST_GRAND) < vstart))) {
9510 panic("%s: vrestart 0x%llx is outside range [0x%llx, 0x%llx)", __func__,
9511 (unsigned long long)vrestart, (unsigned long long)vstart, (unsigned long long)vend);
9512 }
9513
9514 assert(krp != NULL);
9515 validate_pmap_mutable(grand);
9516 validate_pmap(subord);
9517 #if XNU_MONITOR
9518 /*
9519 * Ordering is important here. validate_pmap() has already ensured subord is a
9520 * PPL-controlled pmap pointer, but it could have already been destroyed or could
9521 * be in the process of being destroyed. If destruction is already committed,
9522 * then the check of ref_count below will cover us. If destruction is initiated
9523 * during or after this call, then pmap_destroy() will catch the non-zero
9524 * nested_count.
9525 */
9526 os_atomic_inc(&subord->nested_count, relaxed);
9527 os_atomic_thread_fence(seq_cst);
9528 #endif
9529 if (__improbable(os_atomic_inc_orig(&subord->ref_count, relaxed) <= 0)) {
9530 panic("%s: invalid subordinate pmap %p", __func__, subord);
9531 }
9532
9533 const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand);
9534 if (__improbable(pmap_get_pt_attr(subord) != pt_attr)) {
9535 panic("%s: attempt to nest pmap %p into pmap %p with mismatched attributes", __func__, subord, grand);
9536 }
9537
9538 #if XNU_MONITOR
9539 expand_options |= PMAP_TT_ALLOCATE_NOWAIT;
9540 #endif
9541
9542 if (__improbable(((size | vstart | (vrestart & ~PMAP_NEST_GRAND)) &
9543 (pt_attr_leaf_table_offmask(pt_attr))) != 0x0ULL)) {
9544 panic("pmap_nest() pmap %p unaligned nesting request 0x%llx, 0x%llx, 0x%llx",
9545 grand, vstart, size, (unsigned long long)vrestart);
9546 }
9547
9548 if (__improbable(subord->type != PMAP_TYPE_NESTED)) {
9549 panic("%s: subordinate pmap %p is of non-nestable type 0x%hhx", __func__, subord, subord->type);
9550 }
9551
9552 if (__improbable(grand->type != PMAP_TYPE_USER)) {
9553 panic("%s: grand pmap %p is of unsupported type 0x%hhx for nesting", __func__, grand, grand->type);
9554 }
9555
9556 if (subord->nested_region_asid_bitmap == NULL) {
9557 nested_region_asid_bitmap_size = (unsigned int)(size >> pt_attr_twig_shift(pt_attr)) / (sizeof(unsigned int) * NBBY);
9558
9559 #if XNU_MONITOR
9560 pmap_paddr_t pa = 0;
9561
9562 if (__improbable((nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE)) {
9563 panic("%s: nested_region_asid_bitmap_size=%u will not fit in a page, "
9564 "grand=%p, subord=%p, vstart=0x%llx, size=%llx",
9565 __FUNCTION__, nested_region_asid_bitmap_size,
9566 grand, subord, vstart, size);
9567 }
9568
9569 kr = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT);
9570
9571 if (kr != KERN_SUCCESS) {
9572 goto nest_cleanup;
9573 }
9574
9575 assert(pa);
9576
9577 nested_region_asid_bitmap = (unsigned int *)phystokv(pa);
9578 #else
9579 nested_region_asid_bitmap = kalloc_data(
9580 nested_region_asid_bitmap_size * sizeof(unsigned int),
9581 Z_WAITOK | Z_ZERO);
9582 #endif
9583
9584 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9585 if (subord->nested_region_asid_bitmap == NULL) {
9586 subord->nested_region_asid_bitmap_size = nested_region_asid_bitmap_size;
9587 subord->nested_region_addr = vstart;
9588 subord->nested_region_size = (mach_vm_offset_t) size;
9589
9590 /**
9591 * Ensure that the rest of the subord->nested_region_* fields are
9592 * initialized and visible before setting the nested_region_asid_bitmap
9593 * field (which is used as the flag to say that the rest are initialized).
9594 */
9595 __builtin_arm_dmb(DMB_ISHST);
9596 subord->nested_region_asid_bitmap = nested_region_asid_bitmap;
9597 nested_region_asid_bitmap = NULL;
9598 }
9599 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9600 if (nested_region_asid_bitmap != NULL) {
9601 #if XNU_MONITOR
9602 pmap_pages_free(kvtophys_nofail((vm_offset_t)nested_region_asid_bitmap), PAGE_SIZE);
9603 #else
9604 kfree_data(nested_region_asid_bitmap,
9605 nested_region_asid_bitmap_size * sizeof(unsigned int));
9606 #endif
9607 }
9608 }
9609
9610 /**
9611 * Ensure subsequent reads of the subord->nested_region_* fields don't get
9612 * speculated before their initialization.
9613 */
9614 __builtin_arm_dmb(DMB_ISHLD);
9615
9616 if ((subord->nested_region_addr + subord->nested_region_size) < vend) {
9617 uint64_t new_size;
9618 unsigned int new_nested_region_asid_bitmap_size;
9619 unsigned int* new_nested_region_asid_bitmap;
9620
9621 nested_region_asid_bitmap = NULL;
9622 nested_region_asid_bitmap_size = 0;
9623 new_size = vend - subord->nested_region_addr;
9624
9625 /* We explicitly add 1 to the bitmap allocation size in order to avoid issues with truncation. */
9626 new_nested_region_asid_bitmap_size = (unsigned int)((new_size >> pt_attr_twig_shift(pt_attr)) / (sizeof(unsigned int) * NBBY)) + 1;
9627
9628 #if XNU_MONITOR
9629 pmap_paddr_t pa = 0;
9630
9631 if (__improbable((new_nested_region_asid_bitmap_size * sizeof(unsigned int)) > PAGE_SIZE)) {
9632 panic("%s: new_nested_region_asid_bitmap_size=%u will not fit in a page, "
9633 "grand=%p, subord=%p, vstart=0x%llx, new_size=%llx",
9634 __FUNCTION__, new_nested_region_asid_bitmap_size,
9635 grand, subord, vstart, new_size);
9636 }
9637
9638 kr = pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, PMAP_PAGES_ALLOCATE_NOWAIT);
9639
9640 if (kr != KERN_SUCCESS) {
9641 goto nest_cleanup;
9642 }
9643
9644 assert(pa);
9645
9646 new_nested_region_asid_bitmap = (unsigned int *)phystokv(pa);
9647 #else
9648 new_nested_region_asid_bitmap = kalloc_data(
9649 new_nested_region_asid_bitmap_size * sizeof(unsigned int),
9650 Z_WAITOK | Z_ZERO);
9651 #endif
9652 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9653 if (subord->nested_region_size < new_size) {
			bcopy(subord->nested_region_asid_bitmap,
			    new_nested_region_asid_bitmap,
			    subord->nested_region_asid_bitmap_size * sizeof(unsigned int));
9656 nested_region_asid_bitmap_size = subord->nested_region_asid_bitmap_size;
9657 nested_region_asid_bitmap = subord->nested_region_asid_bitmap;
9658 subord->nested_region_asid_bitmap = new_nested_region_asid_bitmap;
9659 subord->nested_region_asid_bitmap_size = new_nested_region_asid_bitmap_size;
9660 subord->nested_region_size = new_size;
9661 new_nested_region_asid_bitmap = NULL;
9662 }
9663 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9664 if (nested_region_asid_bitmap != NULL) {
9665 #if XNU_MONITOR
9666 pmap_pages_free(kvtophys_nofail((vm_offset_t)nested_region_asid_bitmap), PAGE_SIZE);
9667 #else
9668 kfree_data(nested_region_asid_bitmap,
9669 nested_region_asid_bitmap_size * sizeof(unsigned int));
9670 #endif
9671 }
9672 if (new_nested_region_asid_bitmap != NULL) {
9673 #if XNU_MONITOR
9674 pmap_pages_free(kvtophys_nofail((vm_offset_t)new_nested_region_asid_bitmap), PAGE_SIZE);
9675 #else
9676 kfree_data(new_nested_region_asid_bitmap,
9677 new_nested_region_asid_bitmap_size * sizeof(unsigned int));
9678 #endif
9679 }
9680 }
9681
9682 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9683
9684 if (os_atomic_cmpxchg(&grand->nested_pmap, PMAP_NULL, subord, relaxed)) {
9685 /*
9686 * If this is grand's first nesting operation, keep the reference on subord.
9687 * It will be released by pmap_destroy_internal() when grand is destroyed.
9688 */
9689 deref_subord = false;
9690
9691 if (!subord->nested_bounds_set) {
9692 /*
			 * We are nesting without the shared region's bounds
9694 * being known. We'll have to trim the pmap later.
9695 */
9696 grand->nested_has_no_bounds_ref = true;
9697 subord->nested_no_bounds_refcnt++;
9698 }
9699
9700 grand->nested_region_addr = vstart;
9701 grand->nested_region_size = (mach_vm_offset_t) size;
9702 } else {
9703 if (__improbable(grand->nested_pmap != subord)) {
			panic("pmap_nest() pmap %p is already nested with a different pmap", grand);
9705 } else if (__improbable(grand->nested_region_addr > vstart)) {
9706 panic("pmap_nest() pmap %p : attempt to nest outside the nested region", grand);
9707 } else if ((grand->nested_region_addr + grand->nested_region_size) < vend) {
9708 grand->nested_region_size = (mach_vm_offset_t)(vstart - grand->nested_region_addr + size);
9709 }
9710 }
9711
9712 vaddr = vrestart & ~PMAP_NEST_GRAND;
9713 if (vaddr < subord->nested_region_true_start) {
9714 vaddr = subord->nested_region_true_start;
9715 }
9716
9717 addr64_t true_end = vend;
9718 if (true_end > subord->nested_region_true_end) {
9719 true_end = subord->nested_region_true_end;
9720 }
9721 __unused unsigned int ttecount = 0;
9722
9723 if (vrestart & PMAP_NEST_GRAND) {
9724 goto nest_grand;
9725 }
9726 #if (__ARM_VMSA__ == 7)
9727
9728 while (vaddr < true_end) {
9729 stte_p = pmap_tte(subord, vaddr);
9730 if ((stte_p == (tt_entry_t *)NULL) || (((*stte_p) & ARM_TTE_TYPE_MASK) != ARM_TTE_TYPE_TABLE)) {
9731 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9732 kr = pmap_expand(subord, vaddr, expand_options, PMAP_TT_L2_LEVEL);
9733
9734 if (kr != KERN_SUCCESS) {
9735 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9736 goto done;
9737 }
9738
9739 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9740 }
9741 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9742 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9743 stte_p = pmap_tte(grand, vaddr);
9744 if (stte_p == (tt_entry_t *)NULL) {
9745 pmap_unlock(grand, PMAP_LOCK_EXCLUSIVE);
9746 kr = pmap_expand(grand, vaddr, expand_options, PMAP_TT_L1_LEVEL);
9747
9748 if (kr != KERN_SUCCESS) {
9749 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9750 goto done;
9751 }
9752 } else {
9753 pmap_unlock(grand, PMAP_LOCK_EXCLUSIVE);
9754 kr = KERN_SUCCESS;
9755 }
9756 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9757 vaddr += ARM_TT_L1_SIZE;
9758 vrestart = vaddr;
9759 }
9760
9761 #else
9762 while (vaddr < true_end) {
9763 stte_p = pmap_tte(subord, vaddr);
9764 if (stte_p == PT_ENTRY_NULL || *stte_p == ARM_TTE_EMPTY) {
9765 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9766 kr = pmap_expand(subord, vaddr, expand_options, pt_attr_leaf_level(pt_attr));
9767
9768 if (kr != KERN_SUCCESS) {
9769 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9770 goto done;
9771 }
9772
9773 pmap_lock(subord, PMAP_LOCK_EXCLUSIVE);
9774 }
9775 vaddr += pt_attr_twig_size(pt_attr);
9776 vrestart = vaddr;
9777 ++ttecount;
9778 if (__improbable(!(ttecount % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
9779 pmap_pending_preemption())) {
9780 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9781 kr = KERN_SUCCESS;
9782 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9783 goto done;
9784 }
9785 }
9786 #endif
9787 /*
9788 * copy TTEs from subord pmap into grand pmap
9789 */
9790
9791 vaddr = (vm_map_offset_t) vstart;
9792 if (vaddr < subord->nested_region_true_start) {
9793 vaddr = subord->nested_region_true_start;
9794 }
9795 vrestart = vaddr | PMAP_NEST_GRAND;
9796
9797 nest_grand:
9798 pmap_unlock(subord, PMAP_LOCK_EXCLUSIVE);
9799 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9800 #if (__ARM_VMSA__ == 7)
9801 while (vaddr < true_end) {
9802 stte_p = pmap_tte(subord, vaddr);
9803 gtte_p = pmap_tte(grand, vaddr);
9804 if (__improbable(*gtte_p != ARM_TTE_EMPTY)) {
9805 panic("%s: attempting to overwrite non-empty TTE %p in pmap %p",
9806 __func__, gtte_p, grand);
9807 }
9808 *gtte_p = *stte_p;
9809 vaddr += ARM_TT_L1_SIZE;
9810 }
9811 vrestart = vaddr | PMAP_NEST_GRAND;
9812 #else
9813 while (vaddr < true_end) {
9814 stte_p = pmap_tte(subord, vaddr);
9815 gtte_p = pmap_tte(grand, vaddr);
9816 if (gtte_p == PT_ENTRY_NULL) {
9817 pmap_unlock(grand, PMAP_LOCK_EXCLUSIVE);
9818 kr = pmap_expand(grand, vaddr, expand_options, pt_attr_twig_level(pt_attr));
9819 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
9820
9821 if (kr != KERN_SUCCESS) {
9822 goto done;
9823 }
9824
9825 gtte_p = pmap_tt2e(grand, vaddr);
9826 }
9827 /* Don't leak a page table page. Don't violate break-before-make. */
9828 if (__improbable(*gtte_p != ARM_TTE_EMPTY)) {
9829 panic("%s: attempting to overwrite non-empty TTE %p in pmap %p",
9830 __func__, gtte_p, grand);
9831 }
9832 *gtte_p = *stte_p;
9833
9834 vaddr += pt_attr_twig_size(pt_attr);
9835 vrestart = vaddr | PMAP_NEST_GRAND;
9836 ++ttecount;
9837 if (__improbable(!(ttecount % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
9838 pmap_pending_preemption())) {
9839 break;
9840 }
9841 }
9842 #endif
9843 if (vaddr >= true_end) {
9844 vrestart = vend | PMAP_NEST_GRAND;
9845 }
9846
9847 kr = KERN_SUCCESS;
9848 done:
9849
9850 FLUSH_PTE();
9851 __builtin_arm_isb(ISB_SY);
9852
9853 pmap_unlock(grand, PMAP_LOCK_EXCLUSIVE);
9854 #if XNU_MONITOR
9855 nest_cleanup:
9856 if (kr != KERN_SUCCESS) {
9857 pmap_pin_kernel_pages((vm_offset_t)krp, sizeof(*krp));
9858 *krp = kr;
9859 pmap_unpin_kernel_pages((vm_offset_t)krp, sizeof(*krp));
9860 }
9861 #else
9862 if (kr != KERN_SUCCESS) {
9863 *krp = kr;
9864 }
9865 #endif
9866 if (deref_subord) {
9867 #if XNU_MONITOR
9868 os_atomic_dec(&subord->nested_count, relaxed);
9869 #endif
9870 pmap_destroy_internal(subord);
9871 }
9872 return vrestart;
9873 }
9874
9875 kern_return_t
9876 pmap_nest(
9877 pmap_t grand,
9878 pmap_t subord,
9879 addr64_t vstart,
9880 uint64_t size)
9881 {
9882 kern_return_t kr = KERN_SUCCESS;
9883 vm_map_offset_t vaddr = (vm_map_offset_t)vstart;
9884 vm_map_offset_t vend = vaddr + size;
9885 __unused vm_map_offset_t vlast = vaddr;
9886
9887 PMAP_TRACE(2, PMAP_CODE(PMAP__NEST) | DBG_FUNC_START,
9888 VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(subord),
9889 VM_KERNEL_ADDRHIDE(vstart));
9890
9891 pmap_verify_preemptible();
9892 #if XNU_MONITOR
9893 while (vaddr != (vend | PMAP_NEST_GRAND)) {
9894 vaddr = pmap_nest_ppl(grand, subord, vstart, size, vaddr, &kr);
9895 if (kr == KERN_RESOURCE_SHORTAGE) {
9896 pmap_alloc_page_for_ppl(0);
9897 kr = KERN_SUCCESS;
9898 } else if (kr != KERN_SUCCESS) {
9899 break;
9900 } else if (vaddr == vlast) {
9901 panic("%s: failed to make forward progress from 0x%llx to 0x%llx at 0x%llx",
9902 __func__, (unsigned long long)vstart, (unsigned long long)vend, (unsigned long long)vaddr);
9903 }
9904 vlast = vaddr;
9905 }
9906
9907 pmap_ledger_check_balance(grand);
9908 pmap_ledger_check_balance(subord);
9909 #else
9910 while ((vaddr != (vend | PMAP_NEST_GRAND)) && (kr == KERN_SUCCESS)) {
9911 vaddr = pmap_nest_internal(grand, subord, vstart, size, vaddr, &kr);
9912 }
9913 #endif
9914
9915 PMAP_TRACE(2, PMAP_CODE(PMAP__NEST) | DBG_FUNC_END, kr);
9916
9917 return kr;
9918 }
9919
/*
 * kern_return_t pmap_unnest(grand, vaddr, size)
 *
 * grand = the pmap that will have the virtual range unnested
 * vaddr = start of range in pmap to be unnested
 * size = size of range in pmap to be unnested
 *
 */
9928
9929 kern_return_t
9930 pmap_unnest(
9931 pmap_t grand,
9932 addr64_t vaddr,
9933 uint64_t size)
9934 {
9935 return pmap_unnest_options(grand, vaddr, size, 0);
9936 }
9937
9938 /**
9939 * Undoes a prior pmap_nest() operation by removing a range of nesting mappings
9940 * from a top-level pmap ('grand'). The corresponding mappings in the nested
9941 * pmap will be marked non-global to avoid TLB conflicts with pmaps that may
9942 * still have the region nested. The mappings in 'grand' will be left empty
9943 * with the assumption that they will be demand-filled by subsequent access faults.
9944 *
9945 * This function operates in 2 main phases:
9946 * 1. Iteration over the nested pmap's mappings for the specified range to mark
9947 * them non-global.
9948 * 2. Clearing of the twig-level TTEs for the address range in grand.
9949 *
9950 * This function may return early due to pending AST_URGENT preemption; if so
9951 * it will indicate the need to be re-entered.
9952 *
9953 * @param grand pmap from which to unnest mappings
9954 * @param vaddr twig-aligned virtual address for the beginning of the nested range
9955 * @param size twig-aligned size of the nested range
9956 * @param vrestart the page-aligned starting address of the current call. May contain
9957 * PMAP_NEST_GRAND in bit 0 to indicate the operation should skip to step 2) above.
9958 * @param option Extra control flags; may contain PMAP_UNNEST_CLEAN to indicate that
9959 * grand is being torn down and step 1) above is not needed.
9960 *
9961 * @return the virtual address at which to restart the operation, possibly including
9962 * PMAP_NEST_GRAND to indicate the phase at which to restart. If
9963 * (vaddr + size) | PMAP_NEST_GRAND is returned, the operation completed.
9964 */
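/*
 * Worked example (illustrative, assuming a 32MB twig span): for a nested region
 * based at 0x180000000, an unnest restarting at vrestart == 0x184000000 resumes
 * at ASID-bitmap index (0x184000000 - 0x180000000) >> 25 == 2, i.e. the third
 * twig-sized chunk of the region.
 */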
9965 MARK_AS_PMAP_TEXT vm_map_offset_t
9966 pmap_unnest_options_internal(
9967 pmap_t grand,
9968 addr64_t vaddr,
9969 uint64_t size,
9970 vm_map_offset_t vrestart,
9971 unsigned int option)
9972 {
9973 vm_map_offset_t start;
9974 vm_map_offset_t addr;
9975 tt_entry_t *tte_p;
9976 unsigned int current_index;
9977 unsigned int start_index;
9978 unsigned int max_index;
9979 unsigned int entry_count = 0;
9980
9981 addr64_t vend;
9982 addr64_t true_end;
9983 if (__improbable(os_add_overflow(vaddr, size, &vend))) {
9984 panic("%s: %p vaddr wraps around: 0x%llx + 0x%llx", __func__, grand, vaddr, size);
9985 }
9986 if (__improbable(((vrestart & ~PMAP_NEST_GRAND) > vend) ||
9987 ((vrestart & ~PMAP_NEST_GRAND) < vaddr))) {
9988 panic("%s: vrestart 0x%llx is outside range [0x%llx, 0x%llx)", __func__,
9989 (unsigned long long)vrestart, (unsigned long long)vaddr, (unsigned long long)vend);
9990 }
9991
9992 validate_pmap_mutable(grand);
9993
9994 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(grand);
9995
9996 if (__improbable(((size | vaddr) & pt_attr_twig_offmask(pt_attr)) != 0x0ULL)) {
9997 panic("%s: unaligned base address 0x%llx or size 0x%llx", __func__,
9998 (unsigned long long)vaddr, (unsigned long long)size);
9999 }
10000
10001 if (__improbable(grand->nested_pmap == NULL)) {
10002 panic("%s: %p has no nested pmap", __func__, grand);
10003 }
10004
10005 true_end = vend;
10006 if (true_end > grand->nested_pmap->nested_region_true_end) {
10007 true_end = grand->nested_pmap->nested_region_true_end;
10008 }
10009
10010 if (((option & PMAP_UNNEST_CLEAN) == 0) && !(vrestart & PMAP_NEST_GRAND)) {
10011 if ((vaddr < grand->nested_region_addr) || (vend > (grand->nested_region_addr + grand->nested_region_size))) {
			panic("%s: %p: unnest request for not-fully-nested region [%p, %p)", __func__, grand, (void*)vaddr, (void*)vend);
10013 }
10014
10015 pmap_lock(grand->nested_pmap, PMAP_LOCK_EXCLUSIVE);
10016
10017 start = vrestart;
10018 if (start < grand->nested_pmap->nested_region_true_start) {
10019 start = grand->nested_pmap->nested_region_true_start;
10020 }
10021 start_index = (unsigned int)((start - grand->nested_region_addr) >> pt_attr_twig_shift(pt_attr));
10022 max_index = (unsigned int)((true_end - grand->nested_region_addr) >> pt_attr_twig_shift(pt_attr));
10023 bool flush_tlb = false;
10024
10025 for (current_index = start_index, addr = start; current_index < max_index; current_index++) {
10026 pt_entry_t *bpte, *cpte;
10027
10028 vm_map_offset_t vlim = (addr + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr);
10029
10030 bpte = pmap_pte(grand->nested_pmap, addr);
10031
10032 /*
10033 * If we've re-entered this function partway through unnesting a leaf region, the
10034 * 'unnest' bit will be set in the ASID bitmap, but we won't have finished updating
10035 * the run of PTEs. We therefore also need to check for a non-twig-aligned starting
10036 * address.
10037 */
10038 if (!testbit(current_index, (int *)grand->nested_pmap->nested_region_asid_bitmap) ||
10039 (addr & pt_attr_twig_offmask(pt_attr))) {
10040 /*
10041 * Mark the 'twig' region as being unnested. Every mapping entered within
10042 * the nested pmap in this region will now be marked non-global. Do this
10043 * before marking any of the PTEs within the region as non-global to avoid
10044 * the possibility of pmap_enter() subsequently inserting a global mapping
10045 * in the region, which could lead to a TLB conflict if a non-global entry
10046 * is later inserted for the same VA in a pmap which has fully unnested this
10047 * region.
10048 */
10049 setbit(current_index, (int *)grand->nested_pmap->nested_region_asid_bitmap);
10050 for (cpte = bpte; (bpte != NULL) && (addr < vlim); cpte += PAGE_RATIO) {
10051 pmap_paddr_t pa;
10052 unsigned int pai = 0;
10053 boolean_t managed = FALSE;
10054 pt_entry_t spte;
10055
10056 if ((*cpte != ARM_PTE_TYPE_FAULT)
10057 && (!ARM_PTE_IS_COMPRESSED(*cpte, cpte))) {
10058 spte = *((volatile pt_entry_t*)cpte);
10059 while (!managed) {
10060 pa = pte_to_pa(spte);
10061 if (!pa_valid(pa)) {
10062 break;
10063 }
10064 pai = pa_index(pa);
10065 pvh_lock(pai);
10066 spte = *((volatile pt_entry_t*)cpte);
10067 pa = pte_to_pa(spte);
10068 if (pai == pa_index(pa)) {
10069 managed = TRUE;
10070 break; // Leave the PVH locked as we'll unlock it after we update the PTE
10071 }
10072 pvh_unlock(pai);
10073 }
10074
10075 if (((spte & ARM_PTE_NG) != ARM_PTE_NG)) {
10076 write_pte_fast(cpte, (spte | ARM_PTE_NG));
10077 flush_tlb = true;
10078 }
10079
10080 if (managed) {
10081 pvh_assert_locked(pai);
10082 pvh_unlock(pai);
10083 }
10084 }
10085
10086 addr += (pt_attr_page_size(pt_attr) * PAGE_RATIO);
10087 vrestart = addr;
10088 ++entry_count;
10089 if (__improbable(!(entry_count % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
10090 pmap_pending_preemption())) {
10091 goto unnest_subord_done;
10092 }
10093 }
10094 }
10095 addr = vlim;
10096 vrestart = addr;
10097 ++entry_count;
10098 if (__improbable(!(entry_count % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
10099 pmap_pending_preemption())) {
10100 break;
10101 }
10102 }
10103
10104 unnest_subord_done:
10105 if (flush_tlb) {
10106 FLUSH_PTE_STRONG();
10107 PMAP_UPDATE_TLBS(grand->nested_pmap, start, vrestart, false, true);
10108 }
10109
10110 pmap_unlock(grand->nested_pmap, PMAP_LOCK_EXCLUSIVE);
10111 if (current_index < max_index) {
10112 return vrestart;
10113 }
10114 }
10115
10116 pmap_lock(grand, PMAP_LOCK_EXCLUSIVE);
10117
10118 /*
10119 * invalidate all pdes for segment at vaddr in pmap grand
10120 */
10121 if (vrestart & PMAP_NEST_GRAND) {
10122 addr = vrestart & ~PMAP_NEST_GRAND;
		if (__improbable((addr & pt_attr_twig_offmask(pt_attr)) != 0x0ULL)) {
10124 panic("%s: unaligned vrestart 0x%llx", __func__, (unsigned long long)addr);
10125 }
10126 } else {
10127 addr = vaddr;
10128 vrestart = vaddr | PMAP_NEST_GRAND;
10129 }
10130
	if (addr < grand->nested_pmap->nested_region_true_start) {
		addr = grand->nested_pmap->nested_region_true_start;
	}

	/* Record where this phase begins so the TLB flush below covers the full range of cleared TTEs. */
	start = addr;

10135 while (addr < true_end) {
10136 tte_p = pmap_tte(grand, addr);
10137 /*
10138 * The nested pmap may have been trimmed before pmap_nest() completed for grand,
10139 * so it's possible that a region we're trying to unnest may not have been
10140 * nested in the first place.
10141 */
10142 if (tte_p != NULL) {
10143 *tte_p = ARM_TTE_TYPE_FAULT;
10144 }
10145 addr += pt_attr_twig_size(pt_attr);
10146 vrestart = addr | PMAP_NEST_GRAND;
10147 ++entry_count;
10148 if (__improbable(!(entry_count % PMAP_DEFAULT_PREEMPTION_CHECK_PAGE_INTERVAL) &&
10149 pmap_pending_preemption())) {
10150 break;
10151 }
10152 }
10153 if (addr >= true_end) {
10154 vrestart = vend | PMAP_NEST_GRAND;
10155 }
10156
10157 FLUSH_PTE_STRONG();
10158 PMAP_UPDATE_TLBS(grand, start, addr, false, false);
10159
10160 pmap_unlock(grand, PMAP_LOCK_EXCLUSIVE);
10161
10162 return vrestart;
10163 }
10164
10165 kern_return_t
10166 pmap_unnest_options(
10167 pmap_t grand,
10168 addr64_t vaddr,
10169 uint64_t size,
10170 unsigned int option)
10171 {
10172 vm_map_offset_t vrestart = (vm_map_offset_t)vaddr;
10173 vm_map_offset_t vend = vaddr + size;
10174 __unused vm_map_offset_t vlast = vrestart;
10175
10176 PMAP_TRACE(2, PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_START,
10177 VM_KERNEL_ADDRHIDE(grand), VM_KERNEL_ADDRHIDE(vaddr));
10178
10179 pmap_verify_preemptible();
10180 while (vrestart != (vend | PMAP_NEST_GRAND)) {
10181 #if XNU_MONITOR
10182 vrestart = pmap_unnest_options_ppl(grand, vaddr, size, vrestart, option);
10183 if (vrestart == vlast) {
10184 panic("%s: failed to make forward progress from 0x%llx to 0x%llx at 0x%llx",
10185 __func__, (unsigned long long)vaddr, (unsigned long long)vend, (unsigned long long)vrestart);
10186 }
10187 vlast = vrestart;
10188 #else
10189 vrestart = pmap_unnest_options_internal(grand, vaddr, size, vrestart, option);
10190 #endif
10191 }
10192
10193 PMAP_TRACE(2, PMAP_CODE(PMAP__UNNEST) | DBG_FUNC_END, KERN_SUCCESS);
10194
10195 return KERN_SUCCESS;
10196 }
10197
10198 boolean_t
10199 pmap_adjust_unnest_parameters(
10200 __unused pmap_t p,
10201 __unused vm_map_offset_t *s,
10202 __unused vm_map_offset_t *e)
10203 {
10204 return TRUE; /* to get to log_unnest_badness()... */
10205 }
10206
10207 /*
10208 * disable no-execute capability on
10209 * the specified pmap
10210 */
10211 #if DEVELOPMENT || DEBUG
10212 void
10213 pmap_disable_NX(
10214 pmap_t pmap)
10215 {
10216 pmap->nx_enabled = FALSE;
10217 }
10218 #else
10219 void
10220 pmap_disable_NX(
10221 __unused pmap_t pmap)
10222 {
10223 }
10224 #endif
10225
10226 /*
10227 * flush a range of hardware TLB entries.
10228 * NOTE: assumes the smallest TLB entry in use will be for
10229 * an ARM small page (4K).
10230 */
10231
10232 #define ARM_FULL_TLB_FLUSH_THRESHOLD 64
10233
10234 #if __ARM_RANGE_TLBI__
10235 #define ARM64_RANGE_TLB_FLUSH_THRESHOLD 1
10236 #define ARM64_FULL_TLB_FLUSH_THRESHOLD ARM64_TLB_RANGE_PAGES
10237 #else
10238 #define ARM64_FULL_TLB_FLUSH_THRESHOLD 256
10239 #endif // __ARM_RANGE_TLBI__
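
/*
 * Flush-selection sketch (illustrative) for the arm64 path below: the style of
 * invalidation degrades as the range grows.
 *
 *   if (npages > ARM64_FULL_TLB_FLUSH_THRESHOLD)        -> full TLB or full-ASID flush
 *   else if (npages > ARM64_RANGE_TLB_FLUSH_THRESHOLD)  -> one range TLBI (when supported)
 *   else                                                -> per-entry TLBI loop
 */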
10240
10241 static void
10242 flush_mmu_tlb_region_asid_async(
10243 vm_offset_t va,
10244 size_t length,
10245 pmap_t pmap,
10246 bool last_level_only __unused)
10247 {
10248 #if (__ARM_VMSA__ == 7)
10249 vm_offset_t end = va + length;
10250 uint32_t asid;
10251
10252 asid = pmap->hw_asid;
10253
10254 if (length / ARM_SMALL_PAGE_SIZE > ARM_FULL_TLB_FLUSH_THRESHOLD) {
10255 boolean_t flush_all = FALSE;
10256
10257 if ((asid == 0) || (pmap->type == PMAP_TYPE_NESTED)) {
10258 flush_all = TRUE;
10259 }
10260 if (flush_all) {
10261 flush_mmu_tlb_async();
10262 } else {
10263 flush_mmu_tlb_asid_async(asid);
10264 }
10265
10266 return;
10267 }
10268 if (pmap->type == PMAP_TYPE_NESTED) {
10269 #if !__ARM_MP_EXT__
10270 flush_mmu_tlb();
10271 #else
10272 va = arm_trunc_page(va);
10273 while (va < end) {
10274 flush_mmu_tlb_mva_entries_async(va);
10275 va += ARM_SMALL_PAGE_SIZE;
10276 }
10277 #endif
10278 return;
10279 }
10280 va = arm_trunc_page(va) | (asid & 0xff);
10281 flush_mmu_tlb_entries_async(va, end);
10282
10283 #else
10284 unsigned long pmap_page_shift = pt_attr_leaf_shift(pmap_get_pt_attr(pmap));
10285 const uint64_t pmap_page_size = 1ULL << pmap_page_shift;
10286 ppnum_t npages = (ppnum_t)(length >> pmap_page_shift);
10287 uint32_t asid;
10288
10289 asid = pmap->hw_asid;
10290
10291 if (npages > ARM64_FULL_TLB_FLUSH_THRESHOLD) {
10292 boolean_t flush_all = FALSE;
10293
10294 if ((asid == 0) || (pmap->type == PMAP_TYPE_NESTED)) {
10295 flush_all = TRUE;
10296 }
10297 if (flush_all) {
10298 flush_mmu_tlb_async();
10299 } else {
10300 flush_mmu_tlb_asid_async((uint64_t)asid << TLBI_ASID_SHIFT);
10301 }
10302 return;
10303 }
10304 #if __ARM_RANGE_TLBI__
10305 if (npages > ARM64_RANGE_TLB_FLUSH_THRESHOLD) {
10306 va = generate_rtlbi_param(npages, asid, va, pmap_page_shift);
10307 if (pmap->type == PMAP_TYPE_NESTED) {
10308 flush_mmu_tlb_allrange_async(va, last_level_only);
10309 } else {
10310 flush_mmu_tlb_range_async(va, last_level_only);
10311 }
10312 return;
10313 }
10314 #endif
10315 vm_offset_t end = tlbi_asid(asid) | tlbi_addr(va + length);
10316 va = tlbi_asid(asid) | tlbi_addr(va);
10317
10318 if (pmap->type == PMAP_TYPE_NESTED) {
10319 flush_mmu_tlb_allentries_async(va, end, pmap_page_size, last_level_only);
10320 } else {
10321 flush_mmu_tlb_entries_async(va, end, pmap_page_size, last_level_only);
10322 }
10323
10324 #endif
10325 }
10326
10327 MARK_AS_PMAP_TEXT static void
10328 flush_mmu_tlb_full_asid_async(pmap_t pmap)
10329 {
10330 #if (__ARM_VMSA__ == 7)
10331 flush_mmu_tlb_asid_async(pmap->hw_asid);
10332 #else /* (__ARM_VMSA__ == 7) */
10333 flush_mmu_tlb_asid_async((uint64_t)(pmap->hw_asid) << TLBI_ASID_SHIFT);
10334 #endif /* (__ARM_VMSA__ == 7) */
10335 }
10336
10337 void
10338 flush_mmu_tlb_region(
10339 vm_offset_t va,
10340 unsigned length)
10341 {
10342 flush_mmu_tlb_region_asid_async(va, length, kernel_pmap, true);
10343 sync_tlb_flush();
10344 }
10345
10346 unsigned int
10347 pmap_cache_attributes(
10348 ppnum_t pn)
10349 {
10350 pmap_paddr_t paddr;
10351 unsigned int pai;
10352 unsigned int result;
10353 pp_attr_t pp_attr_current;
10354
10355 paddr = ptoa(pn);
10356
10357 assert(vm_last_phys > vm_first_phys); // Check that pmap has been bootstrapped
10358
10359 if (!pa_valid(paddr)) {
10360 pmap_io_range_t *io_rgn = pmap_find_io_attr(paddr);
10361 return (io_rgn == NULL) ? VM_WIMG_IO : io_rgn->wimg;
10362 }
10363
10364 result = VM_WIMG_DEFAULT;
10365
10366 pai = pa_index(paddr);
10367
10368 pp_attr_current = pp_attr_table[pai];
10369 if (pp_attr_current & PP_ATTR_WIMG_MASK) {
10370 result = pp_attr_current & PP_ATTR_WIMG_MASK;
10371 }
10372 return result;
10373 }
10374
10375 MARK_AS_PMAP_TEXT static void
10376 pmap_sync_wimg(ppnum_t pn, unsigned int wimg_bits_prev, unsigned int wimg_bits_new)
10377 {
10378 if ((wimg_bits_prev != wimg_bits_new)
10379 && ((wimg_bits_prev == VM_WIMG_COPYBACK)
10380 || ((wimg_bits_prev == VM_WIMG_INNERWBACK)
10381 && (wimg_bits_new != VM_WIMG_COPYBACK))
10382 || ((wimg_bits_prev == VM_WIMG_WTHRU)
	    && ((wimg_bits_new != VM_WIMG_COPYBACK) && (wimg_bits_new != VM_WIMG_INNERWBACK))))) {
10384 pmap_sync_page_attributes_phys(pn);
10385 }
10386
10387 if ((wimg_bits_new == VM_WIMG_RT) && (wimg_bits_prev != VM_WIMG_RT)) {
10388 pmap_force_dcache_clean(phystokv(ptoa(pn)), PAGE_SIZE);
10389 }
10390 }
10391
10392 MARK_AS_PMAP_TEXT __unused void
10393 pmap_update_compressor_page_internal(ppnum_t pn, unsigned int prev_cacheattr, unsigned int new_cacheattr)
10394 {
10395 pmap_paddr_t paddr = ptoa(pn);
10396 const unsigned int pai = pa_index(paddr);
10397
10398 if (__improbable(!pa_valid(paddr))) {
10399 panic("%s called on non-managed page 0x%08x", __func__, pn);
10400 }
10401
10402 pvh_lock(pai);
10403
10404 #if XNU_MONITOR
10405 if (__improbable(ppattr_pa_test_monitor(paddr))) {
10406 panic("%s invoked on PPL page 0x%08x", __func__, pn);
10407 }
10408 #endif
10409
10410 pmap_update_cache_attributes_locked(pn, new_cacheattr);
10411
10412 pvh_unlock(pai);
10413
10414 pmap_sync_wimg(pn, prev_cacheattr & VM_WIMG_MASK, new_cacheattr & VM_WIMG_MASK);
10415 }
10416
10417 void *
10418 pmap_map_compressor_page(ppnum_t pn)
10419 {
10420 #if __ARM_PTE_PHYSMAP__
10421 unsigned int cacheattr = pmap_cache_attributes(pn) & VM_WIMG_MASK;
10422 if (cacheattr != VM_WIMG_DEFAULT) {
10423 #if XNU_MONITOR
10424 pmap_update_compressor_page_ppl(pn, cacheattr, VM_WIMG_DEFAULT);
10425 #else
10426 pmap_update_compressor_page_internal(pn, cacheattr, VM_WIMG_DEFAULT);
10427 #endif
10428 }
10429 #endif
10430 return (void*)phystokv(ptoa(pn));
10431 }
10432
10433 void
10434 pmap_unmap_compressor_page(ppnum_t pn __unused, void *kva __unused)
10435 {
10436 #if __ARM_PTE_PHYSMAP__
10437 unsigned int cacheattr = pmap_cache_attributes(pn) & VM_WIMG_MASK;
10438 if (cacheattr != VM_WIMG_DEFAULT) {
10439 #if XNU_MONITOR
10440 pmap_update_compressor_page_ppl(pn, VM_WIMG_DEFAULT, cacheattr);
10441 #else
10442 pmap_update_compressor_page_internal(pn, VM_WIMG_DEFAULT, cacheattr);
10443 #endif
10444 }
10445 #endif
10446 }
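
/*
 * Usage sketch (illustrative): the compressor brackets direct physical-aperture
 * access with this pair so the page is temporarily VM_WIMG_DEFAULT:
 *
 *   void *kva = pmap_map_compressor_page(pn);
 *   ... compress or decompress through 'kva' ...
 *   pmap_unmap_compressor_page(pn, kva);
 */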
10447
10448 MARK_AS_PMAP_TEXT boolean_t
10449 pmap_batch_set_cache_attributes_internal(
10450 ppnum_t pn,
10451 unsigned int cacheattr,
10452 unsigned int page_cnt,
10453 unsigned int page_index,
10454 boolean_t doit,
10455 unsigned int *res)
10456 {
10457 pmap_paddr_t paddr;
10458 unsigned int pai;
10459 pp_attr_t pp_attr_current;
10460 pp_attr_t pp_attr_template;
10461 unsigned int wimg_bits_prev, wimg_bits_new;
10462
10463 if (cacheattr & VM_WIMG_USE_DEFAULT) {
10464 cacheattr = VM_WIMG_DEFAULT;
10465 }
10466
10467 if ((doit == FALSE) && (*res == 0)) {
10468 pmap_pin_kernel_pages((vm_offset_t)res, sizeof(*res));
10469 *res = page_cnt;
10470 pmap_unpin_kernel_pages((vm_offset_t)res, sizeof(*res));
10471 if (platform_cache_batch_wimg(cacheattr & (VM_WIMG_MASK), page_cnt << PAGE_SHIFT) == FALSE) {
10472 return FALSE;
10473 }
10474 }
10475
10476 paddr = ptoa(pn);
10477
10478 if (!pa_valid(paddr)) {
10479 panic("pmap_batch_set_cache_attributes(): pn 0x%08x not managed", pn);
10480 }
10481
10482 pai = pa_index(paddr);
10483
10484 if (doit) {
10485 pvh_lock(pai);
10486 #if XNU_MONITOR
10487 if (ppattr_pa_test_monitor(paddr)) {
10488 panic("%s invoked on PPL page 0x%llx", __func__, (uint64_t)paddr);
10489 }
10490 #endif
10491 }
10492
10493 do {
10494 pp_attr_current = pp_attr_table[pai];
10495 wimg_bits_prev = VM_WIMG_DEFAULT;
10496 if (pp_attr_current & PP_ATTR_WIMG_MASK) {
10497 wimg_bits_prev = pp_attr_current & PP_ATTR_WIMG_MASK;
10498 }
10499
10500 pp_attr_template = (pp_attr_current & ~PP_ATTR_WIMG_MASK) | PP_ATTR_WIMG(cacheattr & (VM_WIMG_MASK));
10501
10502 if (!doit) {
10503 break;
10504 }
10505
10506 /* WIMG bits should only be updated under the PVH lock, but we should do this in a CAS loop
10507 * to avoid losing simultaneous updates to other bits like refmod. */
10508 } while (!OSCompareAndSwap16(pp_attr_current, pp_attr_template, &pp_attr_table[pai]));
10509
10510 wimg_bits_new = VM_WIMG_DEFAULT;
10511 if (pp_attr_template & PP_ATTR_WIMG_MASK) {
10512 wimg_bits_new = pp_attr_template & PP_ATTR_WIMG_MASK;
10513 }
10514
10515 if (doit) {
10516 if (wimg_bits_new != wimg_bits_prev) {
10517 pmap_update_cache_attributes_locked(pn, cacheattr);
10518 }
10519 pvh_unlock(pai);
10520 if ((wimg_bits_new == VM_WIMG_RT) && (wimg_bits_prev != VM_WIMG_RT)) {
10521 pmap_force_dcache_clean(phystokv(paddr), PAGE_SIZE);
10522 }
10523 } else {
10524 if (wimg_bits_new == VM_WIMG_COPYBACK) {
10525 return FALSE;
10526 }
10527 if (wimg_bits_prev == wimg_bits_new) {
10528 pmap_pin_kernel_pages((vm_offset_t)res, sizeof(*res));
10529 *res = *res - 1;
10530 pmap_unpin_kernel_pages((vm_offset_t)res, sizeof(*res));
10531 if (!platform_cache_batch_wimg(wimg_bits_new, (*res) << PAGE_SHIFT)) {
10532 return FALSE;
10533 }
10534 }
10535 return TRUE;
10536 }
10537
	if (page_cnt == (page_index + 1)) {
		/*
		 * On the final page of the batch, conservatively assume the
		 * previous attribute was copyback when deciding whether the
		 * batched flush is needed.
		 */
		wimg_bits_prev = VM_WIMG_COPYBACK;
		if ((wimg_bits_prev != wimg_bits_new)
		    && ((wimg_bits_prev == VM_WIMG_COPYBACK)
		    || ((wimg_bits_prev == VM_WIMG_INNERWBACK)
		    && (wimg_bits_new != VM_WIMG_COPYBACK))
		    || ((wimg_bits_prev == VM_WIMG_WTHRU)
		    && ((wimg_bits_new != VM_WIMG_COPYBACK) && (wimg_bits_new != VM_WIMG_INNERWBACK))))) {
			platform_cache_flush_wimg(wimg_bits_new);
		}
	}
10549
10550 return TRUE;
10551 }
10552
10553 boolean_t
10554 pmap_batch_set_cache_attributes(
10555 ppnum_t pn,
10556 unsigned int cacheattr,
10557 unsigned int page_cnt,
10558 unsigned int page_index,
10559 boolean_t doit,
10560 unsigned int *res)
10561 {
10562 #if XNU_MONITOR
10563 return pmap_batch_set_cache_attributes_ppl(pn, cacheattr, page_cnt, page_index, doit, res);
10564 #else
10565 return pmap_batch_set_cache_attributes_internal(pn, cacheattr, page_cnt, page_index, doit, res);
10566 #endif
10567 }
10568
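/**
 * Common implementation for updating a single page's cache attributes.
 *
 * @param pn The physical page number to update.
 * @param cacheattr The new cache attributes.
 * @param external On PPL systems, whether the request originated outside the
 * PPL; used to sanity-check the page's ownership.
 */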
10569 MARK_AS_PMAP_TEXT static void
10570 pmap_set_cache_attributes_priv(
10571 ppnum_t pn,
10572 unsigned int cacheattr,
10573 boolean_t external __unused)
10574 {
10575 pmap_paddr_t paddr;
10576 unsigned int pai;
10577 pp_attr_t pp_attr_current;
10578 pp_attr_t pp_attr_template;
10579 unsigned int wimg_bits_prev, wimg_bits_new;
10580
10581 paddr = ptoa(pn);
10582
10583 if (!pa_valid(paddr)) {
10584 return; /* Not a managed page. */
10585 }
10586
10587 if (cacheattr & VM_WIMG_USE_DEFAULT) {
10588 cacheattr = VM_WIMG_DEFAULT;
10589 }
10590
10591 pai = pa_index(paddr);
10592
10593 pvh_lock(pai);
10594
10595 #if XNU_MONITOR
10596 if (external && ppattr_pa_test_monitor(paddr)) {
10597 panic("%s invoked on PPL page 0x%llx", __func__, (uint64_t)paddr);
10598 } else if (!external && !ppattr_pa_test_monitor(paddr)) {
10599 panic("%s invoked on non-PPL page 0x%llx", __func__, (uint64_t)paddr);
10600 }
10601 #endif
10602
10603 do {
10604 pp_attr_current = pp_attr_table[pai];
10605 wimg_bits_prev = VM_WIMG_DEFAULT;
10606 if (pp_attr_current & PP_ATTR_WIMG_MASK) {
10607 wimg_bits_prev = pp_attr_current & PP_ATTR_WIMG_MASK;
10608 }
10609
10610 pp_attr_template = (pp_attr_current & ~PP_ATTR_WIMG_MASK) | PP_ATTR_WIMG(cacheattr & (VM_WIMG_MASK));
10611
10612 /* WIMG bits should only be updated under the PVH lock, but we should do this in a CAS loop
10613 * to avoid losing simultaneous updates to other bits like refmod. */
10614 } while (!OSCompareAndSwap16(pp_attr_current, pp_attr_template, &pp_attr_table[pai]));
10615
10616 wimg_bits_new = VM_WIMG_DEFAULT;
10617 if (pp_attr_template & PP_ATTR_WIMG_MASK) {
10618 wimg_bits_new = pp_attr_template & PP_ATTR_WIMG_MASK;
10619 }
10620
10621 if (wimg_bits_new != wimg_bits_prev) {
10622 pmap_update_cache_attributes_locked(pn, cacheattr);
10623 }
10624
10625 pvh_unlock(pai);
10626
10627 pmap_sync_wimg(pn, wimg_bits_prev, wimg_bits_new);
10628 }
10629
10630 MARK_AS_PMAP_TEXT void
10631 pmap_set_cache_attributes_internal(
10632 ppnum_t pn,
10633 unsigned int cacheattr)
10634 {
10635 pmap_set_cache_attributes_priv(pn, cacheattr, TRUE);
10636 }
10637
10638 void
10639 pmap_set_cache_attributes(
10640 ppnum_t pn,
10641 unsigned int cacheattr)
10642 {
10643 #if XNU_MONITOR
10644 pmap_set_cache_attributes_ppl(pn, cacheattr);
10645 #else
10646 pmap_set_cache_attributes_internal(pn, cacheattr);
10647 #endif
10648 }
10649
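/**
 * Update every mapping of a physical page to reflect new cache attributes.
 * This rewrites the physical aperture PTE (where one exists) as well as the
 * PTE for each mapping recorded in the page's PV list, then synchronizes the
 * TLB.
 *
 * @note The PVH lock for the page must be held.
 *
 * @param ppnum The physical page number to update.
 * @param attributes The new cache attributes.
 */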
10650 MARK_AS_PMAP_TEXT void
10651 pmap_update_cache_attributes_locked(
10652 ppnum_t ppnum,
10653 unsigned attributes)
10654 {
10655 pmap_paddr_t phys = ptoa(ppnum);
10656 pv_entry_t *pve_p;
10657 pt_entry_t *pte_p;
10658 pv_entry_t **pv_h;
10659 pt_entry_t tmplate;
10660 unsigned int pai;
10661 boolean_t tlb_flush_needed = FALSE;
10662
10663 PMAP_TRACE(2, PMAP_CODE(PMAP__UPDATE_CACHING) | DBG_FUNC_START, ppnum, attributes);
10664
10665 if (pmap_panic_dev_wimg_on_managed) {
10666 switch (attributes & VM_WIMG_MASK) {
10667 case VM_WIMG_IO: // nGnRnE
10668 case VM_WIMG_POSTED: // nGnRE
10669 /* supported on DRAM, but slow, so we disallow */
10670
10671 case VM_WIMG_POSTED_REORDERED: // nGRE
10672 case VM_WIMG_POSTED_COMBINED_REORDERED: // GRE
10673 /* unsupported on DRAM */
10674
10675 panic("%s: trying to use unsupported VM_WIMG type for managed page, VM_WIMG=%x, ppnum=%#x",
10676 __FUNCTION__, attributes & VM_WIMG_MASK, ppnum);
10677 break;
10678
10679 default:
10680 /* not device type memory, all good */
10681
10682 break;
10683 }
10684 }
10685
10686 #if __ARM_PTE_PHYSMAP__
10687 vm_offset_t kva = phystokv(phys);
10688 pte_p = pmap_pte(kernel_pmap, kva);
10689
10690 tmplate = *pte_p;
10691 tmplate &= ~(ARM_PTE_ATTRINDXMASK | ARM_PTE_SHMASK);
10692 #if XNU_MONITOR
10693 tmplate |= (wimg_to_pte(attributes, phys) & ~ARM_PTE_XPRR_MASK);
10694 #else
10695 tmplate |= wimg_to_pte(attributes, phys);
10696 #endif
10697 #if (__ARM_VMSA__ > 7)
10698 if (tmplate & ARM_PTE_HINT_MASK) {
10699 panic("%s: physical aperture PTE %p has hint bit set, va=%p, pte=0x%llx",
10700 __FUNCTION__, pte_p, (void *)kva, tmplate);
10701 }
10702 #endif
10703 write_pte_strong(pte_p, tmplate);
10704 flush_mmu_tlb_region_asid_async(kva, PAGE_SIZE, kernel_pmap, true);
10705 tlb_flush_needed = TRUE;
10706 #endif
10707
10708 pai = pa_index(phys);
10709
10710 pv_h = pai_to_pvh(pai);
10711
10712 pte_p = PT_ENTRY_NULL;
10713 pve_p = PV_ENTRY_NULL;
10714 if (pvh_test_type(pv_h, PVH_TYPE_PTEP)) {
10715 pte_p = pvh_ptep(pv_h);
10716 } else if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) {
10717 pve_p = pvh_pve_list(pv_h);
10718 pte_p = PT_ENTRY_NULL;
10719 }
10720
10721 int pve_ptep_idx = 0;
10722 while ((pve_p != PV_ENTRY_NULL) || (pte_p != PT_ENTRY_NULL)) {
10723 vm_map_address_t va;
10724 pmap_t pmap;
10725
10726 if (pve_p != PV_ENTRY_NULL) {
10727 pte_p = pve_get_ptep(pve_p, pve_ptep_idx);
10728 if (pte_p == PT_ENTRY_NULL) {
10729 goto cache_skip_pve;
10730 }
10731 }
10732
10733 #ifdef PVH_FLAG_IOMMU
10734 if (pvh_ptep_is_iommu(pte_p)) {
10735 goto cache_skip_pve;
10736 }
10737 #endif
10738 pmap = ptep_get_pmap(pte_p);
10739 va = ptep_get_va(pte_p);
10740
10741 tmplate = *pte_p;
10742 tmplate &= ~(ARM_PTE_ATTRINDXMASK | ARM_PTE_SHMASK);
10743 tmplate |= pmap_get_pt_ops(pmap)->wimg_to_pte(attributes, phys);
10744
10745 write_pte_strong(pte_p, tmplate);
10746 pmap_get_pt_ops(pmap)->flush_tlb_region_async(va, pt_attr_page_size(pmap_get_pt_attr(pmap)) * PAGE_RATIO, pmap, true);
10747 tlb_flush_needed = TRUE;
10748
10749 cache_skip_pve:
10750 pte_p = PT_ENTRY_NULL;
10751 if ((pve_p != PV_ENTRY_NULL) && (++pve_ptep_idx == PTE_PER_PVE)) {
10752 pve_ptep_idx = 0;
10753 pve_p = pve_next(pve_p);
10754 }
10755 }
10756 if (tlb_flush_needed) {
10757 pmap_sync_tlb((attributes & VM_WIMG_MASK) == VM_WIMG_RT);
10758 }
10759
10760 PMAP_TRACE(2, PMAP_CODE(PMAP__UPDATE_CACHING) | DBG_FUNC_END, ppnum, attributes);
10761 }
10762
10763 #if (__ARM_VMSA__ == 7)
10764 void
10765 pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
10766 vm_map_address_t *user_commpage_addr)
10767 {
10768 pmap_paddr_t pa;
10769 kern_return_t kr;
10770
10771 assert(kernel_data_addr != NULL);
10772 assert(kernel_text_addr != NULL);
10773 assert(user_commpage_addr != NULL);
10774
10775 (void) pmap_pages_alloc_zeroed(&pa, PAGE_SIZE, 0);
10776
10777 kr = pmap_enter(kernel_pmap, _COMM_PAGE_BASE_ADDRESS, atop(pa), VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10778 assert(kr == KERN_SUCCESS);
10779
10780 *kernel_data_addr = phystokv(pa);
10781 // We don't have PFZ for 32 bit arm, always NULL
10782 *kernel_text_addr = 0;
10783 *user_commpage_addr = 0;
10784 }
10785
10786 #else /* __ARM_VMSA__ == 7 */
10787
10788 /**
10789 * Mark a pmap as being dedicated to use for a commpage mapping.
10790 * The pmap itself will never be activated on a CPU; its mappings will
10791 * only be embedded in userspace pmaps at a fixed virtual address.
10792 *
10793 * @param pmap the pmap to mark as belonging to a commpage.
10794 */
10795 static void
10796 pmap_set_commpage(pmap_t pmap)
10797 {
10798 #if XNU_MONITOR
10799 assert(!pmap_ppl_locked_down);
10800 #endif
10801 assert(pmap->type == PMAP_TYPE_USER);
10802 pmap->type = PMAP_TYPE_COMMPAGE;
10803 /*
10804 * Free the pmap's ASID. This pmap should not ever be directly
10805 * activated in a CPU's TTBR. Freeing the ASID will not only reduce
10806 * ASID space contention but will also cause pmap_switch() to panic
10807 * if an attacker tries to activate this pmap. Disable preemption to
10808 * accommodate the *_nopreempt spinlock in free_asid().
10809 */
10810 mp_disable_preemption();
10811 pmap_get_pt_ops(pmap)->free_id(pmap);
10812 mp_enable_preemption();
10813 }
10814
10815 static void
10816 pmap_update_tt3e(
10817 pmap_t pmap,
10818 vm_address_t address,
10819 tt_entry_t template)
10820 {
10821 tt_entry_t *ptep, pte;
10822
10823 ptep = pmap_tt3e(pmap, address);
10824 if (ptep == NULL) {
10825 panic("%s: no ptep?", __FUNCTION__);
10826 }
10827
10828 pte = *ptep;
10829 pte = tte_to_pa(pte) | template;
10830 write_pte_strong(ptep, pte);
10831 }
10832
10833 /* Note absence of non-global bit */
10834 #define PMAP_COMM_PAGE_PTE_TEMPLATE (ARM_PTE_TYPE_VALID \
10835 | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) \
10836 | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_NX \
10837 | ARM_PTE_PNX | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF)
10838
10839 /* Note absence of non-global bit and no-execute bit. */
10840 #define PMAP_COMM_PAGE_TEXT_PTE_TEMPLATE (ARM_PTE_TYPE_VALID \
10841 | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_WRITEBACK) \
10842 | ARM_PTE_SH(SH_INNER_MEMORY) | ARM_PTE_PNX \
10843 | ARM_PTE_AP(AP_RORO) | ARM_PTE_AF)
10844
10845 void
10846 pmap_create_sharedpages(vm_map_address_t *kernel_data_addr, vm_map_address_t *kernel_text_addr,
10847 vm_map_address_t *user_text_addr)
10848 {
10849 kern_return_t kr;
10850 pmap_paddr_t data_pa = 0; // data address
10851 pmap_paddr_t text_pa = 0; // text address
10852
10853 *kernel_data_addr = 0;
10854 *kernel_text_addr = 0;
10855 *user_text_addr = 0;
10856
10857 #if XNU_MONITOR
10858 data_pa = pmap_alloc_page_for_kern(0);
10859 assert(data_pa);
10860 memset((char *) phystokv(data_pa), 0, PAGE_SIZE);
10861 #if CONFIG_ARM_PFZ
10862 text_pa = pmap_alloc_page_for_kern(0);
10863 assert(text_pa);
10864 memset((char *) phystokv(text_pa), 0, PAGE_SIZE);
10865 #endif
10866
10867 #else /* XNU_MONITOR */
10868 (void) pmap_pages_alloc_zeroed(&data_pa, PAGE_SIZE, 0);
10869 #if CONFIG_ARM_PFZ
10870 (void) pmap_pages_alloc_zeroed(&text_pa, PAGE_SIZE, 0);
10871 #endif
10872
10873 #endif /* XNU_MONITOR */
10874
10875 /*
10876 * In order to avoid burning extra pages on mapping the shared page, we
10877 * create a dedicated pmap for the shared page. We forcibly nest the
10878 * translation tables from this pmap into other pmaps. The level we
10879 * will nest at depends on the MMU configuration (page size, TTBR range,
10880 * etc). Typically, this is at L1 for 4K tasks and L2 for 16K tasks.
10881 *
10882 * Note that this is NOT "the nested pmap" (which is used to nest the
10883 * shared cache).
10884 *
10885 * Note that we update parameters of the entry for our unique needs (NG
10886 * entry, etc.).
10887 */
10888 sharedpage_pmap_default = pmap_create_options(NULL, 0x0, 0);
10889 assert(sharedpage_pmap_default != NULL);
10890 pmap_set_commpage(sharedpage_pmap_default);
10891
10892 /* The user 64-bit mapping... */
10893 kr = pmap_enter_addr(sharedpage_pmap_default, _COMM_PAGE64_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10894 assert(kr == KERN_SUCCESS);
10895 pmap_update_tt3e(sharedpage_pmap_default, _COMM_PAGE64_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE);
10896 #if CONFIG_ARM_PFZ
	/* The commpage text section is mapped for the 64-bit mapping only.
	 *
	 * We don't insert it into the 32-bit mapping because we don't want 32-bit
	 * user processes to get this page mapped in; they should never call into
	 * this page.
	 *
	 * The data commpage is in a pre-reserved L3 VA range, and the text commpage
	 * is slid within the same L3 as the data commpage. It is either outside the
	 * maximum user VA or is pre-reserved in vm_map_exec(). Either way, it is
	 * reserved and unavailable to the Mach VM for future mappings.
	 */
10908 const pt_attr_t * const pt_attr = pmap_get_pt_attr(sharedpage_pmap_default);
10909 int num_ptes = pt_attr_leaf_size(pt_attr) >> PTE_SHIFT;
10910
10911 vm_map_address_t commpage_text_va = 0;
10912
10913 do {
10914 int text_leaf_index = random() % num_ptes;
10915
10916 // Generate a VA for the commpage text with the same root and twig index as data
10917 // comm page, but with new leaf index we've just generated.
10918 commpage_text_va = (_COMM_PAGE64_BASE_ADDRESS & ~pt_attr_leaf_index_mask(pt_attr));
10919 commpage_text_va |= (text_leaf_index << pt_attr_leaf_shift(pt_attr));
10920 } while (commpage_text_va == _COMM_PAGE64_BASE_ADDRESS); // Try again if we collide (should be unlikely)
10921
10922 // Assert that this is empty
10923 __assert_only pt_entry_t *ptep = pmap_pte(sharedpage_pmap_default, commpage_text_va);
10924 assert(ptep != PT_ENTRY_NULL);
10925 assert(*ptep == ARM_TTE_EMPTY);
10926
10927 // At this point, we've found the address we want to insert our comm page at
10928 kr = pmap_enter_addr(sharedpage_pmap_default, commpage_text_va, text_pa, VM_PROT_READ | VM_PROT_EXECUTE, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10929 assert(kr == KERN_SUCCESS);
10930 // Mark it as global page R/X so that it doesn't get thrown out on tlb flush
10931 pmap_update_tt3e(sharedpage_pmap_default, commpage_text_va, PMAP_COMM_PAGE_TEXT_PTE_TEMPLATE);
10932
10933 *user_text_addr = commpage_text_va;
10934 #endif
10935
10936 /* ...and the user 32-bit mapping. */
10937 kr = pmap_enter_addr(sharedpage_pmap_default, _COMM_PAGE32_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10938 assert(kr == KERN_SUCCESS);
10939 pmap_update_tt3e(sharedpage_pmap_default, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE);
10940
10941 #if __ARM_MIXED_PAGE_SIZE__
10942 /**
 * To handle 4K tasks, a new view/pmap of the shared page is needed. These are
 * a new set of page tables that point to the exact same 16K shared page as
 * before. Only the first 4K of the 16K shared page is mapped, since that's
 * the only part that contains relevant data.
10947 */
10948 sharedpage_pmap_4k = pmap_create_options(NULL, 0x0, PMAP_CREATE_FORCE_4K_PAGES);
10949 assert(sharedpage_pmap_4k != NULL);
10950 pmap_set_commpage(sharedpage_pmap_4k);
10951
10952 /* The user 64-bit mapping... */
10953 kr = pmap_enter_addr(sharedpage_pmap_4k, _COMM_PAGE64_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10954 assert(kr == KERN_SUCCESS);
10955 pmap_update_tt3e(sharedpage_pmap_4k, _COMM_PAGE64_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE);
10956
10957 /* ...and the user 32-bit mapping. */
10958 kr = pmap_enter_addr(sharedpage_pmap_4k, _COMM_PAGE32_BASE_ADDRESS, data_pa, VM_PROT_READ, VM_PROT_NONE, VM_WIMG_USE_DEFAULT, TRUE);
10959 assert(kr == KERN_SUCCESS);
10960 pmap_update_tt3e(sharedpage_pmap_4k, _COMM_PAGE32_BASE_ADDRESS, PMAP_COMM_PAGE_PTE_TEMPLATE);
10961
10962 #endif
10963
10964 /* For manipulation in kernel, go straight to physical page */
10965 *kernel_data_addr = phystokv(data_pa);
10966 *kernel_text_addr = (text_pa) ? phystokv(text_pa) : 0;
10967 }
10968
10969
10970 /*
10971 * Asserts to ensure that the TTEs we nest to map the shared page do not overlap
10972 * with user controlled TTEs for regions that aren't explicitly reserved by the
10973 * VM (e.g., _COMM_PAGE64_NESTING_START/_COMM_PAGE64_BASE_ADDRESS).
10974 */
10975 #if (ARM_PGSHIFT == 14)
10976 static_assert((_COMM_PAGE32_BASE_ADDRESS & ~ARM_TT_L2_OFFMASK) >= VM_MAX_ADDRESS);
10977 #elif (ARM_PGSHIFT == 12)
10978 static_assert((_COMM_PAGE32_BASE_ADDRESS & ~ARM_TT_L1_OFFMASK) >= VM_MAX_ADDRESS);
10979 #else
10980 #error Nested shared page mapping is unsupported on this config
10981 #endif
10982
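/**
 * Map the commpage into a user pmap by copying the twig-level TTE from the
 * dedicated commpage pmap into the corresponding slot of the target pmap,
 * expanding the target's page tables first if necessary.
 *
 * @param pmap The user pmap to receive the commpage mapping.
 *
 * @return KERN_SUCCESS, or KERN_RESOURCE_SHORTAGE on PPL systems if page
 * table expansion could not allocate memory (the caller retries after
 * supplying more pages to the PPL).
 */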
10983 MARK_AS_PMAP_TEXT kern_return_t
10984 pmap_insert_sharedpage_internal(
10985 pmap_t pmap)
10986 {
10987 kern_return_t kr = KERN_SUCCESS;
10988 vm_offset_t sharedpage_vaddr;
10989 pt_entry_t *ttep, *src_ttep;
10990 int options = 0;
10991 pmap_t sharedpage_pmap = sharedpage_pmap_default;
10992
10993 /* Validate the pmap input before accessing its data. */
10994 validate_pmap_mutable(pmap);
10995
10996 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
10997 const unsigned int sharedpage_level = pt_attr_commpage_level(pt_attr);
10998
10999 #if __ARM_MIXED_PAGE_SIZE__
11000 #if !__ARM_16K_PG__
11001 /* The following code assumes that sharedpage_pmap_default is a 16KB pmap. */
11002 #error "pmap_insert_sharedpage_internal requires a 16KB default kernel page size when __ARM_MIXED_PAGE_SIZE__ is enabled"
11003 #endif /* !__ARM_16K_PG__ */
11004
11005 /* Choose the correct shared page pmap to use. */
11006 const uint64_t pmap_page_size = pt_attr_page_size(pt_attr);
11007 if (pmap_page_size == 16384) {
11008 sharedpage_pmap = sharedpage_pmap_default;
11009 } else if (pmap_page_size == 4096) {
11010 sharedpage_pmap = sharedpage_pmap_4k;
11011 } else {
		panic("No shared page pmap exists for the requested page size: %llu", pmap_page_size);
11013 }
11014 #endif /* __ARM_MIXED_PAGE_SIZE__ */
11015
11016 #if XNU_MONITOR
11017 options |= PMAP_OPTIONS_NOWAIT;
11018 #endif /* XNU_MONITOR */
11019
11020 #if _COMM_PAGE_AREA_LENGTH != PAGE_SIZE
11021 #error We assume a single page.
11022 #endif
11023
11024 if (pmap_is_64bit(pmap)) {
11025 sharedpage_vaddr = _COMM_PAGE64_BASE_ADDRESS;
11026 } else {
11027 sharedpage_vaddr = _COMM_PAGE32_BASE_ADDRESS;
11028 }
11029
11030
11031 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
11032
11033 /*
11034 * For 4KB pages, we either "nest" at the level one page table (1GB) or level
11035 * two (2MB) depending on the address space layout. For 16KB pages, each level
11036 * one entry is 64GB, so we must go to the second level entry (32MB) in order
11037 * to "nest".
11038 *
11039 * Note: This is not "nesting" in the shared cache sense. This definition of
11040 * nesting just means inserting pointers to pre-allocated tables inside of
11041 * the passed in pmap to allow us to share page tables (which map the shared
11042 * page) for every task. This saves at least one page of memory per process
11043 * compared to creating new page tables in every process for mapping the
11044 * shared page.
11045 */
11046
11047 /**
11048 * Allocate the twig page tables if needed, and slam a pointer to the shared
11049 * page's tables into place.
11050 */
11051 while ((ttep = pmap_ttne(pmap, sharedpage_level, sharedpage_vaddr)) == TT_ENTRY_NULL) {
11052 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
11053
11054 kr = pmap_expand(pmap, sharedpage_vaddr, options, sharedpage_level);
11055
11056 if (kr != KERN_SUCCESS) {
11057 #if XNU_MONITOR
11058 if (kr == KERN_RESOURCE_SHORTAGE) {
11059 return kr;
11060 } else
11061 #endif
11062 {
11063 panic("Failed to pmap_expand for commpage, pmap=%p", pmap);
11064 }
11065 }
11066
11067 pmap_lock(pmap, PMAP_LOCK_EXCLUSIVE);
11068 }
11069
11070 if (*ttep != ARM_PTE_EMPTY) {
11071 panic("%s: Found something mapped at the commpage address?!", __FUNCTION__);
11072 }
11073
11074 src_ttep = pmap_ttne(sharedpage_pmap, sharedpage_level, sharedpage_vaddr);
11075
11076 *ttep = *src_ttep;
11077 FLUSH_PTE_STRONG();
11078
11079 pmap_unlock(pmap, PMAP_LOCK_EXCLUSIVE);
11080
11081 return kr;
11082 }
11083
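/**
 * Remove the commpage mapping from a user pmap by clearing the twig-level TTE
 * that was installed by pmap_insert_sharedpage_internal(), then flushing the
 * TLB for the commpage address.
 *
 * @param pmap The user pmap to remove the commpage mapping from.
 */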
11084 static void
11085 pmap_unmap_sharedpage(
11086 pmap_t pmap)
11087 {
11088 pt_entry_t *ttep;
11089 vm_offset_t sharedpage_vaddr;
11090 pmap_t sharedpage_pmap = sharedpage_pmap_default;
11091
11092 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
11093 const unsigned int sharedpage_level = pt_attr_commpage_level(pt_attr);
11094
11095 #if __ARM_MIXED_PAGE_SIZE__
11096 #if !__ARM_16K_PG__
11097 /* The following code assumes that sharedpage_pmap_default is a 16KB pmap. */
11098 #error "pmap_unmap_sharedpage requires a 16KB default kernel page size when __ARM_MIXED_PAGE_SIZE__ is enabled"
11099 #endif /* !__ARM_16K_PG__ */
11100
11101 /* Choose the correct shared page pmap to use. */
11102 const uint64_t pmap_page_size = pt_attr_page_size(pt_attr);
11103 if (pmap_page_size == 16384) {
11104 sharedpage_pmap = sharedpage_pmap_default;
11105 } else if (pmap_page_size == 4096) {
11106 sharedpage_pmap = sharedpage_pmap_4k;
11107 } else {
		panic("No shared page pmap exists for the requested page size: %llu", pmap_page_size);
11109 }
11110 #endif /* __ARM_MIXED_PAGE_SIZE__ */
11111
11112 #if _COMM_PAGE_AREA_LENGTH != PAGE_SIZE
11113 #error We assume a single page.
11114 #endif
11115
11116 if (pmap_is_64bit(pmap)) {
11117 sharedpage_vaddr = _COMM_PAGE64_BASE_ADDRESS;
11118 } else {
11119 sharedpage_vaddr = _COMM_PAGE32_BASE_ADDRESS;
11120 }
11121
11122
11123 ttep = pmap_ttne(pmap, sharedpage_level, sharedpage_vaddr);
11124
11125 if (ttep == NULL) {
11126 return;
11127 }
11128
11129 /* It had better be mapped to the shared page. */
11130 if (*ttep != ARM_TTE_EMPTY && *ttep != *pmap_ttne(sharedpage_pmap, sharedpage_level, sharedpage_vaddr)) {
11131 panic("%s: Something other than commpage mapped in shared page slot?", __FUNCTION__);
11132 }
11133
11134 *ttep = ARM_TTE_EMPTY;
11135 FLUSH_PTE_STRONG();
11136
11137 flush_mmu_tlb_region_asid_async(sharedpage_vaddr, PAGE_SIZE, pmap, false);
11138 sync_tlb_flush();
11139 }
11140
11141 void
11142 pmap_insert_sharedpage(
11143 pmap_t pmap)
11144 {
11145 #if XNU_MONITOR
11146 kern_return_t kr = KERN_FAILURE;
11147
11148 while ((kr = pmap_insert_sharedpage_ppl(pmap)) == KERN_RESOURCE_SHORTAGE) {
11149 pmap_alloc_page_for_ppl(0);
11150 }
11151
11152 pmap_ledger_check_balance(pmap);
11153
11154 if (kr != KERN_SUCCESS) {
11155 panic("%s: failed to insert the shared page, kr=%d, "
11156 "pmap=%p",
11157 __FUNCTION__, kr,
11158 pmap);
11159 }
11160 #else
11161 pmap_insert_sharedpage_internal(pmap);
11162 #endif
11163 }
11164
11165 static boolean_t
11166 pmap_is_64bit(
11167 pmap_t pmap)
11168 {
11169 return pmap->is_64bit;
11170 }
11171
11172 bool
11173 pmap_is_exotic(
11174 pmap_t pmap __unused)
11175 {
11176 return false;
11177 }
11178
11179 #endif
11180
/* ARMTODO: provide an implementation that accounts for
 * holes in the physical map, if any.
 */
11184 boolean_t
11185 pmap_valid_page(
11186 ppnum_t pn)
11187 {
11188 return pa_valid(ptoa(pn));
11189 }
11190
11191 boolean_t
11192 pmap_bootloader_page(
11193 ppnum_t pn)
11194 {
11195 pmap_paddr_t paddr = ptoa(pn);
11196
11197 if (pa_valid(paddr)) {
11198 return FALSE;
11199 }
11200 pmap_io_range_t *io_rgn = pmap_find_io_attr(paddr);
11201 return (io_rgn != NULL) && (io_rgn->wimg & PMAP_IO_RANGE_CARVEOUT);
11202 }
11203
11204 MARK_AS_PMAP_TEXT boolean_t
11205 pmap_is_empty_internal(
11206 pmap_t pmap,
11207 vm_map_offset_t va_start,
11208 vm_map_offset_t va_end)
11209 {
11210 vm_map_offset_t block_start, block_end;
11211 tt_entry_t *tte_p;
11212
11213 if (pmap == NULL) {
11214 return TRUE;
11215 }
11216
11217 validate_pmap(pmap);
11218
11219 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
11220 unsigned int initial_not_in_kdp = not_in_kdp;
11221
11222 if ((pmap != kernel_pmap) && (initial_not_in_kdp)) {
11223 pmap_lock(pmap, PMAP_LOCK_SHARED);
11224 }
11225
11226 #if (__ARM_VMSA__ == 7)
11227 if (tte_index(pt_attr, va_end) >= pmap->tte_index_max) {
11228 if ((pmap != kernel_pmap) && (initial_not_in_kdp)) {
11229 pmap_unlock(pmap, PMAP_LOCK_SHARED);
11230 }
11231 return TRUE;
11232 }
11233 #endif
11234
11235 /* TODO: This will be faster if we increment ttep at each level. */
11236 block_start = va_start;
11237
11238 while (block_start < va_end) {
11239 pt_entry_t *bpte_p, *epte_p;
11240 pt_entry_t *pte_p;
11241
11242 block_end = (block_start + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr);
11243 if (block_end > va_end) {
11244 block_end = va_end;
11245 }
11246
11247 tte_p = pmap_tte(pmap, block_start);
11248 if ((tte_p != PT_ENTRY_NULL)
11249 && ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE)) {
11250 pte_p = (pt_entry_t *) ttetokv(*tte_p);
11251 bpte_p = &pte_p[pte_index(pt_attr, block_start)];
11252 epte_p = &pte_p[pte_index(pt_attr, block_end)];
11253
11254 for (pte_p = bpte_p; pte_p < epte_p; pte_p++) {
11255 if (*pte_p != ARM_PTE_EMPTY) {
11256 if ((pmap != kernel_pmap) && (initial_not_in_kdp)) {
11257 pmap_unlock(pmap, PMAP_LOCK_SHARED);
11258 }
11259 return FALSE;
11260 }
11261 }
11262 }
11263 block_start = block_end;
11264 }
11265
11266 if ((pmap != kernel_pmap) && (initial_not_in_kdp)) {
11267 pmap_unlock(pmap, PMAP_LOCK_SHARED);
11268 }
11269
11270 return TRUE;
11271 }
11272
11273 boolean_t
11274 pmap_is_empty(
11275 pmap_t pmap,
11276 vm_map_offset_t va_start,
11277 vm_map_offset_t va_end)
11278 {
11279 #if XNU_MONITOR
11280 return pmap_is_empty_ppl(pmap, va_start, va_end);
11281 #else
11282 return pmap_is_empty_internal(pmap, va_start, va_end);
11283 #endif
11284 }
11285
11286 vm_map_offset_t
11287 pmap_max_offset(
11288 boolean_t is64,
11289 unsigned int option)
11290 {
11291 return (is64) ? pmap_max_64bit_offset(option) : pmap_max_32bit_offset(option);
11292 }
11293
11294 vm_map_offset_t
11295 pmap_max_64bit_offset(
11296 __unused unsigned int option)
11297 {
11298 vm_map_offset_t max_offset_ret = 0;
11299
11300 #if defined(__arm64__)
11301 #define ARM64_MIN_MAX_ADDRESS (SHARED_REGION_BASE_ARM64 + SHARED_REGION_SIZE_ARM64 + 0x20000000) // end of shared region + 512MB for various purposes
11302 _Static_assert((ARM64_MIN_MAX_ADDRESS > SHARED_REGION_BASE_ARM64) && (ARM64_MIN_MAX_ADDRESS <= MACH_VM_MAX_ADDRESS),
11303 "Minimum address space size outside allowable range");
	const vm_map_offset_t min_max_offset = ARM64_MIN_MAX_ADDRESS;
11305 if (option == ARM_PMAP_MAX_OFFSET_DEFAULT) {
11306 max_offset_ret = arm64_pmap_max_offset_default;
11307 } else if (option == ARM_PMAP_MAX_OFFSET_MIN) {
11308 max_offset_ret = min_max_offset;
11309 } else if (option == ARM_PMAP_MAX_OFFSET_MAX) {
11310 max_offset_ret = MACH_VM_MAX_ADDRESS;
11311 } else if (option == ARM_PMAP_MAX_OFFSET_DEVICE) {
11312 if (arm64_pmap_max_offset_default) {
11313 max_offset_ret = arm64_pmap_max_offset_default;
11314 } else if (max_mem > 0xC0000000) {
11315 max_offset_ret = min_max_offset + 0x138000000; // Max offset is 13.375GB for devices with > 3GB of memory
11316 } else if (max_mem > 0x40000000) {
11317 max_offset_ret = min_max_offset + 0x38000000; // Max offset is 9.375GB for devices with > 1GB and <= 3GB of memory
11318 } else {
11319 max_offset_ret = min_max_offset;
11320 }
11321 } else if (option == ARM_PMAP_MAX_OFFSET_JUMBO) {
11322 if (arm64_pmap_max_offset_default) {
11323 // Allow the boot-arg to override jumbo size
11324 max_offset_ret = arm64_pmap_max_offset_default;
11325 } else {
11326 max_offset_ret = MACH_VM_MAX_ADDRESS; // Max offset is 64GB for pmaps with special "jumbo" blessing
11327 }
11328 } else {
11329 panic("pmap_max_64bit_offset illegal option 0x%x", option);
11330 }
11331
11332 assert(max_offset_ret <= MACH_VM_MAX_ADDRESS);
11333 assert(max_offset_ret >= min_max_offset);
11334 #else
11335 panic("Can't run pmap_max_64bit_offset on non-64bit architectures");
11336 #endif
11337
11338 return max_offset_ret;
11339 }
11340
11341 vm_map_offset_t
11342 pmap_max_32bit_offset(
11343 unsigned int option)
11344 {
11345 vm_map_offset_t max_offset_ret = 0;
11346
11347 if (option == ARM_PMAP_MAX_OFFSET_DEFAULT) {
11348 max_offset_ret = arm_pmap_max_offset_default;
11349 } else if (option == ARM_PMAP_MAX_OFFSET_MIN) {
11350 max_offset_ret = 0x80000000;
11351 } else if (option == ARM_PMAP_MAX_OFFSET_MAX) {
11352 max_offset_ret = VM_MAX_ADDRESS;
11353 } else if (option == ARM_PMAP_MAX_OFFSET_DEVICE) {
11354 if (arm_pmap_max_offset_default) {
11355 max_offset_ret = arm_pmap_max_offset_default;
		} else {
			/* Both memory-size tiers used the same value, so no max_mem check is needed. */
			max_offset_ret = 0x80000000;
		}
11361 } else if (option == ARM_PMAP_MAX_OFFSET_JUMBO) {
11362 max_offset_ret = 0x80000000;
11363 } else {
11364 panic("pmap_max_32bit_offset illegal option 0x%x", option);
11365 }
11366
11367 assert(max_offset_ret <= MACH_VM_MAX_ADDRESS);
11368 return max_offset_ret;
11369 }
11370
11371 #if CONFIG_DTRACE
11372 /*
11373 * Constrain DTrace copyin/copyout actions
11374 */
11375 extern kern_return_t dtrace_copyio_preflight(addr64_t);
11376 extern kern_return_t dtrace_copyio_postflight(addr64_t);
11377
11378 kern_return_t
11379 dtrace_copyio_preflight(
11380 __unused addr64_t va)
11381 {
11382 if (current_map() == kernel_map) {
11383 return KERN_FAILURE;
11384 } else {
11385 return KERN_SUCCESS;
11386 }
11387 }
11388
11389 kern_return_t
11390 dtrace_copyio_postflight(
11391 __unused addr64_t va)
11392 {
11393 return KERN_SUCCESS;
11394 }
11395 #endif /* CONFIG_DTRACE */
11396
11397
11398 void
11399 pmap_flush_context_init(__unused pmap_flush_context *pfc)
11400 {
11401 }
11402
11403
11404 void
11405 pmap_flush(
11406 __unused pmap_flush_context *cpus_to_flush)
11407 {
11408 /* not implemented yet */
11409 return;
11410 }
11411
11412 #if XNU_MONITOR
11413
11414 /*
11415 * Enforce that the address range described by kva and nbytes is not currently
11416 * PPL-owned, and won't become PPL-owned while pinned. This is to prevent
11417 * unintentionally writing to PPL-owned memory.
11418 */
11419 static void
11420 pmap_pin_kernel_pages(vm_offset_t kva, size_t nbytes)
11421 {
11422 vm_offset_t end;
11423 if (os_add_overflow(kva, nbytes, &end)) {
11424 panic("%s(%p, 0x%llx): overflow", __func__, (void*)kva, (uint64_t)nbytes);
11425 }
11426 for (vm_offset_t ckva = trunc_page(kva); ckva < end; ckva = round_page(ckva + 1)) {
11427 pmap_paddr_t pa = kvtophys_nofail(ckva);
11428 pp_attr_t attr;
11429 unsigned int pai = pa_index(pa);
11430 if (ckva == phystokv(pa)) {
11431 panic("%s(%p): attempt to pin static mapping for page 0x%llx", __func__, (void*)kva, (uint64_t)pa);
11432 }
11433 do {
11434 attr = pp_attr_table[pai] & ~PP_ATTR_NO_MONITOR;
11435 if (attr & PP_ATTR_MONITOR) {
11436 panic("%s(%p): physical page 0x%llx belongs to PPL", __func__, (void*)kva, (uint64_t)pa);
11437 }
11438 } while (!OSCompareAndSwap16(attr, attr | PP_ATTR_NO_MONITOR, &pp_attr_table[pai]));
11439 }
11440 }
11441
11442 static void
11443 pmap_unpin_kernel_pages(vm_offset_t kva, size_t nbytes)
11444 {
11445 vm_offset_t end;
11446 if (os_add_overflow(kva, nbytes, &end)) {
11447 panic("%s(%p, 0x%llx): overflow", __func__, (void*)kva, (uint64_t)nbytes);
11448 }
11449 for (vm_offset_t ckva = trunc_page(kva); ckva < end; ckva = round_page(ckva + 1)) {
11450 pmap_paddr_t pa = kvtophys_nofail(ckva);
11451
11452 if (!(pp_attr_table[pa_index(pa)] & PP_ATTR_NO_MONITOR)) {
11453 panic("%s(%p): physical page 0x%llx not pinned", __func__, (void*)kva, (uint64_t)pa);
11454 }
11455 assert(!(pp_attr_table[pa_index(pa)] & PP_ATTR_MONITOR));
11456 ppattr_pa_clear_no_monitor(pa);
11457 }
11458 }
11459
11460 /**
11461 * Lock down a page, making all mappings read-only, and preventing further
11462 * mappings or removal of this particular kva's mapping. Effectively, it makes
11463 * the physical page at kva immutable (see the ppl_writable parameter for an
11464 * exception to this).
11465 *
11466 * @param kva Valid address to any mapping of the physical page to lockdown.
11467 * @param lockdown_flag Bit within PVH_FLAG_LOCKDOWN_MASK specifying the lockdown reason
11468 * @param ppl_writable True if the PPL should still be able to write to the page
11469 * using the physical aperture mapping. False will make the
11470 * page read-only for both the kernel and PPL in the
11471 * physical aperture.
11472 */
11473 MARK_AS_PMAP_TEXT static void
11474 pmap_ppl_lockdown_page(vm_address_t kva, uint64_t lockdown_flag, bool ppl_writable)
11475 {
11476 const pmap_paddr_t pa = kvtophys_nofail(kva);
11477 const unsigned int pai = pa_index(pa);
11478
11479 assert(lockdown_flag & PVH_FLAG_LOCKDOWN_MASK);
11480 pvh_lock(pai);
11481 pv_entry_t **pvh = pai_to_pvh(pai);
11482 const vm_offset_t pvh_flags = pvh_get_flags(pvh);
11483
11484 if (__improbable(ppattr_pa_test_monitor(pa))) {
11485 panic("%s: %#lx (page %llx) belongs to PPL", __func__, kva, pa);
11486 }
11487
11488 if (__improbable(pvh_flags & (PVH_FLAG_LOCKDOWN_MASK | PVH_FLAG_EXEC))) {
11489 panic("%s: %#lx already locked down/executable (%#llx)",
11490 __func__, kva, (uint64_t)pvh_flags);
11491 }
11492
11493 pvh_set_flags(pvh, pvh_flags | lockdown_flag);
11494
11495 /* Update the physical aperture mapping to prevent kernel write access. */
11496 const unsigned int new_xprr_perm =
11497 (ppl_writable) ? XPRR_PPL_RW_PERM : XPRR_KERN_RO_PERM;
11498 pmap_set_xprr_perm(pai, XPRR_KERN_RW_PERM, new_xprr_perm);
11499
11500 pvh_unlock(pai);
11501
11502 pmap_page_protect_options_internal((ppnum_t)atop(pa), VM_PROT_READ, 0, NULL);
11503
11504 /**
 * Double-check that the mapping didn't change physical addresses before the
 * LOCKDOWN flag was set (there is a brief window between the above
 * kvtophys_nofail() and pvh_lock() calls where the mapping could have
 * changed).
 *
 * This doesn't solve the ABA problem, but it doesn't have to: once the
 * pvh_lock() is grabbed, no new mappings can be created on this physical
 * page without the LOCKDOWN flag already set (so any future mappings can
 * only be RO, and no existing mappings can be removed).
11513 */
11514 if (kvtophys_nofail(kva) != pa) {
11515 panic("%s: Physical address of mapping changed while setting LOCKDOWN "
11516 "flag %#lx %#llx", __func__, kva, (uint64_t)pa);
11517 }
11518 }
11519
11520 /**
11521 * Helper for releasing a page from being locked down to the PPL, making it writable to the
11522 * kernel once again.
11523 *
11524 * @note This must be paired with a pmap_ppl_lockdown_page() call. Any attempts
11525 * to unlockdown a page that was never locked down, will panic.
11526 *
11527 * @param pai physical page index to release from lockdown. PVH lock for this page must be held.
11528 * @param lockdown_flag Bit within PVH_FLAG_LOCKDOWN_MASK specifying the lockdown reason
11529 * @param ppl_writable This must match whatever `ppl_writable` parameter was
11530 * passed to the paired pmap_ppl_lockdown_page() call. Any
11531 * deviation will result in a panic.
11532 */
11533 MARK_AS_PMAP_TEXT static void
11534 pmap_ppl_unlockdown_page_locked(unsigned int pai, uint64_t lockdown_flag, bool ppl_writable)
11535 {
11536 pvh_assert_locked(pai);
11537 pv_entry_t **pvh = pai_to_pvh(pai);
11538 const vm_offset_t pvh_flags = pvh_get_flags(pvh);
11539
11540 if (__improbable(!(pvh_flags & lockdown_flag))) {
		panic("%s: unlockdown attempt on pai %d that is not locked down, flag=0x%llx, PVH flags=0x%llx",
		    __func__, pai, (unsigned long long)lockdown_flag, (unsigned long long)pvh_flags);
11543 }
11544
11545 pvh_set_flags(pvh, pvh_flags & ~lockdown_flag);
11546
11547 /* Restore the pre-lockdown physical aperture mapping permissions. */
11548 const unsigned int old_xprr_perm =
11549 (ppl_writable) ? XPRR_PPL_RW_PERM : XPRR_KERN_RO_PERM;
11550 pmap_set_xprr_perm(pai, old_xprr_perm, XPRR_KERN_RW_PERM);
11551 }
11552
11553 /**
11554 * Release a page from being locked down to the PPL, making it writable to the
11555 * kernel once again.
11556 *
11557 * @note This must be paired with a pmap_ppl_lockdown_page() call. Any attempts
11558 * to unlockdown a page that was never locked down, will panic.
11559 *
11560 * @param kva Valid address to any mapping of the physical page to unlockdown.
11561 * @param lockdown_flag Bit within PVH_FLAG_LOCKDOWN_MASK specifying the lockdown reason
11562 * @param ppl_writable This must match whatever `ppl_writable` parameter was
11563 * passed to the paired pmap_ppl_lockdown_page() call. Any
11564 * deviation will result in a panic.
11565 */
11566 MARK_AS_PMAP_TEXT static void
11567 pmap_ppl_unlockdown_page(vm_address_t kva, uint64_t lockdown_flag, bool ppl_writable)
11568 {
11569 const pmap_paddr_t pa = kvtophys_nofail(kva);
11570 const unsigned int pai = pa_index(pa);
11571
11572 assert(lockdown_flag & PVH_FLAG_LOCKDOWN_MASK);
11573 pvh_lock(pai);
11574 pmap_ppl_unlockdown_page_locked(pai, lockdown_flag, ppl_writable);
11575 pvh_unlock(pai);
11576 }
11577
11578 #else /* XNU_MONITOR */
11579
11580 static void __unused
11581 pmap_pin_kernel_pages(vm_offset_t kva __unused, size_t nbytes __unused)
11582 {
11583 }
11584
11585 static void __unused
11586 pmap_unpin_kernel_pages(vm_offset_t kva __unused, size_t nbytes __unused)
11587 {
11588 }
11589
11590 #endif /* !XNU_MONITOR */
11591
11592
11593 MARK_AS_PMAP_TEXT static inline void
11594 pmap_cs_lockdown_pages(vm_address_t kva, vm_size_t size, bool ppl_writable)
11595 {
11596 #if XNU_MONITOR
11597 pmap_ppl_lockdown_pages(kva, size, PVH_FLAG_LOCKDOWN_CS, ppl_writable);
11598 #else
11599 pmap_ppl_lockdown_pages(kva, size, 0, ppl_writable);
11600 #endif
11601 }
11602
11603 MARK_AS_PMAP_TEXT static inline void
11604 pmap_cs_unlockdown_pages(vm_address_t kva, vm_size_t size, bool ppl_writable)
11605 {
11606 #if XNU_MONITOR
11607 pmap_ppl_unlockdown_pages(kva, size, PVH_FLAG_LOCKDOWN_CS, ppl_writable);
11608 #else
11609 pmap_ppl_unlockdown_pages(kva, size, 0, ppl_writable);
11610 #endif
11611 }
11612
11613 /**
 * Perform basic validation checks on the destination and its corresponding
 * offset/size prior to writing to a read-only allocation.
11616 *
11617 * @note Should be called before writing to an allocation from the read
11618 * only allocator.
11619 *
11620 * @param zid The ID of the zone the allocation belongs to.
11621 * @param va VA of element being modified (destination).
11622 * @param offset Offset being written to, in the element.
11623 * @param new_data_size Size of modification.
11624 *
11625 */
11626
11627 MARK_AS_PMAP_TEXT static void
11628 pmap_ro_zone_validate_element_dst(
11629 zone_id_t zid,
11630 vm_offset_t va,
11631 vm_offset_t offset,
11632 vm_size_t new_data_size)
11633 {
11634 vm_size_t elem_size = zone_elem_size_ro(zid);
11635 vm_offset_t sum = 0, page = trunc_page(va);
11636
11637 if (__improbable(new_data_size > (elem_size - offset))) {
11638 panic("%s: New data size %lu too large for elem size %lu at addr %p",
11639 __func__, (uintptr_t)new_data_size, (uintptr_t)elem_size, (void*)va);
11640 }
11641 if (__improbable(offset >= elem_size)) {
11642 panic("%s: Offset %lu too large for elem size %lu at addr %p",
11643 __func__, (uintptr_t)offset, (uintptr_t)elem_size, (void*)va);
11644 }
11645 if (__improbable(os_add3_overflow(va, offset, new_data_size, &sum))) {
11646 panic("%s: Integer addition overflow %p + %lu + %lu = %lu",
11647 __func__, (void*)va, (uintptr_t)offset, (uintptr_t) new_data_size,
11648 (uintptr_t)sum);
11649 }
11650 if (__improbable((va - page) % elem_size)) {
11651 panic("%s: Start of element %p is not aligned to element size %lu",
11652 __func__, (void *)va, (uintptr_t)elem_size);
11653 }
11654
11655 /* Check element is from correct zone */
11656 zone_require_ro(zid, elem_size, (void*)va);
11657 }
11658
11659
11660 /**
 * Perform basic validation checks on the source, destination, and
 * corresponding offset/sizes prior to writing to a read-only allocation.
11663 *
11664 * @note Should be called before writing to an allocation from the read
11665 * only allocator.
11666 *
11667 * @param zid The ID of the zone the allocation belongs to.
11668 * @param va VA of element being modified (destination).
11669 * @param offset Offset being written to, in the element.
11670 * @param new_data Pointer to new data (source).
11671 * @param new_data_size Size of modification.
11672 *
11673 */
11674
11675 MARK_AS_PMAP_TEXT static void
11676 pmap_ro_zone_validate_element(
11677 zone_id_t zid,
11678 vm_offset_t va,
11679 vm_offset_t offset,
11680 const vm_offset_t new_data,
11681 vm_size_t new_data_size)
11682 {
11683 vm_offset_t sum = 0;
11684
11685 if (__improbable(os_add_overflow(new_data, new_data_size, &sum))) {
11686 panic("%s: Integer addition overflow %p + %lu = %lu",
11687 __func__, (void*)new_data, (uintptr_t)new_data_size, (uintptr_t)sum);
11688 }
11689
11690 pmap_ro_zone_validate_element_dst(zid, va, offset, new_data_size);
11691 }
11692
11693 /**
 * Ensure that the physical page is locked down and pinned before writing to it.
 *
 * @note Should be called before writing to an allocation from the read
 * only allocator. This function pairs with pmap_ro_zone_unlock_phy_page;
 * ensure that it is called after the modification.
11699 *
11700 *
11701 * @param pa Physical address of the element being modified.
11702 * @param va Virtual address of element being modified.
11703 * @param size Size of the modification.
11704 *
11705 */
11706
11707 MARK_AS_PMAP_TEXT static void
11708 pmap_ro_zone_lock_phy_page(
11709 const pmap_paddr_t pa,
11710 vm_offset_t va,
11711 vm_size_t size)
11712 {
11713 const unsigned int pai = pa_index(pa);
11714 pvh_lock(pai);
11715
11716 /* Ensure that the physical page is locked down */
11717 #if XNU_MONITOR
11718 pv_entry_t **pvh = pai_to_pvh(pai);
11719 if (!(pvh_get_flags(pvh) & PVH_FLAG_LOCKDOWN_RO)) {
11720 panic("%s: Physical page not locked down %llx", __func__, pa);
11721 }
11722 #endif /* XNU_MONITOR */
11723
11724 /* Ensure page can't become PPL-owned memory before the memcpy occurs */
11725 pmap_pin_kernel_pages(va, size);
11726 }
11727
11728 /**
 * Unlock and unpin the physical page after writing to it.
 *
 * @note Should be called after writing to an allocation from the read
 * only allocator. This function pairs with pmap_ro_zone_lock_phy_page;
 * ensure that it has been called prior to the modification.
11734 *
11735 * @param pa Physical address of the element that was modified.
11736 * @param va Virtual address of element that was modified.
11737 * @param size Size of the modification.
11738 *
11739 */
11740
11741 MARK_AS_PMAP_TEXT static void
11742 pmap_ro_zone_unlock_phy_page(
11743 const pmap_paddr_t pa,
11744 vm_offset_t va,
11745 vm_size_t size)
11746 {
11747 const unsigned int pai = pa_index(pa);
11748 pmap_unpin_kernel_pages(va, size);
11749 pvh_unlock(pai);
11750 }
11751
11752 /**
 * Copies a kauth_cred from new_data to kv. The function is defined in
 * kern_prot.c.
11755 *
11756 * @note Will be removed upon completion of
11757 * <rdar://problem/72635194> Compiler PAC support for memcpy.
11758 *
11759 * @param kv Address to copy new data to.
11760 * @param new_data Pointer to new data.
11761 *
11762 */
11763
11764 extern void
11765 kauth_cred_copy(const uintptr_t kv, const uintptr_t new_data);
11766
11767 /**
11768 * Zalloc-specific memcpy that writes through the physical aperture
11769 * and ensures the element being modified is from a read-only zone.
11770 *
11771 * @note Designed to work only with the zone allocator's read-only submap.
11772 *
11773 * @param zid The ID of the zone to allocate from.
11774 * @param va VA of element to be modified.
11775 * @param offset Offset from element.
11776 * @param new_data Pointer to new data.
11777 * @param new_data_size Size of modification.
11778 *
11779 */
11780
11781 void
11782 pmap_ro_zone_memcpy(
11783 zone_id_t zid,
11784 vm_offset_t va,
11785 vm_offset_t offset,
11786 const vm_offset_t new_data,
11787 vm_size_t new_data_size)
11788 {
11789 #if XNU_MONITOR
11790 pmap_ro_zone_memcpy_ppl(zid, va, offset, new_data, new_data_size);
11791 #else /* XNU_MONITOR */
11792 pmap_ro_zone_memcpy_internal(zid, va, offset, new_data, new_data_size);
11793 #endif /* XNU_MONITOR */
11794 }
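
/*
 * Illustrative (hypothetical) usage, assuming an element of a read-only zone
 * with a field being updated; writes to such elements are expected to funnel
 * through this routine rather than storing through the element VA directly:
 *
 *	struct foo *elem = ...;		// element allocated from an RO zone
 *	uint32_t new_bar = 42;
 *	pmap_ro_zone_memcpy(zid, (vm_offset_t)elem, offsetof(struct foo, bar),
 *	    (vm_offset_t)&new_bar, sizeof(new_bar));
 */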
11795
11796 MARK_AS_PMAP_TEXT void
11797 pmap_ro_zone_memcpy_internal(
11798 zone_id_t zid,
11799 vm_offset_t va,
11800 vm_offset_t offset,
11801 const vm_offset_t new_data,
11802 vm_size_t new_data_size)
11803 {
11804 const pmap_paddr_t pa = kvtophys_nofail(va + offset);
11805
11806 if (!new_data || new_data_size == 0) {
11807 return;
11808 }
11809
11810 pmap_ro_zone_validate_element(zid, va, offset, new_data, new_data_size);
11811 pmap_ro_zone_lock_phy_page(pa, va, new_data_size);
11812 memcpy((void*)phystokv(pa), (void*)new_data, new_data_size);
11813 pmap_ro_zone_unlock_phy_page(pa, va, new_data_size);
11814 }
11815
11816 /**
 * Zalloc-specific function to atomically mutate fields of an element that
 * belongs to a read-only zone, via the physical aperture.
11819 *
11820 * @note Designed to work only with the zone allocator's read-only submap.
11821 *
11822 * @param zid The ID of the zone the element belongs to.
11823 * @param va VA of element to be modified.
11824 * @param offset Offset in element.
11825 * @param op Atomic operation to perform.
11826 * @param value Mutation value.
11827 *
11828 */
11829
11830 uint64_t
11831 pmap_ro_zone_atomic_op(
11832 zone_id_t zid,
11833 vm_offset_t va,
11834 vm_offset_t offset,
11835 zro_atomic_op_t op,
11836 uint64_t value)
11837 {
11838 #if XNU_MONITOR
11839 return pmap_ro_zone_atomic_op_ppl(zid, va, offset, op, value);
11840 #else /* XNU_MONITOR */
11841 return pmap_ro_zone_atomic_op_internal(zid, va, offset, op, value);
11842 #endif /* XNU_MONITOR */
11843 }
11844
11845 MARK_AS_PMAP_TEXT uint64_t
11846 pmap_ro_zone_atomic_op_internal(
11847 zone_id_t zid,
11848 vm_offset_t va,
11849 vm_offset_t offset,
11850 zro_atomic_op_t op,
11851 uint64_t value)
11852 {
11853 const pmap_paddr_t pa = kvtophys_nofail(va + offset);
11854 vm_size_t value_size = op & 0xf;
11855
11856 pmap_ro_zone_validate_element_dst(zid, va, offset, value_size);
11857 pmap_ro_zone_lock_phy_page(pa, va, value_size);
11858 value = __zalloc_ro_mut_atomic(phystokv(pa), op, value);
11859 pmap_ro_zone_unlock_phy_page(pa, va, value_size);
11860
11861 return value;
11862 }
11863
11864 /**
 * bzero for allocations from read-only zones that writes through the
 * physical aperture.
11867 *
11868 * @note This is called by the zfree path of all allocations from read
11869 * only zones.
11870 *
11871 * @param zid The ID of the zone the allocation belongs to.
11872 * @param va VA of element to be zeroed.
11873 * @param offset Offset in the element.
11874 * @param size Size of allocation.
11875 *
11876 */
11877
11878 void
11879 pmap_ro_zone_bzero(
11880 zone_id_t zid,
11881 vm_offset_t va,
11882 vm_offset_t offset,
11883 vm_size_t size)
11884 {
11885 #if XNU_MONITOR
11886 pmap_ro_zone_bzero_ppl(zid, va, offset, size);
11887 #else /* XNU_MONITOR */
11888 pmap_ro_zone_bzero_internal(zid, va, offset, size);
11889 #endif /* XNU_MONITOR */
11890 }
11891
11892 MARK_AS_PMAP_TEXT void
11893 pmap_ro_zone_bzero_internal(
11894 zone_id_t zid,
11895 vm_offset_t va,
11896 vm_offset_t offset,
11897 vm_size_t size)
11898 {
11899 const pmap_paddr_t pa = kvtophys_nofail(va + offset);
11900 pmap_ro_zone_validate_element(zid, va, offset, 0, size);
11901 pmap_ro_zone_lock_phy_page(pa, va, size);
11902 bzero((void*)phystokv(pa), size);
11903 pmap_ro_zone_unlock_phy_page(pa, va, size);
11904 }
11905
11906 /**
 * Removes write access to a page through the physical aperture.
 *
 * @note For non-PPL devices, it simply makes all virtual mappings RO.
 * @note Designed to work only with the zone allocator's read-only submap.
 *
 * @param va VA of the page to remove write access from.
11913 *
11914 */
11915 MARK_AS_PMAP_TEXT static void
11916 pmap_phys_write_disable(vm_address_t va)
11917 {
11918 #if XNU_MONITOR
11919 pmap_ppl_lockdown_page(va, PVH_FLAG_LOCKDOWN_RO, true);
11920 #else /* XNU_MONITOR */
11921 pmap_page_protect(atop_kernel(kvtophys(va)), VM_PROT_READ);
11922 #endif /* XNU_MONITOR */
11923 }
11924
11925 #define PMAP_RESIDENT_INVALID ((mach_vm_size_t)-1)
11926
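/**
 * Compute the number of resident and compressed bytes within a VA range that
 * is covered by a single twig-level TTE.
 *
 * @param pmap The pmap to query.
 * @param start Page-aligned start of the range.
 * @param end Page-aligned end of the range; must lie within the same
 * twig-level table as start.
 * @param compressed_bytes_p If non-NULL, incremented by the number of
 * compressed bytes found in the range.
 *
 * @return The number of resident bytes, or PMAP_RESIDENT_INVALID if the pmap
 * is NULL or no page table is mapped at start.
 */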
11927 MARK_AS_PMAP_TEXT mach_vm_size_t
11928 pmap_query_resident_internal(
11929 pmap_t pmap,
11930 vm_map_address_t start,
11931 vm_map_address_t end,
11932 mach_vm_size_t *compressed_bytes_p)
11933 {
11934 mach_vm_size_t resident_bytes = 0;
11935 mach_vm_size_t compressed_bytes = 0;
11936
11937 pt_entry_t *bpte, *epte;
11938 pt_entry_t *pte_p;
11939 tt_entry_t *tte_p;
11940
11941 if (pmap == NULL) {
11942 return PMAP_RESIDENT_INVALID;
11943 }
11944
11945 validate_pmap(pmap);
11946
11947 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
11948
11949 /* Ensure that this request is valid, and addresses exactly one TTE. */
11950 if (__improbable((start % pt_attr_page_size(pt_attr)) ||
11951 (end % pt_attr_page_size(pt_attr)))) {
11952 panic("%s: address range %p, %p not page-aligned to 0x%llx", __func__, (void*)start, (void*)end, pt_attr_page_size(pt_attr));
11953 }
11954
11955 if (__improbable((end < start) || (end > ((start + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr))))) {
11956 panic("%s: invalid address range %p, %p", __func__, (void*)start, (void*)end);
11957 }
11958
11959 pmap_lock(pmap, PMAP_LOCK_SHARED);
11960 tte_p = pmap_tte(pmap, start);
11961 if (tte_p == (tt_entry_t *) NULL) {
11962 pmap_unlock(pmap, PMAP_LOCK_SHARED);
11963 return PMAP_RESIDENT_INVALID;
11964 }
11965 if ((*tte_p & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE) {
11966 pte_p = (pt_entry_t *) ttetokv(*tte_p);
11967 bpte = &pte_p[pte_index(pt_attr, start)];
11968 epte = &pte_p[pte_index(pt_attr, end)];
11969
11970 for (; bpte < epte; bpte++) {
11971 if (ARM_PTE_IS_COMPRESSED(*bpte, bpte)) {
11972 compressed_bytes += pt_attr_page_size(pt_attr);
11973 } else if (pa_valid(pte_to_pa(*bpte))) {
11974 resident_bytes += pt_attr_page_size(pt_attr);
11975 }
11976 }
11977 }
11978 pmap_unlock(pmap, PMAP_LOCK_SHARED);
11979
11980 if (compressed_bytes_p) {
11981 pmap_pin_kernel_pages((vm_offset_t)compressed_bytes_p, sizeof(*compressed_bytes_p));
11982 *compressed_bytes_p += compressed_bytes;
11983 pmap_unpin_kernel_pages((vm_offset_t)compressed_bytes_p, sizeof(*compressed_bytes_p));
11984 }
11985
11986 return resident_bytes;
11987 }
11988
11989 mach_vm_size_t
11990 pmap_query_resident(
11991 pmap_t pmap,
11992 vm_map_address_t start,
11993 vm_map_address_t end,
11994 mach_vm_size_t *compressed_bytes_p)
11995 {
11996 mach_vm_size_t total_resident_bytes;
11997 mach_vm_size_t compressed_bytes;
11998 vm_map_address_t va;
11999
12000
12001 if (pmap == PMAP_NULL) {
12002 if (compressed_bytes_p) {
12003 *compressed_bytes_p = 0;
12004 }
12005 return 0;
12006 }
12007
12008 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
12009
12010 total_resident_bytes = 0;
12011 compressed_bytes = 0;
12012
12013 PMAP_TRACE(3, PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_START,
12014 VM_KERNEL_ADDRHIDE(pmap), VM_KERNEL_ADDRHIDE(start),
12015 VM_KERNEL_ADDRHIDE(end));
12016
12017 va = start;
12018 while (va < end) {
12019 vm_map_address_t l;
12020 mach_vm_size_t resident_bytes;
12021
12022 l = ((va + pt_attr_twig_size(pt_attr)) & ~pt_attr_twig_offmask(pt_attr));
12023
12024 if (l > end) {
12025 l = end;
12026 }
12027 #if XNU_MONITOR
12028 resident_bytes = pmap_query_resident_ppl(pmap, va, l, compressed_bytes_p);
12029 #else
12030 resident_bytes = pmap_query_resident_internal(pmap, va, l, compressed_bytes_p);
12031 #endif
12032 if (resident_bytes == PMAP_RESIDENT_INVALID) {
12033 break;
12034 }
12035
12036 total_resident_bytes += resident_bytes;
12037
12038 va = l;
12039 }
12040
12041 if (compressed_bytes_p) {
12042 *compressed_bytes_p = compressed_bytes;
12043 }
12044
12045 PMAP_TRACE(3, PMAP_CODE(PMAP__QUERY_RESIDENT) | DBG_FUNC_END,
12046 total_resident_bytes);
12047
12048 return total_resident_bytes;
12049 }
12050
12051 #if MACH_ASSERT
12052 static void
12053 pmap_check_ledgers(
12054 pmap_t pmap)
12055 {
12056 int pid;
12057 char *procname;
12058
12059 if (pmap->pmap_pid == 0) {
12060 /*
12061 * This pmap was not or is no longer fully associated
12062 * with a task (e.g. the old pmap after a fork()/exec() or
12063 * spawn()). Its "ledger" still points at a task that is
12064 * now using a different (and active) address space, so
12065 * we can't check that all the pmap ledgers are balanced here.
12066 *
12067 * If the "pid" is set, that means that we went through
12068 * pmap_set_process() in task_terminate_internal(), so
12069 * this task's ledger should not have been re-used and
12070 * all the pmap ledgers should be back to 0.
12071 */
12072 return;
12073 }
12074
12075 pid = pmap->pmap_pid;
12076 procname = pmap->pmap_procname;
12077
12078 vm_map_pmap_check_ledgers(pmap, pmap->ledger, pid, procname);
12079 }
12080 #endif /* MACH_ASSERT */
12081
12082 void
12083 pmap_advise_pagezero_range(__unused pmap_t p, __unused uint64_t a)
12084 {
12085 }
12086
12087 /**
12088 * The minimum shared region nesting size is used by the VM to determine when to
12089 * break up large mappings to nested regions. The smallest size that these
 * mappings can be broken into is determined by the page table level at which
 * those regions are nested and by the size of the page tables.
12092 *
12093 * For instance, if a nested region is nesting at L2 for a process utilizing
12094 * 16KB page tables, then the minimum nesting size would be 32MB (size of an L2
12095 * block entry).
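 * Similarly, a process using 4KB page tables nesting at L2 would have a
 * minimum nesting size of 2MB (the size of a 4KB-granule L2 block entry).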
12096 *
12097 * @param pmap The target pmap to determine the block size based on whether it's
12098 * using 16KB or 4KB page tables.
12099 */
12100 uint64_t
12101 pmap_shared_region_size_min(__unused pmap_t pmap)
12102 {
12103 #if (__ARM_VMSA__ > 7)
12104 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
12105
12106 /**
12107 * We always nest the shared region at L2 (32MB for 16KB pages, 2MB for
12108 * 4KB pages). This means that a target pmap will contain L2 entries that
12109 * point to shared L3 page tables in the shared region pmap.
12110 */
12111 return pt_attr_twig_size(pt_attr);
12112
12113 #else
12114 return ARM_NESTING_SIZE_MIN;
12115 #endif
12116 }
12117
12118 boolean_t
12119 pmap_enforces_execute_only(
12120 #if (__ARM_VMSA__ == 7)
12121 __unused
12122 #endif
12123 pmap_t pmap)
12124 {
12125 #if (__ARM_VMSA__ > 7)
12126 return pmap != kernel_pmap;
12127 #else
12128 return FALSE;
12129 #endif
12130 }
12131
12132 MARK_AS_PMAP_TEXT void
12133 pmap_set_vm_map_cs_enforced_internal(
12134 pmap_t pmap,
12135 bool new_value)
12136 {
12137 validate_pmap_mutable(pmap);
12138 pmap->pmap_vm_map_cs_enforced = new_value;
12139 }
12140
12141 void
12142 pmap_set_vm_map_cs_enforced(
12143 pmap_t pmap,
12144 bool new_value)
12145 {
12146 #if XNU_MONITOR
12147 pmap_set_vm_map_cs_enforced_ppl(pmap, new_value);
12148 #else
12149 pmap_set_vm_map_cs_enforced_internal(pmap, new_value);
12150 #endif
12151 }
12152
12153 extern int cs_process_enforcement_enable;
12154 bool
12155 pmap_get_vm_map_cs_enforced(
12156 pmap_t pmap)
12157 {
12158 if (cs_process_enforcement_enable) {
12159 return true;
12160 }
12161 return pmap->pmap_vm_map_cs_enforced;
12162 }
12163
12164 MARK_AS_PMAP_TEXT void
12165 pmap_set_jit_entitled_internal(
12166 __unused pmap_t pmap)
12167 {
12168 return;
12169 }
12170
12171 void
12172 pmap_set_jit_entitled(
12173 pmap_t pmap)
12174 {
12175 #if XNU_MONITOR
12176 pmap_set_jit_entitled_ppl(pmap);
12177 #else
12178 pmap_set_jit_entitled_internal(pmap);
12179 #endif
12180 }
12181
12182 bool
12183 pmap_get_jit_entitled(
12184 __unused pmap_t pmap)
12185 {
12186 return false;
12187 }
12188
12189 MARK_AS_PMAP_TEXT kern_return_t
12190 pmap_query_page_info_internal(
12191 pmap_t pmap,
12192 vm_map_offset_t va,
12193 int *disp_p)
12194 {
12195 pmap_paddr_t pa;
12196 int disp;
12197 unsigned int pai;
12198 pt_entry_t *pte;
12199 pv_entry_t **pv_h, *pve_p;
12200
12201 if (pmap == PMAP_NULL || pmap == kernel_pmap) {
12202 pmap_pin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p));
12203 *disp_p = 0;
12204 pmap_unpin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p));
12205 return KERN_INVALID_ARGUMENT;
12206 }
12207
12208 disp = 0;
12209
12210 validate_pmap(pmap);
12211 pmap_lock(pmap, PMAP_LOCK_SHARED);
12212
12213 pte = pmap_pte(pmap, va);
12214 if (pte == PT_ENTRY_NULL) {
12215 goto done;
12216 }
12217
12218 pa = pte_to_pa(*((volatile pt_entry_t*)pte));
12219 if (pa == 0) {
12220 if (ARM_PTE_IS_COMPRESSED(*pte, pte)) {
12221 disp |= PMAP_QUERY_PAGE_COMPRESSED;
12222 if (*pte & ARM_PTE_COMPRESSED_ALT) {
12223 disp |= PMAP_QUERY_PAGE_COMPRESSED_ALTACCT;
12224 }
12225 }
12226 } else {
12227 disp |= PMAP_QUERY_PAGE_PRESENT;
12228 pai = pa_index(pa);
12229 if (!pa_valid(pa)) {
12230 goto done;
12231 }
12232 pvh_lock(pai);
12233 pv_h = pai_to_pvh(pai);
12234 pve_p = PV_ENTRY_NULL;
12235 int pve_ptep_idx = 0;
12236 if (pvh_test_type(pv_h, PVH_TYPE_PVEP)) {
12237 pve_p = pvh_pve_list(pv_h);
12238 while (pve_p != PV_ENTRY_NULL &&
12239 (pve_ptep_idx = pve_find_ptep_index(pve_p, pte)) == -1) {
12240 pve_p = pve_next(pve_p);
12241 }
12242 }
12243
12244 if (ppattr_pve_is_altacct(pai, pve_p, pve_ptep_idx)) {
12245 disp |= PMAP_QUERY_PAGE_ALTACCT;
12246 } else if (ppattr_test_reusable(pai)) {
12247 disp |= PMAP_QUERY_PAGE_REUSABLE;
12248 } else if (ppattr_pve_is_internal(pai, pve_p, pve_ptep_idx)) {
12249 disp |= PMAP_QUERY_PAGE_INTERNAL;
12250 }
12251 pvh_unlock(pai);
12252 }
12253
12254 done:
12255 pmap_unlock(pmap, PMAP_LOCK_SHARED);
12256 pmap_pin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p));
12257 *disp_p = disp;
12258 pmap_unpin_kernel_pages((vm_offset_t)disp_p, sizeof(*disp_p));
12259 return KERN_SUCCESS;
12260 }
12261
12262 kern_return_t
12263 pmap_query_page_info(
12264 pmap_t pmap,
12265 vm_map_offset_t va,
12266 int *disp_p)
12267 {
12268 #if XNU_MONITOR
12269 return pmap_query_page_info_ppl(pmap, va, disp_p);
12270 #else
12271 return pmap_query_page_info_internal(pmap, va, disp_p);
12272 #endif
12273 }
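
/*
 * Example (hypothetical caller, a minimal sketch): interpreting the
 * disposition bits returned by pmap_query_page_info() for a user VA:
 *
 *	int disp = 0;
 *	if (pmap_query_page_info(map->pmap, va, &disp) == KERN_SUCCESS) {
 *		if (disp & PMAP_QUERY_PAGE_PRESENT) {
 *			// mapping is resident
 *		} else if (disp & PMAP_QUERY_PAGE_COMPRESSED) {
 *			// page lives in the compressor
 *		}
 *	}
 *
 * 'map' and 'va' are assumed to be a valid user vm_map and address.
 */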
12274
12275
12276
12277 static vm_map_size_t
12278 pmap_user_va_size(pmap_t pmap __unused)
12279 {
12280 #if (__ARM_VMSA__ == 7)
12281 return VM_MAX_ADDRESS;
12282 #else
12283 #if __ARM_MIXED_PAGE_SIZE__
12284 uint64_t tcr_value = pmap_get_pt_attr(pmap)->pta_tcr_value;
12285 return 1ULL << (64 - ((tcr_value >> TCR_T0SZ_SHIFT) & TCR_TSZ_MASK));
12286 #else
12287 return 1ULL << (64 - T0SZ_BOOT);
#endif /* __ARM_MIXED_PAGE_SIZE__ */
#endif /* (__ARM_VMSA__ == 7) */
12290 }
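
/*
 * Worked example (illustrative): on arm64 the user VA width is 64 - T0SZ
 * bits, so a boot-time T0SZ of 25 would yield 1ULL << 39 = 512GB of user
 * address space. The actual T0SZ value is configuration-dependent.
 */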
12291
12292
12293
12294 kern_return_t
12295 pmap_load_legacy_trust_cache(struct pmap_legacy_trust_cache __unused *trust_cache,
12296 const vm_size_t __unused trust_cache_len)
12297 {
12298 // Unsupported
12299 return KERN_NOT_SUPPORTED;
12300 }
12301
12302 pmap_tc_ret_t
12303 pmap_load_image4_trust_cache(struct pmap_image4_trust_cache __unused *trust_cache,
12304 const vm_size_t __unused trust_cache_len,
12305 uint8_t const * __unused img4_manifest,
12306 const vm_size_t __unused img4_manifest_buffer_len,
12307 const vm_size_t __unused img4_manifest_actual_len,
12308 bool __unused dry_run)
12309 {
12310 // Unsupported
12311 return PMAP_TC_UNKNOWN_FORMAT;
12312 }
12313
12314 bool
12315 pmap_in_ppl(void)
12316 {
12317 // Unsupported
12318 return false;
12319 }
12320
12321 bool
12322 pmap_has_ppl(void)
12323 {
12324 // Unsupported
12325 return false;
12326 }
12327
12328 void
12329 pmap_lockdown_image4_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags)
12330 {
12331 // Unsupported
12332 }
12333
12334 void
12335 pmap_lockdown_image4_late_slab(__unused vm_offset_t slab, __unused vm_size_t slab_len, __unused uint64_t flags)
12336 {
12337 // Unsupported
12338 }
12339
12340 void *
12341 pmap_claim_reserved_ppl_page(void)
12342 {
12343 // Unsupported
12344 return NULL;
12345 }
12346
12347 void
12348 pmap_free_reserved_ppl_page(void __unused *kva)
12349 {
12350 // Unsupported
12351 }
12352
12353
12354 MARK_AS_PMAP_TEXT bool
12355 pmap_is_trust_cache_loaded_internal(const uuid_t uuid)
12356 {
12357 bool found = false;
12358
12359 pmap_simple_lock(&pmap_loaded_trust_caches_lock);
12360
12361 for (struct pmap_image4_trust_cache const *c = pmap_image4_trust_caches; c != NULL; c = c->next) {
12362 if (bcmp(uuid, c->module->uuid, sizeof(uuid_t)) == 0) {
12363 found = true;
12364 goto done;
12365 }
12366 }
12367
12368 #ifdef PLATFORM_BridgeOS
12369 for (struct pmap_legacy_trust_cache const *c = pmap_legacy_trust_caches; c != NULL; c = c->next) {
12370 if (bcmp(uuid, c->uuid, sizeof(uuid_t)) == 0) {
12371 found = true;
12372 goto done;
12373 }
12374 }
12375 #endif
12376
12377 done:
12378 pmap_simple_unlock(&pmap_loaded_trust_caches_lock);
12379 return found;
12380 }
12381
12382 bool
12383 pmap_is_trust_cache_loaded(const uuid_t uuid)
12384 {
12385 #if XNU_MONITOR
12386 return pmap_is_trust_cache_loaded_ppl(uuid);
12387 #else
12388 return pmap_is_trust_cache_loaded_internal(uuid);
12389 #endif
12390 }
12391
12392 MARK_AS_PMAP_TEXT bool
12393 pmap_lookup_in_loaded_trust_caches_internal(const uint8_t cdhash[CS_CDHASH_LEN])
12394 {
12395 struct pmap_image4_trust_cache const *cache = NULL;
12396 #ifdef PLATFORM_BridgeOS
12397 struct pmap_legacy_trust_cache const *legacy = NULL;
12398 #endif
12399
12400 pmap_simple_lock(&pmap_loaded_trust_caches_lock);
12401
12402 for (cache = pmap_image4_trust_caches; cache != NULL; cache = cache->next) {
12403 uint8_t hash_type = 0, flags = 0;
12404
12405 if (lookup_in_trust_cache_module(cache->module, cdhash, &hash_type, &flags)) {
12406 goto done;
12407 }
12408 }
12409
12410 #ifdef PLATFORM_BridgeOS
12411 for (legacy = pmap_legacy_trust_caches; legacy != NULL; legacy = legacy->next) {
12412 for (uint32_t i = 0; i < legacy->num_hashes; i++) {
12413 if (bcmp(legacy->hashes[i], cdhash, CS_CDHASH_LEN) == 0) {
12414 goto done;
12415 }
12416 }
12417 }
12418 #endif
12419
12420 done:
12421 pmap_simple_unlock(&pmap_loaded_trust_caches_lock);
12422
12423 if (cache != NULL) {
12424 return true;
12425 #ifdef PLATFORM_BridgeOS
12426 } else if (legacy != NULL) {
12427 return true;
12428 #endif
12429 }
12430
12431 return false;
12432 }
12433
12434 bool
12435 pmap_lookup_in_loaded_trust_caches(const uint8_t cdhash[CS_CDHASH_LEN])
12436 {
12437 #if XNU_MONITOR
12438 return pmap_lookup_in_loaded_trust_caches_ppl(cdhash);
12439 #else
12440 return pmap_lookup_in_loaded_trust_caches_internal(cdhash);
12441 #endif
12442 }
12443
12444 MARK_AS_PMAP_TEXT uint32_t
12445 pmap_lookup_in_static_trust_cache_internal(const uint8_t cdhash[CS_CDHASH_LEN])
12446 {
12447 // Awkward indirection, because the PPL macros currently force their functions to be static.
12448 return lookup_in_static_trust_cache(cdhash);
12449 }
12450
12451 uint32_t
12452 pmap_lookup_in_static_trust_cache(const uint8_t cdhash[CS_CDHASH_LEN])
12453 {
12454 #if XNU_MONITOR
12455 return pmap_lookup_in_static_trust_cache_ppl(cdhash);
12456 #else
12457 return pmap_lookup_in_static_trust_cache_internal(cdhash);
12458 #endif
12459 }
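
/*
 * Example (hypothetical caller, a minimal sketch): code-signing validation
 * would typically consult the static (built-in) trust cache first, then fall
 * back to the dynamically loaded caches:
 *
 *	if (pmap_lookup_in_static_trust_cache(cdhash) != 0 ||
 *	    pmap_lookup_in_loaded_trust_caches(cdhash)) {
 *		// cdhash is covered by a trust cache
 *	}
 */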
12460
12461 MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_compilation_service_cdhash_lock, 0);
12462 MARK_AS_PMAP_DATA uint8_t pmap_compilation_service_cdhash[CS_CDHASH_LEN] = { 0 };
12463
12464 MARK_AS_PMAP_TEXT void
12465 pmap_set_compilation_service_cdhash_internal(const uint8_t cdhash[CS_CDHASH_LEN])
12466 {
12467
12468 pmap_simple_lock(&pmap_compilation_service_cdhash_lock);
12469 memcpy(pmap_compilation_service_cdhash, cdhash, CS_CDHASH_LEN);
12470 pmap_simple_unlock(&pmap_compilation_service_cdhash_lock);
12471
12472 pmap_cs_log_info("Added Compilation Service CDHash through the PPL: 0x%02X 0x%02X 0x%02X 0x%02X", cdhash[0], cdhash[1], cdhash[2], cdhash[4]);
12473 }
12474
12475 MARK_AS_PMAP_TEXT bool
12476 pmap_match_compilation_service_cdhash_internal(const uint8_t cdhash[CS_CDHASH_LEN])
12477 {
12478 bool match = false;
12479
12480 pmap_simple_lock(&pmap_compilation_service_cdhash_lock);
12481 if (bcmp(pmap_compilation_service_cdhash, cdhash, CS_CDHASH_LEN) == 0) {
12482 match = true;
12483 }
12484 pmap_simple_unlock(&pmap_compilation_service_cdhash_lock);
12485
12486 if (match) {
12487 pmap_cs_log_info("Matched Compilation Service CDHash through the PPL");
12488 }
12489
12490 return match;
12491 }
12492
12493 void
12494 pmap_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])
12495 {
12496 #if XNU_MONITOR
12497 pmap_set_compilation_service_cdhash_ppl(cdhash);
12498 #else
12499 pmap_set_compilation_service_cdhash_internal(cdhash);
12500 #endif
12501 }
12502
12503 bool
12504 pmap_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])
12505 {
12506 #if XNU_MONITOR
12507 return pmap_match_compilation_service_cdhash_ppl(cdhash);
12508 #else
12509 return pmap_match_compilation_service_cdhash_internal(cdhash);
12510 #endif
12511 }
12512
12513 /*
12514 * As part of supporting local signing on the device, we need the PMAP layer
12515 * to store the local signing key so that PMAP_CS can validate with it. We
12516 * store it at the PMAP layer such that it is accessible to both AMFI and
12517 * PMAP_CS should they need it.
12518 */
12519 MARK_AS_PMAP_DATA static bool pmap_local_signing_public_key_set = false;
12520 MARK_AS_PMAP_DATA static uint8_t pmap_local_signing_public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE] = { 0 };
12521
12522 MARK_AS_PMAP_TEXT void
12523 pmap_set_local_signing_public_key_internal(const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE])
12524 {
12525 bool key_set = false;
12526
12527 /*
12528 * os_atomic_cmpxchg returns true in case the exchange was successful. For us,
12529 * a successful exchange means that the local signing public key has _not_ been
12530 * set. In case the key has been set, we panic as we would never expect the
12531 * kernel to attempt to set the key more than once.
12532 */
12533 key_set = !os_atomic_cmpxchg(&pmap_local_signing_public_key_set, false, true, relaxed);
12534
12535 if (key_set) {
12536 panic("attempted to set the local signing public key multiple times");
12537 }
12538
12539 memcpy(pmap_local_signing_public_key, public_key, PMAP_ECC_P384_PUBLIC_KEY_SIZE);
12540 pmap_cs_log_info("set local signing public key");
12541 }
12542
12543 void
12544 pmap_set_local_signing_public_key(const uint8_t public_key[PMAP_ECC_P384_PUBLIC_KEY_SIZE])
12545 {
12546 #if XNU_MONITOR
12547 return pmap_set_local_signing_public_key_ppl(public_key);
12548 #else
12549 return pmap_set_local_signing_public_key_internal(public_key);
12550 #endif
12551 }
12552
12553 uint8_t*
12554 pmap_get_local_signing_public_key(void)
12555 {
12556 bool key_set = os_atomic_load(&pmap_local_signing_public_key_set, relaxed);
12557
12558 if (key_set) {
12559 return pmap_local_signing_public_key;
12560 }
12561
12562 return NULL;
12563 }
12564
12565 /*
12566 * Locally signed applications need to be explicitly authorized by an entitled application
12567 * before we allow them to run.
12568 */
12569 MARK_AS_PMAP_DATA static uint8_t pmap_local_signing_cdhash[CS_CDHASH_LEN] = {0};
12570 MARK_AS_PMAP_DATA SIMPLE_LOCK_DECLARE(pmap_local_signing_cdhash_lock, 0);
12571
12572 MARK_AS_PMAP_TEXT void
12573 pmap_unrestrict_local_signing_internal(
12574 const uint8_t cdhash[CS_CDHASH_LEN])
12575 {
12576
12577 pmap_simple_lock(&pmap_local_signing_cdhash_lock);
12578 memcpy(pmap_local_signing_cdhash, cdhash, sizeof(pmap_local_signing_cdhash));
12579 pmap_simple_unlock(&pmap_local_signing_cdhash_lock);
12580
12581 pmap_cs_log_debug("unrestricted local signing for CDHash: 0x%02X%02X%02X%02X%02X...",
12582 cdhash[0], cdhash[1], cdhash[2], cdhash[3], cdhash[4]);
12583 }
12584
12585 void
12586 pmap_unrestrict_local_signing(
12587 const uint8_t cdhash[CS_CDHASH_LEN])
12588 {
12589 #if XNU_MONITOR
12590 return pmap_unrestrict_local_signing_ppl(cdhash);
12591 #else
12592 return pmap_unrestrict_local_signing_internal(cdhash);
12593 #endif
12594 }
12595
12596 #if PMAP_CS
12597 MARK_AS_PMAP_TEXT static void
12598 pmap_restrict_local_signing(void)
12599 {
12600 pmap_simple_lock(&pmap_local_signing_cdhash_lock);
12601 memset(pmap_local_signing_cdhash, 0, sizeof(pmap_local_signing_cdhash));
12602 pmap_simple_unlock(&pmap_local_signing_cdhash_lock);
12603 }
12604
12605 MARK_AS_PMAP_TEXT static bool
12606 pmap_local_signing_restricted(
12607 const uint8_t cdhash[CS_CDHASH_LEN])
12608 {
12609 pmap_simple_lock(&pmap_local_signing_cdhash_lock);
12610 int ret = memcmp(pmap_local_signing_cdhash, cdhash, sizeof(pmap_local_signing_cdhash));
12611 pmap_simple_unlock(&pmap_local_signing_cdhash_lock);
12612
12613 return ret != 0;
12614 }
12615
12616 MARK_AS_PMAP_TEXT bool
12617 pmap_cs_query_entitlements_internal(
12618 pmap_t pmap,
12619 CEQuery_t query,
12620 size_t queryLength,
12621 CEQueryContext_t finalContext)
12622 {
12623 struct pmap_cs_code_directory *cd_entry = NULL;
12624 bool ret = false;
12625
12626 if (!pmap_cs) {
12627 panic("PMAP_CS: cannot query for entitlements as pmap_cs is turned off");
12628 }
12629
12630 /*
12631 * When a pmap has not been passed in, we assume the caller wants to check the
12632 * entitlements on the current user space process.
12633 */
12634 if (pmap == NULL) {
12635 pmap = current_pmap();
12636 }
12637
12638 if (pmap == kernel_pmap) {
12639 /*
		 * Entitlement queries against the kernel pmap are not
		 * meaningful; instead of panicking we just return false.
12641 */
12642 return false;
12643 }
12644
12645 if (query == NULL || queryLength > 64) {
12646 panic("PMAP_CS: bogus entitlements query");
12647 } else {
12648 pmap_cs_assert_addr((vm_address_t)query, sizeof(CEQueryOperation_t) * queryLength, false, true);
12649 }
12650
12651 if (finalContext != NULL) {
12652 pmap_cs_assert_addr((vm_address_t)finalContext, sizeof(*finalContext), false, false);
12653 }
12654
12655 validate_pmap(pmap);
12656 pmap_lock(pmap, PMAP_LOCK_SHARED);
12657
12658 cd_entry = pmap_cs_code_directory_from_region(pmap->pmap_cs_main);
12659 if (cd_entry == NULL) {
12660 pmap_cs_log_error("attempted to query entitlements from an invalid pmap or a retired code directory");
12661 goto out;
12662 }
12663
12664 if (cd_entry->ce_ctx == NULL) {
12665 pmap_cs_log_debug("%s: code signature doesn't have any entitlements", cd_entry->identifier);
12666 goto out;
12667 }
12668
12669 der_vm_context_t executionContext = cd_entry->ce_ctx->der_context;
12670
12671 for (size_t op = 0; op < queryLength; op++) {
12672 executionContext = amfi->CoreEntitlements.der_vm_execute(executionContext, query[op]);
12673 }
12674
12675 if (amfi->CoreEntitlements.der_vm_context_is_valid(executionContext)) {
12676 ret = true;
12677 if (finalContext != NULL) {
12678 pmap_pin_kernel_pages((vm_offset_t)finalContext, sizeof(*finalContext));
12679 finalContext->der_context = executionContext;
12680 pmap_unpin_kernel_pages((vm_offset_t)finalContext, sizeof(*finalContext));
12681 }
12682 } else {
12683 ret = false;
12684 }
12685
12686 out:
12687 if (cd_entry) {
12688 lck_rw_unlock_shared(&cd_entry->rwlock);
12689 cd_entry = NULL;
12690 }
12691 pmap_unlock(pmap, PMAP_LOCK_SHARED);
12692
12693 return ret;
12694 }
#endif /* PMAP_CS */
12696
12697 bool
12698 pmap_query_entitlements(
12699 __unused pmap_t pmap,
12700 __unused CEQuery_t query,
12701 __unused size_t queryLength,
12702 __unused CEQueryContext_t finalContext)
12703 {
12704 #if !PMAP_SUPPORTS_ENTITLEMENT_CHECKS
12705 panic("PMAP_CS: do not use this API without checking for \'#if PMAP_SUPPORTS_ENTITLEMENT_CHECKS\'");
12706 #else
12707
12708 #if XNU_MONITOR
12709 return pmap_cs_query_entitlements_ppl(pmap, query, queryLength, finalContext);
12710 #else
12711 return pmap_cs_query_entitlements_internal(pmap, query, queryLength, finalContext);
12712 #endif
12713
12714 #endif /* !PMAP_SUPPORTS_ENTITLEMENT_CHECKS */
12715 }
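
/*
 * Example (hypothetical caller, assuming PMAP_SUPPORTS_ENTITLEMENT_CHECKS):
 * a query is an array of CEQueryOperation_t executed in order against the
 * signature's entitlement DER context, and the call succeeds only if the
 * resulting context is still valid:
 *
 *	CEQueryOperation_t query[] = { ... operations selecting an entitlement ... };
 *	bool entitled = pmap_query_entitlements(NULL, query,
 *	    sizeof(query) / sizeof(query[0]), NULL);
 *
 * Passing a NULL pmap queries the current user process.
 */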
12716
12717 MARK_AS_PMAP_TEXT void
12718 pmap_footprint_suspend_internal(
12719 vm_map_t map,
12720 boolean_t suspend)
12721 {
12722 #if DEVELOPMENT || DEBUG
12723 if (suspend) {
12724 current_thread()->pmap_footprint_suspended = TRUE;
12725 map->pmap->footprint_was_suspended = TRUE;
12726 } else {
12727 current_thread()->pmap_footprint_suspended = FALSE;
12728 }
12729 #else /* DEVELOPMENT || DEBUG */
12730 (void) map;
12731 (void) suspend;
12732 #endif /* DEVELOPMENT || DEBUG */
12733 }
12734
12735 void
12736 pmap_footprint_suspend(
12737 vm_map_t map,
12738 boolean_t suspend)
12739 {
12740 #if XNU_MONITOR
12741 pmap_footprint_suspend_ppl(map, suspend);
12742 #else
12743 pmap_footprint_suspend_internal(map, suspend);
12744 #endif
12745 }
12746
12747 MARK_AS_PMAP_TEXT void
12748 pmap_nop_internal(pmap_t pmap __unused)
12749 {
12750 validate_pmap_mutable(pmap);
12751 }
12752
12753 void
12754 pmap_nop(pmap_t pmap)
12755 {
12756 #if XNU_MONITOR
12757 pmap_nop_ppl(pmap);
12758 #else
12759 pmap_nop_internal(pmap);
12760 #endif
12761 }
12762
12763 #if defined(__arm64__) && (DEVELOPMENT || DEBUG)
12764
12765 struct page_table_dump_header {
12766 uint64_t pa;
12767 uint64_t num_entries;
12768 uint64_t start_va;
12769 uint64_t end_va;
12770 };
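
/*
 * Dump buffer layout (as produced by pmap_dump_page_tables_recurse() below):
 * a sequence of records, each a page_table_dump_header followed immediately
 * by num_entries raw tt_entry_t values copied from the table at 'pa':
 *
 *	[header][tte 0 ... tte N-1][header][tte 0 ...] ...
 */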
12771
12772 static kern_return_t
12773 pmap_dump_page_tables_recurse(pmap_t pmap,
12774 const tt_entry_t *ttp,
12775 unsigned int cur_level,
12776 unsigned int level_mask,
12777 uint64_t start_va,
12778 void *buf_start,
12779 void *buf_end,
12780 size_t *bytes_copied)
12781 {
12782 const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
12783 uint64_t num_entries = pt_attr_page_size(pt_attr) / sizeof(*ttp);
12784
12785 uint64_t size = pt_attr->pta_level_info[cur_level].size;
12786 uint64_t valid_mask = pt_attr->pta_level_info[cur_level].valid_mask;
12787 uint64_t type_mask = pt_attr->pta_level_info[cur_level].type_mask;
12788 uint64_t type_block = pt_attr->pta_level_info[cur_level].type_block;
12789
12790 void *bufp = (uint8_t*)buf_start + *bytes_copied;
12791
12792 if (cur_level == pt_attr_root_level(pt_attr)) {
12793 num_entries = pmap_root_alloc_size(pmap) / sizeof(tt_entry_t);
12794 }
12795
12796 uint64_t tt_size = num_entries * sizeof(tt_entry_t);
12797 const tt_entry_t *tt_end = &ttp[num_entries];
12798
12799 if (((vm_offset_t)buf_end - (vm_offset_t)bufp) < (tt_size + sizeof(struct page_table_dump_header))) {
12800 return KERN_INSUFFICIENT_BUFFER_SIZE;
12801 }
12802
12803 if (level_mask & (1U << cur_level)) {
12804 struct page_table_dump_header *header = (struct page_table_dump_header*)bufp;
12805 header->pa = ml_static_vtop((vm_offset_t)ttp);
12806 header->num_entries = num_entries;
12807 header->start_va = start_va;
12808 header->end_va = start_va + (num_entries * size);
12809
12810 bcopy(ttp, (uint8_t*)bufp + sizeof(*header), tt_size);
12811 *bytes_copied = *bytes_copied + sizeof(*header) + tt_size;
12812 }
12813 uint64_t current_va = start_va;
12814
12815 for (const tt_entry_t *ttep = ttp; ttep < tt_end; ttep++, current_va += size) {
12816 tt_entry_t tte = *ttep;
12817
12818 if (!(tte & valid_mask)) {
12819 continue;
12820 }
12821
12822 if ((tte & type_mask) == type_block) {
12823 continue;
12824 } else {
12825 if (cur_level >= pt_attr_leaf_level(pt_attr)) {
12826 panic("%s: corrupt entry %#llx at %p, "
12827 "ttp=%p, cur_level=%u, bufp=%p, buf_end=%p",
12828 __FUNCTION__, tte, ttep,
12829 ttp, cur_level, bufp, buf_end);
12830 }
12831
12832 const tt_entry_t *next_tt = (const tt_entry_t*)phystokv(tte & ARM_TTE_TABLE_MASK);
12833
12834 kern_return_t recurse_result = pmap_dump_page_tables_recurse(pmap, next_tt, cur_level + 1,
12835 level_mask, current_va, buf_start, buf_end, bytes_copied);
12836
12837 if (recurse_result != KERN_SUCCESS) {
12838 return recurse_result;
12839 }
12840 }
12841 }
12842
12843 return KERN_SUCCESS;
12844 }
12845
12846 kern_return_t
12847 pmap_dump_page_tables(pmap_t pmap, void *bufp, void *buf_end, unsigned int level_mask, size_t *bytes_copied)
12848 {
12849 if (not_in_kdp) {
12850 panic("pmap_dump_page_tables must only be called from kernel debugger context");
12851 }
12852 return pmap_dump_page_tables_recurse(pmap, pmap->tte, pt_attr_root_level(pmap_get_pt_attr(pmap)),
12853 level_mask, pmap->min, bufp, buf_end, bytes_copied);
12854 }
12855
12856 #else /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */
12857
12858 kern_return_t
12859 pmap_dump_page_tables(pmap_t pmap __unused, void *bufp __unused, void *buf_end __unused,
12860 unsigned int level_mask __unused, size_t *bytes_copied __unused)
12861 {
12862 return KERN_NOT_SUPPORTED;
12863 }
#endif /* defined(__arm64__) && (DEVELOPMENT || DEBUG) */
12865
12866
12867 #ifdef CONFIG_XNUPOST
12868 #ifdef __arm64__
12869 static volatile bool pmap_test_took_fault = false;
12870
12871 static bool
12872 pmap_test_fault_handler(arm_saved_state_t * state)
12873 {
12874 bool retval = false;
12875 uint32_t esr = get_saved_state_esr(state);
12876 esr_exception_class_t class = ESR_EC(esr);
12877 fault_status_t fsc = ISS_IA_FSC(ESR_ISS(esr));
12878
12879 if ((class == ESR_EC_DABORT_EL1) &&
12880 ((fsc == FSC_PERMISSION_FAULT_L3) || (fsc == FSC_ACCESS_FLAG_FAULT_L3))) {
12881 pmap_test_took_fault = true;
		/* Return to the instruction immediately after the faulting access. */
12883 set_saved_state_pc(state, get_saved_state_pc(state) + 4);
12884 retval = true;
12885 }
12886
12887 return retval;
12888 }
12889
12890 // Disable KASAN instrumentation, as the test pmap's TTBR0 space will not be in the shadow map
12891 static NOKASAN bool
12892 pmap_test_access(pmap_t pmap, vm_map_address_t va, bool should_fault, bool is_write)
12893 {
12894 pmap_t old_pmap = NULL;
12895
12896 pmap_test_took_fault = false;
12897
12898 /*
12899 * We're potentially switching pmaps without using the normal thread
12900 * mechanism; disable interrupts and preemption to avoid any unexpected
12901 * memory accesses.
12902 */
12903 uint64_t old_int_state = pmap_interrupts_disable();
12904 mp_disable_preemption();
12905
12906 if (pmap != NULL) {
12907 old_pmap = current_pmap();
12908 pmap_switch(pmap);
12909
12910 /* Disable PAN; pmap shouldn't be the kernel pmap. */
12911 #if __ARM_PAN_AVAILABLE__
12912 __builtin_arm_wsr("pan", 0);
12913 #endif /* __ARM_PAN_AVAILABLE__ */
12914 }
12915
12916 ml_expect_fault_begin(pmap_test_fault_handler, va);
12917
12918 if (is_write) {
12919 *((volatile uint64_t*)(va)) = 0xdec0de;
12920 } else {
12921 volatile uint64_t tmp = *((volatile uint64_t*)(va));
12922 (void)tmp;
12923 }
12924
12925 /* Save the fault bool, and undo the gross stuff we did. */
12926 bool took_fault = pmap_test_took_fault;
12927 ml_expect_fault_end();
12928
12929 if (pmap != NULL) {
12930 #if __ARM_PAN_AVAILABLE__
12931 __builtin_arm_wsr("pan", 1);
12932 #endif /* __ARM_PAN_AVAILABLE__ */
12933
12934 pmap_switch(old_pmap);
12935 }
12936
12937 mp_enable_preemption();
12938 pmap_interrupts_restore(old_int_state);
12939 bool retval = (took_fault == should_fault);
12940 return retval;
12941 }
12942
12943 static bool
12944 pmap_test_read(pmap_t pmap, vm_map_address_t va, bool should_fault)
12945 {
12946 bool retval = pmap_test_access(pmap, va, should_fault, false);
12947
12948 if (!retval) {
12949 T_FAIL("%s: %s, "
12950 "pmap=%p, va=%p, should_fault=%u",
12951 __func__, should_fault ? "did not fault" : "faulted",
12952 pmap, (void*)va, (unsigned)should_fault);
12953 }
12954
12955 return retval;
12956 }
12957
12958 static bool
12959 pmap_test_write(pmap_t pmap, vm_map_address_t va, bool should_fault)
12960 {
12961 bool retval = pmap_test_access(pmap, va, should_fault, true);
12962
12963 if (!retval) {
12964 T_FAIL("%s: %s, "
12965 "pmap=%p, va=%p, should_fault=%u",
12966 __func__, should_fault ? "did not fault" : "faulted",
12967 pmap, (void*)va, (unsigned)should_fault);
12968 }
12969
12970 return retval;
12971 }
12972
12973 static bool
12974 pmap_test_check_refmod(pmap_paddr_t pa, unsigned int should_be_set)
12975 {
12976 unsigned int should_be_clear = (~should_be_set) & (VM_MEM_REFERENCED | VM_MEM_MODIFIED);
12977 unsigned int bits = pmap_get_refmod((ppnum_t)atop(pa));
12978
12979 bool retval = (((bits & should_be_set) == should_be_set) && ((bits & should_be_clear) == 0));
12980
12981 if (!retval) {
12982 T_FAIL("%s: bits=%u, "
12983 "pa=%p, should_be_set=%u",
12984 __func__, bits,
12985 (void*)pa, should_be_set);
12986 }
12987
12988 return retval;
12989 }
12990
12991 static __attribute__((noinline)) bool
12992 pmap_test_read_write(pmap_t pmap, vm_map_address_t va, bool allow_read, bool allow_write)
12993 {
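	/*
	 * Note: the single '|' is deliberate (not '||'), so that both the
	 * read and the write access are always attempted and checked.
	 */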
12994 bool retval = (pmap_test_read(pmap, va, !allow_read) | pmap_test_write(pmap, va, !allow_write));
12995 return retval;
12996 }
12997
12998 static int
12999 pmap_test_test_config(unsigned int flags)
13000 {
13001 T_LOG("running pmap_test_test_config flags=0x%X", flags);
13002 unsigned int map_count = 0;
13003 unsigned long page_ratio = 0;
13004 pmap_t pmap = pmap_create_options(NULL, 0, flags);
13005
13006 if (!pmap) {
13007 panic("Failed to allocate pmap");
13008 }
13009
13010 __unused const pt_attr_t * const pt_attr = pmap_get_pt_attr(pmap);
13011 uintptr_t native_page_size = pt_attr_page_size(native_pt_attr);
13012 uintptr_t pmap_page_size = pt_attr_page_size(pt_attr);
13013 uintptr_t pmap_twig_size = pt_attr_twig_size(pt_attr);
13014
13015 if (pmap_page_size <= native_page_size) {
13016 page_ratio = native_page_size / pmap_page_size;
13017 } else {
		/*
		 * A page_ratio of less than 1 (a pmap page size larger than
		 * the native page size) is not currently supported by the
		 * pmap layer; panic.
		 */
		panic("%s: page_ratio < 1, native_page_size=%lu, pmap_page_size=%lu, "
		    "flags=%u",
13024 __func__, native_page_size, pmap_page_size,
13025 flags);
13026 }
13027
13028 if (PAGE_RATIO > 1) {
13029 /*
13030 * The kernel is deliberately pretending to have 16KB pages.
13031 * The pmap layer has code that supports this, so pretend the
13032 * page size is larger than it is.
13033 */
13034 pmap_page_size = PAGE_SIZE;
13035 native_page_size = PAGE_SIZE;
13036 }
13037
13038 /*
13039 * Get two pages from the VM; one to be mapped wired, and one to be
13040 * mapped nonwired.
13041 */
13042 vm_page_t unwired_vm_page = vm_page_grab();
13043 vm_page_t wired_vm_page = vm_page_grab();
13044
13045 if ((unwired_vm_page == VM_PAGE_NULL) || (wired_vm_page == VM_PAGE_NULL)) {
13046 panic("Failed to grab VM pages");
13047 }
13048
13049 ppnum_t pn = VM_PAGE_GET_PHYS_PAGE(unwired_vm_page);
13050 ppnum_t wired_pn = VM_PAGE_GET_PHYS_PAGE(wired_vm_page);
13051
13052 pmap_paddr_t pa = ptoa(pn);
13053 pmap_paddr_t wired_pa = ptoa(wired_pn);
13054
13055 /*
13056 * We'll start mappings at the second twig TT. This keeps us from only
13057 * using the first entry in each TT, which would trivially be address
13058 * 0; one of the things we will need to test is retrieving the VA for
13059 * a given PTE.
13060 */
13061 vm_map_address_t va_base = pmap_twig_size;
13062 vm_map_address_t wired_va_base = ((2 * pmap_twig_size) - pmap_page_size);
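
	/*
	 * Illustrative values (assuming 16KB pages, i.e. a 32MB twig):
	 * va_base = 0x2000000 and wired_va_base = 0x3FFC000, the last
	 * page-sized slot within the second twig's range.
	 */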
13063
13064 if (wired_va_base < (va_base + (page_ratio * pmap_page_size))) {
13065 /*
13066 * Not exactly a functional failure, but this test relies on
13067 * there being a spare PTE slot we can use to pin the TT.
13068 */
13069 panic("Cannot pin translation table");
13070 }
13071
13072 /*
13073 * Create the wired mapping; this will prevent the pmap layer from
13074 * reclaiming our test TTs, which would interfere with this test
13075 * ("interfere" -> "make it panic").
13076 */
13077 pmap_enter_addr(pmap, wired_va_base, wired_pa, VM_PROT_READ, VM_PROT_READ, 0, true);
13078
13079 #if XNU_MONITOR
13080 /*
13081 * If the PPL is enabled, make sure that the kernel cannot write
13082 * to PPL memory.
13083 */
13084 if (!pmap_ppl_disable) {
13085 T_LOG("Validate that kernel cannot write to PPL memory.");
13086 pt_entry_t * ptep = pmap_pte(pmap, va_base);
13087 pmap_test_write(NULL, (vm_map_address_t)ptep, true);
13088 }
13089 #endif
13090
13091 /*
13092 * Create read-only mappings of the nonwired page; if the pmap does
13093 * not use the same page size as the kernel, create multiple mappings
13094 * so that the kernel page is fully mapped.
13095 */
13096 for (map_count = 0; map_count < page_ratio; map_count++) {
13097 pmap_enter_addr(pmap, va_base + (pmap_page_size * map_count), pa + (pmap_page_size * (map_count)), VM_PROT_READ, VM_PROT_READ, 0, false);
13098 }
13099
13100 /* Validate that all the PTEs have the expected PA and VA. */
13101 for (map_count = 0; map_count < page_ratio; map_count++) {
13102 pt_entry_t * ptep = pmap_pte(pmap, va_base + (pmap_page_size * map_count));
13103
13104 if (pte_to_pa(*ptep) != (pa + (pmap_page_size * map_count))) {
13105 T_FAIL("Unexpected pa=%p, expected %p, map_count=%u",
13106 (void*)pte_to_pa(*ptep), (void*)(pa + (pmap_page_size * map_count)), map_count);
13107 }
13108
13109 if (ptep_get_va(ptep) != (va_base + (pmap_page_size * map_count))) {
13110 T_FAIL("Unexpected va=%p, expected %p, map_count=%u",
13111 (void*)ptep_get_va(ptep), (void*)(va_base + (pmap_page_size * map_count)), map_count);
13112 }
13113 }
13114
13115 T_LOG("Validate that reads to our mapping do not fault.");
13116 pmap_test_read(pmap, va_base, false);
13117
13118 T_LOG("Validate that writes to our mapping fault.");
13119 pmap_test_write(pmap, va_base, true);
13120
13121 T_LOG("Make the first mapping writable.");
13122 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
13123
13124 T_LOG("Validate that writes to our mapping do not fault.");
13125 pmap_test_write(pmap, va_base, false);
13126
13127
13128 T_LOG("Make the first mapping XO.");
13129 pmap_enter_addr(pmap, va_base, pa, VM_PROT_EXECUTE, VM_PROT_EXECUTE, 0, false);
13130
13131 T_LOG("Validate that reads to our mapping do not fault.");
13132 pmap_test_read(pmap, va_base, false);
13133
13134 T_LOG("Validate that writes to our mapping fault.");
13135 pmap_test_write(pmap, va_base, true);
13136
13137
13138 /*
13139 * For page ratios of greater than 1: validate that writes to the other
13140 * mappings still fault. Remove the mappings afterwards (we're done
13141 * with page ratio testing).
13142 */
13143 for (map_count = 1; map_count < page_ratio; map_count++) {
13144 pmap_test_write(pmap, va_base + (pmap_page_size * map_count), true);
13145 pmap_remove(pmap, va_base + (pmap_page_size * map_count), va_base + (pmap_page_size * map_count) + pmap_page_size);
13146 }
13147
13148 T_LOG("Mark the page unreferenced and unmodified.");
13149 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13150 pmap_test_check_refmod(pa, 0);
13151
13152 /*
13153 * Begin testing the ref/mod state machine. Re-enter the mapping with
13154 * different protection/fault_type settings, and confirm that the
13155 * ref/mod state matches our expectations at each step.
13156 */
13157 T_LOG("!ref/!mod: read, no fault. Expect ref/!mod");
13158 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_NONE, 0, false);
13159 pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
13160
13161 T_LOG("!ref/!mod: read, read fault. Expect ref/!mod");
13162 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13163 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false);
13164 pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
13165
13166 T_LOG("!ref/!mod: rw, read fault. Expect ref/!mod");
13167 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13168 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_NONE, 0, false);
13169 pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
13170
13171 T_LOG("ref/!mod: rw, read fault. Expect ref/!mod");
13172 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ, 0, false);
13173 pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
13174
13175 T_LOG("!ref/!mod: rw, rw fault. Expect ref/mod");
13176 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13177 pmap_enter_addr(pmap, va_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
13178 pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
13179
13180 /*
13181 * Shared memory testing; we'll have two mappings; one read-only,
13182 * one read-write.
13183 */
13184 vm_map_address_t rw_base = va_base;
13185 vm_map_address_t ro_base = va_base + pmap_page_size;
13186
13187 pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
13188 pmap_enter_addr(pmap, ro_base, pa, VM_PROT_READ, VM_PROT_READ, 0, false);
13189
13190 /*
13191 * Test that we take faults as expected for unreferenced/unmodified
13192 * pages. Also test the arm_fast_fault interface, to ensure that
13193 * mapping permissions change as expected.
13194 */
13195 T_LOG("!ref/!mod: expect no access");
13196 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13197 pmap_test_read_write(pmap, ro_base, false, false);
13198 pmap_test_read_write(pmap, rw_base, false, false);
13199
13200 T_LOG("Read fault; expect !ref/!mod -> ref/!mod, read access");
13201 arm_fast_fault(pmap, rw_base, VM_PROT_READ, false, false);
13202 pmap_test_check_refmod(pa, VM_MEM_REFERENCED);
13203 pmap_test_read_write(pmap, ro_base, true, false);
13204 pmap_test_read_write(pmap, rw_base, true, false);
13205
13206 T_LOG("Write fault; expect ref/!mod -> ref/mod, read and write access");
13207 arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false);
13208 pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
13209 pmap_test_read_write(pmap, ro_base, true, false);
13210 pmap_test_read_write(pmap, rw_base, true, true);
13211
13212 T_LOG("Write fault; expect !ref/!mod -> ref/mod, read and write access");
13213 pmap_clear_refmod(pn, VM_MEM_MODIFIED | VM_MEM_REFERENCED);
13214 arm_fast_fault(pmap, rw_base, VM_PROT_READ | VM_PROT_WRITE, false, false);
13215 pmap_test_check_refmod(pa, VM_MEM_REFERENCED | VM_MEM_MODIFIED);
13216 pmap_test_read_write(pmap, ro_base, true, false);
13217 pmap_test_read_write(pmap, rw_base, true, true);
13218
13219 T_LOG("RW protect both mappings; should not change protections.");
13220 pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE);
13221 pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ | VM_PROT_WRITE);
13222 pmap_test_read_write(pmap, ro_base, true, false);
13223 pmap_test_read_write(pmap, rw_base, true, true);
13224
13225 T_LOG("Read protect both mappings; RW mapping should become RO.");
13226 pmap_protect(pmap, ro_base, ro_base + pmap_page_size, VM_PROT_READ);
13227 pmap_protect(pmap, rw_base, rw_base + pmap_page_size, VM_PROT_READ);
13228 pmap_test_read_write(pmap, ro_base, true, false);
13229 pmap_test_read_write(pmap, rw_base, true, false);
13230
13231 T_LOG("RW protect the page; mappings should not change protections.");
13232 pmap_enter_addr(pmap, rw_base, pa, VM_PROT_READ | VM_PROT_WRITE, VM_PROT_READ | VM_PROT_WRITE, 0, false);
13233 pmap_page_protect(pn, VM_PROT_ALL);
13234 pmap_test_read_write(pmap, ro_base, true, false);
13235 pmap_test_read_write(pmap, rw_base, true, true);
13236
13237 T_LOG("Read protect the page; RW mapping should become RO.");
13238 pmap_page_protect(pn, VM_PROT_READ);
13239 pmap_test_read_write(pmap, ro_base, true, false);
13240 pmap_test_read_write(pmap, rw_base, true, false);
13241
13242 T_LOG("Validate that disconnect removes all known mappings of the page.");
13243 pmap_disconnect(pn);
13244 if (!pmap_verify_free(pn)) {
13245 T_FAIL("Page still has mappings");
13246 }
13247
13248 T_LOG("Remove the wired mapping, so we can tear down the test map.");
13249 pmap_remove(pmap, wired_va_base, wired_va_base + pmap_page_size);
13250 pmap_destroy(pmap);
13251
13252 T_LOG("Release the pages back to the VM.");
13253 vm_page_lock_queues();
13254 vm_page_free(unwired_vm_page);
13255 vm_page_free(wired_vm_page);
13256 vm_page_unlock_queues();
13257
13258 T_LOG("Testing successful!");
13259 return 0;
13260 }
13261 #endif /* __arm64__ */
13262
13263 kern_return_t
13264 pmap_test(void)
13265 {
13266 T_LOG("Starting pmap_tests");
13267 #ifdef __arm64__
13268 int flags = 0;
13269 flags |= PMAP_CREATE_64BIT;
13270
13271 #if __ARM_MIXED_PAGE_SIZE__
13272 T_LOG("Testing VM_PAGE_SIZE_4KB");
13273 pmap_test_test_config(flags | PMAP_CREATE_FORCE_4K_PAGES);
13274 T_LOG("Testing VM_PAGE_SIZE_16KB");
13275 pmap_test_test_config(flags);
13276 #else /* __ARM_MIXED_PAGE_SIZE__ */
13277 pmap_test_test_config(flags);
13278 #endif /* __ARM_MIXED_PAGE_SIZE__ */
13279
13280 #endif /* __arm64__ */
13281 T_PASS("completed pmap_test successfully");
13282 return KERN_SUCCESS;
13283 }
13284 #endif /* CONFIG_XNUPOST */
13285
13286 /*
13287 * The following function should never make it to RELEASE code, since
13288 * it provides a way to get the PPL to modify text pages.
13289 */
13290 #if DEVELOPMENT || DEBUG
13291
13292 #define ARM_UNDEFINED_INSN 0xe7f000f0
13293 #define ARM_UNDEFINED_INSN_THUMB 0xde00
13294
13295 /**
13296 * Forcibly overwrite executable text with an illegal instruction.
13297 *
13298 * @note Only used for xnu unit testing.
13299 *
13300 * @param pa The physical address to corrupt.
13301 *
13302 * @return KERN_SUCCESS on success.
13303 */
13304 kern_return_t
13305 pmap_test_text_corruption(pmap_paddr_t pa)
13306 {
13307 #if XNU_MONITOR
13308 return pmap_test_text_corruption_ppl(pa);
13309 #else /* XNU_MONITOR */
13310 return pmap_test_text_corruption_internal(pa);
13311 #endif /* XNU_MONITOR */
13312 }
13313
13314 MARK_AS_PMAP_TEXT kern_return_t
13315 pmap_test_text_corruption_internal(pmap_paddr_t pa)
13316 {
13317 vm_offset_t va = phystokv(pa);
13318 unsigned int pai = pa_index(pa);
13319
13320 assert(pa_valid(pa));
13321
13322 pvh_lock(pai);
13323
13324 pv_entry_t **pv_h = pai_to_pvh(pai);
13325 assert(!pvh_test_type(pv_h, PVH_TYPE_NULL));
13326 #if defined(PVH_FLAG_EXEC)
13327 const bool need_ap_twiddle = pvh_get_flags(pv_h) & PVH_FLAG_EXEC;
13328
13329 if (need_ap_twiddle) {
13330 pmap_set_ptov_ap(pai, AP_RWNA, FALSE);
13331 }
13332 #endif /* defined(PVH_FLAG_EXEC) */
13333
13334 /*
13335 * The low bit in an instruction address indicates a THUMB instruction
13336 */
13337 if (va & 1) {
13338 va &= ~(vm_offset_t)1;
13339 *(uint16_t *)va = ARM_UNDEFINED_INSN_THUMB;
13340 } else {
13341 *(uint32_t *)va = ARM_UNDEFINED_INSN;
13342 }
13343
13344 #if defined(PVH_FLAG_EXEC)
13345 if (need_ap_twiddle) {
13346 pmap_set_ptov_ap(pai, AP_RONA, FALSE);
13347 }
13348 #endif /* defined(PVH_FLAG_EXEC) */
13349
13350 InvalidatePoU_IcacheRegion(va, sizeof(uint32_t));
13351
13352 pvh_unlock(pai);
13353
13354 return KERN_SUCCESS;
13355 }
13356
13357 #endif /* DEVELOPMENT || DEBUG */
13358