/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

#if KASAN_CLASSIC
#include "kasan-classic-arm64.h"
#elif KASAN_TBI
#include "kasan-tbi-arm64.h"
_Static_assert((VM_MEMTAG_PTR_SIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS), "Kernel pointers leave no room for tagging");
#else /* KASAN_CLASSIC || KASAN_TBI */
#error "No model defined for the shadow table"
#endif /* KASAN_CLASSIC || KASAN_TBI */

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */

extern thread_t kasan_lock_holder;

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t unmutable_valid_access_page;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

static lck_grp_t kasan_vm_lock_grp;
static lck_ticket_t kasan_vm_lock;

#if CONFIG_SPTM
void kasan_bootstrap(boot_args *, vm_offset_t pgtable, sptm_bootstrap_args_xnu_t *sptm_boot_args);
#else
void kasan_bootstrap(boot_args *, vm_offset_t pgtable);
#endif /* CONFIG_SPTM */

_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
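
/*
 * A kernel VA maps to its shadow at (addr >> KASAN_SCALE) + KASAN_OFFSET
 * (see SHADOW_FOR_ADDRESS()). The asserts above check that this
 * transformation keeps every kernel VA inside [KASAN_SHADOW_MIN,
 * KASAN_SHADOW_MAX) and that the shadow region does not overlap kernel VM.
 */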

#define KASAN_ARM64_MAP_STATIC_VALID_PAGE       0x1
#define KASAN_ARM64_PREALLOCATE_L1L2            0x2
#define KASAN_ARM64_NO_PHYSMAP                  0x4

#define KASAN_ARM64_MAP                         (0)
#define KASAN_ARM64_STATIC_VALID_MAP            (KASAN_ARM64_MAP | KASAN_ARM64_MAP_STATIC_VALID_PAGE)
#define KASAN_ARM64_PREALLOCATE_TRANSLATION     (KASAN_ARM64_PREALLOCATE_L1L2)
#define KASAN_ARM64_MAP_EARLY                   (KASAN_ARM64_MAP | KASAN_ARM64_NO_PHYSMAP)
#define KASAN_ARM64_MAP_STATIC_EARLY            (KASAN_ARM64_STATIC_VALID_MAP | KASAN_ARM64_NO_PHYSMAP)
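
/*
 * Common flag combinations: *_EARLY variants run before the physmap is
 * available (physical addresses are touched directly), while *_STATIC
 * variants back the shadow with the shared read-only "all valid" page
 * instead of allocating fresh writable shadow memory.
 */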


/*
 * KASAN runs both early on, when the 1:1 mapping hasn't been established yet,
 * and later when memory management is fully set up. This internal version of
 * phystokv switches between accessing physical memory directly and using the
 * physmap.
 */
static vm_map_address_t
kasan_arm64_phystokv(uintptr_t pa, __unused bool early)
{
#if CONFIG_SPTM
	return phystokv(pa);
#else
	return early ? (pa) : phystokv(pa);
#endif /* CONFIG_SPTM */
}

#if CONFIG_SPTM
static uintptr_t
kasan_arm64_kvtophys(vm_map_address_t va)
{
	sptm_paddr_t pa;
	if (sptm_kvtophys(va, &pa) != LIBSPTM_SUCCESS) {
		return 0;
	}

	return (uintptr_t)pa;
}
#endif /* CONFIG_SPTM */

/*
 * Physical pages used to back the shadow table are stolen early at boot
 * and then managed in a simple, linear fashion.
 */
static uintptr_t
kasan_arm64_alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
kasan_arm64_alloc_zero_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	__nosan_bzero((void *)kasan_arm64_phystokv(mem, early), ARM_PGBYTES);

#if CONFIG_SPTM
	/* Retype the frame so that we can later map it via the SPTM */
	sptm_retype_params_t retype_params = { .level = 3 };
	sptm_retype((sptm_paddr_t)mem, XNU_DEFAULT, XNU_PAGE_TABLE, retype_params);
#endif /* CONFIG_SPTM */

	return mem;
}

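/*
 * Allocate a shadow page and pre-fill it with the "all accesses valid"
 * pattern of the configured KASAN model.
 */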
static uintptr_t
kasan_arm64_alloc_valid_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

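/* Expand the (address, size) range in place so that it covers whole ARM pages. */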
static void
kasan_arm64_align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

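/*
 * Translation table walkers: given the kernel virtual address of an L1/L2/L3
 * table, return a pointer to the entry that covers `address`.
 */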
static uint64_t *
kasan_arm64_lookup_l1(uint64_t *base, vm_offset_t address)
{
	return base + L1_TABLE_T1_INDEX(address, TCR_EL1_BOOT);
}

static uint64_t *
kasan_arm64_lookup_l2(uint64_t *base, vm_offset_t address)
{
	return base + L2_TABLE_INDEX(address);
}

static uint64_t *
kasan_arm64_lookup_l3(uint64_t *base, vm_offset_t address)
{
	return base + L3_TABLE_INDEX(address);
}

/*
 * kasan_arm64_pte_map() is the heart of the arch-specific handling of the
 * shadow table. It walks the existing page tables that map shadow ranges and
 * allocates/creates valid entries as required. Options are:
 *  - static_valid: instead of creating a new backing shadow page, point to
 *    the 'full valid access' one created early at boot.
 *  - preallocate_translation_only: do not add the final shadow table entry, but
 *    only add the L1/L2 pages for a valid translation.
 *  - early: xnu is running before the VM is fully set up, so handle physical
 *    addresses directly instead of going through the physmap.
 */
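/*
 * Note: under CONFIG_SPTM, new table and leaf entries are installed through
 * sptm_map_table()/sptm_map_page() rather than by writing the PTEs directly.
 */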
static void
kasan_arm64_pte_map(vm_offset_t shadow_base, uint64_t *base, uint8_t options)
{
#if CONFIG_SPTM
	const sptm_paddr_t root_pt_paddr = (sptm_paddr_t)kasan_arm64_kvtophys((vm_map_address_t)base);
	const sptm_vaddr_t vaddr = (sptm_vaddr_t)(shadow_base & ~PAGE_MASK);
#endif /* CONFIG_SPTM */

	bool early = options & KASAN_ARM64_NO_PHYSMAP;
	uint64_t *pte;

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadow_base);
#if CONFIG_SPTM
	assert((*pte & ARM_TTE_VALID) && ((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE));
#else
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}
#endif /* CONFIG_SPTM */

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
#if CONFIG_SPTM
		const sptm_tte_t tte = (sptm_tte_t)((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;

		sptm_map_table(root_pt_paddr, vaddr, 2, tte);
#else
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
#endif /* CONFIG_SPTM */
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	if (options & KASAN_ARM64_PREALLOCATE_L1L2) {
		return;
	}

	bool static_valid = options & KASAN_ARM64_MAP_STATIC_VALID_PAGE;

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadow_base);

	if ((*pte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_VALID) {
		bool pte_rona = (*pte & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RONA);
		if (!pte_rona || static_valid) {
			return;
		}
	}

	/* create new L3 entry */
	uint64_t newpte;
	if (static_valid) {
		/* map the shared fully-valid page read-only */
		newpte = (uint64_t)unmutable_valid_access_page | ARM_PTE_AP(AP_RONA);
	} else {
		newpte = (uint64_t)kasan_arm64_alloc_valid_page(early) | ARM_PTE_AP(AP_RWNA);
	}

	newpte |= ARM_PTE_TYPE_VALID
	    | ARM_PTE_AF
	    | ARM_PTE_SH(SH_OUTER_MEMORY)
	    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
	    | ARM_PTE_NX
	    | ARM_PTE_PNX;

#if CONFIG_SPTM
	/* Unmap the page first if the valid page was previously mapped */
	if ((*pte & ARM_PTE_TYPE_MASK) == ARM_PTE_TYPE_VALID) {
		sptm_unmap_region(root_pt_paddr, vaddr, 1, 0);
	}

	/* Perform the new mapping */
	sptm_return_t ret = sptm_map_page(root_pt_paddr, vaddr, newpte);
	assert(ret == SPTM_SUCCESS);
#else
	*pte = newpte;
#endif /* CONFIG_SPTM */
}

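/*
 * Populate the shadow mapping for [address, address + size): one call to
 * kasan_arm64_pte_map() per shadow page, followed by a TLB flush so the new
 * entries take effect.
 */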
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, uint8_t options)
{
	size = (size + KASAN_SIZE_ALIGNMENT) & ~KASAN_SIZE_ALIGNMENT;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & KASAN_SIZE_ALIGNMENT) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		kasan_arm64_pte_map(shadow_base, cpu_tte, options);
	}

	flush_mmu_tlb();
}

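/*
 * Arch hook to shadow-map a kernel VA range. With `static_valid` (and, under
 * KASAN_LIGHT, for ranges not owned by the zone maps) the range is backed by
 * the shared read-only fully-valid page instead of writable shadow pages.
 */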
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool static_valid)
{
	uint8_t options = KASAN_ARM64_MAP;

	if (static_valid) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#if KASAN_LIGHT
	} else if (!kasan_zone_maps_owned(address, size)) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#endif /* KASAN_LIGHT */
	}

	kasan_map_shadow_internal(address, size, options);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_arm64_do_map_shadow_early(vm_offset_t address, vm_size_t size, uint8_t options)
{
	kasan_arm64_align_to_page(&address, &size);
	vm_size_t j;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		kasan_arm64_pte_map(virt_shadow_target, (uint64_t *)bootstrap_pgtable_phys, options);
	}

	flush_mmu_tlb();
}


static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_EARLY);
}

static void
kasan_map_shadow_static_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_STATIC_EARLY);
}

void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR) || defined(KERNEL_INTEGRITY_PV_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, KASAN_ARM64_PREALLOCATE_TRANSLATION);
#endif
}

#if CONFIG_SPTM
/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init().
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable, sptm_bootstrap_args_xnu_t *sptm_boot_args)
{
	uintptr_t tosteal;
	vm_address_t pbase = args->physBase;
	kernel_vbase = sptm_boot_args->executables_papt_start;
	kernel_vtop = sptm_boot_args->executables_papt_end;

	/* Reserve physical memory at the end for KASAN shadow table and quarantines */
	extern uint64_t memSize;
	tosteal = (memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	/* Make it disappear from xnu view */
	memSize -= tosteal;
	shadow_pbase = vm_map_round_page(pbase + memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);
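
	/*
	 * Illustrative sizing (hypothetical numbers; the actual values come
	 * from STOLEN_MEM_PERCENT and STOLEN_MEM_BYTES): with 8 GiB of DRAM
	 * and a 25% steal, roughly 2 GiB at the top of physical memory would
	 * be hidden from xnu and handed out page by page through
	 * kasan_arm64_alloc_page().
	 */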

	/*
	 * Set aside a page to represent all those regions that allow any
	 * access and that won't mutate over their lifetime.
	 */
	unmutable_valid_access_page = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(kasan_arm64_phystokv(unmutable_valid_access_page, false), ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;

	/* Blanket map all of what we got from iBoot, as we'd later do in kasan_init() */
	const size_t size_to_map = phystokv(sptm_boot_args->first_avail_phys) - sptm_boot_args->physmap_base;
	kasan_map_shadow_static_early(sptm_boot_args->physmap_base, size_to_map);

#if ARM_LARGE_MEMORY
	/*
	 * Large memory systems map available memory first, everything else after.
	 * Due to this, the above call to kasan_map_shadow_static_early() will only
	 * cover memory allocated during early bootstrap, and not all of the iBoot-loaded
	 * images. Map the rest here.
	 */
	kasan_map_shadow_static_early(sptm_boot_args->executables_papt_start,
	    sptm_boot_args->executables_papt_end - sptm_boot_args->executables_papt_start);
#endif /* ARM_LARGE_MEMORY */

	vm_offset_t intstack_virt = (vm_offset_t)&intstack;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size);
	kasan_map_shadow_early(excepstack_virt, excepstack_size);

	/* Upgrade the deviceTree mapping if necessary */
	if ((vm_offset_t)args->deviceTreeP < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength);
	}
}
#else
/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;
	/* Physical-to-virtual offset of the bootstrap kernel mapping */
	vm_address_t p2v = args->virtBase - args->physBase;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	kernel_vbase = args->virtBase;
	kernel_vtop = kernel_vbase + ptop - pbase;

	/* Reserve physical memory at the end for KASAN shadow table and quarantines */
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	/* Make it disappear from xnu view */
	args->memSize -= tosteal;

	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/*
	 * Set aside a page to represent all those regions that allow any
	 * access and that won't mutate over their lifetime.
	 */
	unmutable_valid_access_page = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(unmutable_valid_access_page, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	/* Blanket map all of what we got from iBoot, as we'd later do in kasan_init() */
	kasan_map_shadow_static_early(kernel_vbase, args->memSize);

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size);
	kasan_map_shadow_early(excepstack_virt, excepstack_size);

	/* Upgrade the deviceTree mapping if necessary */
	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength);
	}
}
#endif /* CONFIG_SPTM */

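/*
 * Walk the live page tables (cpu_tte) and report whether the given shadow
 * address currently has a valid L3 mapping.
 */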
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadowp);
	if ((*pte & ARM_PTE_TYPE_MASK) != ARM_PTE_TYPE_VALID) {
		return false;
	}

	return true;
}

void
kasan_lock_init(void)
{
	lck_grp_init(&kasan_vm_lock_grp, "kasan lock", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&kasan_vm_lock, &kasan_vm_lock_grp);
}

/*
 * KASAN may be called from interrupt context, so we disable interrupts to
 * ensure atomicity when manipulating the global state.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	lck_ticket_lock(&kasan_vm_lock, &kasan_vm_lock_grp);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	lck_ticket_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}