xref: /xnu-8020.121.3/san/memory/kasan-arm64.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

#if KASAN_CLASSIC
#include "kasan-classic-arm64.h"
#elif KASAN_TBI
#include "kasan-tbi-arm64.h"
_Static_assert((KASAN_TBI_ADDR_SIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS), "Kernel pointers leave no room for tagging");
#else /* KASAN_CLASSIC || KASAN_TBI */
#error "No model defined for the shadow table"
#endif /* KASAN_CLASSIC || KASAN_TBI */

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t unmutable_valid_access_page;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

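/*
 * The shadow byte for a kernel address lives at (addr >> KASAN_SCALE) + KASAN_OFFSET
 * (this mirrors how SHADOW_FOR_ADDRESS, used below, derives shadow addresses).
 * The asserts below check that the shadow for the entire kernel VA range lands
 * inside [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX) and that the shadow region does
 * not overlap kernel VM.
 */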
_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");

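/*
 * Option bits for kasan_arm64_pte_map(), combined below into the mapping modes
 * used by the rest of this file.
 */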
#define KASAN_ARM64_MAP_STATIC_VALID_PAGE       0x1
#define KASAN_ARM64_PREALLOCATE_L1L2            0x2
#define KASAN_ARM64_NO_PHYSMAP                  0x4

#define KASAN_ARM64_MAP                         (0)
#define KASAN_ARM64_STATIC_VALID_MAP            (KASAN_ARM64_MAP | KASAN_ARM64_MAP_STATIC_VALID_PAGE)
#define KASAN_ARM64_PREALLOCATE_TRANSLATION     (KASAN_ARM64_PREALLOCATE_L1L2)
#define KASAN_ARM64_MAP_EARLY                   (KASAN_ARM64_MAP | KASAN_ARM64_NO_PHYSMAP)
#define KASAN_ARM64_MAP_STATIC_EARLY            (KASAN_ARM64_STATIC_VALID_MAP | KASAN_ARM64_NO_PHYSMAP)


/*
 * KASAN runs both early on, when the 1:1 mapping hasn't been established yet,
 * and later when memory management is fully set up. This internal version of
 * phystokv switches between accessing physical memory directly and using the
 * physmap.
 */
static vm_map_address_t
kasan_arm64_phystokv(uintptr_t pa, bool early)
{
	return early ? (pa) : phystokv(pa);
}

/*
 * Physical pages used to back the shadow table are stolen early at boot
 * and later managed in a fairly simple, linear fashion.
 */
static uintptr_t
kasan_arm64_alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

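/*
 * Zero-filled pages back the intermediate translation tables allocated by
 * kasan_arm64_pte_map().
 */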
static uintptr_t
kasan_arm64_alloc_zero_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	__nosan_bzero((void *)kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

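/*
 * Shadow backing pages start out filled with the "all accesses valid" pattern,
 * so a freshly mapped shadow page permits every access until it gets poisoned.
 */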
static uintptr_t
kasan_arm64_alloc_valid_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

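/* Grow [*addrp, *addrp + *sizep) outward to page boundaries. */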
static void
kasan_arm64_align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

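/*
 * Return a pointer to the translation table entry for 'address' at the given
 * level, using that level's index bits of the virtual address.
 */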
static uint64_t *
kasan_arm64_lookup_l1(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l2(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l3(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
}

/*
 * kasan_arm64_pte_map() is the heart of the arch-specific handling of the shadow
 * table. It walks the existing page tables that map shadow ranges and
 * allocates/creates valid entries as required. Options are:
 *  - static_valid: instead of creating a new backing shadow page, point to
 *    the 'full valid access' one created early at boot.
 *  - preallocate_translation_only: do not add the final shadow table entry, but
 *    only add the L1/L2 pages for a valid translation.
 *  - early: xnu is running before the VM is fully set up, so handle physical
 *    addresses directly instead of going through the physmap.
 */
static void
kasan_arm64_pte_map(vm_offset_t shadow_base, uint64_t *base, uint8_t options)
{
	uint64_t *pte;

	bool static_valid = options & KASAN_ARM64_MAP_STATIC_VALID_PAGE;
	bool preallocate_translation_only = options & KASAN_ARM64_PREALLOCATE_L1L2;
	bool early = options & KASAN_ARM64_NO_PHYSMAP;

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	if (preallocate_translation_only) {
		return;
	}

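	/*
	 * A shadow page that currently points at the shared read-only "all valid"
	 * page can be upgraded here to a private writable page; any other valid
	 * mapping is left untouched.
	 */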
	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadow_base);
	if ((*pte & ARM_PTE_TYPE_VALID) &&
	    ((((*pte) & ARM_PTE_APMASK) != ARM_PTE_AP(AP_RONA)) || static_valid)) {
		/* nothing to do - page already mapped and we are not upgrading */
	} else {
		/* create new L3 entry */
		uint64_t newpte;
		if (static_valid) {
			/* map the zero page RO */
			newpte = (uint64_t)unmutable_valid_access_page | ARM_PTE_AP(AP_RONA);
		} else {
			newpte = (uint64_t)kasan_arm64_alloc_valid_page(early) | ARM_PTE_AP(AP_RWNA);
		}
		newpte |= ARM_PTE_TYPE_VALID
		    | ARM_PTE_AF
		    | ARM_PTE_SH(SH_OUTER_MEMORY)
		    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
		    | ARM_PTE_NX
		    | ARM_PTE_PNX;
		*pte = newpte;
	}
}

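/*
 * Map shadow for [address, address + size): round the corresponding shadow
 * range out to page boundaries, walk it one shadow page at a time through
 * kasan_arm64_pte_map(), then flush the TLB once at the end.
 */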
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, uint8_t options)
{
	size = (size + KASAN_SIZE_ALIGNMENT) & ~KASAN_SIZE_ALIGNMENT;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & KASAN_SIZE_ALIGNMENT) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		kasan_arm64_pte_map(shadow_base, cpu_tte, options);
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool static_valid)
{
	uint8_t options = KASAN_ARM64_MAP;

	if (static_valid) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
	}

	kasan_map_shadow_internal(address, size, options);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
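/*
 * Early variants: the VM is not up yet, so walk the bootstrap page tables by
 * their physical addresses (KASAN_ARM64_NO_PHYSMAP) rather than through the
 * physmap, using the page table root handed to kasan_bootstrap().
 */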
static void
kasan_arm64_do_map_shadow_early(vm_offset_t address, vm_size_t size, uint8_t options)
{
	kasan_arm64_align_to_page(&address, &size);
	vm_size_t j;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		kasan_arm64_pte_map(virt_shadow_target, (uint64_t *)bootstrap_pgtable_phys, options);
	}

	flush_mmu_tlb();
}


static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_EARLY);
}

static void
kasan_map_shadow_static_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_STATIC_EARLY);
}

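/*
 * Arch-specific setup once the VM is available: the physical aperture gets the
 * shared read-only "all valid" shadow page, and KTRR/CTRR systems pre-build the
 * shadow translation tables for the whole kernel VA range up front.
 */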
void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, KASAN_ARM64_PREALLOCATE_TRANSLATION);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;
	/* Base address for the virtual identity mapping */
	vm_address_t p2v = args->virtBase - args->physBase;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	kernel_vbase = args->virtBase;
	kernel_vtop = kernel_vbase + ptop - pbase;

	/* Reserve physical memory at the end for KASAN shadow table and quarantines */
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);

	/* Make it disappear from xnu view */
	args->memSize -= tosteal;

	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/*
	 * Set aside a page to represent all those regions that allow any
	 * access and that won't mutate over their lifetime.
	 */
	unmutable_valid_access_page = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(unmutable_valid_access_page, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	/* Blanket map all of what we got from iBoot, as we'd later do in kasan_init() */
	kasan_map_shadow_static_early(kernel_vbase, args->memSize);

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

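	/*
	 * The interrupt and exception stacks get their own writable shadow pages
	 * (rather than the shared read-only valid page), since instrumented code
	 * running on them needs to poison and unpoison stack redzones before
	 * kasan_init() has run.
	 */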
	kasan_map_shadow_early(intstack_virt, intstack_size);
	kasan_map_shadow_early(excepstack_virt, excepstack_size);

	/* Upgrade the deviceTree mapping if necessary */
	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength);
	}
}

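/*
 * Report whether the shadow byte at 'shadowp' is backed by a mapping, by
 * walking the live translation tables by hand (L1 -> L2 -> L3).
 */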
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadowp);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}