/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

#if KASAN_CLASSIC
#include "kasan-classic-arm64.h"
#elif KASAN_TBI
#include "kasan-tbi-arm64.h"
_Static_assert((KASAN_TBI_ADDR_SIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS), "Kernel pointers leave no room for tagging");
#else /* KASAN_CLASSIC || KASAN_TBI */
#error "No model defined for the shadow table"
#endif /* KASAN_CLASSIC || KASAN_TBI */

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */

extern thread_t kasan_lock_holder;

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t unmutable_valid_access_page;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

static lck_ticket_t kasan_vm_lock;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
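
/*
 * For reference, the asserts above spell out the linear address-to-shadow
 * mapping that SHADOW_FOR_ADDRESS() computes:
 *
 *   shadow(addr) = (addr >> KASAN_SCALE) + KASAN_OFFSET_ARM64
 *
 * As an illustrative example, with the classic ASan granule (KASAN_SCALE ==
 * 3) every 8 bytes of kernel VA share one shadow byte; the actual scale
 * depends on the KASAN_CLASSIC/KASAN_TBI model selected above.
 */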

#define KASAN_ARM64_MAP_STATIC_VALID_PAGE       0x1
#define KASAN_ARM64_PREALLOCATE_L1L2            0x2
#define KASAN_ARM64_NO_PHYSMAP                  0x4

#define KASAN_ARM64_MAP                         (0)
#define KASAN_ARM64_STATIC_VALID_MAP            (KASAN_ARM64_MAP | KASAN_ARM64_MAP_STATIC_VALID_PAGE)
#define KASAN_ARM64_PREALLOCATE_TRANSLATION     (KASAN_ARM64_PREALLOCATE_L1L2)
#define KASAN_ARM64_MAP_EARLY                   (KASAN_ARM64_MAP | KASAN_ARM64_NO_PHYSMAP)
#define KASAN_ARM64_MAP_STATIC_EARLY            (KASAN_ARM64_STATIC_VALID_MAP | KASAN_ARM64_NO_PHYSMAP)
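
/*
 * Quick reference for the composites above (summarizing the
 * kasan_arm64_pte_map() description further down):
 *  - KASAN_ARM64_MAP: back the shadow range with freshly allocated RW pages.
 *  - KASAN_ARM64_STATIC_VALID_MAP: point the shadow at the shared, read-only
 *    'full valid access' page instead.
 *  - KASAN_ARM64_PREALLOCATE_TRANSLATION: populate L1/L2 only, no L3 entries.
 *  - *_EARLY variants: same, but before the physmap is available.
 */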

/*
 * KASAN runs both early on, before the 1:1 physmap has been established,
 * and later when memory management is fully set up. This internal version of
 * phystokv switches between accessing physical memory directly and going
 * through the physmap.
 */
static vm_map_address_t
kasan_arm64_phystokv(uintptr_t pa, bool early)
{
	return early ? (pa) : phystokv(pa);
}

/*
 * Physical pages backing the shadow table are stolen early at boot and
 * then managed in a fairly simple, linear fashion.
 */
static uintptr_t
kasan_arm64_alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}
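
/*
 * Note: this is a simple bump allocator over the stolen physical range;
 * pages are never freed. shadow_pages_used (above) and shadow_pages_total
 * (set in kasan_bootstrap()) only account for consumption.
 */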

static uintptr_t
kasan_arm64_alloc_zero_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	__nosan_bzero((void *)kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

static uintptr_t
kasan_arm64_alloc_valid_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

static void
kasan_arm64_align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}
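
/*
 * Worked example (illustrative, assuming 16KiB pages, ARM_PGBYTES == 0x4000):
 * for *addrp == 0xfffffff012344010 and *sizep == 0x20, the address truncates
 * to 0xfffffff012344000 and the size absorbs the 0x10 of slack and rounds up
 * to 0x4000, so the aligned range still covers the original one.
 */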

static uint64_t *
kasan_arm64_lookup_l1(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l2(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l3(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
}
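
/*
 * All three lookups share the same pattern: mask out the index bits for the
 * given translation level, shift them down, and index the table, i.e.
 *
 *   index = (address & ARM_TT_Lx_INDEX_MASK) >> ARM_TT_Lx_SHIFT;
 *   entry = &base[index];
 *
 * where base is the KASAN-accessible address of that level's table.
 */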

/*
 * kasan_arm64_pte_map() is the heart of the arch-specific handling of the
 * shadow table. It walks the existing page tables that map shadow ranges and
 * allocates/creates valid entries as required. Options are:
 *  - static_valid: instead of creating a new backing shadow page, point to
 *    the 'full valid access' one created early at boot.
 *  - preallocate_translation_only: do not add the final shadow table entry,
 *    only add the L1/L2 pages for a valid translation.
 *  - early: xnu is running before the VM is fully set up, so handle physical
 *    addresses directly instead of going through the physmap.
 */
static void
kasan_arm64_pte_map(vm_offset_t shadow_base, uint64_t *base, uint8_t options)
{
	bool early = options & KASAN_ARM64_NO_PHYSMAP;
	uint64_t *pte;

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	if (options & KASAN_ARM64_PREALLOCATE_L1L2) {
		return;
	}

	bool static_valid = options & KASAN_ARM64_MAP_STATIC_VALID_PAGE;

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadow_base);

	if (*pte & ARM_PTE_TYPE_VALID) {
		bool pte_rona = (*pte & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RONA);
		if (!pte_rona || static_valid) {
			return;
		}
	}

	/* create new L3 entry */
	uint64_t newpte;
	if (static_valid) {
		/* map the shared 'all valid' page RO */
		newpte = (uint64_t)unmutable_valid_access_page | ARM_PTE_AP(AP_RONA);
	} else {
		newpte = (uint64_t)kasan_arm64_alloc_valid_page(early) | ARM_PTE_AP(AP_RWNA);
	}

	newpte |= ARM_PTE_TYPE_VALID
	    | ARM_PTE_AF
	    | ARM_PTE_SH(SH_OUTER_MEMORY)
	    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
	    | ARM_PTE_NX
	    | ARM_PTE_PNX;
	*pte = newpte;
}
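
/*
 * Note that kasan_arm64_pte_map() never invalidates TLB entries itself:
 * callers make one call per shadow page and then issue a single
 * flush_mmu_tlb(), as kasan_map_shadow_internal() and
 * kasan_arm64_do_map_shadow_early() do below.
 */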

static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, uint8_t options)
{
	size = (size + KASAN_SIZE_ALIGNMENT) & ~KASAN_SIZE_ALIGNMENT;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & KASAN_SIZE_ALIGNMENT) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		kasan_arm64_pte_map(shadow_base, cpu_tte, options);
	}

	flush_mmu_tlb();
}

void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool static_valid)
{
	uint8_t options = KASAN_ARM64_MAP;

	if (static_valid) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#if KASAN_LIGHT
	} else if (!kasan_zone_maps_owned(address, size)) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#endif /* KASAN_LIGHT */
	}

	kasan_map_shadow_internal(address, size, options);
}
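
/*
 * Usage sketch (illustrative): a range that needs real, writable shadow is
 * mapped with
 *
 *   kasan_map_shadow(addr, size, false);
 *
 * whereas an always-valid, never-poisoned range (such as the physical
 * aperture mapped in kasan_arch_init() below) passes static_valid == true
 * and shares the single read-only page.
 */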

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_arm64_do_map_shadow_early(vm_offset_t address, vm_size_t size, uint8_t options)
{
	kasan_arm64_align_to_page(&address, &size);
	vm_size_t j;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		kasan_arm64_pte_map(virt_shadow_target, (uint64_t *)bootstrap_pgtable_phys, options);
	}

	flush_mmu_tlb();
}

static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_EARLY);
}

static void
kasan_map_shadow_static_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_STATIC_EARLY);
}

void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, KASAN_ARM64_PREALLOCATE_TRANSLATION);
#endif
}

/*
 * Steal memory for the shadow, and shadow-map the bootstrap page tables so we
 * can run until kasan_init(). Called while running with the identity (V=P)
 * map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;
	/* Offset to convert physical addresses to the bootstrap virtual mapping */
	vm_address_t p2v = args->virtBase - args->physBase;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	kernel_vbase = args->virtBase;
	kernel_vtop = kernel_vbase + ptop - pbase;

	/* Reserve physical memory at the end for KASAN shadow table and quarantines */
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
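
	/*
	 * Illustrative sizing with hypothetical numbers: if STOLEN_MEM_PERCENT
	 * were 25 and STOLEN_MEM_BYTES 0, a 4GiB memSize would reserve 1GiB
	 * (truncated to a page multiple) for shadow pages and quarantines.
	 */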

	/* Make it disappear from xnu's view */
	args->memSize -= tosteal;

	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/*
	 * Set aside a page to represent all those regions that allow any
	 * access and that won't mutate over their lifetime.
	 */
	unmutable_valid_access_page = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(unmutable_valid_access_page, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to the end of physical memory */
	bootstrap_pgtable_phys = pgtable;
	/* Blanket-map all of what we got from iBoot, as we'd later do in kasan_init() */
	kasan_map_shadow_static_early(kernel_vbase, args->memSize);

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size);
	kasan_map_shadow_early(excepstack_virt, excepstack_size);

	/* Upgrade the deviceTree mapping if necessary */
	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength);
	}
}

bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadowp);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}
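
/*
 * The walk above is the read-only counterpart of kasan_arm64_pte_map(): the
 * same L1/L2/L3 lookups, but bailing out at the first invalid entry instead
 * of allocating, and always translating through phystokv() since it only
 * runs once the physmap is up.
 */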

void
kasan_lock_init(void)
{
	lck_ticket_init(&kasan_vm_lock, LCK_GRP_NULL);
}

/*
 * KASAN may be called from interrupt context, so we disable interrupts to
 * ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	lck_ticket_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	lck_ticket_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
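
/*
 * Usage sketch (illustrative): callers bracket manipulation of KASAN globals
 * with this pair, threading the saved interrupt state through:
 *
 *   boolean_t flag;
 *   kasan_lock(&flag);
 *   ... update shadow bookkeeping ...
 *   kasan_unlock(flag);
 */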
448