/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/thread.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <libkern/libkern.h>
#include <sys/queue.h>
#include <vm/pmap.h>
#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

#include <pexpert/device_tree.h>
#include <pexpert/arm64/boot.h>
#include <arm64/tlb.h>

#include <libkern/kernel_mach_header.h>

#if KASAN_CLASSIC
#include "kasan-classic-arm64.h"
#elif KASAN_TBI
#include "kasan-tbi-arm64.h"
_Static_assert((KASAN_TBI_ADDR_SIZE > VM_KERNEL_POINTER_SIGNIFICANT_BITS), "Kernel pointers leave no room for tagging");
#else /* KASAN_CLASSIC || KASAN_TBI */
#error "No model defined for the shadow table"
#endif /* KASAN_CLASSIC || KASAN_TBI */

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */

extern thread_t kasan_lock_holder;

extern uint64_t *cpu_tte;
extern unsigned long gVirtBase, gPhysBase;

typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

vm_offset_t physmap_vbase;
vm_offset_t physmap_vtop;

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
#if HIBERNATION
// if we're building a kernel with hibernation support, hibernate_write_image depends on this symbol
vm_offset_t shadow_pnext;
#else
static vm_offset_t shadow_pnext;
#endif

static vm_offset_t unmutable_valid_access_page;
static vm_offset_t bootstrap_pgtable_phys;

extern vm_offset_t intstack, intstack_top;
extern vm_offset_t excepstack, excepstack_top;

static lck_grp_t kasan_vm_lock_grp;
static lck_ticket_t kasan_vm_lock;

void kasan_bootstrap(boot_args *, vm_offset_t pgtable);

_Static_assert(KASAN_OFFSET == KASAN_OFFSET_ARM64, "KASan inconsistent shadow offset");
_Static_assert(VM_MAX_KERNEL_ADDRESS < KASAN_SHADOW_MIN, "KASan shadow overlaps with kernel VM");
_Static_assert((VM_MIN_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 >= KASAN_SHADOW_MIN, "KASan shadow does not cover kernel VM");
_Static_assert((VM_MAX_KERNEL_ADDRESS >> KASAN_SCALE) + KASAN_OFFSET_ARM64 < KASAN_SHADOW_MAX, "KASan shadow does not cover kernel VM");
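
/*
 * Illustration (not used by the build): the asserts above encode the linear
 * shadow mapping this file relies on. For a kernel virtual address `addr`,
 * the shadow byte tracking it is expected to live at
 *
 *     SHADOW_FOR_ADDRESS(addr) == (addr >> KASAN_SCALE) + KASAN_OFFSET
 *
 * so the whole [VM_MIN_KERNEL_ADDRESS, VM_MAX_KERNEL_ADDRESS] range lands
 * inside [KASAN_SHADOW_MIN, KASAN_SHADOW_MAX) without overlapping kernel VM.
 */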

#define KASAN_ARM64_MAP_STATIC_VALID_PAGE 0x1
#define KASAN_ARM64_PREALLOCATE_L1L2 0x2
#define KASAN_ARM64_NO_PHYSMAP 0x4

#define KASAN_ARM64_MAP (0)
#define KASAN_ARM64_STATIC_VALID_MAP (KASAN_ARM64_MAP | KASAN_ARM64_MAP_STATIC_VALID_PAGE)
#define KASAN_ARM64_PREALLOCATE_TRANSLATION (KASAN_ARM64_PREALLOCATE_L1L2)
#define KASAN_ARM64_MAP_EARLY (KASAN_ARM64_MAP | KASAN_ARM64_NO_PHYSMAP)
#define KASAN_ARM64_MAP_STATIC_EARLY (KASAN_ARM64_STATIC_VALID_MAP | KASAN_ARM64_NO_PHYSMAP)


/*
 * KASAN runs both early on, when the 1:1 mapping hasn't been established yet,
 * and later when memory management is fully set up. This internal version of
 * phystokv switches between accessing physical memory directly and using the
 * physmap.
 */
static vm_map_address_t
kasan_arm64_phystokv(uintptr_t pa, bool early)
{
	return early ? (pa) : phystokv(pa);
}

/*
 * Physical pages used to back the shadow table are stolen early at boot
 * and later managed in a fairly simple, linear fashion.
 */
static uintptr_t
kasan_arm64_alloc_page(void)
{
	if (shadow_pnext + ARM_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += ARM_PGBYTES;
	shadow_pages_used++;

	return mem;
}

static uintptr_t
kasan_arm64_alloc_zero_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	__nosan_bzero((void *)kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

static uintptr_t
kasan_arm64_alloc_valid_page(bool early)
{
	uintptr_t mem = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(kasan_arm64_phystokv(mem, early), ARM_PGBYTES);
	return mem;
}

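/*
 * Expand an (address, size) region to page granularity: truncate the start
 * down to a page boundary and round the size up so that the original end of
 * the region remains covered.
 */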
static void
kasan_arm64_align_to_page(vm_offset_t *addrp, vm_offset_t *sizep)
{
	vm_offset_t addr_aligned = vm_map_trunc_page(*addrp, ARM_PGMASK);
	*sizep = vm_map_round_page(*sizep + (*addrp - addr_aligned), ARM_PGMASK);
	*addrp = addr_aligned;
}

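/*
 * Return a pointer to the translation table entry for `address` within the
 * given L1/L2/L3 table, by extracting the index bits for that level.
 */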
static uint64_t *
kasan_arm64_lookup_l1(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L1_INDEX_MASK) >> ARM_TT_L1_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l2(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L2_INDEX_MASK) >> ARM_TT_L2_SHIFT);
}

static uint64_t *
kasan_arm64_lookup_l3(uint64_t *base, vm_offset_t address)
{
	return base + ((address & ARM_TT_L3_INDEX_MASK) >> ARM_TT_L3_SHIFT);
}

/*
 * kasan_arm64_pte_map() is the heart of the arch-specific handling of the
 * shadow table. It walks the existing page tables that map shadow ranges and
 * allocates/creates valid entries as required. Options are:
 *  - static_valid: instead of creating a new backing shadow page, point to
 *    the 'full valid access' one created early at boot.
 *  - preallocate_translation_only: do not add the final shadow table entry,
 *    but only add the L1/L2 pages for a valid translation.
 *  - early: xnu is running before the VM is fully set up, so handle physical
 *    addresses directly instead of going through the physmap.
 */
static void
kasan_arm64_pte_map(vm_offset_t shadow_base, uint64_t *base, uint8_t options)
{
	bool early = options & KASAN_ARM64_NO_PHYSMAP;
	uint64_t *pte;

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadow_base);
	if (*pte & ARM_TTE_VALID) {
		assert((*pte & ARM_TTE_TYPE_MASK) == ARM_TTE_TYPE_TABLE);
	} else {
		*pte = ((uint64_t)kasan_arm64_alloc_zero_page(early)
		    & ARM_TTE_TABLE_MASK) | ARM_TTE_VALID | ARM_TTE_TYPE_TABLE;
	}

	base = (uint64_t *)kasan_arm64_phystokv(*pte & ARM_TTE_TABLE_MASK, early);

	if (options & KASAN_ARM64_PREALLOCATE_L1L2) {
		return;
	}

	bool static_valid = options & KASAN_ARM64_MAP_STATIC_VALID_PAGE;

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadow_base);

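	/*
	 * If an L3 entry already exists, leave it alone unless it currently
	 * points read-only at the shared 'always valid' page and this mapping
	 * needs a private, writable shadow page instead.
	 */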
	if (*pte & ARM_PTE_TYPE_VALID) {
		bool pte_rona = (*pte & ARM_PTE_APMASK) == ARM_PTE_AP(AP_RONA);
		if (!pte_rona || static_valid) {
			return;
		}
	}

	/* create new L3 entry */
	uint64_t newpte;
	if (static_valid) {
		/* map the shared always-valid page RO */
		newpte = (uint64_t)unmutable_valid_access_page | ARM_PTE_AP(AP_RONA);
	} else {
		newpte = (uint64_t)kasan_arm64_alloc_valid_page(early) | ARM_PTE_AP(AP_RWNA);
	}

	newpte |= ARM_PTE_TYPE_VALID
	    | ARM_PTE_AF
	    | ARM_PTE_SH(SH_OUTER_MEMORY)
	    | ARM_PTE_ATTRINDX(CACHE_ATTRINDX_DEFAULT)
	    | ARM_PTE_NX
	    | ARM_PTE_PNX;
	*pte = newpte;
}

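/*
 * Map shadow memory covering [address, address + size): round the size up to
 * the shadow granule, then walk the live kernel page tables (cpu_tte) and
 * populate one page of shadow at a time.
 */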
static void
kasan_map_shadow_internal(vm_offset_t address, vm_size_t size, uint8_t options)
{
	size = (size + KASAN_SIZE_ALIGNMENT) & ~KASAN_SIZE_ALIGNMENT;
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), ARM_PGMASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), ARM_PGMASK);

	assert(shadow_base >= KASAN_SHADOW_MIN && shadow_top <= KASAN_SHADOW_MAX);
	assert((size & KASAN_SIZE_ALIGNMENT) == 0);

	for (; shadow_base < shadow_top; shadow_base += ARM_PGBYTES) {
		kasan_arm64_pte_map(shadow_base, cpu_tte, options);
	}

	flush_mmu_tlb();
}

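/*
 * Public entry point for mapping shadow over a VA range. Regions marked
 * static_valid (and, under KASAN_LIGHT, regions not owned by the zone maps)
 * share the read-only 'always valid' page instead of private shadow pages.
 */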
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool static_valid)
{
	uint8_t options = KASAN_ARM64_MAP;

	if (static_valid) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#if KASAN_LIGHT
	} else if (!kasan_zone_maps_owned(address, size)) {
		options |= KASAN_ARM64_MAP_STATIC_VALID_PAGE;
#endif /* KASAN_LIGHT */
	}

	kasan_map_shadow_internal(address, size, options);
}

/*
 * TODO: mappings here can be reclaimed after kasan_init()
 */
static void
kasan_arm64_do_map_shadow_early(vm_offset_t address, vm_size_t size, uint8_t options)
{
	kasan_arm64_align_to_page(&address, &size);
	vm_size_t j;

	for (j = 0; j < size; j += ARM_PGBYTES) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		assert(virt_shadow_target >= KASAN_SHADOW_MIN);
		assert(virt_shadow_target < KASAN_SHADOW_MAX);

		kasan_arm64_pte_map(virt_shadow_target, (uint64_t *)bootstrap_pgtable_phys, options);
	}

	flush_mmu_tlb();
}


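/*
 * Early-boot wrappers: walk the bootstrap page tables by physical address
 * (the physmap does not exist yet), backing the shadow either with fresh
 * pages or with the shared static 'always valid' page.
 */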
static void
kasan_map_shadow_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_EARLY);
}

static void
kasan_map_shadow_static_early(vm_offset_t address, vm_size_t size)
{
	kasan_arm64_do_map_shadow_early(address, size, KASAN_ARM64_MAP_STATIC_EARLY);
}

void
kasan_arch_init(void)
{
	/* Map the physical aperture */
	kasan_map_shadow(physmap_vbase, physmap_vtop - physmap_vbase, true);

#if defined(KERNEL_INTEGRITY_KTRR) || defined(KERNEL_INTEGRITY_CTRR)
	/* Pre-allocate all the L3 page table pages to avoid triggering KTRR */
	kasan_map_shadow_internal(VM_MIN_KERNEL_ADDRESS,
	    VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS + 1, KASAN_ARM64_PREALLOCATE_TRANSLATION);
#endif
}

/*
 * Steal memory for the shadow, and shadow map the bootstrap page tables so we can
 * run until kasan_init(). Called while running with identity (V=P) map active.
 */
void
kasan_bootstrap(boot_args *args, vm_offset_t pgtable)
{
	uintptr_t tosteal;
	/* Physical-to-virtual offset of the bootstrap kernel mapping */
	vm_address_t p2v = args->virtBase - args->physBase;

	vm_address_t pbase = args->physBase;
	vm_address_t ptop = args->topOfKernelData;
	kernel_vbase = args->virtBase;
	kernel_vtop = kernel_vbase + ptop - pbase;

	/* Reserve physical memory at the end for KASAN shadow table and quarantines */
	tosteal = (args->memSize * STOLEN_MEM_PERCENT) / 100 + STOLEN_MEM_BYTES;
	tosteal = vm_map_trunc_page(tosteal, ARM_PGMASK);
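
	/*
	 * Illustrative only (the values are hypothetical; the real ones come
	 * from STOLEN_MEM_PERCENT and STOLEN_MEM_BYTES): with 8 GiB of memory
	 * and a 25% steal ratio, roughly 2 GiB at the top of physical memory
	 * would be carved out below for shadow pages and quarantine backing.
	 */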

	/* Make it disappear from xnu's view */
	args->memSize -= tosteal;

	shadow_pbase = vm_map_round_page(pbase + args->memSize, ARM_PGMASK);
	shadow_ptop = shadow_pbase + tosteal;
	shadow_pnext = shadow_pbase;
	shadow_pages_total = (uint32_t)((shadow_ptop - shadow_pbase) / ARM_PGBYTES);

	/*
	 * Set aside a page to represent all those regions that allow any
	 * access and that won't mutate over their lifetime.
	 */
	unmutable_valid_access_page = kasan_arm64_alloc_page();
	kasan_impl_fill_valid_range(unmutable_valid_access_page, ARM_PGBYTES);

	/* Shadow the KVA bootstrap mapping: start of kernel Mach-O to end of physical */
	bootstrap_pgtable_phys = pgtable;
	/* Blanket map all of what we got from iBoot, as we'd later do in kasan_init() */
	kasan_map_shadow_static_early(kernel_vbase, args->memSize);

	vm_offset_t intstack_virt = (vm_offset_t)&intstack + p2v;
	vm_offset_t excepstack_virt = (vm_offset_t)&excepstack + p2v;
	vm_offset_t intstack_size = (vm_offset_t)&intstack_top - (vm_offset_t)&intstack;
	vm_offset_t excepstack_size = (vm_offset_t)&excepstack_top - (vm_offset_t)&excepstack;

	kasan_map_shadow_early(intstack_virt, intstack_size);
	kasan_map_shadow_early(excepstack_virt, excepstack_size);

	/* Upgrade the deviceTree mapping if necessary */
	if ((vm_offset_t)args->deviceTreeP - p2v < (vm_offset_t)&_mh_execute_header) {
		kasan_map_shadow_early((vm_offset_t)args->deviceTreeP, args->deviceTreeLength);
	}
}

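/*
 * Check whether a given shadow address is currently backed: walk the live
 * kernel page tables and require a valid entry at each of L1, L2 and L3.
 */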
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	uint64_t *pte;
	uint64_t *base = cpu_tte;

	assert(shadowp >= KASAN_SHADOW_MIN);
	assert(shadowp < KASAN_SHADOW_MAX);

	/* lookup L1 entry */
	pte = kasan_arm64_lookup_l1(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L2 entry */
	pte = kasan_arm64_lookup_l2(base, shadowp);
	if (!(*pte & ARM_TTE_VALID)) {
		return false;
	}
	base = (uint64_t *)phystokv(*pte & ARM_TTE_TABLE_MASK);

	/* lookup L3 entry */
	pte = kasan_arm64_lookup_l3(base, shadowp);
	if (!(*pte & ARM_PTE_TYPE_VALID)) {
		return false;
	}

	return true;
}

void
kasan_lock_init(void)
{
	lck_grp_init(&kasan_vm_lock_grp, "kasan lock", LCK_GRP_ATTR_NULL);
	lck_ticket_init(&kasan_vm_lock, &kasan_vm_lock_grp);
}

/*
 * KASAN may be called from interrupt context, so we disable interrupts to
 * ensure atomicity while manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	lck_ticket_lock(&kasan_vm_lock, &kasan_vm_lock_grp);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	lck_ticket_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
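
/*
 * Illustrative usage only (a sketch, not code from this file): callers are
 * expected to bracket manipulation of global KASAN state with the lock,
 * saving and restoring the interrupt-enable state it hands back:
 *
 *     boolean_t flags;
 *     kasan_lock(&flags);
 *     ... manipulate shadow / quarantine state ...
 *     kasan_unlock(flags);
 */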