/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <kern/thread.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <sys/param.h>
#include <i386/pmap.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include "kasan.h"
#include "kasan_internal.h"
#include <vm/pmap.h>
51 #include "memintrinsics.h"
52
53 #define STOLEN_MEM_PERCENT 25UL
54 #define STOLEN_MEM_BYTES 0
55
extern thread_t kasan_lock_holder;

extern uint64_t *IdlePML4;
#define phys2virt(x) ((uintptr_t)(x) + physmap_base)

vm_offset_t shadow_pbase;
vm_offset_t shadow_ptop;
vm_offset_t shadow_pnext;
unsigned shadow_stolen_idx;

static vm_offset_t zero_superpage_phys;
decl_simple_lock_data(, kasan_vm_lock);

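/*
 * A 48-bit x86_64 virtual address decomposed into its four page table
 * indices (9 bits each) plus the 12-bit page offset.
 */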
typedef struct {
	unsigned int pml4   : 9;
	unsigned int pdpt   : 9;
	unsigned int pd     : 9;
	unsigned int pt     : 9;
	unsigned int offset : 12;
} split_addr_t;

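/* Decompose a kernel virtual address into its page table indices. */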
static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd   = (address >> 21) & 0x1ff;
	addr.pt   = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}

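/*
 * Bump allocator for shadow pages, carving physical pages out of the range
 * stolen from EFI in kasan_reserve_memory(). Panics when the stolen range
 * is exhausted.
 */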
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}

#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

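/* Allocate a 2MB superpage, first aligning shadow_pnext up to a superpage boundary. */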
static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

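/* Allocate a physical page and zero it through the physical aperture. */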
static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}

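/*
 * Map the shadow of [address, address + size) using 2MB superpage entries
 * that all alias the shared, read-only zero superpage. One shadow superpage
 * covers 8 * I386_LPGBYTES of address space, since 8 bytes of kernel memory
 * map to 1 shadow byte. Intermediate table pages are allocated on demand.
 */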
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
			    | INTEL_PTE_VALID
			    | INTEL_PTE_PS
			    | INTEL_PTE_NX;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu",
			    address, size);
		}

		/* adding a new entry, this is not strictly required */
		invlpg(virt_shadow_target);
	}
}

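/*
 * Map the shadow of [address, address + size) with freshly allocated,
 * writable 4KB pages. If cannot_poison is set, the shadow is instead mapped
 * read-only to the shared zero page. Shadow ranges already covered by a
 * superpage mapping are skipped.
 */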
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool cannot_poison)
{
	size = kasan_granule_round(size);
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	assert(kasan_granule_partial(size) == 0);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {
		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (cannot_poison) {
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
				    | INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
			    | INTEL_PTE_VALID
			    | INTEL_PTE_NX;

			/* adding a new entry, this is not strictly required */
			invlpg(shadow_base);
		}
	}
}

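/*
 * One-time architecture setup: zero the dummy superpage, then cover the
 * shadow of the entire physical aperture with it.
 */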
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map: STOLEN_MEM_PERCENT of
 * physical memory (plus STOLEN_MEM_BYTES), taken from the end of the first
 * large-enough range of conventional memory in the EFI memory map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* sum total physical memory */
	total_pages = 0;
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

	to_steal = (unsigned long)(total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = (unsigned int)to_steal;
			shadow_stolen_idx = i;

			/* Set aside a page of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}

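/* Walk the page tables to check whether the given shadow address is mapped. */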
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
	    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* mapped as superpage */
		return true;
	}
	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}

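/* Initialize the lock that serializes access to KASAN's global state. */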
void
kasan_lock_init(void)
{
	simple_lock_init(&kasan_vm_lock, 0);
}

/*
 * KASAN may be called from interrupt context, so we disable interrupts to
 * ensure atomicity when manipulating the global objects.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	kasan_lock_holder = current_thread();
}

void
kasan_unlock(boolean_t b)
{
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}