/*
 * Copyright (c) 2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <stdint.h>
#include <string.h>
#include <vm/vm_kern.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <i386/proc_reg.h>
#include <i386/machine_routines.h>
#include <kern/debug.h>
#include <mach/mach_vm.h>
#include <mach/vm_param.h>
#include <sys/param.h>
#include <i386/pmap.h>
#include <libkern/libkern.h>
#include <pexpert/i386/efi.h>
#include <pexpert/i386/boot.h>
#include <sys/queue.h>
#include "kasan.h"
#include "kasan_internal.h"
#include <vm/pmap.h>
#include "memintrinsics.h"

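/*
 * A fixed fraction of physical memory is stolen from EFI at boot to back
 * the shadow map (see kasan_reserve_memory() below); STOLEN_MEM_BYTES can
 * add a further fixed amount on top of the percentage.
 */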
#define STOLEN_MEM_PERCENT	25UL
#define STOLEN_MEM_BYTES	0

extern uint64_t *IdlePML4;
#define phys2virt(x) ((uintptr_t)(x) + physmap_base)

/* Bump-allocator state over the stolen physical range */
vm_offset_t shadow_pbase;	/* base of the stolen range */
vm_offset_t shadow_ptop;	/* top of the stolen range */
vm_offset_t shadow_pnext;	/* next free physical address */
unsigned shadow_stolen_idx;	/* EFI memory map index stolen from */

static vm_offset_t zero_superpage_phys;

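/*
 * An x86_64 linear address decomposed into its four 9-bit page table
 * indices (PML4, PDPT, PD, PT) and the 12-bit page offset.
 */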
typedef struct {
	unsigned int pml4 : 9;
	unsigned int pdpt : 9;
	unsigned int pd : 9;
	unsigned int pt : 9;
	unsigned int offset : 12;
} split_addr_t;

static split_addr_t
split_address(vm_offset_t address)
{
	split_addr_t addr;

	addr.pml4 = (address >> 39) & 0x1ff;
	addr.pdpt = (address >> 30) & 0x1ff;
	addr.pd = (address >> 21) & 0x1ff;
	addr.pt = (address >> 12) & 0x1ff;
	// addr.offset = address & PAGE_MASK;

	return addr;
}

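/*
 * Bump-allocate one 4 KB page of shadow backing store from the stolen
 * physical range; panics if the range is exhausted.
 */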
static uintptr_t
alloc_page(void)
{
	if (shadow_pnext + I386_PGBYTES >= shadow_ptop) {
		panic("KASAN: OOM");
	}

	uintptr_t mem = shadow_pnext;
	shadow_pnext += I386_PGBYTES;
	shadow_pages_used++;

	return mem;
}

#define ROUND_SUPERPAGE(x) ((((uintptr_t)(x)) + I386_LPGBYTES - 1) & ~(I386_LPGMASK))

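/*
 * Bump-allocate one 2 MB superpage, rounding the cursor up to a superpage
 * boundary first; any pages skipped by the rounding are leaked.
 */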
static uintptr_t
alloc_superpage(void)
{
	uintptr_t mem;
	shadow_pnext = ROUND_SUPERPAGE(shadow_pnext);
	assert((shadow_pnext & I386_LPGMASK) == 0);
	mem = shadow_pnext;
	shadow_pnext += I386_LPGBYTES;
	shadow_pages_used += I386_LPGBYTES / I386_PGBYTES;
	/* XXX: not accounting for superpage rounding */
	return mem;
}

static uintptr_t
alloc_page_zero(void)
{
	uintptr_t mem = alloc_page();
	bzero_phys(mem, I386_PGBYTES);
	return mem;
}

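/*
 * Map the shadow for [address, address + size) entirely to the shared zero
 * superpage. At the usual 1:8 KASAN shadow scale, one 2 MB shadow superpage
 * covers 8 * 2 MB = 16 MB of kernel address space, hence the loop stride.
 */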
static void
kasan_map_shadow_superpage_zero(vm_offset_t address, vm_size_t size)
{
	address = vm_map_trunc_page(address, I386_LPGMASK);
	size = vm_map_round_page(size, I386_LPGMASK);

	vm_size_t j;
	for (j = 0; j < size; j += I386_LPGBYTES * 8) {
		vm_offset_t virt_shadow_target = (vm_offset_t)SHADOW_FOR_ADDRESS(address + j);

		split_addr_t addr = split_address(virt_shadow_target);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;

		/* Allocate a PDPT if this PML4 slot is empty */
		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		/* Allocate a page directory if this PDPT slot is empty */
		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		/* Point the PDE at the zero superpage, read-only and NX */
		L1 = (uint64_t *)(L2[addr.pd] & ~PAGE_MASK);
		if (L1 == NULL) {
			L2[addr.pd] = (uint64_t)zero_superpage_phys
			    | INTEL_PTE_VALID
			    | INTEL_PTE_PS
			    | INTEL_PTE_NX;
		} else {
			panic("Unexpected shadow mapping, addr = %lx, sz = %lu",
			    address, size);
		}

		/* We are adding a new entry, so this is not strictly required */
		invlpg(virt_shadow_target);
	}
}

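/*
 * Ensure the shadow for [address, address + size) is backed with 4 KB
 * pages, allocating intermediate page tables as needed. When cannot_poison
 * is set, the shadow is aliased read-only to the zero superpage instead of
 * being given fresh writable pages.
 */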
void
kasan_map_shadow(vm_offset_t address, vm_size_t size, bool cannot_poison)
{
	size = kasan_granule_round(size);
	vm_offset_t shadow_base = vm_map_trunc_page(SHADOW_FOR_ADDRESS(address), PAGE_MASK);
	vm_offset_t shadow_top = vm_map_round_page(SHADOW_FOR_ADDRESS(address + size), PAGE_MASK);

	assert(kasan_granule_partial(size) == 0);

	for (; shadow_base < shadow_top; shadow_base += I386_PGBYTES) {
		split_addr_t addr = split_address(shadow_base);
		assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
		    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

		uint64_t *L3;
		uint64_t *L2;
		uint64_t *L1;
		uint64_t *pte;

		/* Allocate a PDPT if this PML4 slot is empty */
		L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
		if (L3 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L3 = (uint64_t *)phys2virt(pmem);
			IdlePML4[addr.pml4] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L3 = (uint64_t *)phys2virt(L3);
		}

		/* Allocate a page directory if this PDPT slot is empty */
		L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
		if (L2 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L2 = (uint64_t *)phys2virt(pmem);
			L3[addr.pdpt] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L2 = (uint64_t *)phys2virt(L2);
		}

		uint64_t pde = L2[addr.pd];
		if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
			/* Already mapped as a superpage */
			continue;
		}

		/* Allocate a page table if this PDE is empty */
		L1 = (uint64_t *)(pde & ~PAGE_MASK);
		if (L1 == NULL) {
			uintptr_t pmem = alloc_page_zero();
			L1 = (uint64_t *)phys2virt(pmem);
			L2[addr.pd] = pmem
			    | INTEL_PTE_VALID
			    | INTEL_PTE_WRITE;
		} else {
			L1 = (uint64_t *)phys2virt(L1);
		}

		pte = (uint64_t *)(L1[addr.pt] & ~PAGE_MASK);
		if (pte == NULL) {
			uint64_t newpte;
			if (cannot_poison) {
				/* Alias the shadow page read-only to the zero superpage */
				newpte = (uint64_t)zero_superpage_phys;
			} else {
				newpte = (vm_offset_t)alloc_page_zero()
				    | INTEL_PTE_WRITE;
			}
			L1[addr.pt] = newpte
			    | INTEL_PTE_VALID
			    | INTEL_PTE_NX;

			/* We are adding a new entry, so this is not strictly required */
			invlpg(shadow_base);
		}
	}
}

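/*
 * Arch-specific KASAN setup: scrub the dummy superpage, then cover the
 * whole physical aperture with zero shadow.
 */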
void
kasan_arch_init(void)
{
	__nosan_bzero((void *)phys2virt(zero_superpage_phys), I386_LPGBYTES);

	/* Map the physical aperture */
	kasan_map_shadow_superpage_zero(physmap_base, physmap_max - physmap_base);
}

/*
 * Steal some memory from EFI for the shadow map.
 */
void
kasan_reserve_memory(void *_args)
{
	boot_args *args = (boot_args *)_args;
	vm_address_t pbase = args->kaddr;
	vm_address_t ptop = args->kaddr + args->ksize;

	kernel_vbase = ml_static_ptovirt(pbase);
	kernel_vtop = ml_static_ptovirt(ptop);

	EfiMemoryRange *mptr, *mptr_tmp;
	unsigned int mcount;
	unsigned int msize;
	unsigned int i;
	unsigned long total_pages;
	unsigned long to_steal;

	mptr = (EfiMemoryRange *)ml_static_ptovirt((vm_offset_t)args->MemoryMap);
	msize = args->MemoryMapDescriptorSize;
	mcount = args->MemoryMapSize / msize;

	/* Sum total physical memory */
	total_pages = 0;
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		total_pages += mptr_tmp->NumberOfPages;
	}

	to_steal = (unsigned long)(total_pages * STOLEN_MEM_PERCENT) / 100 + (STOLEN_MEM_BYTES / I386_PGBYTES);

	/* Search for a range large enough to steal from */
	for (i = 0, mptr_tmp = mptr; i < mcount; i++, mptr_tmp = (EfiMemoryRange *)(((vm_offset_t)mptr_tmp) + msize)) {
		ppnum_t base, top;
		base = (ppnum_t)(mptr_tmp->PhysicalStart >> I386_PGSHIFT);
		top = (ppnum_t)((mptr_tmp->PhysicalStart >> I386_PGSHIFT) + mptr_tmp->NumberOfPages - 1);

		if ((mptr_tmp->Type == kEfiConventionalMemory) && (mptr_tmp->NumberOfPages > to_steal)) {
			/* Found a region with sufficient space - steal from the end */
			mptr_tmp->NumberOfPages -= to_steal;

			shadow_pbase = mptr_tmp->PhysicalStart + (mptr_tmp->NumberOfPages << I386_PGSHIFT);
			shadow_ptop = shadow_pbase + (to_steal << I386_PGSHIFT);
			shadow_pnext = shadow_pbase;
			shadow_pages_total = (unsigned int)to_steal;
			shadow_stolen_idx = i;

			/* Set aside a superpage of zeros we can use for dummy shadow mappings */
			zero_superpage_phys = alloc_superpage();

			return;
		}
	}

	panic("KASAN: could not reserve memory");
}

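/*
 * Report whether the shadow byte at shadowp is backed by a valid mapping,
 * either a 2 MB superpage or a 4 KB page, without taking a fault.
 */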
bool
kasan_is_shadow_mapped(uintptr_t shadowp)
{
	split_addr_t addr = split_address(shadowp);
	assert(addr.pml4 >= KERNEL_KASAN_PML4_FIRST &&
	    addr.pml4 <= KERNEL_KASAN_PML4_LAST);

	uint64_t *L3;
	uint64_t *L2;
	uint64_t *L1;

	L3 = (uint64_t *)(IdlePML4[addr.pml4] & ~PAGE_MASK);
	if (L3 == NULL) {
		return false;
	}
	L3 = (uint64_t *)phys2virt(L3);

	L2 = (uint64_t *)(L3[addr.pdpt] & ~PAGE_MASK);
	if (L2 == NULL) {
		return false;
	}
	L2 = (uint64_t *)phys2virt(L2);

	uint64_t pde = L2[addr.pd];
	if ((pde & (INTEL_PTE_VALID | INTEL_PTE_PS)) == (INTEL_PTE_VALID | INTEL_PTE_PS)) {
		/* Mapped as a superpage */
		return true;
	}
	L1 = (uint64_t *)(pde & ~PAGE_MASK);
	if (L1 == NULL) {
		return false;
	}
	L1 = (uint64_t *)phys2virt(L1);

	if (L1[addr.pt] & INTEL_PTE_VALID) {
		return true;
	}

	return false;
}