xref: /xnu-8020.101.4/san/memory/kasan.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <string.h>
30 #include <stdint.h>
31 #include <stdbool.h>
32 #include <vm/vm_map.h>
33 #include <kern/assert.h>
34 #include <kern/cpu_data.h>
35 #include <kern/backtrace.h>
36 #include <machine/machine_routines.h>
37 #include <kern/locks.h>
38 #include <kern/simple_lock.h>
39 #include <kern/debug.h>
40 #include <mach/mach_vm.h>
41 #include <mach/mach_types.h>
42 #include <mach/vm_param.h>
43 #include <mach/machine/vm_param.h>
44 #include <mach/sdt.h>
45 #include <libkern/libkern.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/kernel_mach_header.h>
48 #include <sys/queue.h>
49 #include <kern/thread.h>
50 #include <machine/atomic.h>
51 
52 #include "kasan.h"
53 #include "kasan_internal.h"
54 #include "memintrinsics.h"
55 
56 /*
57  * KASAN - Kernel Address SANitizer
58  *
59  * Address Sanitizer goal is to detect memory corruption issues as they
60  * happen. In XNU, we use a couple of different strategies/optimizations,
61  * heavily inspired by Google's various sanitizers.
 62  * Each implementation locks down some amount of memory at boot to implement
63  * a shadow table that is then consulted by compiler-inserted
64  * instrumentation (mainly) and developer-added calls (mostly for management)
65  * at every memory operation.
66  *
67  * Each of the individual implementations is self contained
68  * in its own file.
69  *
70  * KASAN-CLASSIC (kasan-classic.c)
71  *
72  * For each 8-byte granule in the address space, one byte is reserved in the shadow
73  * table. Cost: ~13% of memory + 20-30MB of quarantine/redzones.
74  * See kasan-classic.c for details.
75  *
76  * KASAN-TBI (kasan-tbi.c)
77  *
78  * For each 16-byte granule in the address space, one byte is reserved in the
79  * shadow table. TBI (Top Byte Ignore) is used to associate a tag to each
80  * VA pointer and to match it with the shadow table backing storage. This
81  * mode of operation is similar to hardware memory tagging solutions (e.g. MTE)
82  * and is not available on x86-64. Cost: ~8% of memory. No need for redzones
83  * or quarantines. See kasan-tbi.c for details.
84  */
85 
/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET;

/* Statistics: Track every KEXT that successfully initializes under KASAN */
static unsigned kexts_loaded;

/* Statistics: Track shadow table usage (exported below via sysctl) */
unsigned shadow_pages_total;
unsigned shadow_pages_used;

/* Kernel VA shadow table initialization, populated in arch specific code */
vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

/* Global KASAN lock; see kasan_lock()/kasan_unlock() */
decl_simple_lock_data(, kasan_vm_lock);
/* Thread currently holding kasan_vm_lock, consulted by kasan_lock_held() */
static thread_t kasan_lock_holder;

/* Global KASAN configuration. */
unsigned kasan_enabled;                   /* master switch, set at end of kasan_init() */
unsigned kasan_enabled_checks = TYPE_ALL; /* bitmask of enabled check types */

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
113 
/*
 * Acquire the global KASAN lock. KASAN may be called from interrupt
 * context, so we disable interrupts to ensure atomicity manipulating
 * the global objects. The previous interrupt state is returned through
 * 'b' and must be handed back to the matching kasan_unlock() call.
 */
void
kasan_lock(boolean_t *b)
{
	*b = ml_set_interrupts_enabled(false);
	simple_lock(&kasan_vm_lock, LCK_GRP_NULL);
	/* record the owner so kasan_lock_held() can answer for this thread */
	kasan_lock_holder = current_thread();
}
125 
/*
 * Release the global KASAN lock and restore the interrupt state saved by
 * the matching kasan_lock() call.
 */
void
kasan_unlock(boolean_t b)
{
	/* clear the owner before dropping the lock */
	kasan_lock_holder = THREAD_NULL;
	simple_unlock(&kasan_vm_lock);
	ml_set_interrupts_enabled(b);
}
133 
134 /*
135  * Return true if 'thread' holds the kasan lock. Only safe if 'thread' == current
136  * thread
137  */
138 bool
kasan_lock_held(thread_t thread)139 kasan_lock_held(thread_t thread)
140 {
141 	return thread && thread == kasan_lock_holder;
142 }
143 
144 bool
kasan_check_enabled(access_t access)145 kasan_check_enabled(access_t access)
146 {
147 	return kasan_enabled && (kasan_enabled_checks & access) && !kasan_is_blacklisted(access);
148 }
149 
/*
 * Poison an entire range. Both 'base' and 'size' must be granule aligned;
 * the whole range is treated as a right redzone of the given 'flags' type.
 */
void
kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
{
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(size) == 0);

	/* size=0, leftsz=0, rightsz=size */
	kasan_poison(base, 0, 0, size, flags);
}
159 
/* Unpoison (mark as valid) 'size' bytes starting at 'base'. */
void NOINLINE
kasan_unpoison(void *base, vm_size_t size)
{
	/* size=size, leftsz=0, rightsz=0 */
	kasan_poison((vm_offset_t)base, size, 0, 0, 0);
}
166 
167 void NOINLINE
kasan_unpoison_stack(uintptr_t base,size_t size)168 kasan_unpoison_stack(uintptr_t base, size_t size)
169 {
170 	assert(base > 0);
171 	assert(size > 0);
172 
173 	size_t partial = kasan_granule_partial(base);
174 	base = kasan_granule_trunc(base);
175 	size = kasan_granule_round(size + partial);
176 
177 	kasan_unpoison((void *)base, size);
178 }
179 
180 void NOINLINE
kasan_unpoison_curstack(bool whole_stack)181 kasan_unpoison_curstack(bool whole_stack)
182 {
183 	uintptr_t base = ml_stack_base();
184 	size_t sz = ml_stack_size();
185 	uintptr_t cur = (uintptr_t)&base;
186 
187 	if (whole_stack) {
188 		cur = base;
189 	}
190 
191 	if (cur >= base && cur < base + sz) {
192 		/* unpoison from current stack depth to the top */
193 		size_t unused = cur - base;
194 		kasan_unpoison_stack(cur, sz - unused);
195 	}
196 }
197 
/*
 * ASan runtime hook for no-return code paths: unpoison the unused portion
 * of the current stack so stale poison cannot cause false positives later.
 */
void NOINLINE
__asan_handle_no_return(void)
{
	kasan_unpoison_curstack(false);

	/*
	 * No need to free any fakestack objects because they must stay alive until
	 * we drop the real stack, at which point we can drop the entire fakestack
	 * anyway.
	 */
}
209 
210 void NOINLINE
kasan_load_kext(vm_offset_t base,vm_size_t __unused size,const void * bundleid)211 kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
212 {
213 	unsigned long sectsz;
214 	void *sect;
215 
216 #if KASAN_DYNAMIC_BLACKLIST
217 	kasan_dybl_load_kext(base, bundleid);
218 #endif
219 
220 	/* find the kasan globals segment/section */
221 	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
222 	if (sect) {
223 		kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
224 		kexts_loaded++;
225 	}
226 }
227 
228 void NOINLINE
kasan_unload_kext(vm_offset_t base,vm_size_t size)229 kasan_unload_kext(vm_offset_t base, vm_size_t size)
230 {
231 	unsigned long sectsz;
232 	void *sect;
233 
234 	/* find the kasan globals segment/section */
235 	sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, &sectsz);
236 	if (sect) {
237 		kasan_unpoison((void *)base, size);
238 		kexts_loaded--;
239 	}
240 
241 #if KASAN_DYNAMIC_BLACKLIST
242 	kasan_dybl_unload_kext(base);
243 #endif
244 }
245 
246 /*
 247  * It is not possible to fully enable/disable kasan. Try to disable as many checks
248  * as possible to allow panic code path to create a coredump without recursing into
249  * KASAN failures.
250  *
251  * Compiler inlined checks require us to keep running with kasan_enabled = 1 so the
252  * shadow map gets properly updated.
253  */
void NOINLINE
kasan_kdp_disable(void)
{
	/* mask out every check type; kasan_enabled itself must stay on */
	kasan_enabled_checks = 0;
	kasan_impl_kdp_disable();
}
260 
/*
 * Locate the KASAN globals section in the kernel's own Mach-O header and
 * register it so the redzones of xnu's globals get poisoned. Failure is
 * non-fatal: the globals are simply left without redzone protection.
 */
static void NOINLINE
kasan_init_xnu_globals(void)
{
	const char *seg = KASAN_GLOBAL_SEGNAME;
	const char *sect = KASAN_GLOBAL_SECTNAME;
	unsigned long _size;
	vm_offset_t globals;
	vm_size_t size;
	kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;

	if (!header) {
		printf("KASan: failed to find kernel mach header\n");
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}

	globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
	if (!globals) {
		printf("KASan: failed to find segment %s section %s\n", seg, sect);
		printf("KASan: redzones for globals not poisoned\n");
		return;
	}
	size = (vm_size_t)_size;

	printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
	kasan_init_globals(globals, size);
}
288 
/*
 * Late-stage KASAN initialization: bring up the dynamic blacklist (when
 * built in), poison redzones for xnu's own globals, and run the
 * implementation-specific late setup.
 */
void NOINLINE
kasan_late_init(void)
{
#if KASAN_DYNAMIC_BLACKLIST
	kasan_init_dybl();
#endif
	kasan_init_xnu_globals();
	kasan_impl_late_init();
}
298 
/*
 * Extend the shadow map to cover [kernel_vtop, top) — memory claimed past
 * the end of the kernel image during early boot.
 */
void NOINLINE
kasan_notify_stolen(vm_offset_t top)
{
	kasan_map_shadow(kernel_vtop, top - kernel_vtop, KASAN_MAY_POISON);
}
304 
/*
 * Debug-only consistency check: for each 8-byte step through [base, base+sz),
 * load the backing shadow byte twice and assert the two reads agree.
 * Read-only on purpose — see the inline comment about zero mappings.
 */
static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		/* compiler barrier: force two distinct loads of *x */
		asm volatile ("" ::: "memory");
		tmp2 = *x;
		asm volatile ("" ::: "memory");
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}
327 
/* Valid values for kasan= boot-arg (see kasan_init() for handling) */
#define KASAN_ARGS_FAKESTACK       0x0010U /* sets fakestack_enabled */
#define KASAN_ARGS_REPORTIGNORED   0x0020U /* sets report_suppressed_checks */
#define KASAN_ARGS_NODYCHECKS      0x0100U /* clears TYPE_DYNAMIC checks */
#define KASAN_ARGS_NOPOISON_HEAP   0x0200U /* clears TYPE_POISON_HEAP checks */
#define KASAN_ARGS_NOPOISON_GLOBAL 0x0400U /* clears TYPE_POISON_GLOBAL checks */
334 
/*
 * Early-boot KASAN initialization: set up the global lock, map shadow for
 * the kernel image, apply boot-args, run the implementation-specific init,
 * and finally flip kasan_enabled on.
 */
void NOINLINE
kasan_init(void)
{
	unsigned arg;

	simple_lock_init(&kasan_vm_lock, 0);
	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);
	kasan_arch_init();

	/* handle KASan boot-args */
	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		/* explicit bitmask overrides the TYPE_ALL default */
		kasan_enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_suppressed_checks = true;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			kasan_enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			kasan_enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			kasan_enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
	}

	/* Model specific handling */
	kasan_impl_init();
	kasan_enabled = 1;
}
372 
/*
 * Common helper for the kasan_notify_address*() entry points: map shadow
 * for [address, address + size) under the KASAN lock. Sub-kernel addresses
 * and empty ranges are ignored.
 */
static void NOINLINE
kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool cannot_poison)
{
	assert(address < VM_MAX_KERNEL_ADDRESS);

	if (!kasan_enabled) {
		return;
	}

	if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS) {
		/* only map kernel addresses */
		return;
	}

	if (!size) {
		/* nothing to map */
		return;
	}

	boolean_t flags;
	kasan_lock(&flags);
	kasan_map_shadow(address, size, cannot_poison);
	kasan_unlock(flags);
	/* KASAN_DEBUG-only: validate the freshly created shadow mappings */
	kasan_debug_touch_mappings(address, size);
}
398 
/*
 * This routine is called throughout xnu to synchronize KASAN's shadow map
 * view with the virtual memory layout modifications. The range may later
 * be poisoned (KASAN_MAY_POISON).
 */
void
kasan_notify_address(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, KASAN_MAY_POISON);
}
408 
/*
 * Notify a range that is always valid and that will never change state
 * (in KASAN CLASSIC speak, that will never get poisoned).
 */
void
kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
{
	kasan_notify_address_internal(address, size, KASAN_CANNOT_POISON);
}
418 
419 /*
420  * Call 'cb' for each contiguous range of the shadow map. This could be more
421  * efficient by walking the page table directly.
422  */
423 int
kasan_traverse_mappings(pmap_traverse_callback cb,void * ctx)424 kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
425 {
426 	uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
427 	uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
428 	shadow_base = vm_map_trunc_page(shadow_base, PAGE_MASK);
429 	shadow_top = vm_map_round_page(shadow_top, PAGE_MASK);
430 
431 	uintptr_t start = 0, end = 0;
432 
433 	for (uintptr_t addr = shadow_base; addr < shadow_top; addr += PAGE_SIZE) {
434 		if (kasan_is_shadow_mapped(addr)) {
435 			if (start == 0) {
436 				start = addr;
437 			}
438 			end = addr + PAGE_SIZE;
439 		} else if (start && end) {
440 			cb(start, end, ctx);
441 			start = end = 0;
442 		}
443 	}
444 
445 	if (start && end) {
446 		cb(start, end, ctx);
447 	}
448 
449 	return 0;
450 }
451 
/*
 * Expose KASAN configuration and an interface to trigger the set of tests
 * through sysctl (kern.kasan.*).
 */
SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");

/* Live configuration and statistics; 'checks' is the only writable knob */
SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
SYSCTL_STRING(_kern_kasan, OID_AUTO, model, CTLFLAG_RD, KASAN_MODEL_STR, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &kasan_enabled_checks, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");

/* Old-style configuration options, maintained for compatibility */
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, KASAN_ZALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, KASAN_KALLOC, "");
SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_BLACKLIST, "");
471