1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <stdint.h>
31 #include <stdbool.h>
32 #include <vm/vm_map.h>
33 #include <kern/assert.h>
34 #include <kern/cpu_data.h>
35 #include <kern/backtrace.h>
36 #include <machine/machine_routines.h>
37 #include <kern/locks.h>
38 #include <kern/simple_lock.h>
39 #include <kern/debug.h>
40 #include <mach/mach_vm.h>
41 #include <mach/mach_types.h>
42 #include <mach/vm_param.h>
43 #include <mach/machine/vm_param.h>
44 #include <mach/sdt.h>
45 #include <libkern/libkern.h>
46 #include <libkern/OSAtomic.h>
47 #include <libkern/kernel_mach_header.h>
48 #include <sys/queue.h>
49 #include <kern/thread.h>
50 #include <machine/atomic.h>
51
52 #include "kasan.h"
53 #include "kasan_internal.h"
54 #include "memintrinsics.h"
55
56 /*
57 * KASAN - Kernel Address SANitizer
58 *
59 * Address Sanitizer goal is to detect memory corruption issues as they
60 * happen. In XNU, we use a couple of different strategies/optimizations,
61 * heavily inspired by Google's various sanitizers.
 * Each implementation locks down some amount of memory at boot to implement
63 * a shadow table that is then consulted by compiler-inserted
64 * instrumentation (mainly) and developer-added calls (mostly for management)
65 * at every memory operation.
66 *
67 * Each of the individual implementations is self contained
68 * in its own file.
69 *
70 * KASAN-CLASSIC (kasan-classic.c)
71 *
72 * For each 8-byte granule in the address space, one byte is reserved in the shadow
73 * table. Cost: ~13% of memory + 20-30MB of quarantine/redzones.
74 * See kasan-classic.c for details.
75 *
76 * KASAN-TBI (kasan-tbi.c)
77 *
78 * For each 16-byte granule in the address space, one byte is reserved in the
79 * shadow table. TBI (Top Byte Ignore) is used to associate a tag to each
80 * VA pointer and to match it with the shadow table backing storage. This
81 * mode of operation is similar to hardware memory tagging solutions (e.g. MTE)
82 * and is not available on x86-64. Cost: ~8% of memory. No need for redzones
83 * or quarantines. See kasan-tbi.c for details.
84 */
85
/* Statistics: Track every KEXT that successfully initializes under KASAN */
static unsigned kexts_loaded;

/* Statistics: Track shadow table usage (exposed via kern.kasan sysctls below) */
unsigned shadow_pages_total;
unsigned shadow_pages_used;

/* Kernel VA shadow table initialization, populated in arch specific code */
vm_offset_t kernel_vbase;
vm_offset_t kernel_vtop;

/* Thread currently holding the kasan lock; consulted by kasan_lock_held(). */
thread_t kasan_lock_holder;

/* Global KASAN configuration. */
unsigned kasan_enabled;                   /* master switch, set at end of kasan_init() */
unsigned kasan_enabled_checks = TYPE_ALL; /* bitmask of enabled check classes */
int fakestack_enabled;                    /* opted in via the "kasan" boot-arg */

/* imported osfmk functions */
extern vm_offset_t ml_stack_base(void);
extern vm_size_t ml_stack_size(void);
107
108 /*
109 * Return true if 'thread' holds the kasan lock. Only safe if 'thread' == current
110 * thread
111 */
112 bool
kasan_lock_held(thread_t thread)113 kasan_lock_held(thread_t thread)
114 {
115 return thread && thread == kasan_lock_holder;
116 }
117
118 bool
kasan_check_enabled(access_t access)119 kasan_check_enabled(access_t access)
120 {
121 return kasan_enabled && (kasan_enabled_checks & access) && !kasan_is_denylisted(access);
122 }
123
124 void
kasan_poison_range(vm_offset_t base,vm_size_t size,uint8_t flags)125 kasan_poison_range(vm_offset_t base, vm_size_t size, uint8_t flags)
126 {
127 assert(kasan_granule_partial(base) == 0);
128 assert(kasan_granule_partial(size) == 0);
129
130 /* size=0, leftsz=0, rightsz=size */
131 kasan_poison(base, 0, 0, size, flags);
132 }
133
134 void NOINLINE
kasan_unpoison(void * base,vm_size_t size)135 kasan_unpoison(void *base, vm_size_t size)
136 {
137 /* size=size, leftsz=0, rightsz=0 */
138 kasan_poison((vm_offset_t)base, size, 0, 0, 0);
139 }
140
141 void NOINLINE
kasan_unpoison_stack(uintptr_t base,size_t size)142 kasan_unpoison_stack(uintptr_t base, size_t size)
143 {
144 assert(base > 0);
145 assert(size > 0);
146
147 size_t partial = kasan_granule_partial(base);
148 base = kasan_granule_trunc(base);
149 size = kasan_granule_round(size + partial);
150
151 kasan_unpoison((void *)base, size);
152 }
153
154 void NOINLINE
kasan_unpoison_curstack(bool whole_stack)155 kasan_unpoison_curstack(bool whole_stack)
156 {
157 uintptr_t base = ml_stack_base();
158 size_t sz = ml_stack_size();
159 uintptr_t cur = (uintptr_t)&base;
160
161 if (whole_stack) {
162 cur = base;
163 }
164
165 if (cur >= base && cur < base + sz) {
166 /* unpoison from current stack depth to the top */
167 size_t unused = cur - base;
168 kasan_unpoison_stack(cur, sz - unused);
169 }
170 }
171
172 void NOINLINE
__asan_handle_no_return(void)173 __asan_handle_no_return(void)
174 {
175 kasan_unpoison_curstack(false);
176
177 /*
178 * No need to free any fakestack objects because they must stay alive until
179 * we drop the real stack, at which point we can drop the entire fakestack
180 * anyway.
181 */
182 }
183
184 void NOINLINE
kasan_load_kext(vm_offset_t base,vm_size_t __unused size,const void * bundleid)185 kasan_load_kext(vm_offset_t base, vm_size_t __unused size, const void *bundleid)
186 {
187 unsigned long sectsz;
188 void *sect;
189
190 #if KASAN_DYNAMIC_DENYLIST
191 kasan_dyn_denylist_load_kext(base, bundleid);
192 #endif
193
194 /* find the kasan globals segment/section */
195 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, §sz);
196 if (sect) {
197 kasan_init_globals((vm_address_t)sect, (vm_size_t)sectsz);
198 kexts_loaded++;
199 }
200 }
201
202 void NOINLINE
kasan_unload_kext(vm_offset_t base,vm_size_t size)203 kasan_unload_kext(vm_offset_t base, vm_size_t size)
204 {
205 unsigned long sectsz;
206 void *sect;
207
208 /* find the kasan globals segment/section */
209 sect = getsectdatafromheader((void *)base, KASAN_GLOBAL_SEGNAME, KASAN_GLOBAL_SECTNAME, §sz);
210 if (sect) {
211 kasan_unpoison((void *)base, size);
212 kexts_loaded--;
213 }
214
215 #if KASAN_DYNAMIC_DENYLIST
216 kasan_dyn_denylist_unload_kext(base);
217 #endif
218 }
219
220 /*
221 * It is not possible to fully enable/disable kasan. Try to disable as much checks
222 * as possible to allow panic code path to create a coredump without recursing into
223 * KASAN failures.
224 *
225 * Compiler inlined checks require us to keep running with kasan_enabled = 1 so the
226 * shadow map gets properly updated.
227 */
228 void NOINLINE
kasan_kdp_disable(void)229 kasan_kdp_disable(void)
230 {
231 kasan_enabled_checks = 0;
232 kasan_impl_kdp_disable();
233 }
234
235 static void NOINLINE
kasan_init_xnu_globals(void)236 kasan_init_xnu_globals(void)
237 {
238 const char *seg = KASAN_GLOBAL_SEGNAME;
239 const char *sect = KASAN_GLOBAL_SECTNAME;
240 unsigned long _size;
241 vm_offset_t globals;
242 vm_size_t size;
243 kernel_mach_header_t *header = (kernel_mach_header_t *)&_mh_execute_header;
244
245 if (!header) {
246 printf("KASan: failed to find kernel mach header\n");
247 printf("KASan: redzones for globals not poisoned\n");
248 return;
249 }
250
251 globals = (vm_offset_t)getsectdatafromheader(header, seg, sect, &_size);
252 if (!globals) {
253 printf("KASan: failed to find segment %s section %s\n", seg, sect);
254 printf("KASan: redzones for globals not poisoned\n");
255 return;
256 }
257 size = (vm_size_t)_size;
258
259 printf("KASan: found (%s,%s) at %#lx + %lu\n", seg, sect, globals, size);
260 kasan_init_globals(globals, size);
261 }
262
263 void NOINLINE
kasan_late_init(void)264 kasan_late_init(void)
265 {
266 #if KASAN_DYNAMIC_DENYLIST
267 kasan_init_dyn_denylist();
268 #endif
269 kasan_init_xnu_globals();
270 kasan_impl_late_init();
271 }
272
273 void NOINLINE
kasan_notify_stolen(vm_offset_t top)274 kasan_notify_stolen(vm_offset_t top)
275 {
276 kasan_map_shadow(kernel_vtop, top - kernel_vtop, KASAN_MAY_POISON);
277 }
278
/*
 * Debug aid: read the shadow bytes backing [base, base+sz) to verify that
 * the mappings exist and are stable. Reads only — the shadow may be backed
 * by shared zero pages that must not be written. No-op unless KASAN_DEBUG.
 */
static void NOINLINE
kasan_debug_touch_mappings(vm_offset_t base, vm_size_t sz)
{
#if KASAN_DEBUG
	vm_size_t i;
	uint8_t tmp1, tmp2;

	/* Hit every byte in the shadow map. Don't write due to the zero mappings. */
	for (i = 0; i < sz; i += sizeof(uint64_t)) {
		vm_offset_t addr = base + i;
		uint8_t *x = SHADOW_FOR_ADDRESS(addr);
		tmp1 = *x;
		/* compiler barrier: force two distinct loads of *x */
		asm volatile ("" ::: "memory");
		tmp2 = *x;
		asm volatile ("" ::: "memory");
		/* back-to-back reads of a stable mapping must agree */
		assert(tmp1 == tmp2);
	}
#else
	(void)base;
	(void)sz;
#endif
}
301
/* Valid values for kasan= boot-arg (bitmask; parsed in kasan_init()) */
#define KASAN_ARGS_FAKESTACK 0x0010U /* sets fakestack_enabled */
#define KASAN_ARGS_REPORTIGNORED 0x0020U /* sets report_suppressed_checks */
#define KASAN_ARGS_NODYCHECKS 0x0100U /* clears TYPE_DYNAMIC checks */
#define KASAN_ARGS_NOPOISON_HEAP 0x0200U /* clears TYPE_POISON_HEAP checks */
#define KASAN_ARGS_NOPOISON_GLOBAL 0x0400U /* clears TYPE_POISON_GLOBAL checks */
308
/*
 * First-phase KASAN bring-up: create the lock, map the shadow for the
 * kernel image, run arch-specific setup, apply boot-args, then flip
 * kasan_enabled on. Later setup continues in kasan_late_init().
 */
void NOINLINE
kasan_init(void)
{
	unsigned arg;

	kasan_lock_init();
	/* Map all of the kernel text and data */
	kasan_map_shadow(kernel_vbase, kernel_vtop - kernel_vbase, false);
	kasan_arch_init();

	/* handle KASan boot-args */
	if (PE_parse_boot_argn("kasan.checks", &arg, sizeof(arg))) {
		/* override the default set of enabled check classes */
		kasan_enabled_checks = arg;
	}

	if (PE_parse_boot_argn("kasan", &arg, sizeof(arg))) {
		if (arg & KASAN_ARGS_FAKESTACK) {
			fakestack_enabled = 1;
		}
		if (arg & KASAN_ARGS_REPORTIGNORED) {
			report_suppressed_checks = true;
		}
		if (arg & KASAN_ARGS_NODYCHECKS) {
			kasan_enabled_checks &= ~TYPE_DYNAMIC;
		}
		if (arg & KASAN_ARGS_NOPOISON_HEAP) {
			kasan_enabled_checks &= ~TYPE_POISON_HEAP;
		}
		if (arg & KASAN_ARGS_NOPOISON_GLOBAL) {
			kasan_enabled_checks &= ~TYPE_POISON_GLOBAL;
		}
	}

	/* Model-specific handling */
	kasan_impl_init();
	kasan_enabled = 1;
}
346
347 static void NOINLINE
kasan_notify_address_internal(vm_offset_t address,vm_size_t size,bool cannot_poison)348 kasan_notify_address_internal(vm_offset_t address, vm_size_t size, bool cannot_poison)
349 {
350 assert(address < VM_MAX_KERNEL_ADDRESS);
351
352 if (!kasan_enabled) {
353 return;
354 }
355
356 if (address < VM_MIN_KERNEL_AND_KEXT_ADDRESS || size == 0) {
357 /* only map kernel addresses and actual allocations */
358 return;
359 }
360
361 boolean_t flags;
362 kasan_lock(&flags);
363 kasan_map_shadow(address, size, cannot_poison);
364 kasan_unlock(flags);
365 kasan_debug_touch_mappings(address, size);
366 }
367
368 /*
369 * This routine is called throughout xnu to synchronize KASAN's shadow map
370 * view with the virtual memory layout modifications.
371 */
372 void
kasan_notify_address(vm_offset_t address,vm_size_t size)373 kasan_notify_address(vm_offset_t address, vm_size_t size)
374 {
375 kasan_notify_address_internal(address, size, KASAN_MAY_POISON);
376 }
377
378 /*
379 * Notify a range that is always valid and that will never change state.
380 * (in KASAN CLASSIC speak, that will never get poisoned).
381 */
382 void
kasan_notify_address_nopoison(vm_offset_t address,vm_size_t size)383 kasan_notify_address_nopoison(vm_offset_t address, vm_size_t size)
384 {
385 kasan_notify_address_internal(address, size, KASAN_CANNOT_POISON);
386 }
387
388 /*
389 * Call 'cb' for each contiguous range of the shadow map. This could be more
390 * efficient by walking the page table directly.
391 */
392 int
kasan_traverse_mappings(pmap_traverse_callback cb,void * ctx)393 kasan_traverse_mappings(pmap_traverse_callback cb, void *ctx)
394 {
395 uintptr_t shadow_base = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MIN_KERNEL_AND_KEXT_ADDRESS);
396 uintptr_t shadow_top = (uintptr_t)SHADOW_FOR_ADDRESS(VM_MAX_KERNEL_ADDRESS);
397 shadow_base = vm_map_trunc_page(shadow_base, PAGE_MASK);
398 shadow_top = vm_map_round_page(shadow_top, PAGE_MASK);
399
400 uintptr_t start = 0, end = 0;
401
402 for (uintptr_t addr = shadow_base; addr < shadow_top; addr += PAGE_SIZE) {
403 if (kasan_is_shadow_mapped(addr)) {
404 if (start == 0) {
405 start = addr;
406 }
407 end = addr + PAGE_SIZE;
408 } else if (start && end) {
409 cb(start, end, ctx);
410 start = end = 0;
411 }
412 }
413
414 if (start && end) {
415 cb(start, end, ctx);
416 }
417
418 return 0;
419 }
420
421 /*
422 * Expose KASAN configuration and an interface to trigger the set of tests
423 * through sysctl.
424 */
425 SYSCTL_NODE(_kern, OID_AUTO, kasan, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "");
426 SYSCTL_COMPAT_INT(_kern_kasan, OID_AUTO, available, CTLFLAG_RD, NULL, KASAN, "");
427
428 SYSCTL_UINT(_kern_kasan, OID_AUTO, enabled, CTLFLAG_RD, &kasan_enabled, 0, "");
429 SYSCTL_STRING(_kern_kasan, OID_AUTO, model, CTLFLAG_RD, KASAN_MODEL_STR, 0, "");
430 SYSCTL_UINT(_kern_kasan, OID_AUTO, checks, CTLFLAG_RW, &kasan_enabled_checks, 0, "");
431 SYSCTL_UINT(_kern_kasan, OID_AUTO, memused, CTLFLAG_RD, &shadow_pages_used, 0, "");
432 SYSCTL_UINT(_kern_kasan, OID_AUTO, memtotal, CTLFLAG_RD, &shadow_pages_total, 0, "");
433 SYSCTL_UINT(_kern_kasan, OID_AUTO, kexts, CTLFLAG_RD, &kexts_loaded, 0, "");
434
435 /* Old-style configuration options, maintained for compatibility */
436 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, light, CTLFLAG_RD, NULL, KASAN_LIGHT, "");
437 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, debug, CTLFLAG_RD, NULL, KASAN_DEBUG, "");
438 #if KASAN_CLASSIC
439 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, 1, "");
440 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, 1, "");
441 #else
442 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, zalloc, CTLFLAG_RD, NULL, 0, "");
443 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, kalloc, CTLFLAG_RD, NULL, 0, "");
444 #endif
445 SYSCTL_COMPAT_UINT(_kern_kasan, OID_AUTO, dynamicbl, CTLFLAG_RD, NULL, KASAN_DYNAMIC_DENYLIST, "");
446