/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <machine/atomic.h>
#include <sys/sysctl.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"
#include "kasan-classic.h"

/*
 * KASAN-CLASSIC
 *
 * This implementation relies on a shadow table that matches each
 * byte with 8 bytes of the kernel virtual address space. The value of this
 * byte is either:
 *
 * - 0: the full 8 bytes are addressable
 * - [1,7]: the byte is partially addressable (as many valid bytes
 *   as specified)
 * - 0xFx, 0xAC, 0xE9: the byte is not addressable and poisoned (for the
 *   complete list, check kasan-classic.h)
 *
 * Through instrumentation of every load and store, and through modifications
 * to the kernel to properly record and/or quarantine memory regions as a
 * consequence of memory management operations, KASAN can detect nearly any
 * type of memory corruption, with two big caveats: linear overflows into
 * adjacent valid memory and use-after-free of already reallocated memory.
 * These are addressed by redzoning and quarantines, respectively.
 *
 * For linear overflows, if the adjacent memory is valid (as is common on
 * both stack and heap), KASAN must add redzones next to each buffer.
 * For use-after-free, freed buffers are not returned immediately by
 * subsequent memory allocation calls, but are 'stored' in a quarantine,
 * de facto delaying reallocation.
 *
 * KASAN-CLASSIC has a significant memory cost:
 * 1) ~13% of available memory for the shadow table (4G phone -> ~512MB)
 * 2) ~20-30MB of quarantine space
 * 3) extra padding introduced to support redzones
 *
 * (1) and (2) are backed by memory stolen at boot. (3) is instead added at
 * runtime on top of each allocation.
 */
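
/*
 * Illustrative sketch (an assumption about what the instrumentation does,
 * not the exact generated code): how a shadow byte gates a 1-byte access
 * at `addr', given the 8-byte granule described above.
 *
 *	uint8_t shadow = *SHADOW_FOR_ADDRESS(addr);
 *	if (shadow == 0) {
 *		// whole granule addressable: access OK
 *	} else if (shadow < 8) {
 *		// partial granule: OK iff (addr & 7) < shadow
 *	} else {
 *		// poisoned (redzone, freed, ...): report a violation
 *	}
 */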

/* Configuration options */
static unsigned quarantine_enabled = 1; /* Quarantine on/off */
static unsigned free_yield = 0;         /* ms yield after each free */
static bool checks_enabled = false;     /* Poison checking on/off */

void
kasan_impl_init(void)
{
	unsigned arg;

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	/* Quarantine is enabled by default */
	quarantine_enabled = 1;

	/* Enable shadow checking early on. */
	checks_enabled = true;
}

void
kasan_impl_kdp_disable(void)
{
	quarantine_enabled = 0;
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	checks_enabled = false;
}

void NOINLINE
kasan_impl_late_init(void)
{
	kasan_init_fakestack();
}

/* Describes the source location where a global is defined. */
struct asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Describes an instrumented global variable. */
struct asan_global {
	uptr addr;
	uptr size;
	uptr size_with_redzone;
	const char *name;
	const char *module;
	uptr has_dynamic_init;
	struct asan_global_source_location *location;
#if CLANG_MIN_VERSION(8020000)
	uptr odr_indicator;
#endif
};

/* Walk through the globals section and set them up at boot */
void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/*
		 * Add a redzone after each global variable.
		 * size=variable size, leftsz=0, rightsz=redzone
		 */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
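
/*
 * Worked example (illustrative, using the 8-byte granule): for a 13-byte
 * global that the compiler pads to size_with_redzone = 32, the call above
 * writes four shadow entries, [VALID, 5, GLOBAL_RZ, GLOBAL_RZ]: one fully
 * valid granule, a partial granule with 5 valid bytes, then the right
 * redzone.
 */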

/* Reporting */
static const char *
kasan_classic_access_to_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else {
		return "access of";
	}
}

static const char *kasan_classic_shadow_strings[] = {
	[ASAN_VALID] = "VALID",
	[ASAN_PARTIAL1] = "PARTIAL1",
	[ASAN_PARTIAL2] = "PARTIAL2",
	[ASAN_PARTIAL3] = "PARTIAL3",
	[ASAN_PARTIAL4] = "PARTIAL4",
	[ASAN_PARTIAL5] = "PARTIAL5",
	[ASAN_PARTIAL6] = "PARTIAL6",
	[ASAN_PARTIAL7] = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED] = "STACK_FREED",
	[ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED] = "HEAP_FREED",
	[0xff] = NULL
};

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t reason)
{
	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	size_t n = 0;

	const char *shadow_str = kasan_classic_shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(logbuf, bufsize, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(logbuf, bufsize, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(logbuf, bufsize, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, kasan_classic_access_to_str(access), p, shadow_str);
	}

	return n;
}
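
/*
 * Example report line (illustrative address), for an 8-byte load that
 * landed in a heap right redzone:
 *
 *	KASan: invalid 8-byte load from 0xffffff80deadbee0 [HEAP_RIGHT_RZ]
 */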

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * Create a poisoned redzone at the start and at the end of a (marked) valid
 * range.
 * Parameters:
 *  base: starting address (including any left redzone)
 *  size: size of the valid range
 *  leftrz: size (multiple of KASAN_GRANULE) of the left redzone
 *  rightrz: size (multiple of KASAN_GRANULE) of the right redzone
 *  flags: select between different poisoning options (e.g. stack vs heap)
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	/*
	 * Buffer size is allowed to not be a multiple of 8. Create a partial
	 * entry in the shadow table if so.
	 */
	uint8_t partial = (uint8_t)kasan_granule_partial(size);
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(total) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz >>= KASAN_SCALE;
	size >>= KASAN_SCALE;
	total >>= KASAN_SCALE;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID;
	}
	/* Do we have any leftover valid byte? */
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
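
/*
 * Worked example (illustrative): kasan_poison(base, 13, 16, 19, ASAN_HEAP_RZ)
 * covers 48 bytes, i.e. 6 shadow entries, and writes
 * [HEAP_LEFT_RZ, HEAP_LEFT_RZ, VALID, 5, HEAP_RIGHT_RZ, HEAP_RIGHT_RZ]:
 * two left-redzone granules, 8 fully valid bytes, a partial granule with
 * 5 valid bytes, then the right redzone.
 */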

/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(size + leftrz + rightrz) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}

/*
 * Check the shadow table to determine whether [base, base+size) is valid or
 * is poisoned.
 */
static bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += kasan_granule_partial(base);
	base = kasan_granule_trunc(base);

	shadow = SHADOW_FOR_ADDRESS(base);
	size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;

	/* Walk the shadow table, fail on any non-valid value */
	for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
			/* valid */
			continue;
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}
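
/*
 * Example (illustrative): checking 3 bytes at a granule-aligned base whose
 * shadow byte is 5 (PARTIAL5). On the final iteration the remaining size
 * is 3, and 5 >= 3 with 5 < KASAN_GRANULE, so the access fits inside the
 * 5 valid bytes and the range is not reported as poisoned.
 */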

/* An 8-byte valid range is identified by 0 in the KASAN classic shadow table */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	__nosan_bzero((void *)page, size);
}

/*
 * Verify whether an access to memory is valid. A valid access is one that
 * doesn't touch any region marked as a poisoned redzone or invalid.
 * 'access' records whether the attempted access is a read or a write.
 */
void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;

	if (!checks_enabled) {
		return;
	}

	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, REASON_POISONED);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or matches the passed in
 * shadow value.
 */
bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	/* round 'base' up to skip any partial, which won't match 'shadow' */
	uintptr_t base = kasan_granule_round(addr);
	sz -= base - addr;

	uintptr_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow_match_value) {
			return false;
		}
		base += KASAN_GRANULE;
	}
	return true;
}

static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

/*
 * KASAN zalloc hooks
 *
 * KASAN can only distinguish between valid and invalid memory accesses.
 * This property severely limits its applicability to zalloc (and any other
 * memory allocator), where linear overflows generally land in valid memory
 * and non-trivial use-after-free can hit an already reallocated buffer.
 *
 * To overcome these limitations, KASAN requires a number of fairly invasive
 * changes to zalloc to add both red-zoning and quarantines.
 */

struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
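
/*
 * Resulting layout of an instrumented heap allocation (illustrative):
 *
 *	| left redzone ... header | user data (req bytes) | footer/backtrace ... |
 *	^ base                    ^ user pointer          ^ right redzone
 *
 * The header occupies the tail of the left redzone, immediately before the
 * user pointer; the backtrace footer occupies the start of the right redzone.
 */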

#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}

static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}

/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	vm_size_t tmp;
	if (os_add_overflow(size, 4 * PAGE_SIZE, &tmp)) {
		panic("allocation size overflow (%lu)", size);
	}

	if (size >= 128) {
		/* Add a little extra right redzone to larger objects. Gives us extra
		 * overflow protection, and more space for the backtrace. */
		size += 16;
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is a multiple of the granule */
	size = kasan_granule_round(size);

	return size;
}
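
/*
 * Example (illustrative, assuming for the sake of the arithmetic that
 * KASAN_GUARD_PAD is 32, i.e. two 16-byte guards):
 * kasan_alloc_resize(200) = 200 + 16 (extra right redzone for objects
 * >= 128 bytes) + 32 (guard padding) = 248, already granule-aligned.
 */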

extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, (uint32_t)frames, NULL, NULL);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}

/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}

/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert(kasan_granule_partial(addr) == 0);
	assert(kasan_granule_partial(size) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = (uint32_t)leftrz;
	h->alloc_size = (uint32_t)size;
	h->user_size = (uint32_t)req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
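
/*
 * Typical caller flow (a sketch under assumptions, not a verbatim call
 * site; `underlying_alloc' is hypothetical):
 *
 *	vm_size_t sz = kasan_alloc_resize(req);		// grow for redzones
 *	vm_offset_t base = underlying_alloc(sz);	// real allocation
 *	addr = kasan_alloc(base, sz, req, KASAN_GUARD_SIZE);
 *
 * On free, kasan_dealloc() recovers `base' and `sz' from the header.
 */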

/*
 * addr: address of usable allocation (excluding redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * leftrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_realloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	return kasan_alloc(addr - leftrz, size, req, leftrz);
}

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `heap_type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	if (!checks_enabled) {
		return;
	}

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 * KASAN Quarantine
 */

struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size : 28;
			vm_size_t user_size : 28;
			vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};

static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}

/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zone, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* zero-size kalloc allocations are allowed */
		assert(!zone);
	} else if (type == KASAN_HEAP_ZALLOC) {
		assert(zone && user_size);
	} else if (type == KASAN_HEAP_FAKESTACK) {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = ZONE_NULL;
	if (zone) {
		fle->zone = *zone;
	}
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		if (type != KASAN_HEAP_KALLOC) {
			assert((vm_offset_t)zone >= VM_MIN_KERNEL_AND_KEXT_ADDRESS &&
			    (vm_offset_t)zone <= VM_MAX_KERNEL_ADDRESS);
			*zone = tofree->zone;
		}

		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
	}
}
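
/*
 * Net effect (illustrative): while the quarantine has room, the freed
 * object is only queued and *addrp comes back as 0, so the caller frees
 * nothing. Once the entry or size budget is exceeded, the oldest entry
 * (or the incoming object, if it is the larger one) is handed back
 * through *addrp/*sizep (and *zone, for zalloc) so the caller frees that
 * instead.
 */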

void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size, bool quarantine)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, quarantine);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}

SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");