/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <machine/atomic.h>
#include <sys/sysctl.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"
#include "kasan-classic.h"

/*
 * KASAN-CLASSIC
 *
 * This implementation relies on a shadow table in which each byte tracks
 * 8 bytes (one granule) of the kernel virtual address space. The value of
 * a shadow byte is either:
 *
 * - 0: the full 8 bytes are addressable
 * - [1,7]: the granule is partially addressable (as many valid bytes
 *   as specified)
 * - 0xFx, 0xAC, 0xE9: the granule is not addressable and poisoned somehow
 *   (for a complete list, check kasan-classic.h)
 *
 * Through instrumentation of every load and store and through modifications
 * to the kernel to properly record and/or quarantine memory regions as a
 * consequence of memory management operations, KASAN can detect nearly any
 * type of memory corruption, with two big caveats: linear overflows into
 * adjacent valid memory and use-after-free hits on already-reallocated
 * buffers are invisible to plain shadow checks. These are addressed by
 * redzoning and quarantines.
 *
 * For linear overflows, since the adjacent memory is often valid (as is
 * common on both stack and heap), KASAN must add poisoned redzones next to
 * each buffer. For use-after-free, freed buffers are not returned
 * immediately to subsequent memory allocation calls, but are 'stored' in a
 * quarantined region, de facto delaying reallocation.
 *
 * KASAN-CLASSIC has significant memory cost:
 * 1) ~13% of available memory for the shadow table (4G phone -> ~512MB)
 * 2) ~20-30MB of quarantine space
 * 3) extra padding introduced to support redzones
 *
 * (1) and (2) are backed by memory stolen at boot. (3) is instead added at
 * runtime on top of each allocation.
 */
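
/*
 * Worked example of the shadow encoding (a sketch, assuming the usual 1:8
 * classic mapping where KASAN_SCALE == 3):
 *
 *     uint8_t *shadow = (uint8_t *)((addr >> KASAN_SCALE) + KASAN_OFFSET);
 *
 * For a 13-byte object at a granule-aligned address A, the table holds:
 *
 *     shadow[0] = 0;    // A..A+7 fully addressable
 *     shadow[1] = 5;    // only the first 5 bytes of A+8..A+15 are valid
 *
 * so a 1-byte access to A+13 (offset 5 into the second granule, 5 >= 5) is
 * flagged, while A+12 (offset 4 < 5) passes.
 */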

_Static_assert(!KASAN_LIGHT, "Light mode not supported by KASan Classic.");

/* Configuration options */
static unsigned quarantine_enabled = 1; /* Quarantine on/off */
static unsigned free_yield = 0;         /* ms yield after each free */
static bool checks_enabled = false;     /* Poison checking on/off */

/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET;
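
/*
 * The inlined check is roughly equivalent to the following sketch (an
 * illustration of the classic ASan algorithm for accesses of up to 8 bytes,
 * not the exact codegen):
 *
 *     int8_t s = *(int8_t *)((addr >> 3) + __asan_shadow_memory_dynamic_address);
 *     if (s != 0 && (int8_t)((addr & 7) + size - 1) >= s) {
 *         // call out to the runtime to report the violation
 *     }
 *
 * Poisoned shadow values are >= 0x80 and thus negative as int8_t, so any
 * access to a poisoned granule fails the comparison and gets reported.
 */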

void
kasan_impl_init(void)
{
	unsigned arg;

	if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
		free_yield = arg;
	}

	/* Quarantine is enabled by default */
	quarantine_enabled = 1;

	/* Enable shadow checking early on. */
	checks_enabled = true;
}

void
kasan_impl_kdp_disable(void)
{
	quarantine_enabled = 0;
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	checks_enabled = false;
}

void NOINLINE
kasan_impl_late_init(void)
{
	kasan_init_fakestack();
}

/* Describes the source location where a global is defined. */
struct asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Describes an instrumented global variable. */
struct asan_global {
	uptr addr;
	uptr size;
	uptr size_with_redzone;
	const char *name;
	const char *module;
	uptr has_dynamic_init;
	struct asan_global_source_location *location;
#if CLANG_MIN_VERSION(8020000)
	uptr odr_indicator;
#endif
};

/* Walk through the globals section and set them up at boot */
void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/*
		 * Add a redzone after each global variable.
		 * size=variable size, leftsz=0, rightsz=redzone
		 */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
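
/*
 * Illustrative example (sizes are compiler- and version-dependent, not
 * guaranteed): for a global such as
 *
 *     int foo;
 *
 * clang reserves extra space after the variable and emits a record along
 * the lines of
 *
 *     struct asan_global g = {
 *         .addr = (uptr)&foo,
 *         .size = 4,
 *         .size_with_redzone = 64,   // e.g. with a 32-byte minimum redzone
 *         ...
 *     };
 *
 * kasan_init_globals() above then marks bytes 4..63 past &foo as
 * ASAN_GLOBAL_RZ, with a partial shadow entry (4) for the first granule.
 */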

/* Reporting */
static const char *
kasan_classic_access_to_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else {
		return "access of";
	}
}

static const char *kasan_classic_shadow_strings[] = {
	[ASAN_VALID] = "VALID",
	[ASAN_PARTIAL1] = "PARTIAL1",
	[ASAN_PARTIAL2] = "PARTIAL2",
	[ASAN_PARTIAL3] = "PARTIAL3",
	[ASAN_PARTIAL4] = "PARTIAL4",
	[ASAN_PARTIAL5] = "PARTIAL5",
	[ASAN_PARTIAL6] = "PARTIAL6",
	[ASAN_PARTIAL7] = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ] = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ] = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED] = "STACK_FREED",
	[ASAN_STACK_OOSCOPE] = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ] = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ] = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ] = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED] = "HEAP_FREED",
	[0xff] = NULL
};

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t reason)
{
	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	size_t n = 0;

	const char *shadow_str = kasan_classic_shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(logbuf, bufsize, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(logbuf, bufsize, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(logbuf, bufsize, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, kasan_classic_access_to_str(access), p, shadow_str);
	}

	return n;
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * Create a poisoned redzone at the beginning and at the end of a (marked)
 * valid range.
 * Parameters:
 *  base: starting address (including any left redzone)
 *  size: size of the valid range
 *  leftrz: size (multiple of KASAN_GRANULE) of the left redzone
 *  rightrz: size (multiple of KASAN_GRANULE) of the right redzone
 *  flags: select between different poisoning options (e.g. stack vs heap)
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	/*
	 * Buffer size is allowed to not be a multiple of 8. Create a partial
	 * entry in the shadow table if so.
	 */
	uint8_t partial = (uint8_t)kasan_granule_partial(size);
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t i = 0;

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(total) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz >>= KASAN_SCALE;
	size >>= KASAN_SCALE;
	total >>= KASAN_SCALE;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	for (; i < leftrz; i++) {
		shadow[i] = l_flags;
	}
	for (; i < leftrz + size; i++) {
		shadow[i] = ASAN_VALID;
	}
	/* Do we have any leftover valid bytes? */
	if (partial && (i < total)) {
		shadow[i] = partial;
		i++;
	}
	for (; i < total; i++) {
		shadow[i] = r_flags;
	}
}
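
/*
 * Example: kasan_poison(A, 13, 16, 19, ASAN_HEAP_RZ) with A granule-aligned
 * covers 16 + 13 + 19 = 48 bytes (6 granules) and writes:
 *
 *     shadow[0..1] = ASAN_HEAP_LEFT_RZ;   // 16-byte left redzone
 *     shadow[2]    = ASAN_VALID;          // first 8 valid bytes
 *     shadow[3]    = 5;                   // partial granule: 13 % 8 bytes
 *     shadow[4..5] = ASAN_HEAP_RIGHT_RZ;  // right redzone
 */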

/*
 * write junk into the redzones
 */
static void NOINLINE
kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
{
#if KASAN_DEBUG
	vm_size_t i;
	const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
	const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
	uint8_t *buf = (uint8_t *)base;

	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(size + leftrz + rightrz) == 0);

	for (i = 0; i < leftrz; i++) {
		buf[i] = deadbeef[i % 4];
	}

	for (i = 0; i < rightrz; i++) {
		buf[i + size + leftrz] = c0ffee[i % 4];
	}
#else
	(void)base;
	(void)size;
	(void)leftrz;
	(void)rightrz;
#endif
}

/*
 * Check the shadow table to determine whether [base, base+size) is valid or
 * is poisoned.
 */
static bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t *shadow;
	vm_size_t i;

	if (!kasan_enabled) {
		return false;
	}

	size += kasan_granule_partial(base);
	base = kasan_granule_trunc(base);

	shadow = SHADOW_FOR_ADDRESS(base);
	size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;

	/* Walk the shadow table, fail on any non-valid value */
	for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
			/* valid */
			continue;
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}
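
/*
 * Example of the partial-granule acceptance above: probing 3 bytes that
 * start at the last granule of an object whose shadow byte is ASAN_PARTIAL5
 * reaches the final iteration with size == 3; since 3 < KASAN_GRANULE and
 * s == 5 >= 3, the range is accepted. A 6-byte probe of the same granule
 * leaves size == 6 > 5 and takes the fail path.
 */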

/* An 8-byte valid range is identified by 0 in the KASAN Classic shadow table */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	__nosan_bzero((void *)page, size);
}

/*
 * Verify whether an access to memory is valid. A valid access is one that
 * doesn't touch any region marked as a poisoned redzone or invalid.
 * 'access' records whether the attempted access is a read or a write.
 */
void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;

	if (!checks_enabled) {
		return;
	}

	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, REASON_POISONED);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or matches the passed in
 * shadow value.
 */
bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	/* round 'base' up to skip any partial, which won't match 'shadow' */
	uintptr_t base = kasan_granule_round(addr);
	sz -= base - addr;

	uintptr_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow_match_value) {
			return false;
		}
		base += KASAN_GRANULE;
	}
	return true;
}

static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

/*
 * KASAN zalloc hooks
 *
 * KASAN can only distinguish between valid and invalid memory accesses.
 * This property severely limits its applicability to zalloc (and any other
 * memory allocator): linear overflows generally land in valid adjacent
 * memory, and non-trivial use-after-free accesses can hit an already
 * reallocated buffer.
 *
 * To overcome these limitations, KASAN requires a bunch of fairly invasive
 * changes to zalloc to add both red-zoning and quarantines.
 */

struct kasan_alloc_header {
	uint16_t magic;
	uint16_t crc;
	uint32_t alloc_size;
	uint32_t user_size;
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS;
		uint32_t frames : BACKTRACE_BITS;
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");

#define LIVE_XOR ((uint16_t)0x3a65)
#define FREE_XOR ((uint16_t)0xf233)

static uint16_t
magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
{
	uint16_t magic = addr & 0xFFFF;
	magic ^= (addr >> 16) & 0xFFFF;
	magic ^= (addr >> 32) & 0xFFFF;
	magic ^= (addr >> 48) & 0xFFFF;
	magic ^= magic_xor;
	return magic;
}

static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
	*size = rightrz;
	return (void *)(addr + h->user_size);
}

/*
 * size: user-requested allocation size
 * ret: minimum size for the real allocation
 */
vm_size_t
kasan_alloc_resize(vm_size_t size)
{
	if (size >= 128) {
		/* Add a little extra right redzone to larger objects. Gives us extra
		 * overflow protection, and more space for the backtrace. */
		size += 16;
	}

	/* add left and right redzones */
	size += KASAN_GUARD_PAD;

	/* ensure the final allocation is a multiple of the granule */
	size = kasan_granule_round(size);

	return size;
}
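
/*
 * Example (a sketch; the guard sizes are configuration-dependent): assuming
 * 16-byte guards on either side (KASAN_GUARD_PAD == 32), a 20-byte request
 * becomes 20 + 32 = 52, rounded up to 56; a 128-byte request first gains the
 * extra right redzone: 128 + 16 + 32 = 176, already granule-aligned.
 */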

extern vm_offset_t vm_kernel_slid_base;

static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, (uint32_t)frames, NULL, NULL);

		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}
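
/*
 * Since each stored frame is a 32-bit offset from the slid kernel base, a
 * debugger can rebuild the call stack with (sketch):
 *
 *     uintptr_t frame = vm_kernel_slid_base + (uintptr_t)ptr[i];
 */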

/* addr: user address of allocation */
static uint16_t
kasan_alloc_crc(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;

	uint16_t crc_orig = h->crc;
	h->crc = 0;

	uint16_t crc = 0;
	crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
	crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);

	h->crc = crc_orig;

	return crc;
}

/*
 * addr: base address of full allocation (including redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	if (!addr) {
		return 0;
	}
	assert(size > 0);
	assert(kasan_granule_partial(addr) == 0);
	assert(kasan_granule_partial(size) == 0);

	vm_size_t rightrz = size - req - leftrz;

	kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
	kasan_rz_clobber(addr, req, leftrz, rightrz);

	addr += leftrz;

	/* stash the allocation sizes in the left redzone */
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	h->magic = magic_for_addr(addr, LIVE_XOR);
	h->left_rz = (uint32_t)leftrz;
	h->alloc_size = (uint32_t)size;
	h->user_size = (uint32_t)req;

	/* ... and a backtrace in the right redzone */
	vm_size_t fsize;
	struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
	h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2);

	/* checksum the whole object, minus the user part */
	h->crc = kasan_alloc_crc(addr);

	return addr;
}
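
/*
 * Resulting layout of a live allocation: the header occupies the tail of
 * the left redzone and the backtrace the head of the right one.
 *
 *   base                             addr (returned to caller)
 *   |                                |
 *   v                                v
 *   +-----------------+-------------+-----------+------------------------+
 *   |  left redzone   |  alloc hdr  | user data | right rz (backtrace)   |
 *   +-----------------+-------------+-----------+------------------------+
 *   |<----------- leftrz ---------->|<-- req -->|<------- rightrz ------>|
 */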

/*
 * addr: address of usable allocation (excluding redzones)
 * size: total size of allocation (including redzones)
 * req: user-requested allocation size
 * lrz: size of the left redzone in bytes
 * ret: address of usable allocation
 */
vm_address_t
kasan_realloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
{
	return kasan_alloc(addr - leftrz, size, req, leftrz);
}

/*
 * addr: user pointer
 * size: returns full original allocation size
 * ret: original allocation ptr
 */
vm_address_t
kasan_dealloc(vm_offset_t addr, vm_size_t *size)
{
	assert(size && addr);
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	*size = h->alloc_size;
	h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */
	return addr - h->left_rz;
}

/*
 * return the original user-requested allocation size
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);
	assert(h->magic == magic_for_addr(addr, LIVE_XOR));
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation of `type'
 */
void
kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
{
	struct kasan_alloc_header *h = header_for_user_addr(addr);

	if (!checks_enabled) {
		return;
	}

	/* map heap type to an internal access type */
	access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
	    heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
	    heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;

	/* check the magic and crc match */
	if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}
	if (h->crc != kasan_alloc_crc(addr)) {
		kasan_violation(addr, size, type, REASON_MOD_OOB);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != size) {
		kasan_violation(addr, size, type, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, size, type, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, size, type);
}

/*
 * KASAN Quarantine
 */

struct freelist_entry {
	uint16_t magic;
	uint16_t crc;
	STAILQ_ENTRY(freelist_entry) list;
	union {
		struct {
			vm_size_t size : 28;
			vm_size_t user_size : 28;
			vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
			vm_size_t __unused : 8 - BACKTRACE_BITS;
		};
		uint64_t bits;
	};
	zone_t zone;
	uint32_t backtrace[];
};
_Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");

struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;
	unsigned long max_entries;
	vm_size_t size;
	vm_size_t max_size;
};

struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};

static uint16_t
fle_crc(struct freelist_entry *fle)
{
	return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
}

/*
 * addr, sizep: pointer/size of full allocation including redzone
 */
void NOINLINE
kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
    zone_t *zonep, vm_size_t user_size, int locked,
    bool doquarantine)
{
	vm_size_t size = *sizep;
	vm_offset_t addr = *(vm_offset_t *)addrp;
	zone_t zone = *zonep;

	assert(type >= 0 && type < KASAN_HEAP_TYPES);
	if (type == KASAN_HEAP_KALLOC) {
		/* for kalloc the size can be 0 */
		assert(zone);
	} else {
		assert(zone && user_size);
	}

	/* clobber the entire freed region */
	kasan_rz_clobber(addr, 0, size, 0);

	if (!doquarantine || !quarantine_enabled) {
		goto free_current;
	}

	/* poison the entire freed region */
	uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
	kasan_poison(addr, 0, size, 0, flags);

	struct freelist_entry *fle, *tofree = NULL;
	struct quarantine *q = &quarantines[type];
	assert(size >= sizeof(struct freelist_entry));

	/* create a new freelist entry */
	fle = (struct freelist_entry *)addr;
	fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
	fle->size = size;
	fle->user_size = user_size;
	fle->frames = 0;
	fle->zone = zone;
	if (type != KASAN_HEAP_FAKESTACK) {
		/* don't do expensive things on the fakestack path */
		fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
		fle->crc = fle_crc(fle);
	}

	boolean_t flg;
	if (!locked) {
		kasan_lock(&flg);
	}

	if (q->size + size > q->max_size) {
		/*
		 * Adding this entry would put us over the max quarantine size. Free the
		 * larger of the current object and the quarantine head object.
		 */
		tofree = STAILQ_FIRST(&q->freelist);
		if (fle->size > tofree->size) {
			goto free_current_locked;
		}
	}

	STAILQ_INSERT_TAIL(&q->freelist, fle, list);
	q->entries++;
	q->size += size;

	/* free the oldest entry, if necessary */
	if (tofree || q->entries > q->max_entries) {
		tofree = STAILQ_FIRST(&q->freelist);
		STAILQ_REMOVE_HEAD(&q->freelist, list);

		assert(q->entries > 0 && q->size >= tofree->size);
		q->entries--;
		q->size -= tofree->size;

		zone = tofree->zone;
		size = tofree->size;
		addr = (vm_offset_t)tofree;

		/* check the magic and crc match */
		if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}
		if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
			kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
		}

		/* clobber the quarantine header */
		__nosan_bzero((void *)addr, sizeof(struct freelist_entry));
	} else {
		/* quarantine is not full - don't really free anything */
		addr = 0;
		zone = ZONE_NULL;
		size = 0;
	}

free_current_locked:
	if (!locked) {
		kasan_unlock(flg);
	}

free_current:
	*addrp = (void *)addr;
	if (addr) {
		kasan_unpoison((void *)addr, size);
		*sizep = size;
		*zonep = zone;
	}
}

void NOINLINE
kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
    vm_size_t user_size)
{
	kasan_free_internal(addrp, sizep, type, zone, user_size, 0, true);

	if (free_yield) {
		thread_yield_internal(free_yield);
	}
}

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}
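
/*
 * Illustrative example (the exact cookie ABI is compiler-dependent): for
 * "new Foo[n]" where Foo has a non-trivial destructor, the compiler stores
 * n in the word immediately preceding the first element. An instrumented
 * operator new[] poisons that word as ASAN_ARRAY_COOKIE (0xAC), and it must
 * be unpoisoned here before the deleting destructor reads n back.
 */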

SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");