1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <stdint.h>
31 #include <stdbool.h>
32 #include <vm/vm_map.h>
33 #include <kern/assert.h>
34 #include <kern/cpu_data.h>
35 #include <machine/machine_routines.h>
36 #include <kern/locks.h>
37 #include <kern/simple_lock.h>
38 #include <kern/debug.h>
39 #include <kern/backtrace.h>
40 #include <kern/thread.h>
41 #include <libkern/libkern.h>
42 #include <mach/mach_vm.h>
43 #include <mach/mach_types.h>
44 #include <mach/vm_param.h>
45 #include <mach/machine/vm_param.h>
46 #include <mach/sdt.h>
47 #include <machine/atomic.h>
48 #include <sys/sysctl.h>
49
50 #include "kasan.h"
51 #include "kasan_internal.h"
52 #include "memintrinsics.h"
53 #include "kasan-classic.h"
54
55
56 /*
57 * KASAN-CLASSIC
58 *
59 * This implementation relies on a shadow table that matches each
60 * byte with 8 bytes of the kernel virtual address space. The value of this
61 * byte is either:
62 *
63 * - 0: the full 8 bytes are addressable
64 * - [1,7]: the byte is partially addressable (as many valid bytes
65 * as specified)
66 * - 0xFx, 0xAC, 0xE9: byte is not addressable and poisoned somehow (for a
67 * complete list, check kasan-classic.h)
68 *
69 * Through instrumentation of every load and store and through modifications
70 * to the kernel to properly record and/or quarantine memory regions as a
71 * consequence of memory management operations, KASAN can detect nearly any
72 * type of memory corruption, with two big caveats: linear overflows and
73 * use-after-free. These are solved by redzoning and quarantines.
74 *
75 * For linear overflows, if the adjacent memory is valid (as it is common on
76 * both stack and heap), KASAN must add redzones next to each buffer.
77 * For use-after-free, free'd buffers are not returned immediately on subsequent
78 * memory allocation calls, but are 'stored' in a quarantined region, de-facto
79 * delaying reallocation.
80 *
81 * KASAN-CLASSIC has significant memory cost:
82 * 1) ~13% of available memory for the shadow table (4G phone -> ~512MB)
83 * 2) ~20-30MB of quarantine space
84 * 3) extra padding introduced to support redzones
85 *
 * (1) and (2) are backed by stealing memory at boot. (3) is instead added at
87 * runtime on top of each allocation.
88 */
89
/* Configuration options */
static unsigned quarantine_enabled = 1; /* Quarantine on/off */
static unsigned free_yield = 0; /* ms yield after each free */
static bool checks_enabled = false; /* Poison checking on/off */
94
95 void
kasan_impl_init(void)96 kasan_impl_init(void)
97 {
98 unsigned arg;
99
100 if (PE_parse_boot_argn("kasan.free_yield_ms", &arg, sizeof(arg))) {
101 free_yield = arg;
102 }
103
104 /* Quarantine is enabled by default */
105 quarantine_enabled = 1;
106
107 /* Enable shadow checking early on. */
108 checks_enabled = true;
109 }
110
/*
 * Disable KASAN mechanisms for the kernel debugger (KDP): quarantining,
 * stack use-after-return detection, the fakestack, and shadow checks are
 * all turned off so the debugger can operate unimpeded.
 */
void
kasan_impl_kdp_disable(void)
{
	quarantine_enabled = 0;
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	checks_enabled = false;
}
119
/* Second-stage initialization: bring up the fakestack allocator. */
void NOINLINE
kasan_impl_late_init(void)
{
	kasan_init_fakestack();
}
125
/*
 * Describes the source location where a global is defined.
 * NOTE(review): layout appears to mirror the descriptor emitted by the
 * compiler's ASan instrumentation — do not reorder fields.
 */
struct asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};
132
/*
 * Describes an instrumented global variable.
 * NOTE(review): this layout must match the global-descriptor records the
 * compiler emits into the globals section (see kasan_init_globals) — do
 * not reorder or resize fields.
 */
struct asan_global {
	uptr addr;              /* address of the variable itself */
	uptr size;              /* size of the variable (valid bytes) */
	uptr size_with_redzone; /* size including the trailing redzone */
	const char *name;
	const char *module;
	uptr has_dynamic_init;
	struct asan_global_source_location *location;
#if CLANG_MIN_VERSION(8020000)
	uptr odr_indicator;
#endif
};
146
147 /* Walk through the globals section and set them up at boot */
148 void NOINLINE
kasan_init_globals(vm_offset_t base,vm_size_t size)149 kasan_init_globals(vm_offset_t base, vm_size_t size)
150 {
151 struct asan_global *glob = (struct asan_global *)base;
152 struct asan_global *glob_end = (struct asan_global *)(base + size);
153 for (; glob < glob_end; glob++) {
154 /*
155 * Add a redzone after each global variable.
156 * size=variable size, leftsz=0, rightsz=redzone
157 */
158 kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
159 }
160 }
161
162 /* Reporting */
163 static const char *
kasan_classic_access_to_str(access_t type)164 kasan_classic_access_to_str(access_t type)
165 {
166 if (type & TYPE_READ) {
167 return "load from";
168 } else if (type & TYPE_WRITE) {
169 return "store to";
170 } else if (type & TYPE_FREE) {
171 return "free of";
172 } else {
173 return "access of";
174 }
175 }
176
/*
 * Human-readable names for each shadow byte value, used when decoding a
 * violation. Entries not listed here are NULL and are reported as
 * "<invalid>" by kasan_impl_decode_issue().
 */
static const char *kasan_classic_shadow_strings[] = {
	[ASAN_VALID]          = "VALID",
	[ASAN_PARTIAL1]       = "PARTIAL1",
	[ASAN_PARTIAL2]       = "PARTIAL2",
	[ASAN_PARTIAL3]       = "PARTIAL3",
	[ASAN_PARTIAL4]       = "PARTIAL4",
	[ASAN_PARTIAL5]       = "PARTIAL5",
	[ASAN_PARTIAL6]       = "PARTIAL6",
	[ASAN_PARTIAL7]       = "PARTIAL7",
	[ASAN_STACK_LEFT_RZ]  = "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ]   = "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED]    = "STACK_FREED",
	[ASAN_STACK_OOSCOPE]  = "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ]      = "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ]   = "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ]  = "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED]     = "HEAP_FREED",
	[0xff]                = NULL /* force the array to cover all 256 values */
};
197
198 size_t
kasan_impl_decode_issue(char * logbuf,size_t bufsize,uptr p,uptr width,access_t access,violation_t reason)199 kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t reason)
200 {
201 uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
202 uint8_t shadow_type = *shadow_ptr;
203 size_t n = 0;
204
205 const char *shadow_str = kasan_classic_shadow_strings[shadow_type];
206 if (!shadow_str) {
207 shadow_str = "<invalid>";
208 }
209
210 if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
211 n += scnprintf(logbuf, bufsize, "KASan: free of corrupted/invalid object %#lx\n", p);
212 } else if (reason == REASON_MOD_AFTER_FREE) {
213 n += scnprintf(logbuf, bufsize, "KASan: UaF of quarantined object %#lx\n", p);
214 } else {
215 n += scnprintf(logbuf, bufsize, "KASan: invalid %lu-byte %s %#lx [%s]\n",
216 width, kasan_classic_access_to_str(access), p, shadow_str);
217 }
218
219 return n;
220 }
221
222 static inline bool
kasan_poison_active(uint8_t flags)223 kasan_poison_active(uint8_t flags)
224 {
225 switch (flags) {
226 case ASAN_GLOBAL_RZ:
227 return kasan_check_enabled(TYPE_POISON_GLOBAL);
228 case ASAN_HEAP_RZ:
229 case ASAN_HEAP_LEFT_RZ:
230 case ASAN_HEAP_RIGHT_RZ:
231 case ASAN_HEAP_FREED:
232 return kasan_check_enabled(TYPE_POISON_HEAP);
233 default:
234 return true;
235 }
236 }
237
238 /*
239 * Create a poisoned redzone at the top and at the end of a (marked) valid range.
240 * Parameters:
241 * base: starting address (including the eventual left red zone)
242 * size: size of the valid range
243 * leftrz: size (multiple of KASAN_GRANULE) of the left redzone
244 * rightrz: size (multiple of KASAN_GRANULE) of the right redzone
245 * flags: select between different poisoning options (e.g. stack vs heap)
246 */
247 void NOINLINE
kasan_poison(vm_offset_t base,vm_size_t size,vm_size_t leftrz,vm_size_t rightrz,uint8_t flags)248 kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
249 vm_size_t rightrz, uint8_t flags)
250 {
251 uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
252 /*
253 * Buffer size is allowed to not be a multiple of 8. Create a partial
254 * entry in the shadow table if so.
255 */
256 uint8_t partial = (uint8_t)kasan_granule_partial(size);
257 vm_size_t total = leftrz + size + rightrz;
258 vm_size_t i = 0;
259
260 /* ensure base, leftrz and total allocation size are granule-aligned */
261 assert(kasan_granule_partial(base) == 0);
262 assert(kasan_granule_partial(leftrz) == 0);
263 assert(kasan_granule_partial(total) == 0);
264
265 if (!kasan_enabled || !kasan_poison_active(flags)) {
266 return;
267 }
268
269 leftrz >>= KASAN_SCALE;
270 size >>= KASAN_SCALE;
271 total >>= KASAN_SCALE;
272
273 uint8_t l_flags = flags;
274 uint8_t r_flags = flags;
275
276 if (flags == ASAN_STACK_RZ) {
277 l_flags = ASAN_STACK_LEFT_RZ;
278 r_flags = ASAN_STACK_RIGHT_RZ;
279 } else if (flags == ASAN_HEAP_RZ) {
280 l_flags = ASAN_HEAP_LEFT_RZ;
281 r_flags = ASAN_HEAP_RIGHT_RZ;
282 }
283
284 /*
285 * poison the redzones and unpoison the valid bytes
286 */
287 for (; i < leftrz; i++) {
288 shadow[i] = l_flags;
289 }
290 for (; i < leftrz + size; i++) {
291 shadow[i] = ASAN_VALID;
292 }
293 /* Do we have any leftover valid byte? */
294 if (partial && (i < total)) {
295 shadow[i] = partial;
296 i++;
297 }
298 for (; i < total; i++) {
299 shadow[i] = r_flags;
300 }
301 }
302
303 /*
304 * write junk into the redzones
305 */
306 static void NOINLINE
kasan_rz_clobber(vm_offset_t base,vm_size_t size,vm_size_t leftrz,vm_size_t rightrz)307 kasan_rz_clobber(vm_offset_t base, vm_size_t size, vm_size_t leftrz, vm_size_t rightrz)
308 {
309 #if KASAN_DEBUG
310 vm_size_t i;
311 const uint8_t deadbeef[] = { 0xde, 0xad, 0xbe, 0xef };
312 const uint8_t c0ffee[] = { 0xc0, 0xff, 0xee, 0xc0 };
313 uint8_t *buf = (uint8_t *)base;
314
315 assert(kasan_granule_partial(base) == 0);
316 assert(kasan_granule_partial(leftrz) == 0);
317 assert(kasan_granule_partial(size + leftrz + rightrz) == 0);
318
319 for (i = 0; i < leftrz; i++) {
320 buf[i] = deadbeef[i % 4];
321 }
322
323 for (i = 0; i < rightrz; i++) {
324 buf[i + size + leftrz] = c0ffee[i % 4];
325 }
326 #else
327 (void)base;
328 (void)size;
329 (void)leftrz;
330 (void)rightrz;
331 #endif
332 }
333
334 /*
335 * Check the shadow table to determine whether [base, base+size) is valid or
336 * is poisoned.
337 */
338 static bool NOINLINE
kasan_range_poisoned(vm_offset_t base,vm_size_t size,vm_offset_t * first_invalid)339 kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
340 {
341 uint8_t *shadow;
342 vm_size_t i;
343
344 if (!kasan_enabled) {
345 return false;
346 }
347
348 size += kasan_granule_partial(base);
349 base = kasan_granule_trunc(base);
350
351 shadow = SHADOW_FOR_ADDRESS(base);
352 size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;
353
354 /* Walk the shadow table, fail on any non-valid value */
355 for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
356 assert(size > 0);
357 uint8_t s = shadow[i];
358 if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
359 /* valid */
360 continue;
361 } else {
362 goto fail;
363 }
364 }
365
366 return false;
367
368 fail:
369 if (first_invalid) {
370 /* XXX: calculate the exact first byte that failed */
371 *first_invalid = base + i * 8;
372 }
373 return true;
374 }
375
/* An 8-byte valid range is identified by 0 in the KASAN classic shadow table */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	/* zero-filling the shadow marks every covered granule fully addressable */
	__nosan_bzero((void *)page, size);
}
382
383 /*
384 * Verify whether an access to memory is valid. A valid access is one that
385 * doesn't touch any region marked as a poisoned redzone or invalid.
386 * 'access' records whether the attempted access is a read or a write.
387 */
388 void NOINLINE
kasan_check_range(const void * x,size_t sz,access_t access)389 kasan_check_range(const void *x, size_t sz, access_t access)
390 {
391 uintptr_t invalid;
392 uintptr_t ptr = (uintptr_t)x;
393
394 if (!checks_enabled) {
395 return;
396 }
397
398 if (kasan_range_poisoned(ptr, sz, &invalid)) {
399 size_t remaining = sz - (invalid - ptr);
400 kasan_violation(invalid, remaining, access, REASON_POISONED);
401 }
402 }
403
404 /*
405 * Return true if [base, base+sz) is unpoisoned or matches the passed in
406 * shadow value.
407 */
408 bool
kasan_check_shadow(vm_address_t addr,vm_size_t sz,uint8_t shadow_match_value)409 kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
410 {
411 /* round 'base' up to skip any partial, which won't match 'shadow' */
412 uintptr_t base = kasan_granule_round(addr);
413 sz -= base - addr;
414
415 uintptr_t end = base + sz;
416
417 while (base < end) {
418 uint8_t *sh = SHADOW_FOR_ADDRESS(base);
419 if (*sh && *sh != shadow_match_value) {
420 return false;
421 }
422 base += KASAN_GRANULE;
423 }
424 return true;
425 }
426
/* Backtrace frames are counted in a BACKTRACE_BITS-wide bitfield. */
static const size_t BACKTRACE_BITS = 4;
static const size_t BACKTRACE_MAXFRAMES = (1UL << BACKTRACE_BITS) - 1;

/*
 * KASAN zalloc hooks
 *
 * KASAN can only distinguish between valid and invalid memory accesses.
 * This property severely limits its applicability to zalloc (and any other
 * memory allocator), whereby linear overflows are generally to valid
 * memory and non-simple use-after-free can hit an already reallocated buffer.
 *
 * To overcome these limitations, KASAN requires a bunch of fairly invasive
 * changes to zalloc to add both red-zoning and quarantines.
 */

/*
 * Per-allocation header, stored at the tail of the left redzone,
 * immediately before the user pointer (see header_for_user_addr).
 */
struct kasan_alloc_header {
	uint16_t magic;      /* magic_for_addr(user addr, LIVE_XOR) */
	uint16_t crc;        /* checksum of both redzones (kasan_alloc_crc) */
	uint32_t alloc_size; /* full allocation size, redzones included */
	uint32_t user_size;  /* size the caller requested */
	struct {
		uint32_t left_rz : 32 - BACKTRACE_BITS; /* left redzone size in bytes */
		uint32_t frames : BACKTRACE_BITS;       /* backtrace frames stored in the footer */
	};
};
_Static_assert(sizeof(struct kasan_alloc_header) <= KASAN_GUARD_SIZE, "kasan alloc header exceeds guard size");

/* Footer in the right redzone: compressed allocation backtrace. */
struct kasan_alloc_footer {
	uint32_t backtrace[0];
};
_Static_assert(sizeof(struct kasan_alloc_footer) <= KASAN_GUARD_SIZE, "kasan alloc footer exceeds guard size");
458
459 #define LIVE_XOR ((uint16_t)0x3a65)
460 #define FREE_XOR ((uint16_t)0xf233)
461
462 static uint16_t
magic_for_addr(vm_offset_t addr,uint16_t magic_xor)463 magic_for_addr(vm_offset_t addr, uint16_t magic_xor)
464 {
465 uint16_t magic = addr & 0xFFFF;
466 magic ^= (addr >> 16) & 0xFFFF;
467 magic ^= (addr >> 32) & 0xFFFF;
468 magic ^= (addr >> 48) & 0xFFFF;
469 magic ^= magic_xor;
470 return magic;
471 }
472
473 static struct kasan_alloc_header *
header_for_user_addr(vm_offset_t addr)474 header_for_user_addr(vm_offset_t addr)
475 {
476 return (void *)(addr - sizeof(struct kasan_alloc_header));
477 }
478
479 static struct kasan_alloc_footer *
footer_for_user_addr(vm_offset_t addr,vm_size_t * size)480 footer_for_user_addr(vm_offset_t addr, vm_size_t *size)
481 {
482 struct kasan_alloc_header *h = header_for_user_addr(addr);
483 vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
484 *size = rightrz;
485 return (void *)(addr + h->user_size);
486 }
487
488 /*
489 * size: user-requested allocation size
490 * ret: minimum size for the real allocation
491 */
492 vm_size_t
kasan_alloc_resize(vm_size_t size)493 kasan_alloc_resize(vm_size_t size)
494 {
495 if (size >= 128) {
496 /* Add a little extra right redzone to larger objects. Gives us extra
497 * overflow protection, and more space for the backtrace. */
498 size += 16;
499 }
500
501 /* add left and right redzones */
502 size += KASAN_GUARD_PAD;
503
504 /* ensure the final allocation is a multiple of the granule */
505 size = kasan_granule_round(size);
506
507 return size;
508 }
509
extern vm_offset_t vm_kernel_slid_base;

/*
 * Capture a compressed backtrace into 'ptr'.
 *  ptr:  destination array of 32-bit slid offsets
 *  sz:   capacity of 'ptr' in bytes
 *  skip: number of leading (KASAN-internal) frames to drop
 * Returns the number of frames actually stored.
 */
static vm_size_t
kasan_alloc_bt(uint32_t *ptr, vm_size_t sz, vm_size_t skip)
{
	uintptr_t buf[BACKTRACE_MAXFRAMES];
	uintptr_t *bt = buf;

	/* convert capacity from bytes to 32-bit slots */
	sz /= sizeof(uint32_t);
	vm_size_t frames = sz;

	if (frames > 0) {
		/* collect extra frames so the skipped ones still leave 'sz' usable */
		frames = min((uint32_t)(frames + skip), BACKTRACE_MAXFRAMES);
		frames = backtrace(bt, (uint32_t)frames, NULL, NULL);

		/* drop up to 'skip' leading frames, but never below 'sz' kept frames */
		while (frames > sz && skip > 0) {
			bt++;
			frames--;
			skip--;
		}

		/* only store the offset from kernel base, and cram that into 32
		 * bits */
		for (vm_size_t i = 0; i < frames; i++) {
			ptr[i] = (uint32_t)(bt[i] - vm_kernel_slid_base);
		}
	}
	return frames;
}
539
540 /* addr: user address of allocation */
541 static uint16_t
kasan_alloc_crc(vm_offset_t addr)542 kasan_alloc_crc(vm_offset_t addr)
543 {
544 struct kasan_alloc_header *h = header_for_user_addr(addr);
545 vm_size_t rightrz = h->alloc_size - h->user_size - h->left_rz;
546
547 uint16_t crc_orig = h->crc;
548 h->crc = 0;
549
550 uint16_t crc = 0;
551 crc = __nosan_crc16(crc, (void *)(addr - h->left_rz), h->left_rz);
552 crc = __nosan_crc16(crc, (void *)(addr + h->user_size), rightrz);
553
554 h->crc = crc_orig;
555
556 return crc;
557 }
558
559 /*
560 * addr: base address of full allocation (including redzones)
561 * size: total size of allocation (include redzones)
562 * req: user-requested allocation size
563 * lrz: size of the left redzone in bytes
564 * ret: address of usable allocation
565 */
566 vm_address_t
kasan_alloc(vm_offset_t addr,vm_size_t size,vm_size_t req,vm_size_t leftrz)567 kasan_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
568 {
569 if (!addr) {
570 return 0;
571 }
572 assert(size > 0);
573 assert(kasan_granule_partial(addr) == 0);
574 assert(kasan_granule_partial(size) == 0);
575
576 vm_size_t rightrz = size - req - leftrz;
577
578 kasan_poison(addr, req, leftrz, rightrz, ASAN_HEAP_RZ);
579 kasan_rz_clobber(addr, req, leftrz, rightrz);
580
581 addr += leftrz;
582
583 /* stash the allocation sizes in the left redzone */
584 struct kasan_alloc_header *h = header_for_user_addr(addr);
585 h->magic = magic_for_addr(addr, LIVE_XOR);
586 h->left_rz = (uint32_t)leftrz;
587 h->alloc_size = (uint32_t)size;
588 h->user_size = (uint32_t)req;
589
590 /* ... and a backtrace in the right redzone */
591 vm_size_t fsize;
592 struct kasan_alloc_footer *f = footer_for_user_addr(addr, &fsize);
593 h->frames = (uint32_t)kasan_alloc_bt(f->backtrace, fsize, 2);
594
595 /* checksum the whole object, minus the user part */
596 h->crc = kasan_alloc_crc(addr);
597
598 return addr;
599 }
600
601 /*
602 * addr: address of usable allocation (excluding redzones)
603 * size: total size of allocation (include redzones)
604 * req: user-requested allocation size
605 * lrz: size of the left redzone in bytes
606 * ret: address of usable allocation
607 */
608 vm_address_t
kasan_realloc(vm_offset_t addr,vm_size_t size,vm_size_t req,vm_size_t leftrz)609 kasan_realloc(vm_offset_t addr, vm_size_t size, vm_size_t req, vm_size_t leftrz)
610 {
611 return kasan_alloc(addr - leftrz, size, req, leftrz);
612 }
613
614 /*
615 * addr: user pointer
616 * size: returns full original allocation size
617 * ret: original allocation ptr
618 */
619 vm_address_t
kasan_dealloc(vm_offset_t addr,vm_size_t * size)620 kasan_dealloc(vm_offset_t addr, vm_size_t *size)
621 {
622 assert(size && addr);
623 struct kasan_alloc_header *h = header_for_user_addr(addr);
624 *size = h->alloc_size;
625 h->magic = 0; /* clear the magic so the debugger doesn't find a bogus object */
626 return addr - h->left_rz;
627 }
628
629 /*
630 * return the original user-requested allocation size
631 * addr: user alloc pointer
632 */
633 vm_size_t
kasan_user_size(vm_offset_t addr)634 kasan_user_size(vm_offset_t addr)
635 {
636 struct kasan_alloc_header *h = header_for_user_addr(addr);
637 assert(h->magic == magic_for_addr(addr, LIVE_XOR));
638 return h->user_size;
639 }
640
641 /*
642 * Verify that `addr' (user pointer) is a valid allocation of `type'
643 */
644 void
kasan_check_free(vm_offset_t addr,vm_size_t size,unsigned heap_type)645 kasan_check_free(vm_offset_t addr, vm_size_t size, unsigned heap_type)
646 {
647 struct kasan_alloc_header *h = header_for_user_addr(addr);
648
649 if (!checks_enabled) {
650 return;
651 }
652
653 /* map heap type to an internal access type */
654 access_t type = heap_type == KASAN_HEAP_KALLOC ? TYPE_KFREE :
655 heap_type == KASAN_HEAP_ZALLOC ? TYPE_ZFREE :
656 heap_type == KASAN_HEAP_FAKESTACK ? TYPE_FSFREE : 0;
657
658 /* check the magic and crc match */
659 if (h->magic != magic_for_addr(addr, LIVE_XOR)) {
660 kasan_violation(addr, size, type, REASON_BAD_METADATA);
661 }
662 if (h->crc != kasan_alloc_crc(addr)) {
663 kasan_violation(addr, size, type, REASON_MOD_OOB);
664 }
665
666 /* check the freed size matches what we recorded at alloc time */
667 if (h->user_size != size) {
668 kasan_violation(addr, size, type, REASON_INVALID_SIZE);
669 }
670
671 vm_size_t rightrz_sz = h->alloc_size - h->left_rz - h->user_size;
672
673 /* Check that the redzones are valid */
674 if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
675 !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
676 kasan_violation(addr, size, type, REASON_BAD_METADATA);
677 }
678
679 /* Check the allocated range is not poisoned */
680 kasan_check_range((void *)addr, size, type);
681 }
682
683 /*
684 * KASAN Quarantine
685 */
686
687 struct freelist_entry {
688 uint16_t magic;
689 uint16_t crc;
690 STAILQ_ENTRY(freelist_entry) list;
691 union {
692 struct {
693 vm_size_t size : 28;
694 vm_size_t user_size : 28;
695 vm_size_t frames : BACKTRACE_BITS; /* number of frames in backtrace */
696 vm_size_t __unused : 8 - BACKTRACE_BITS;
697 };
698 uint64_t bits;
699 };
700 zone_t zone;
701 uint32_t backtrace[];
702 };
703 _Static_assert(sizeof(struct freelist_entry) <= KASAN_GUARD_PAD, "kasan freelist header exceeds padded size");
704
/* FIFO quarantine of freed objects, bounded by entry count and total bytes. */
struct quarantine {
	STAILQ_HEAD(freelist_head, freelist_entry) freelist;
	unsigned long entries;     /* current number of queued objects */
	unsigned long max_entries; /* cap on queued objects */
	vm_size_t size;            /* current total queued bytes */
	vm_size_t max_size;        /* cap on queued bytes */
};

/* One quarantine per heap type (zalloc, kalloc, fakestack). */
struct quarantine quarantines[] = {
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_ZALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_KALLOC].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE },
	{ STAILQ_HEAD_INITIALIZER((quarantines[KASAN_HEAP_FAKESTACK].freelist)), 0, QUARANTINE_ENTRIES, 0, QUARANTINE_MAXSIZE }
};
718
719 static uint16_t
fle_crc(struct freelist_entry * fle)720 fle_crc(struct freelist_entry *fle)
721 {
722 return __nosan_crc16(0, &fle->bits, fle->size - offsetof(struct freelist_entry, bits));
723 }
724
725 /*
726 * addr, sizep: pointer/size of full allocation including redzone
727 */
728 void NOINLINE
kasan_free_internal(void ** addrp,vm_size_t * sizep,int type,zone_t * zonep,vm_size_t user_size,int locked,bool doquarantine)729 kasan_free_internal(void **addrp, vm_size_t *sizep, int type,
730 zone_t *zonep, vm_size_t user_size, int locked,
731 bool doquarantine)
732 {
733 vm_size_t size = *sizep;
734 vm_offset_t addr = *(vm_offset_t *)addrp;
735 zone_t zone = *zonep;
736
737 assert(type >= 0 && type < KASAN_HEAP_TYPES);
738 if (type == KASAN_HEAP_KALLOC) {
739 /* for kalloc the size can be 0 */
740 assert(zone);
741 } else {
742 assert(zone && user_size);
743 }
744
745 /* clobber the entire freed region */
746 kasan_rz_clobber(addr, 0, size, 0);
747
748 if (!doquarantine || !quarantine_enabled) {
749 goto free_current;
750 }
751
752 /* poison the entire freed region */
753 uint8_t flags = (type == KASAN_HEAP_FAKESTACK) ? ASAN_STACK_FREED : ASAN_HEAP_FREED;
754 kasan_poison(addr, 0, size, 0, flags);
755
756 struct freelist_entry *fle, *tofree = NULL;
757 struct quarantine *q = &quarantines[type];
758 assert(size >= sizeof(struct freelist_entry));
759
760 /* create a new freelist entry */
761 fle = (struct freelist_entry *)addr;
762 fle->magic = magic_for_addr((vm_offset_t)fle, FREE_XOR);
763 fle->size = size;
764 fle->user_size = user_size;
765 fle->frames = 0;
766 fle->zone = zone;
767 if (type != KASAN_HEAP_FAKESTACK) {
768 /* don't do expensive things on the fakestack path */
769 fle->frames = kasan_alloc_bt(fle->backtrace, fle->size - sizeof(struct freelist_entry), 3);
770 fle->crc = fle_crc(fle);
771 }
772
773 boolean_t flg;
774 if (!locked) {
775 kasan_lock(&flg);
776 }
777
778 if (q->size + size > q->max_size) {
779 /*
780 * Adding this entry would put us over the max quarantine size. Free the
781 * larger of the current object and the quarantine head object.
782 */
783 tofree = STAILQ_FIRST(&q->freelist);
784 if (fle->size > tofree->size) {
785 goto free_current_locked;
786 }
787 }
788
789 STAILQ_INSERT_TAIL(&q->freelist, fle, list);
790 q->entries++;
791 q->size += size;
792
793 /* free the oldest entry, if necessary */
794 if (tofree || q->entries > q->max_entries) {
795 tofree = STAILQ_FIRST(&q->freelist);
796 STAILQ_REMOVE_HEAD(&q->freelist, list);
797
798 assert(q->entries > 0 && q->size >= tofree->size);
799 q->entries--;
800 q->size -= tofree->size;
801
802 zone = tofree->zone;
803 size = tofree->size;
804 addr = (vm_offset_t)tofree;
805
806 /* check the magic and crc match */
807 if (tofree->magic != magic_for_addr(addr, FREE_XOR)) {
808 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
809 }
810 if (type != KASAN_HEAP_FAKESTACK && tofree->crc != fle_crc(tofree)) {
811 kasan_violation(addr, size, TYPE_UAF, REASON_MOD_AFTER_FREE);
812 }
813
814 /* clobber the quarantine header */
815 __nosan_bzero((void *)addr, sizeof(struct freelist_entry));
816 } else {
817 /* quarantine is not full - don't really free anything */
818 addr = 0;
819 zone = ZONE_NULL;
820 size = 0;
821 }
822
823 free_current_locked:
824 if (!locked) {
825 kasan_unlock(flg);
826 }
827
828 free_current:
829 *addrp = (void *)addr;
830 if (addr) {
831 kasan_unpoison((void *)addr, size);
832 *sizep = size;
833 *zonep = zone;
834 }
835 }
836
837 void NOINLINE
kasan_free(void ** addrp,vm_size_t * sizep,int type,zone_t * zone,vm_size_t user_size)838 kasan_free(void **addrp, vm_size_t *sizep, int type, zone_t *zone,
839 vm_size_t user_size)
840 {
841 kasan_free_internal(addrp, sizep, type, zone, user_size, 0, true);
842
843 if (free_yield) {
844 thread_yield_internal(free_yield);
845 }
846 }
847
848 /*
849 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
850 * lives relative to the start of the buffer, but it's always the word immediately
851 * before the start of the array data, so for naturally-aligned objects we need to
852 * search at most 2 shadow bytes.
853 */
854 void
kasan_unpoison_cxx_array_cookie(void * ptr)855 kasan_unpoison_cxx_array_cookie(void *ptr)
856 {
857 uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
858 for (size_t i = 0; i < 2; i++) {
859 if (shadow[i] == ASAN_ARRAY_COOKIE) {
860 shadow[i] = ASAN_VALID;
861 return;
862 } else if (shadow[i] != ASAN_VALID) {
863 /* must have seen the cookie by now */
864 return;
865 }
866 }
867 }
868
/* kern.kasan.quarantine: runtime knob to toggle the quarantine on/off */
SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
870