/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/simple_lock.h>
#include <kern/debug.h>
#include <kern/backtrace.h>
#include <kern/thread.h>
#include <kern/btlog.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <machine/atomic.h>
#include <sys/sysctl.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"
#include "kasan-classic.h"


/*
 * KASAN-CLASSIC
 *
 * This implementation relies on a shadow table that matches each shadow byte
 * with 8 bytes of the kernel virtual address space. The value of that byte
 * is either:
 *
 *  - 0:                the full 8 bytes are addressable
 *  - [1,7]:            the byte is partially addressable (as many valid bytes
 *                      as specified)
 *  - 0xFx, 0xAC, 0xE9: the byte is not addressable and poisoned somehow (for a
 *                      complete list, check kasan-classic.h)
 *
 * Through instrumentation of every load and store and through modifications
 * to the kernel to properly record and/or quarantine memory regions as a
 * consequence of memory management operations, KASAN can detect nearly any
 * type of memory corruption, with two big caveats: linear overflows and
 * use-after-free. These are addressed by redzoning and quarantines.
 *
 * For linear overflows, since the adjacent memory is often valid (as is
 * common on both stack and heap), KASAN must add redzones next to each
 * buffer. For use-after-free, freed buffers are not returned immediately to
 * subsequent memory allocation calls, but are 'stored' in a quarantine,
 * de facto delaying reallocation.
 *
 * KASAN-CLASSIC has a significant memory cost:
 *  1) ~13% of available memory for the shadow table (4G phone -> ~512MB)
 *  2) ~20-30MB of quarantine space
 *  3) extra padding introduced to support redzones
 *
 * (1) and (2) are backed by stealing memory at boot. (3) is instead added at
 * runtime on top of each allocation.
 */
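
/*
 * Rough sketch of the mapping (illustrative only; the authoritative constants
 * and macros live in kasan-classic.h): each 8-byte granule of kernel virtual
 * address space is described by one shadow byte, located by scaling the
 * address and adding a fixed offset, roughly what SHADOW_FOR_ADDRESS() below
 * computes:
 *
 *	uint8_t *shadow = (uint8_t *)((addr >> KASAN_SCALE) + KASAN_OFFSET);
 *
 * For example, a granule-aligned 20-byte buffer is described by the three
 * shadow bytes [0x00, 0x00, 0x04]: two fully addressable granules followed by
 * one with only its first 4 bytes valid.
 */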

_Static_assert(!KASAN_LIGHT, "Light mode not supported by KASan Classic.");

/* Configuration options */
static unsigned quarantine_enabled = 1;               /* Quarantine on/off */
static bool checks_enabled = false;                   /* Poison checking on/off */

/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __asan_shadow_memory_dynamic_address = KASAN_OFFSET;
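
/*
 * For reference, the check that LLVM inlines at an instrumented N-byte access
 * (N <= 8) is roughly the following (exposition only; the real sequence is
 * generated by the compiler):
 *
 *	int8_t shadow = *(int8_t *)((addr >> 3) + __asan_shadow_memory_dynamic_address);
 *	if (shadow != 0 && (int8_t)((addr & 7) + N - 1) >= shadow) {
 *		// slow path: call back into the KASan runtime to report
 *	}
 */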

void
kasan_impl_init(void)
{
	/* Quarantine is enabled by default */
	quarantine_enabled = 1;

	/* Enable shadow checking early on. */
	checks_enabled = true;
}

void
kasan_impl_kdp_disable(void)
{
	quarantine_enabled = 0;
	__asan_option_detect_stack_use_after_return = 0;
	fakestack_enabled = 0;
	checks_enabled = false;
}

void NOINLINE
kasan_impl_late_init(void)
{
	kasan_init_fakestack();
}

/* Describes the source location where a global is defined. */
struct asan_global_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Describes an instrumented global variable. */
struct asan_global {
	uptr addr;
	uptr size;
	uptr size_with_redzone;
	const char *name;
	const char *module;
	uptr has_dynamic_init;
	struct asan_global_source_location *location;
#if CLANG_MIN_VERSION(8020000)
	uptr odr_indicator;
#endif
};

/* Walk through the globals section and set them up at boot */
void NOINLINE
kasan_init_globals(vm_offset_t base, vm_size_t size)
{
	struct asan_global *glob = (struct asan_global *)base;
	struct asan_global *glob_end = (struct asan_global *)(base + size);
	for (; glob < glob_end; glob++) {
		/*
		 * Add a redzone after each global variable.
		 * size=variable size, leftsz=0, rightsz=redzone
		 */
		kasan_poison(glob->addr, glob->size, 0, glob->size_with_redzone - glob->size, ASAN_GLOBAL_RZ);
	}
}
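
/*
 * For example (sizes are hypothetical, the compiler picks the actual redzone):
 * a 4-byte global emitted with size=4 and size_with_redzone=64 is poisoned as
 *
 *	kasan_poison(glob->addr, 4, 0, 60, ASAN_GLOBAL_RZ);
 *
 * which yields one partial shadow byte of value 4 for the variable itself,
 * followed by seven ASAN_GLOBAL_RZ shadow bytes covering the trailing redzone.
 */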

/* Reporting */
static const char *
kasan_classic_access_to_str(access_t type)
{
	if (type & TYPE_READ) {
		return "load from";
	} else if (type & TYPE_WRITE) {
		return "store to";
	} else if (type & TYPE_FREE) {
		return "free of";
	} else {
		return "access of";
	}
}

static const char *kasan_classic_shadow_strings[] = {
	[ASAN_VALID] =          "VALID",
	[ASAN_PARTIAL1] =       "PARTIAL1",
	[ASAN_PARTIAL2] =       "PARTIAL2",
	[ASAN_PARTIAL3] =       "PARTIAL3",
	[ASAN_PARTIAL4] =       "PARTIAL4",
	[ASAN_PARTIAL5] =       "PARTIAL5",
	[ASAN_PARTIAL6] =       "PARTIAL6",
	[ASAN_PARTIAL7] =       "PARTIAL7",
	[ASAN_STACK_LEFT_RZ] =  "STACK_LEFT_RZ",
	[ASAN_STACK_MID_RZ] =   "STACK_MID_RZ",
	[ASAN_STACK_RIGHT_RZ] = "STACK_RIGHT_RZ",
	[ASAN_STACK_FREED] =    "STACK_FREED",
	[ASAN_STACK_OOSCOPE] =  "STACK_OOSCOPE",
	[ASAN_GLOBAL_RZ] =      "GLOBAL_RZ",
	[ASAN_HEAP_LEFT_RZ] =   "HEAP_LEFT_RZ",
	[ASAN_HEAP_RIGHT_RZ] =  "HEAP_RIGHT_RZ",
	[ASAN_HEAP_FREED] =     "HEAP_FREED",
	[0xff] =                NULL
};

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t reason)
{
	uint8_t *shadow_ptr = SHADOW_FOR_ADDRESS(p);
	uint8_t shadow_type = *shadow_ptr;
	size_t n = 0;

	const char *shadow_str = kasan_classic_shadow_strings[shadow_type];
	if (!shadow_str) {
		shadow_str = "<invalid>";
	}

	if (reason == REASON_MOD_OOB || reason == REASON_BAD_METADATA) {
		n += scnprintf(logbuf, bufsize, "KASan: free of corrupted/invalid object %#lx\n", p);
	} else if (reason == REASON_MOD_AFTER_FREE) {
		n += scnprintf(logbuf, bufsize, "KASan: UaF of quarantined object %#lx\n", p);
	} else {
		n += scnprintf(logbuf, bufsize, "KASan: invalid %lu-byte %s %#lx [%s]\n",
		    width, kasan_classic_access_to_str(access), p, shadow_str);
	}

	return n;
}

static inline bool
kasan_poison_active(uint8_t flags)
{
	switch (flags) {
	case ASAN_GLOBAL_RZ:
		return kasan_check_enabled(TYPE_POISON_GLOBAL);
	case ASAN_HEAP_RZ:
	case ASAN_HEAP_LEFT_RZ:
	case ASAN_HEAP_RIGHT_RZ:
	case ASAN_HEAP_FREED:
		return kasan_check_enabled(TYPE_POISON_HEAP);
	default:
		return true;
	}
}

/*
 * Create poisoned redzones at the start and at the end of a (marked) valid range.
 * Parameters:
 *    base: starting address (including the eventual left red zone)
 *    size: size of the valid range
 *    leftrz: size (multiple of KASAN_GRANULE) of the left redzone
 *    rightrz: size (multiple of KASAN_GRANULE) of the right redzone
 *    flags: select between different poisoning options (e.g. stack vs heap)
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(base);
	/*
	 * Buffer size is allowed to not be a multiple of 8. Create a partial
	 * entry in the shadow table if so.
	 */
	uint8_t partial = (uint8_t)kasan_granule_partial(size);
	vm_size_t total = leftrz + size + rightrz;
	vm_size_t pos = 0;

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(total) == 0);

	if (!kasan_enabled || !kasan_poison_active(flags)) {
		return;
	}

	leftrz >>= KASAN_SCALE;
	size >>= KASAN_SCALE;
	total >>= KASAN_SCALE;

	uint8_t l_flags = flags;
	uint8_t r_flags = flags;

	if (flags == ASAN_STACK_RZ) {
		l_flags = ASAN_STACK_LEFT_RZ;
		r_flags = ASAN_STACK_RIGHT_RZ;
	} else if (flags == ASAN_HEAP_RZ) {
		l_flags = ASAN_HEAP_LEFT_RZ;
		r_flags = ASAN_HEAP_RIGHT_RZ;
	}

	/*
	 * poison the redzones and unpoison the valid bytes
	 */
	__nosan_memset(shadow + pos, l_flags, leftrz);
	pos += leftrz;

	__nosan_memset(shadow + pos, ASAN_VALID, size);
	pos += size;

	/* Do we have any leftover valid bytes? */
	if (partial && pos < total) {
		shadow[pos++] = partial;
	}

	__nosan_memset(shadow + pos, r_flags, total - pos);
}
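
/*
 * Worked example (values are illustrative): poisoning a 13-byte heap element
 * with a 16-byte left redzone and a 19-byte right redzone,
 *
 *	kasan_poison(base, 13, 16, 19, ASAN_HEAP_RZ);
 *
 * covers 48 bytes / 6 shadow entries and produces
 *
 *	[HEAP_LEFT_RZ, HEAP_LEFT_RZ, 0x00, 0x05, HEAP_RIGHT_RZ, HEAP_RIGHT_RZ]
 *
 * i.e. two left-redzone granules, one fully valid granule, one granule with
 * only its first 5 bytes valid, and two right-redzone granules.
 */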

/*
 * Check the shadow table to determine whether [base, base+size) is valid or
 * is poisoned.
 */
static bool NOINLINE
kasan_range_poisoned(vm_offset_t base, vm_size_t size, vm_offset_t *first_invalid)
{
	uint8_t         *shadow;
	vm_size_t       i;

	if (!kasan_enabled) {
		return false;
	}

	size += kasan_granule_partial(base);
	base = kasan_granule_trunc(base);

	shadow = SHADOW_FOR_ADDRESS(base);
	size_t limit = (size + KASAN_GRANULE - 1) / KASAN_GRANULE;

	/* Walk the shadow table, fail on any non-valid value */
	for (i = 0; i < limit; i++, size -= KASAN_GRANULE) {
		assert(size > 0);
		uint8_t s = shadow[i];
		if (s == 0 || (size < KASAN_GRANULE && s >= size && s < KASAN_GRANULE)) {
			/* valid */
			continue;
		} else {
			goto fail;
		}
	}

	return false;

fail:
	if (first_invalid) {
		/* XXX: calculate the exact first byte that failed */
		*first_invalid = base + i * 8;
	}
	return true;
}
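
/*
 * Example of how partially valid trailing granules are handled (illustrative):
 * checking 13 bytes starting at a granule-aligned address inspects two shadow
 * bytes. A shadow of [0x00, 0x05] passes, since the last granule has at least
 * the 5 bytes we need, while [0x00, 0x03] fails on the second granule and
 * reports base + 8 as the first invalid address.
 */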

/* An 8-byte valid range is identified by 0 in the KASan Classic shadow table */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	__nosan_bzero((void *)page, size);
}

/*
 * Verify whether an access to memory is valid. A valid access is one that
 * doesn't touch any region marked as a poisoned redzone or invalid.
 * 'access' records whether the attempted access is a read or a write.
 */
void NOINLINE
kasan_check_range(const void *x, size_t sz, access_t access)
{
	uintptr_t invalid;
	uintptr_t ptr = (uintptr_t)x;

	if (!checks_enabled) {
		return;
	}

	if (kasan_range_poisoned(ptr, sz, &invalid)) {
		size_t remaining = sz - (invalid - ptr);
		kasan_violation(invalid, remaining, access, REASON_POISONED);
	}
}

/*
 * Return true if [base, base+sz) is unpoisoned or matches the passed-in
 * shadow value.
 */
bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	/* round 'base' up to skip any partial, which won't match 'shadow' */
	uintptr_t base = kasan_granule_round(addr);
	sz -= base - addr;

	uintptr_t end = base + sz;

	while (base < end) {
		uint8_t *sh = SHADOW_FOR_ADDRESS(base);
		if (*sh && *sh != shadow_match_value) {
			return false;
		}
		base += KASAN_GRANULE;
	}
	return true;
}

/*
 * KASAN zalloc hooks
 *
 * KASAN can only distinguish between valid and invalid memory accesses.
 * This property severely limits its applicability to zalloc (and any other
 * memory allocator), whereby linear overflows generally land in valid
 * memory and non-trivial use-after-free can hit an already reallocated
 * buffer.
 *
 * To overcome these limitations, KASAN requires a bunch of fairly invasive
 * changes to zalloc to add both red-zoning and quarantines.
 */

__enum_decl(kasan_alloc_state_t, uint16_t, {
	KASAN_STATE_FREED,
	KASAN_STATE_ALLOCATED,
	KASAN_STATE_QUARANTINED,
});

typedef struct kasan_alloc_header {
	union {
		struct {
			kasan_alloc_state_t state;
			uint16_t left_rz;
			uint32_t user_size;
		};
		struct {
			kasan_alloc_state_t state2;
			intptr_t next : 48;
		};
	};
	btref_t  alloc_btref;
	btref_t  free_btref;
} *kasan_alloc_header_t;
static_assert(sizeof(struct kasan_alloc_header) == KASAN_GUARD_SIZE);
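
/*
 * With redzones enabled, each zalloc element is laid out roughly as follows
 * (a sketch; actual sizes depend on the zone configuration):
 *
 *	| left redzone | user data | right redzone |
 *
 * The struct kasan_alloc_header above occupies the tail of the left redzone,
 * immediately before the user pointer, which is why header_for_user_addr()
 * below simply subtracts sizeof(struct kasan_alloc_header).
 */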

static kasan_alloc_header_t
header_for_user_addr(vm_offset_t addr)
{
	return (void *)(addr - sizeof(struct kasan_alloc_header));
}

/*
 * Set up the shadow for a range of zone memory that is being added: mark the
 * whole range as freed, poison the leading `offs` bytes, and poison `rzsize`
 * bytes of left redzone at the start of each `esize`-sized element.
 */
void
kasan_zmem_add(
	vm_address_t            addr,
	vm_size_t               size,
	vm_offset_t             esize,
	vm_offset_t             offs,
	vm_offset_t             rzsize)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(addr);

	assert(kasan_granule_partial(esize) == 0);
	assert(kasan_granule_partial(offs) == 0);
	assert(kasan_granule_partial(rzsize) == 0);
	assert((size - offs) % esize == 0);

	size   >>= KASAN_SCALE;
	esize  >>= KASAN_SCALE;
	offs   >>= KASAN_SCALE;
	rzsize >>= KASAN_SCALE;

	__nosan_memset(shadow, ASAN_HEAP_FREED, size);

	__nosan_memset(shadow, ASAN_HEAP_LEFT_RZ, offs);

	for (vm_offset_t pos = offs; pos < size; pos += esize) {
		__nosan_memset(shadow + pos, ASAN_HEAP_LEFT_RZ, rzsize);
	}
}

/*
 * Tear down the shadow for a range of zone memory that is being removed:
 * release any backtrace references still held by the (freed) element headers
 * and mark the whole range as addressable again.
 */
void
kasan_zmem_remove(
	vm_address_t            addr,
	vm_size_t               size,
	vm_offset_t             esize,
	vm_offset_t             offs,
	vm_offset_t             rzsize)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(addr);

	assert(kasan_granule_partial(esize) == 0);
	assert(kasan_granule_partial(offs) == 0);
	assert(kasan_granule_partial(rzsize) == 0);
	assert((size - offs) % esize == 0);

	if (rzsize) {
		for (vm_offset_t pos = offs + rzsize; pos < size; pos += esize) {
			kasan_alloc_header_t h;

			h = header_for_user_addr(addr + pos);

			assert(h->state == KASAN_STATE_FREED);
			btref_put(h->alloc_btref);
			btref_put(h->free_btref);
		}
	}

	__nosan_memset(shadow, ASAN_VALID, size >> KASAN_SCALE);
}

/*
 * Mark an element as allocated: record the requested size and an allocation
 * backtrace in the header stored in the left redzone (if any), unpoison the
 * `req` user bytes and keep the remaining `size - req` bytes poisoned as a
 * right redzone. For per-CPU zones, repeat on every CPU's copy.
 */
void
kasan_alloc(
	vm_address_t            addr,
	vm_size_t               size,
	vm_size_t               req,
	vm_size_t               rzsize,
	bool                    percpu,
	void                   *fp)
{
	assert(kasan_granule_partial(addr) == 0);
	assert(kasan_granule_partial(size) == 0);
	assert(kasan_granule_partial(rzsize) == 0);

	if (rzsize) {
		/* stash the allocation sizes in the left redzone */
		kasan_alloc_header_t h = header_for_user_addr(addr);

		btref_put(h->free_btref);
		btref_put(h->alloc_btref);

		h->state       = KASAN_STATE_ALLOCATED;
		h->left_rz     = (uint16_t)rzsize;
		h->user_size   = (uint32_t)req;
		h->alloc_btref = btref_get(fp, BTREF_GET_NOWAIT);
		h->free_btref  = 0;
	}

	kasan_poison(addr, req, 0, size - req, ASAN_HEAP_RZ);
	if (percpu) {
		for (uint32_t i = 1; i < zpercpu_count(); i++) {
			addr += PAGE_SIZE;
			kasan_poison(addr, req, 0, size - req, ASAN_HEAP_RZ);
		}
	}
}
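
/*
 * Illustrative call only (the real call sites live in the zalloc layer and
 * the local names here are made up): for an element whose usable span after
 * the left redzone is `size` bytes and whose caller asked for `req` bytes,
 *
 *	kasan_alloc(user_addr, size, req, rzsize, false,
 *	    __builtin_frame_address(0));
 *
 * stores `req` and an allocation backtrace in the header and unpoisons only
 * the first `req` bytes, leaving the remaining `size - req` bytes poisoned
 * as a right redzone.
 */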

/*
 * Mark an element as freed: validate the allocation, record a free backtrace
 * in the header (if redzones are in use) and poison the whole element as
 * ASAN_HEAP_FREED. For per-CPU zones, repeat on every CPU's copy.
 */
void
kasan_free(
	vm_address_t            addr,
	vm_size_t               size,
	vm_size_t               req,
	vm_size_t               rzsize,
	bool                    percpu,
	void                   *fp)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(addr);

	if (rzsize) {
		kasan_alloc_header_t h = header_for_user_addr(addr);

		kasan_check_alloc(addr, size, req);
		assert(h->free_btref == 0);
		h->state      = KASAN_STATE_FREED;
		h->next       = 0;
		h->free_btref = btref_get(fp, BTREF_GET_NOWAIT);
	}

	__nosan_memset(shadow, ASAN_HEAP_FREED, size >> KASAN_SCALE);
	if (percpu) {
		for (uint32_t i = 1; i < zpercpu_count(); i++) {
			shadow += PAGE_SIZE >> KASAN_SCALE;
			__nosan_memset(shadow, ASAN_HEAP_FREED,
			    size >> KASAN_SCALE);
		}
	}
}

void
kasan_alloc_large(vm_address_t addr, vm_size_t req_size)
{
	vm_size_t l_rz = PAGE_SIZE;
	vm_size_t r_rz = round_page(req_size) - req_size + PAGE_SIZE;

	kasan_poison(addr - l_rz, req_size, l_rz, r_rz, ASAN_HEAP_RZ);
}

/*
 * Return the original user-requested allocation size.
 * addr: user alloc pointer
 */
vm_size_t
kasan_user_size(vm_offset_t addr)
{
	kasan_alloc_header_t h = header_for_user_addr(addr);

	assert(h->state == KASAN_STATE_ALLOCATED);
	return h->user_size;
}

/*
 * Verify that `addr' (user pointer) is a valid allocation
 */
void
kasan_check_alloc(vm_offset_t addr, vm_size_t size, vm_size_t req)
{
	kasan_alloc_header_t h = header_for_user_addr(addr);

	if (!checks_enabled) {
		return;
	}

	if (h->state != KASAN_STATE_ALLOCATED) {
		kasan_violation(addr, req, TYPE_ZFREE, REASON_BAD_METADATA);
	}

	/* check the freed size matches what we recorded at alloc time */
	if (h->user_size != req) {
		kasan_violation(addr, req, TYPE_ZFREE, REASON_INVALID_SIZE);
	}

	vm_size_t rightrz_sz = size - h->user_size;

	/* Check that the redzones are valid */
	if (!kasan_check_shadow(addr - h->left_rz, h->left_rz, ASAN_HEAP_LEFT_RZ) ||
	    !kasan_check_shadow(addr + h->user_size, rightrz_sz, ASAN_HEAP_RIGHT_RZ)) {
		kasan_violation(addr, req, TYPE_ZFREE, REASON_BAD_METADATA);
	}

	/* Check the allocated range is not poisoned */
	kasan_check_range((void *)addr, req, TYPE_ZFREE);
}

/*
 * KASAN Quarantine
 */

typedef struct kasan_quarantine {
	kasan_alloc_header_t  head;
	kasan_alloc_header_t  tail;
	uint32_t              size;
	uint32_t              count;
} *kasan_quarantine_t;

static struct kasan_quarantine PERCPU_DATA(kasan_quarantine);

extern int get_preemption_level(void);

/*
 * Push a freed element onto the per-CPU quarantine. When the quarantine
 * exceeds its size or entry budget, evict the oldest element and hand it back
 * to the caller (via the returned kasan_quarantine_result) so that it can
 * actually be freed.
 */
struct kasan_quarantine_result
kasan_quarantine(vm_address_t addr, vm_size_t size)
{
	kasan_alloc_header_t h = header_for_user_addr(addr);
	kasan_quarantine_t   q = PERCPU_GET(kasan_quarantine);
	struct kasan_quarantine_result kqr = { };

	assert(h->state == KASAN_STATE_FREED && h->next == 0);

	h->state = KASAN_STATE_QUARANTINED;

	q->size += size;
	q->count++;
	if (q->tail == NULL) {
		q->head = h;
	} else {
		q->tail->next = (intptr_t)h;
	}
	q->tail = h;

	if (q->size >= QUARANTINE_MAXSIZE || q->count > QUARANTINE_ENTRIES) {
		h = q->head;
		assert(h->state == KASAN_STATE_QUARANTINED);

		q->head  = (kasan_alloc_header_t)(intptr_t)h->next;
		h->state = KASAN_STATE_FREED;
		h->next  = 0;

		kqr.addr = (vm_address_t)(h + 1);
		q->size -= kasan_quarantine_resolve(kqr.addr, &kqr.zone);
		q->count--;
	}

	return kqr;
}
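
/*
 * Sketch of how a caller is expected to consume the result (illustrative;
 * the real logic lives in the zalloc free path):
 *
 *	struct kasan_quarantine_result kqr = kasan_quarantine(addr, size);
 *	if (kqr.addr) {
 *		// the quarantine evicted its oldest element: only now is
 *		// kqr.addr really returned to kqr.zone for reuse
 *	}
 */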

/*
 * Unpoison the C++ array cookie (if it exists). We don't know exactly where it
 * lives relative to the start of the buffer, but it's always the word immediately
 * before the start of the array data, so for naturally-aligned objects we need to
 * search at most 2 shadow bytes.
 */
void
kasan_unpoison_cxx_array_cookie(void *ptr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS((uptr)ptr);
	for (size_t i = 0; i < 2; i++) {
		if (shadow[i] == ASAN_ARRAY_COOKIE) {
			shadow[i] = ASAN_VALID;
			return;
		} else if (shadow[i] != ASAN_VALID) {
			/* must have seen the cookie by now */
			return;
		}
	}
}

SYSCTL_UINT(_kern_kasan, OID_AUTO, quarantine, CTLFLAG_RW, &quarantine_enabled, 0, "");
691