/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <vm/vm_memtag.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/telemetry.h>
#include <kern/trap_telemetry.h>
#include <kern/thread.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <machine/atomic.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

uintptr_t kasan_tbi_tag_range(uintptr_t, size_t, uint8_t);

#define P2ALIGN(x, align)           ((x) & -(align))
#define P2ROUNDUP(x, align)         (-(-(x) & -(align)))
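
/*
 * Worked examples (align must be a power of two):
 *   P2ALIGN(0x1007, 16)   == 0x1000   (round down to the granule)
 *   P2ROUNDUP(0x1007, 16) == 0x1010   (round up to the granule)
 */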

/* Configuration options */
bool kasan_tbi_check_tag = false;
bool kasan_tbi_enabled = false;

/* Reserved tags */
#define KASAN_TBI_DEFAULT_TAG       0xFF
#define KASAN_TBI_DEFAULT_FREE_TAG  0xF0
#define KASAN_TBI_REDZONE_POISON    0x80
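
/*
 * Tag values live in the top byte of a pointer: 0xFF marks untagged/default
 * memory, 0xF0 marks freed memory, and 0x80 poisons redzones. Since
 * generated tags are always OR'ed with 0xF0 (see kasan_tbi_full_tag()
 * below), they fall in 0xF1..0xFE and can never alias the redzone poison
 * value.
 */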

#if defined(ARM_LARGE_MEMORY)
#define KASAN_TBI_SHADOW_MIN        (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_TBI_SHADOW_MAX        0xffffffffffffffffULL
#else
#define KASAN_TBI_SHADOW_MIN        0xfffffffe00000000ULL
#define KASAN_TBI_SHADOW_MAX        0xffffffffc0000000ULL
#endif

#if !CONFIG_KERNEL_TAGGING
#error "KASAN-TBI requires KERNEL TAGGING"
#endif /* CONFIG_KERNEL_TAGGING */

KERNEL_BRK_DESCRIPTOR_DEFINE(kasan_desc,
    .type                = TRAP_TELEMETRY_TYPE_KERNEL_BRK_KASAN,
    .base                = KASAN_TBI_ESR_BASE,
    .max                 = KASAN_TBI_ESR_TOP,
    .options             = BRK_TELEMETRY_OPTIONS_FATAL_DEFAULT,
    .handle_breakpoint   = kasan_handle_brk_failure);

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */
extern uint64_t ml_get_speculative_timebase(void);

/* Stack and large allocations use the whole set of tags. Tags 0 and 15 are reserved. */
static uint8_t kasan_tbi_full_tags[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };

/* Randomize tag allocation through a simple LFSR */
static uint32_t kasan_tbi_lfsr;

/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __hwasan_shadow_memory_dynamic_address = KASAN_OFFSET;
/* Make LLDB/automated tools happy for now */
const uintptr_t __asan_shadow_memory_dynamic_address = __hwasan_shadow_memory_dynamic_address;
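
/*
 * A sketch of the mapping, assuming the 16-byte granule implied by the
 * P2ROUNDUP(sz, 16) arithmetic used throughout this file:
 *
 *   SHADOW_FOR_ADDRESS(addr) ~= KASAN_OFFSET + (addr >> 4)
 *
 * i.e. one shadow byte records the expected tag of each 16-byte granule.
 */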

/*
 * Untagged kernel addresses start with 0xFF. Match that whenever we create
 * valid regions.
 */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	(void) __nosan_memset((void *)page, KASAN_TBI_DEFAULT_TAG, size);
}

void
kasan_impl_init(void)
{
	kasan_tbi_lfsr = (uint32_t)ml_get_speculative_timebase();

	/*
	 * KASAN depends on CONFIG_KERNEL_TBI, therefore (DATA) TBI has been
	 * set for us already at bootstrap.
	 */
	kasan_tbi_enabled = true;

	/* Enable checking early on */
	kasan_tbi_check_tag = true;

	/*
	 * Sanity check on features that are effectively disabled, but might
	 * have erroneously been set up by legacy boot-args.
	 */
	if (fakestack_enabled) {
		fakestack_enabled = 0;
	}
}

void NOINLINE
kasan_init_globals(vm_offset_t __unused base, vm_size_t __unused size)
{
	/*
	 * KASAN-TBI global support awaits compiler fixes to generate descriptive
	 * structures similar to KASAN-CLASSIC (see rdar://73914854)
	 */
}

void
kasan_impl_kdp_disable(void)
{
	kasan_tbi_check_tag = false;
	kasan_tbi_enabled = false;
}

/* redzones are not necessary with HWASAN */
void
kasan_unpoison_cxx_array_cookie(void __unused *ptr)
{
	return;
}

static char *
kasan_tbi_decode_access(access_t access)
{
	if (access & TYPE_LOAD) {
		return "read from";
	}
	if (access & TYPE_WRITE) {
		return "write to";
	}

	return "access to";
}

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t __unused reason)
{
	size_t n = 0;

	n += scnprintf(logbuf, bufsize, "KASAN_TBI: invalid %lu-byte %s %#lx\n",
	    width, kasan_tbi_decode_access(access), p);

	return n;
}

OS_NORETURN
const char *
kasan_handle_brk_failure(void *tstate, uint16_t esr)
{
	arm_saved_state_t *state = (arm_saved_state_t *)tstate;
	vm_offset_t addr = saved_state64(state)->x[0];
	uptr width = KASAN_TBI_GET_SIZE(esr);

	access_t access;

	if (esr & KASAN_TBI_ESR_WRITE) {
		access = TYPE_STORE;
	} else {
		access = TYPE_LOAD;
	}

	kasan_crash_report(addr, width, access, REASON_MOD_OOB);
}

/*
 * To a large extent, KASAN-TBI doesn't require any poisoning, since a tag
 * mismatch is enough of a sentinel. Notwithstanding this, kasan_poison() is
 * maintained for compatibility and to detect unexpected usage, and it is
 * still at the base of our initial global variables support for feature
 * parity with KASAN-CLASSIC.
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	if (!kasan_tbi_enabled) {
		return;
	}

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(leftrz + size + rightrz) == 0);

	uint8_t tag = flags ? flags : KASAN_TBI_DEFAULT_TAG;

	kasan_tbi_tag_range(base, leftrz, KASAN_TBI_REDZONE_POISON);
	kasan_tbi_tag_range(base + leftrz, size, tag);
	kasan_tbi_tag_range(base + leftrz + size, rightrz, KASAN_TBI_REDZONE_POISON);
}
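
/*
 * Resulting shadow layout for kasan_poison(base, size, leftrz, rightrz, tag):
 *
 *   base           base+leftrz       base+leftrz+size
 *   |  0x80 ...    |  tag  ...       |  0x80 ...       |
 *   |-- left rz ---|-- allocation ---|-- right rz -----|
 *
 * Accesses through a pointer tagged 0xF1..0xFE that stray into either
 * redzone mismatch the 0x80 shadow value and are reported.
 */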

void OS_NOINLINE
kasan_impl_late_init(void)
{
}

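/*
 * One step of a Galois LFSR: shift right by one and, when the bit that
 * was shifted out is set, fold in the CRC-32 polynomial 0x04C11DB7.
 * Cheap, stateful pseudo-randomness; tag selection only needs to be
 * unpredictable, not cryptographic.
 */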
static inline uint32_t
kasan_tbi_lfsr_next(void)
{
	uint32_t v = kasan_tbi_lfsr;
	v = (v >> 1) ^ (-(v & 1) & 0x04C11DB7);
	kasan_tbi_lfsr = v;
	return v;
}

static inline uint8_t
kasan_tbi_full_tag(void)
{
	return kasan_tbi_full_tags[kasan_tbi_lfsr_next() %
	       sizeof(kasan_tbi_full_tags)] | 0xF0;
}
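
/*
 * kasan_tbi_lfsr_next() % 14 selects one of the tags 1..14, and the OR
 * with 0xF0 maps it into 0xF1..0xFE, so a freshly generated tag never
 * collides with KASAN_TBI_DEFAULT_TAG (0xFF) or the free tag (0xF0).
 */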

uintptr_t
kasan_tbi_tag_range(uintptr_t addr, size_t sz, uint8_t tag)
{
	if (sz == 0) {
		return addr;
	}

	if (tag == 0) {
		tag = KASAN_TBI_DEFAULT_TAG;
	}

#if KASAN_LIGHT
	if (!kasan_zone_maps_owned(addr, sz)) {
		tag = KASAN_TBI_DEFAULT_TAG;
		return (uintptr_t)vm_memtag_insert_tag((long)addr, tag);
	}
#endif /* KASAN_LIGHT */

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	__nosan_memset((void *)shadow_first, tag | 0xF0, shadow_last - shadow_first);
	return (uintptr_t)vm_memtag_insert_tag((long)addr, tag);
}
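
/*
 * Note that the function updates both sides of the check: the shadow
 * bytes receive the 0xF0-extended tag, and the returned pointer carries
 * the same tag in its top byte, e.g.
 *
 *   ptr = (void *)kasan_tbi_tag_range((uintptr_t)ptr, size, tag);
 *
 * Callers that retag to a new value are expected to use the returned,
 * matching pointer from then on.
 */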

static void
kasan_tbi_copy_tags(vm_offset_t new_addr, vm_offset_t old_addr, vm_size_t size)
{
	assert((new_addr & KASAN_GRANULE_MASK) == 0);
	assert((old_addr & KASAN_GRANULE_MASK) == 0);
	assert((size & KASAN_GRANULE_MASK) == 0);

	uint8_t *new_shadow = SHADOW_FOR_ADDRESS(new_addr);
	uint8_t *old_shadow = SHADOW_FOR_ADDRESS(old_addr);
	uint8_t *old_end    = SHADOW_FOR_ADDRESS(old_addr + size);

	__nosan_memcpy(new_shadow, old_shadow, old_end - old_shadow);
}

void
__hwasan_tag_memory(uintptr_t p, unsigned char tag, uintptr_t sz)
{
	if (kasan_tbi_enabled) {
#if KASAN_DEBUG
		/* Detect whether we'd be silently overwriting dirty stack */
		if (tag != 0) {
			(void)kasan_check_range((void *)p, sz, 0);
		}
#endif /* KASAN_DEBUG */
		(void)kasan_tbi_tag_range(p, sz, tag);
	}
}

unsigned char
__hwasan_generate_tag(void)
{
	uint8_t tag = KASAN_TBI_DEFAULT_TAG;

#if !KASAN_LIGHT
	if (kasan_tbi_enabled) {
		tag = kasan_tbi_full_tag();
	}
#endif /* !KASAN_LIGHT */

	return tag;
}

/* Get the tag location inside the shadow tag table */
uint8_t *
kasan_tbi_get_tag_address(vm_offset_t address)
{
	return SHADOW_FOR_ADDRESS(address);
}

/* Single out accesses to the reserved free tag */
static violation_t
kasan_tbi_estimate_reason(uint8_t __unused access_tag, uint8_t stored_tag)
{
	if (stored_tag == KASAN_TBI_DEFAULT_FREE_TAG) {
		return REASON_MOD_AFTER_FREE;
	}

	return REASON_MOD_OOB;
}

bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	if (shadow_match_value == 0) {
		kasan_check_range((void *)addr, sz, 1);
	}

	return true;
}

void OS_NOINLINE
kasan_check_range(const void *a, size_t sz, access_t access)
{
	uintptr_t addr = (uintptr_t)a;

	if (!kasan_tbi_check_tag) {
		return;
	}

	/* No point in checking a NULL pointer tag */
	if (a == NULL) {
		return;
	}

	/*
	 * Inlined code expects to match the topmost 8 bits, while we only use
	 * four. Unconditionally set the remaining bits to one.
	 */
	uint8_t tag = vm_memtag_extract_tag(addr) | 0xF0;

	/*
	 * Stay on par with the inlined instrumentation, which treats untagged
	 * addresses as wildcards.
	 */
	if (tag == KASAN_TBI_DEFAULT_TAG) {
		return;
	}

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	/*
	 * Address is tagged. Tag value must match what is present in the
	 * shadow table.
	 */
	for (uint8_t *p = shadow_first; p < shadow_last; p++) {
		if (tag == *p) {
			continue;
		}

		/* Tag mismatch, prepare the reporting */
		violation_t reason = kasan_tbi_estimate_reason(tag, *p);
		uintptr_t fault_addr = vm_memtag_insert_tag(ADDRESS_FOR_SHADOW((uintptr_t)p), tag);
		kasan_violation(fault_addr, sz, access, reason);
	}
}

/*
 * Whenever more than the required space is allocated in a bucket,
 * kasan_tbi_retag_unused_space() can be called to fill up the remaining
 * chunks (if present) with a newly randomly generated tag value, to catch
 * off-by-small accesses.
 */
void
kasan_tbi_retag_unused_space(caddr_t addr, vm_size_t size, vm_size_t used)
{
	used = kasan_granule_round(used);
	if (used < size) {
		(void) vm_memtag_generate_and_store_tag(addr + used, size - used);
	}
}
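
/*
 * Example, assuming the 16-byte KASAN granule: a 33-byte request served
 * from a 64-byte chunk rounds `used` up to 48, so the trailing 16 bytes
 * are retagged with a fresh random value. An access at offset 40 through
 * the original pointer still passes (granule rounding), but an access at
 * offset 48 or beyond now mismatches and is caught.
 */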

/*
 * KASAN-TBI tagging is based on virtual address ranges. Whenever we unwire
 * pages from a portion of the VA space in a page-based allocator, we reset
 * that VA range to the default free tag value, to catch use-after-free
 * accesses.
 */
void
kasan_tbi_mark_free_space(caddr_t addr, vm_size_t size)
{
	addr = (caddr_t)vm_memtag_insert_tag((vm_map_address_t)addr, KASAN_TBI_DEFAULT_TAG);
	vm_memtag_store_tag(addr, size);
}

/*
 * The KASAN-TBI sanitizer is an implementation of vm_memtag.
 */
void
vm_memtag_bzero_fast_checked(void *buf, vm_size_t n)
{
	bzero(buf, n);
}

void
vm_memtag_bzero_unchecked(void *buf, vm_size_t n)
{
	__nosan_bzero(buf, n);
}

vm_map_address_t
vm_memtag_load_tag(vm_map_address_t address)
{
	return vm_memtag_insert_tag(address, *kasan_tbi_get_tag_address(address));
}

void
vm_memtag_store_tag(caddr_t address, vm_size_t size)
{
	uint8_t tag = vm_memtag_extract_tag((long)address);
	kasan_tbi_tag_range((vm_address_t)address, kasan_granule_round(size), tag);
}

caddr_t
vm_memtag_generate_and_store_tag(caddr_t address, vm_size_t size)
{
	caddr_t tagged_address = (caddr_t)vm_memtag_insert_tag((long)address, kasan_tbi_full_tag());
	vm_memtag_store_tag(tagged_address, size);

	return tagged_address;
}

void
vm_memtag_verify_tag(vm_map_address_t tagged_address)
{
	__asan_load1(tagged_address);
}

void
vm_memtag_relocate_tags(vm_address_t new_address, vm_address_t old_address, vm_size_t size)
{
	kasan_tbi_copy_tags(new_address, old_address, size);
}

void
vm_memtag_disable_checking(void)
{
	/* Nothing to do with KASAN-TBI */
}

__attribute__((always_inline)) void
vm_memtag_enable_checking(void)
{
	/* Nothing to do with KASAN-TBI */
}