/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <machine/atomic.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

uintptr_t kasan_tbi_tag_range(uintptr_t, size_t, uint8_t);

#define P2ALIGN(x, align)           ((x) & -(align))
#define P2ROUNDUP(x, align)         (-(-(x) & -(align)))
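
/*
 * Worked example (illustration only): for a power-of-two 'align', these
 * macros rely on two's-complement identities, e.g. with align == 16:
 *
 *   P2ALIGN(0x1234, 16)   == 0x1234 & ~0xf    == 0x1230
 *   P2ROUNDUP(0x1234, 16) == -(-0x1234 & -16) == 0x1240
 *
 * i.e. P2ROUNDUP(x, a) == (x + a - 1) & ~(a - 1), without the intermediate
 * overflow the naive form risks near the top of the address space.
 */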

/* Configuration options */
bool kasan_tbi_check_tag = false;
bool kasan_tbi_enabled = false;

/* Reserved tags */
#define KASAN_TBI_DEFAULT_TAG       0xFF
#define KASAN_TBI_ZALLOC_FREE_TAG   0xF0
#define KASAN_TBI_REDZONE_POISON    0x80

#if defined(ARM_LARGE_MEMORY)
#define KASAN_TBI_SHADOW_MIN        (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_TBI_SHADOW_MAX        0xffffffffffffffffULL
#else
#define KASAN_TBI_SHADOW_MIN        0xfffffffe00000000ULL
#define KASAN_TBI_SHADOW_MAX        0xffffffffc0000000ULL
#endif

#if !CONFIG_KERNEL_TBI
#error "KASAN-TBI requires KERNEL DATA TBI enabled"
#endif /* CONFIG_KERNEL_TBI */

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */
extern uint64_t ml_get_speculative_timebase(void);

/* Stack and large allocations use the whole set of tags. Tags 0 and 15 are reserved. */
static uint8_t kasan_tbi_full_tags[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };
static uint8_t kasan_tbi_odd_tags[] = { 1, 3, 5, 7, 9, 11, 13 };
static uint8_t kasan_tbi_even_tags[] = { 2, 4, 6, 8, 10, 12, 14 };

static uint32_t kasan_tbi_lfsr;

/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __hwasan_shadow_memory_dynamic_address = KASAN_OFFSET;
/* Make LLDB/automated tools happy for now */
const uintptr_t __asan_shadow_memory_dynamic_address = __hwasan_shadow_memory_dynamic_address;
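
/*
 * Illustrative sketch, not the authoritative mapping: SHADOW_FOR_ADDRESS()
 * (kasan_internal.h) associates one shadow byte with each 16-byte granule,
 * conceptually:
 *
 *   uint8_t *shadow = (uint8_t *)(__hwasan_shadow_memory_dynamic_address
 *       + (addr >> 4));   // one byte per 16-byte granule (assumed layout)
 *
 * Inlined checks emitted by LLVM compare the pointer's top-byte tag with
 * the value stored at that shadow location.
 */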

/*
 * Untagged kernel addresses start with 0xFF. Match that whenever we create
 * valid regions.
 */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	(void) __nosan_memset((void *)page, KASAN_TBI_DEFAULT_TAG, size);
}

void
kasan_impl_init(void)
{
	kasan_tbi_lfsr = (uint32_t)ml_get_speculative_timebase();

	/*
	 * KASAN depends on CONFIG_KERNEL_TBI, therefore (DATA) TBI has been
	 * set for us already at bootstrap.
	 */
	kasan_tbi_enabled = true;

	/* Enable checking early on */
	kasan_tbi_check_tag = true;

	/*
	 * Sanity check on features that are effectively disabled, but might
	 * have erroneously been set up by legacy boot-args.
	 */
	if (fakestack_enabled) {
		fakestack_enabled = 0;
	}
}

void NOINLINE
kasan_init_globals(vm_offset_t __unused base, vm_size_t __unused size)
{
	/*
	 * KASAN-TBI global support awaits compiler fixes to generate descriptive
	 * structures similar to KASAN-CLASSIC (see rdar://73914854)
	 */
}

void
kasan_impl_kdp_disable(void)
{
	kasan_tbi_check_tag = false;
	kasan_tbi_enabled = false;
}

/* redzones are not necessary with HWASAN */
void
kasan_unpoison_cxx_array_cookie(void __unused *ptr)
{
	return;
}

static char *
kasan_tbi_decode_access(access_t access)
{
	if (access & TYPE_LOAD) {
		return "read from";
	}
	if (access & TYPE_WRITE) {
		return "write to";
	}

	return "access to";
}

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t __unused reason)
{
	size_t n = 0;

	n += scnprintf(logbuf, bufsize, "KASAN_TBI: invalid %lu-byte %s %#lx\n",
	    width, kasan_tbi_decode_access(access), p);

	return n;
}
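
/*
 * Example (illustrative, with a hypothetical address): an invalid 8-byte
 * load through a pointer tagged 0xF3 is reported roughly as
 *
 *   KASAN_TBI: invalid 8-byte read from 0xf3ffff70081c4a40
 */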

void OS_NORETURN
kasan_handle_brk_failure(vm_offset_t addr, uint16_t esr)
{
	uptr width = KASAN_TBI_GET_SIZE(esr);
	access_t access;

	if (esr & KASAN_TBI_ESR_WRITE) {
		access = TYPE_STORE;
	} else {
		access = TYPE_LOAD;
	}

	kasan_crash_report(addr, width, access, REASON_MOD_OOB);
}
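
/*
 * Sketch of the assumed flow: compiler-instrumented checks that detect a
 * tag mismatch execute a BRK instruction whose comment field reaches the
 * trap handler through the ESR; KASAN_TBI_GET_SIZE() and
 * KASAN_TBI_ESR_WRITE (kasan_internal.h) then recover the access width and
 * direction before the crash report above is generated.
 */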

/*
 * To a large extent, KASAN-TBI doesn't require any poisoning: a tag
 * mismatch is enough of a sentinel on its own. Notwithstanding this,
 * kasan_poison() is maintained for compatibility and to detect unexpected
 * usage, and it still underpins our initial global variable support for
 * feature parity with KASAN-CLASSIC.
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	if (!kasan_tbi_enabled) {
		return;
	}

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(leftrz + size + rightrz) == 0);

	uint8_t tag = flags ? flags : KASAN_TBI_DEFAULT_TAG;

	kasan_tbi_tag_range(base, leftrz, KASAN_TBI_REDZONE_POISON);
	kasan_tbi_tag_range(base + leftrz, size, tag);
	kasan_tbi_tag_range(base + leftrz + size, rightrz, KASAN_TBI_REDZONE_POISON);
}
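
/*
 * Resulting layout (illustration): for kasan_poison(base, size, leftrz,
 * rightrz, 0) the granule tags end up as
 *
 *   base                 base+leftrz        base+leftrz+size
 *   | 0x80 (left rz) ... | 0xFF (valid) ... | 0x80 (right rz) ... |
 *
 * so any access that strays outside [base+leftrz, base+leftrz+size) hits a
 * mismatching shadow value.
 */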

void OS_NOINLINE
kasan_impl_late_init(void)
{
}

static inline uint32_t
kasan_tbi_lfsr_next(void)
{
	uint32_t v = kasan_tbi_lfsr;
	v = (v >> 1) ^ (-(v & 1) & 0x04C11DB7);
	kasan_tbi_lfsr = v;
	return v;
}
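
/*
 * Note: the tap constant above is the CRC-32 generator polynomial
 * 0x04C11DB7, stepped as a Galois LFSR. Tag selection is thus pseudo-random
 * but cheap and lock-free (concurrent updates at worst perturb the
 * sequence); it is seeded once per boot from the speculative timebase in
 * kasan_impl_init().
 */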

static inline uint8_t
kasan_tbi_full_tag(void)
{
	return kasan_tbi_full_tags[kasan_tbi_lfsr_next() %
	       sizeof(kasan_tbi_full_tags)] | 0xF0;
}

static inline uint8_t
kasan_tbi_odd_even_tag(vm_offset_t addr, size_t size)
{
	uint32_t i = kasan_tbi_lfsr_next();
	uint8_t tag = 0xF0;

	if ((addr / size) % 2) {
		tag |= kasan_tbi_odd_tags[i % sizeof(kasan_tbi_odd_tags)];
	} else {
		tag |= kasan_tbi_even_tags[i % sizeof(kasan_tbi_even_tags)];
	}

	return tag;
}
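
/*
 * Example (illustrative): two adjacent 64-byte zone elements at offsets
 * 0x000 and 0x040 have even and odd (addr / size) indices respectively, so
 * one draws from kasan_tbi_even_tags and the other from kasan_tbi_odd_tags.
 * A linear overflow from one element into its neighbor therefore always
 * changes tag parity and faults, whatever the LFSR produced.
 */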

uintptr_t
kasan_tbi_tag_range(uintptr_t addr, size_t sz, uint8_t tag)
{
	if (sz == 0) {
		return addr;
	}

#if KASAN_LIGHT
	if (!kasan_zone_maps_owned(addr, sz)) {
		tag = KASAN_TBI_DEFAULT_TAG;
		return (uintptr_t)kasan_tbi_tag_ptr((long)addr, tag);
	}
#endif /* KASAN_LIGHT */

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	__nosan_memset((void *)shadow_first, tag, shadow_last - shadow_first);
	return (uintptr_t)kasan_tbi_tag_ptr((long)addr, tag);
}
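
/*
 * Example (illustrative): kasan_tbi_tag_range(addr, 48, 0xF3) on a
 * granule-aligned addr writes 0xF3 into three consecutive shadow bytes
 * (48 bytes == 3 granules) and returns addr with 0xF3 placed in its top
 * byte, ready to be handed back to the caller.
 */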

static vm_offset_t
kasan_tbi_do_tag_zone_object(vm_offset_t addr, vm_size_t elem_size, uint8_t tag, boolean_t zxcpu)
{
	vm_offset_t retaddr = kasan_tbi_tag_range(addr, elem_size, tag);
	/*
	 * If the allocation comes from the per-cpu zones, extend the tag to
	 * all the adjacent per-CPU instances.
	 */
	if (zxcpu) {
		zpercpu_foreach_cpu(index) {
			(void)kasan_tbi_tag_range(addr + ptoa(index), elem_size, tag);
		}
	}

	return retaddr;
}

void
kasan_tbi_copy_tags(vm_offset_t new_addr, vm_offset_t old_addr, vm_size_t size)
{
	assert((new_addr & KASAN_GRANULE_MASK) == 0);
	assert((old_addr & KASAN_GRANULE_MASK) == 0);
	assert((size & KASAN_GRANULE_MASK) == 0);

	uint8_t *new_shadow = SHADOW_FOR_ADDRESS(new_addr);
	uint8_t *old_shadow = SHADOW_FOR_ADDRESS(old_addr);
	uint8_t *old_end    = SHADOW_FOR_ADDRESS(old_addr + size);

	__nosan_memcpy(new_shadow, old_shadow, old_end - old_shadow);
}
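
/*
 * Usage sketch (hypothetical caller): when an allocation is relocated,
 * e.g. by a reallocating zone, the shadow state can be moved wholesale so
 * that already-issued tagged pointers stay coherent:
 *
 *   kasan_tbi_copy_tags(new_addr, old_addr, kasan_granule_round(size));
 */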

vm_offset_t
kasan_tbi_tag_zalloc(vm_offset_t addr, vm_size_t size, vm_size_t used, boolean_t zxcpu)
{
	used = kasan_granule_round(used);
	if (used < size) {
		kasan_tbi_tag_zfree(addr + used, size - used, zxcpu);
	}
	uint8_t tag = kasan_tbi_odd_even_tag(addr, size);
	return kasan_tbi_do_tag_zone_object(addr, used, tag, zxcpu);
}
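
/*
 * Example (illustrative): for a 64-byte element of which only 48 bytes are
 * requested, kasan_tbi_tag_zalloc(addr, 64, 48, false) tags the first
 * three granules with a fresh odd/even tag and the trailing granule with
 * KASAN_TBI_ZALLOC_FREE_TAG, so touching the unused tail faults just like
 * touching freed memory.
 */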

vm_offset_t
kasan_tbi_tag_zalloc_default(vm_offset_t addr, vm_size_t size, boolean_t zxcpu)
{
	return kasan_tbi_do_tag_zone_object(addr, size, KASAN_TBI_DEFAULT_TAG, zxcpu);
}

vm_offset_t
kasan_tbi_tag_zfree(vm_offset_t addr, vm_size_t elem_size, boolean_t zxcpu)
{
	return kasan_tbi_do_tag_zone_object(addr, elem_size, KASAN_TBI_ZALLOC_FREE_TAG, zxcpu);
}

void
__hwasan_tag_memory(uintptr_t p, unsigned char tag, uintptr_t sz)
{
	if (kasan_tbi_enabled) {
#if KASAN_DEBUG
		/* Detect whether we'd be silently overwriting dirty stack */
		if (tag != 0) {
			(void)kasan_check_range((void *)p, sz, 0);
		}
#endif /* KASAN_DEBUG */
		(void)kasan_tbi_tag_range(p, sz, tag);
	}
}

unsigned char
__hwasan_generate_tag(void)
{
	uint8_t tag = KASAN_TBI_DEFAULT_TAG;

#if !KASAN_LIGHT
	if (kasan_tbi_enabled) {
		tag = kasan_tbi_full_tag();
	}
#endif /* !KASAN_LIGHT */

	return tag;
}
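
/*
 * Sketch of the assumed compiler contract: instrumented function prologues
 * call __hwasan_generate_tag() to obtain a base tag for the stack frame,
 * derive per-variable tags from it, and publish them via
 * __hwasan_tag_memory(), conceptually:
 *
 *   uint8_t base = __hwasan_generate_tag();
 *   __hwasan_tag_memory(slot_addr, base, slot_size);  // hypothetical slot
 */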

vm_offset_t
kasan_tbi_tag_large_alloc(vm_offset_t addr, vm_size_t size, vm_size_t used)
{
	used = kasan_granule_round(used);
	if (used < size) {
		kasan_tbi_tag_large_free(addr + used, size - used);
	}
	return kasan_tbi_tag_range(addr, used, kasan_tbi_full_tag());
}

vm_offset_t
kasan_tbi_tag_large_free(vm_offset_t addr, vm_size_t size)
{
	return kasan_tbi_tag_range(addr, size, KASAN_TBI_DEFAULT_TAG);
}

/* Return the shadow table tag location */
__attribute__((always_inline))
uint8_t *
kasan_tbi_get_tag_address(vm_offset_t addr)
{
	return SHADOW_FOR_ADDRESS(addr);
}

/* Query the shadow table and return the memory tag */
__attribute__((always_inline))
uint8_t
kasan_tbi_get_memory_tag(vm_offset_t addr)
{
	return *kasan_tbi_get_tag_address(addr);
}

/* Query the shadow table and tag the address accordingly */
vm_offset_t
kasan_tbi_fix_address_tag(vm_offset_t addr)
{
	return (uintptr_t)kasan_tbi_tag_ptr((long)addr, kasan_tbi_get_memory_tag(addr));
}

/* Single out accesses to the reserved free tag */
static violation_t
kasan_tbi_estimate_reason(uint8_t __unused access_tag, uint8_t stored_tag)
{
	if (stored_tag == KASAN_TBI_ZALLOC_FREE_TAG) {
		return REASON_MOD_AFTER_FREE;
	}

	return REASON_MOD_OOB;
}

bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	if (shadow_match_value == 0) {
		kasan_check_range((void *)addr, sz, 1);
	}

	return true;
}
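
/*
 * Example of a violation caught below (illustrative): a pointer still
 * carrying tag 0xF3 that is dereferenced after its element was retagged to
 * KASAN_TBI_ZALLOC_FREE_TAG (0xF0) mismatches during the shadow walk, and
 * kasan_tbi_estimate_reason() classifies it as REASON_MOD_AFTER_FREE.
 */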

void OS_NOINLINE
kasan_check_range(const void *a, size_t sz, access_t access)
{
	uintptr_t addr = (uintptr_t)a;

	if (!kasan_tbi_check_tag) {
		return;
	}

	/*
	 * The inlined instrumentation matches on the topmost 8 bits, while we
	 * only use four. Unconditionally set the remaining bits to one.
	 */
	uint8_t tag = kasan_tbi_get_tag(addr) | 0xF0;

	/*
	 * Stay on par with the inlined instrumentation, which treats untagged
	 * addresses as wildcards.
	 */
	if (tag == KASAN_TBI_DEFAULT_TAG) {
		return;
	}

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	/*
	 * Address is tagged. The tag value must match what is present in the
	 * shadow table.
	 */
	for (uint8_t *p = shadow_first; p < shadow_last; p++) {
		if (tag == *p) {
			continue;
		}

		/* Tag mismatch, prepare the report */
		violation_t reason = kasan_tbi_estimate_reason(tag, *p);
		uintptr_t fault_addr = kasan_tbi_tag_ptr(ADDRESS_FOR_SHADOW((uintptr_t)p), tag);
		kasan_violation(fault_addr, sz, access, reason);
	}
}
461