/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <vm/vm_memtag.h>
#include <kern/assert.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/telemetry.h>
#include <kern/thread.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <machine/atomic.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

uintptr_t kasan_tbi_tag_range(uintptr_t, size_t, uint8_t);

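/*
 * Power-of-two rounding helpers: P2ALIGN() rounds x down and P2ROUNDUP()
 * rounds x up to the nearest multiple of align, which must be a power of
 * two (e.g. P2ROUNDUP(40, 16) == 48).
 */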
#define P2ALIGN(x, align)       ((x) & -(align))
#define P2ROUNDUP(x, align)     (-(-(x) & -(align)))

/* Configuration options */
bool kasan_tbi_check_tag = false;
bool kasan_tbi_enabled = false;

/* Reserved tags */
#define KASAN_TBI_DEFAULT_TAG           0xFF
#define KASAN_TBI_DEFAULT_FREE_TAG      0xF0
#define KASAN_TBI_REDZONE_POISON        0x80
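/*
 * Kernel pointers canonically start with 0xFF, so 0xFF doubles as the
 * "untagged"/wildcard tag, while 0xF0 marks freed ranges. Note that tags
 * written to the shadow table are OR'd with 0xF0 (see
 * kasan_tbi_tag_range()), so the redzone poison value also lands in the
 * reserved 0xF0 slot.
 */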

#if defined(ARM_LARGE_MEMORY)
#define KASAN_TBI_SHADOW_MIN    (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_TBI_SHADOW_MAX    0xffffffffffffffffULL
#else
#define KASAN_TBI_SHADOW_MIN    0xfffffffe00000000ULL
#define KASAN_TBI_SHADOW_MAX    0xffffffffc0000000ULL
#endif

#if !CONFIG_KERNEL_TAGGING
#error "KASAN-TBI requires KERNEL TAGGING"
#endif /* CONFIG_KERNEL_TAGGING */

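/*
 * Inline tag checks report failures through AArch64 BRK instructions whose
 * immediates (mirrored in the ESR) fall in a dedicated KASAN range; route
 * that whole window to kasan_handle_brk_failure().
 */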
KERNEL_BRK_DESCRIPTOR_DEFINE(kasan_desc,
    .type = KERNEL_BRK_TYPE_KASAN,
    .base = KASAN_TBI_ESR_BASE,
    .max = KASAN_TBI_ESR_TOP,
    .options = KERNEL_BRK_UNRECOVERABLE,
    .handle_breakpoint = kasan_handle_brk_failure);

#if KASAN_LIGHT
extern bool kasan_zone_maps_owned(vm_address_t, vm_size_t);
#endif /* KASAN_LIGHT */
extern uint64_t ml_get_speculative_timebase(void);

/* Stack and large allocations use the whole set of tags. Tags 0 and 15 are reserved. */
static uint8_t kasan_tbi_full_tags[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 };

/* Randomize tag allocation through a simple LFSR */
static uint32_t kasan_tbi_lfsr;

/*
 * LLVM contains enough logic to inline check operations against the shadow
 * table and uses this symbol as an anchor to find it in memory.
 */
const uintptr_t __hwasan_shadow_memory_dynamic_address = KASAN_OFFSET;
/* Make LLDB/automated tools happy for now */
const uintptr_t __asan_shadow_memory_dynamic_address = __hwasan_shadow_memory_dynamic_address;

/*
 * Untagged kernel addresses start with 0xFF. Match that whenever we create
 * valid regions.
 */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	(void) __nosan_memset((void *)page, KASAN_TBI_DEFAULT_TAG, size);
}

void
kasan_impl_init(void)
{
	kasan_tbi_lfsr = (uint32_t)ml_get_speculative_timebase();

	/*
	 * KASAN-TBI depends on CONFIG_KERNEL_TBI, so (data) TBI has already
	 * been set up for us at bootstrap.
	 */
	kasan_tbi_enabled = true;

	/* Enable checking early on */
	kasan_tbi_check_tag = true;

	/*
	 * Sanity check on features that are effectively disabled, but might
	 * have erroneously been set up by legacy boot-args.
	 */
	if (fakestack_enabled) {
		fakestack_enabled = 0;
	}
}

void NOINLINE
kasan_init_globals(vm_offset_t __unused base, vm_size_t __unused size)
{
	/*
	 * KASAN-TBI global support awaits compiler fixes to generate descriptive
	 * structures similar to KASAN-CLASSIC (see rdar://73914854)
	 */
}

void
kasan_impl_kdp_disable(void)
{
	kasan_tbi_check_tag = false;
	kasan_tbi_enabled = false;
}

/* redzones are not necessary with HWASAN */
void
kasan_unpoison_cxx_array_cookie(void __unused *ptr)
{
	return;
}

static const char *
kasan_tbi_decode_access(access_t access)
{
	if (access & TYPE_LOAD) {
		return "read from";
	}
	if (access & TYPE_WRITE) {
		return "write to";
	}

	return "access to";
}

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width, access_t access, violation_t __unused reason)
{
	size_t n = 0;

	n += scnprintf(logbuf, bufsize, "KASAN_TBI: invalid %lu-byte %s %#lx\n",
	    width, kasan_tbi_decode_access(access), p);

	return n;
}

void OS_NORETURN
kasan_handle_brk_failure(void *tstate, uint16_t esr)
{
	arm_saved_state_t *state = (arm_saved_state_t *)tstate;
	vm_offset_t addr = saved_state64(state)->x[0];
	uptr width = KASAN_TBI_GET_SIZE(esr);

	access_t access;

	if (esr & KASAN_TBI_ESR_WRITE) {
		access = TYPE_STORE;
	} else {
		access = TYPE_LOAD;
	}

	kasan_crash_report(addr, width, access, REASON_MOD_OOB);
}

/*
 * To a large extent, KASAN-TBI doesn't require any poisoning, since a
 * tag-version mismatch is enough of a sentinel. Notwithstanding this,
 * kasan_poison() is maintained for compatibility and to detect unexpected
 * usage, and it is still at the base of our initial global variables
 * support for feature parity with KASAN-CLASSIC.
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t flags)
{
	if (!kasan_tbi_enabled) {
		return;
	}

	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(leftrz + size + rightrz) == 0);

	uint8_t tag = flags ? flags : KASAN_TBI_DEFAULT_TAG;

	kasan_tbi_tag_range(base, leftrz, KASAN_TBI_REDZONE_POISON);
	kasan_tbi_tag_range(base + leftrz, size, tag);
	kasan_tbi_tag_range(base + leftrz + size, rightrz, KASAN_TBI_REDZONE_POISON);
}

void OS_NOINLINE
kasan_impl_late_init(void)
{
}

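/*
 * Galois-style LFSR stepping right, using the CRC-32 polynomial constant
 * (0x04C11DB7). Not cryptographically strong; it only needs to spread tag
 * values cheaply.
 */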
static inline uint32_t
kasan_tbi_lfsr_next(void)
{
	uint32_t v = kasan_tbi_lfsr;
	v = (v >> 1) ^ (-(v & 1) & 0x04C11DB7);
	kasan_tbi_lfsr = v;
	return v;
}

static inline uint8_t
kasan_tbi_full_tag(void)
{
	return kasan_tbi_full_tags[kasan_tbi_lfsr_next() %
	       sizeof(kasan_tbi_full_tags)] | 0xF0;
}

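/*
 * Write `tag` into every shadow byte backing [addr, addr + sz) and return
 * the pointer with that tag folded into its top byte. One shadow byte
 * covers one 16-byte granule, so e.g. tagging 48 bytes touches 3 shadow
 * bytes.
 */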
uintptr_t
kasan_tbi_tag_range(uintptr_t addr, size_t sz, uint8_t tag)
{
	if (sz == 0) {
		return addr;
	}

	if (tag == 0) {
		tag = KASAN_TBI_DEFAULT_TAG;
	}

#if KASAN_LIGHT
	if (!kasan_zone_maps_owned(addr, sz)) {
		tag = KASAN_TBI_DEFAULT_TAG;
		return (uintptr_t)vm_memtag_add_ptr_tag((long)addr, tag);
	}
#endif /* KASAN_LIGHT */

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	__nosan_memset((void *)shadow_first, tag | 0xF0, shadow_last - shadow_first);
	return (uintptr_t)vm_memtag_add_ptr_tag((long)addr, tag);
}

static void
kasan_tbi_copy_tags(vm_offset_t new_addr, vm_offset_t old_addr, vm_size_t size)
{
	assert((new_addr & KASAN_GRANULE_MASK) == 0);
	assert((old_addr & KASAN_GRANULE_MASK) == 0);
	assert((size & KASAN_GRANULE_MASK) == 0);

	uint8_t *new_shadow = SHADOW_FOR_ADDRESS(new_addr);
	uint8_t *old_shadow = SHADOW_FOR_ADDRESS(old_addr);
	uint8_t *old_end = SHADOW_FOR_ADDRESS(old_addr + size);

	__nosan_memcpy(new_shadow, old_shadow, old_end - old_shadow);
}

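/*
 * Entry point used by the compiler's HWASan instrumentation (e.g. to tag
 * stack allocations); tag == 0 requests the default wildcard tag.
 */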
void
__hwasan_tag_memory(uintptr_t p, unsigned char tag, uintptr_t sz)
{
	if (kasan_tbi_enabled) {
#if KASAN_DEBUG
		/* Detect whether we'd be silently overwriting dirty stack */
		if (tag != 0) {
			(void)kasan_check_range((void *)p, sz, 0);
		}
#endif /* KASAN_DEBUG */
		(void)kasan_tbi_tag_range(p, sz, tag);
	}
}

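/*
 * Compiler hook: pick a pseudo-random tag for a new allocation. Under
 * KASAN_LIGHT everything stays at the default wildcard tag.
 */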
unsigned char
__hwasan_generate_tag(void)
{
	uint8_t tag = KASAN_TBI_DEFAULT_TAG;

#if !KASAN_LIGHT
	if (kasan_tbi_enabled) {
		tag = kasan_tbi_full_tag();
	}
#endif /* !KASAN_LIGHT */

	return tag;
}

/* Get the tag location inside the shadow tag table */
uint8_t *
kasan_tbi_get_tag_address(vm_offset_t address)
{
	return SHADOW_FOR_ADDRESS(address);
}

static inline uint8_t
kasan_tbi_get_tag(vm_offset_t address)
{
	return *kasan_tbi_get_tag_address(address);
}

/* Single out accesses to the reserved free tag */
static violation_t
kasan_tbi_estimate_reason(uint8_t __unused access_tag, uint8_t stored_tag)
{
	if (stored_tag == KASAN_TBI_DEFAULT_FREE_TAG) {
		return REASON_MOD_AFTER_FREE;
	}

	return REASON_MOD_OOB;
}

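/*
 * KASAN-CLASSIC-style entry point. Shadow bytes hold tags here, not
 * validity encodings, so only the fully-accessible match value (0)
 * translates into a tag check.
 */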
bool
kasan_check_shadow(vm_address_t addr, vm_size_t sz, uint8_t shadow_match_value)
{
	if (shadow_match_value == 0) {
		kasan_check_range((void *)addr, sz, 1);
	}

	return true;
}

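/*
 * Software tag check: extract the pointer's 4-bit tag, widen it to a full
 * byte by forcing the top nibble to 0xF, and compare it against every
 * shadow byte covering the access; 0xFF pointers are wildcards and skip
 * the check entirely.
 */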
void OS_NOINLINE
kasan_check_range(const void *a, size_t sz, access_t access)
{
	uintptr_t addr = (uintptr_t)a;

	if (!kasan_tbi_check_tag) {
		return;
	}

	/* No point in checking a NULL pointer tag */
	if (a == NULL) {
		return;
	}

	/*
	 * The inlined checks match the topmost 8 bits, while we only use
	 * four. Unconditionally set the remaining bits to one.
	 */
	uint8_t tag = vm_memtag_extract_tag(addr) | 0xF0;

	/*
	 * Stay on par with the inline instrumentation, which treats untagged
	 * addresses as wildcards.
	 */
	if (tag == KASAN_TBI_DEFAULT_TAG) {
		return;
	}

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	/*
	 * Address is tagged. Tag value must match what is present in the
	 * shadow table.
	 */
	for (uint8_t *p = shadow_first; p < shadow_last; p++) {
		if (tag == *p) {
			continue;
		}

		/* Tag mismatch, prepare the reporting */
		violation_t reason = kasan_tbi_estimate_reason(tag, *p);
		uintptr_t fault_addr = vm_memtag_add_ptr_tag(ADDRESS_FOR_SHADOW((uintptr_t)p), tag);
		kasan_violation(fault_addr, sz, access, reason);
	}
}

/*
 * Whenever more than the required space is allocated in a bucket,
 * kasan_tbi_retag_unused_space() can be called to fill up the remaining
 * chunks (if present) with a freshly generated random tag value, to catch
 * small out-of-bounds accesses.
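 * For example, with a 64-byte bucket of which only 40 bytes are used,
 * `used` is granule-rounded up to 48 and the tail [48, 64) gets its own
 * distinct tag.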
 */
void
kasan_tbi_retag_unused_space(vm_offset_t addr, vm_size_t size, vm_size_t used)
{
	used = kasan_granule_round(used);
	if (used < size) {
		vm_offset_t unused_tag_addr = vm_memtag_assign_tag(addr + used, size - used);
		vm_memtag_set_tag(unused_tag_addr, size - used);
	}
}

/*
 * KASAN-TBI tagging is based on virtual address ranges. Whenever we unwire
 * pages from a portion of the VA space in a page-based allocator, we reset
 * that VA range to the default free tag value, to catch use-after-free
 * accesses.
 */
void
kasan_tbi_mark_free_space(vm_offset_t addr, vm_size_t size)
{
	addr = vm_memtag_add_ptr_tag(addr, KASAN_TBI_DEFAULT_FREE_TAG);
	vm_memtag_set_tag(addr, size);
}

/*
 * KASAN-TBI sanitizer is an implementation of vm_memtag.
 */
__attribute__((always_inline))
void
vm_memtag_bzero(void *buf, vm_size_t n)
{
	bzero(buf, n);
}

/* Query the shadow table and return the associated tag. */
__attribute__((always_inline))
uint8_t
vm_memtag_get_tag(vm_offset_t address)
{
	return kasan_tbi_get_tag(address);
}

__attribute__((always_inline))
vm_offset_t
vm_memtag_fixup_ptr(vm_offset_t address)
{
	return vm_memtag_add_ptr_tag(address, vm_memtag_get_tag(address));
}

__attribute__((always_inline))
void
vm_memtag_set_tag(vm_offset_t address, vm_offset_t size)
{
	uint8_t tag = vm_memtag_extract_tag(address);
	kasan_tbi_tag_range(address, kasan_granule_round(size), tag);
}

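/*
 * Assign a fresh random tag to the pointer only; the backing memory is
 * (re)tagged separately through vm_memtag_set_tag(), hence size is unused.
 */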
__attribute__((always_inline))
vm_offset_t
vm_memtag_assign_tag(vm_offset_t address, __unused vm_size_t size)
{
	uint8_t tag = kasan_tbi_full_tag();
	return vm_memtag_add_ptr_tag((long)address, tag);
}

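/*
 * Validate a tagged pointer by issuing an instrumented one-byte load,
 * which reports any tag mismatch.
 */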
__attribute__((always_inline)) void
vm_memtag_verify_tag(vm_offset_t tagged_address)
{
	__asan_load1(tagged_address);
}

void
vm_memtag_relocate_tags(vm_offset_t new_address, vm_offset_t old_address, vm_offset_t size)
{
	kasan_tbi_copy_tags(new_address, old_address, size);
}

__attribute__((always_inline)) void
vm_memtag_disable_checking(void)
{
	/* Nothing to do with KASAN-TBI */
}

__attribute__((always_inline)) void
vm_memtag_enable_checking(void)
{
	/* Nothing to do with KASAN-TBI */
}