/*
 * Copyright (c) 2016-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <stdint.h>
#include <stdbool.h>
#include <vm/vm_map.h>
#include <vm/pmap.h>
#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/backtrace.h>
#include <machine/machine_routines.h>
#include <kern/locks.h>
#include <kern/debug.h>
#include <kern/thread.h>
#include <kern/zalloc.h>
#include <libkern/libkern.h>
#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>
#include <mach/sdt.h>
#include <machine/atomic.h>

#include "kasan.h"
#include "kasan_internal.h"
#include "memintrinsics.h"

uintptr_t kasan_tbi_tag_range(uintptr_t, size_t, uint8_t);

#define P2ALIGN(x, align)       ((x) & -(align))
#define P2ROUNDUP(x, align)     (-(-(x) & -(align)))
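
/*
 * Worked example of the power-of-two helpers above (both assume `align`
 * is a power of two):
 *
 *	P2ALIGN(0x1234, 16)   == 0x1230   (round down to the granule)
 *	P2ROUNDUP(0x1234, 16) == 0x1240   (round up to the granule)
 */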

/* Configuration options */
bool kasan_tbi_check_tag = false;
bool kasan_tbi_enabled = false;

/* Reserved tags */
#define KASAN_TBI_DEFAULT_TAG           0xFF
#define KASAN_TBI_ZALLOC_FREE_TAG       0xF0
#define KASAN_TBI_REDZONE_POISON        0x80

#if defined(ARM_LARGE_MEMORY)
#define KASAN_TBI_SHADOW_MIN    (VM_MAX_KERNEL_ADDRESS+1)
#define KASAN_TBI_SHADOW_MAX    0xffffffffffffffffULL
#else
#define KASAN_TBI_SHADOW_MIN    0xfffffffe00000000ULL
#define KASAN_TBI_SHADOW_MAX    0xffffffffc0000000ULL
#endif

#if !CONFIG_KERNEL_TBI
#error "KASAN-TBI requires KERNEL DATA TBI enabled"
#endif /* CONFIG_KERNEL_TBI */

/*
 * Untagged kernel addresses start with 0xFF. Match that whenever we create
 * valid regions.
 */
void
kasan_impl_fill_valid_range(uintptr_t page, size_t size)
{
	(void) __nosan_memset((void *)page, 0xFF, size);
}
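
/*
 * Example of why 0xFF is the fill value (hypothetical address): an untagged
 * kernel pointer such as 0xfffffe0012345678 carries 0xFF in its top byte,
 * so once the shadow is filled with 0xFF, accesses through plain,
 * never-tagged pointers match their shadow bytes and the region behaves
 * like conventional memory.
 */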

void
kasan_impl_init(void)
{
	/*
	 * KASAN depends on CONFIG_KERNEL_TBI, therefore (DATA) TBI has
	 * already been set up for us at bootstrap.
	 */
	kasan_tbi_enabled = true;

	/* Enable checking early on */
	kasan_tbi_check_tag = true;

	/*
	 * Sanity check on features that are effectively disabled, but might
	 * have been erroneously set up by legacy boot-args.
	 */
	if (fakestack_enabled) {
		fakestack_enabled = 0;
	}
}

void NOINLINE
kasan_init_globals(vm_offset_t __unused base, vm_size_t __unused size)
{
	/*
	 * KASAN-TBI global support awaits compiler fixes to generate
	 * descriptive structures similar to KASAN-CLASSIC (see
	 * rdar://73914854).
	 */
}

void
kasan_impl_kdp_disable(void)
{
	kasan_tbi_check_tag = false;
	kasan_tbi_enabled = false;
}

/* redzones are not necessary with HWASAN */
void
kasan_unpoison_cxx_array_cookie(void __unused *ptr)
{
	return;
}

static const char *
kasan_tbi_decode_access(access_t access)
{
	if (access & TYPE_LOAD) {
		return "read from";
	}
	if (access & TYPE_WRITE) {
		return "write to";
	}

	return "access to";
}

size_t
kasan_impl_decode_issue(char *logbuf, size_t bufsize, uptr p, uptr width,
    access_t access, violation_t __unused reason)
{
	size_t n = 0;

	n += scnprintf(logbuf, bufsize, "KASAN_TBI: invalid %lu-byte %s %#lx\n",
	    width, kasan_tbi_decode_access(access), p);

	return n;
}
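
/*
 * With the format above, a bad 8-byte store would be reported as, e.g.
 * (hypothetical address):
 *
 *	KASAN_TBI: invalid 8-byte write to 0xf2fffe0012345670
 */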

void OS_NORETURN
kasan_handle_brk_failure(vm_offset_t addr, uint16_t esr)
{
	uptr width = KASAN_TBI_GET_SIZE(esr);
	access_t access;

	if (esr & KASAN_TBI_ESR_WRITE) {
		access = TYPE_STORE;
	} else {
		access = TYPE_LOAD;
	}

	kasan_crash_report(addr, width, access, REASON_MOD_OOB);
}

/*
 * To a large extent, KASAN-TBI doesn't require any poisoning, since a
 * version mismatch is enough of a sentinel. Notwithstanding this,
 * kasan_poison() is maintained for compatibility and to detect unexpected
 * usage. It is also still at the base of our initial global variables
 * support, for feature parity with KASAN-CLASSIC.
 */
void NOINLINE
kasan_poison(vm_offset_t base, vm_size_t size, vm_size_t leftrz,
    vm_size_t rightrz, uint8_t __unused flags)
{
	/* ensure base, leftrz and total allocation size are granule-aligned */
	assert(kasan_granule_partial(base) == 0);
	assert(kasan_granule_partial(leftrz) == 0);
	assert(kasan_granule_partial(leftrz + size + rightrz) == 0);

	kasan_tbi_tag_range(base, leftrz, KASAN_TBI_REDZONE_POISON);
	kasan_tbi_tag_range(base + leftrz, size, KASAN_TBI_DEFAULT_TAG);
	kasan_tbi_tag_range(base + leftrz + size, rightrz, KASAN_TBI_REDZONE_POISON);
}
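
/*
 * Resulting shadow layout, one tag per 16-byte granule:
 *
 *	base             base+leftrz        base+leftrz+size
 *	| 0x80 ... 0x80 | 0xFF  ...  0xFF | 0x80 ... 0x80 |
 *	  left redzone    payload           right redzone
 *
 * The 0x80 redzone tag can never match a generated pointer tag, since
 * those always have the top four bits set (0xF0..0xFF).
 */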

void OS_NOINLINE
kasan_impl_late_init(void)
{
}

uintptr_t
kasan_tbi_tag_range(uintptr_t addr, size_t sz, uint8_t tag)
{
	if (sz == 0) {
		return addr;
	}

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	__nosan_memset((void *)shadow_first, tag, shadow_last - shadow_first);
	return (uintptr_t)kasan_tbi_tag_ptr((long)addr, tag);
}
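
/*
 * Illustrative sketch (hypothetical, granule-aligned address): tagging a
 * 48-byte object writes one shadow byte per 16-byte granule and returns
 * the retagged pointer.
 *
 *	uintptr_t p = kasan_tbi_tag_range(0xfffffe0000100000, 48, 0xF3);
 *	// three shadow bytes now hold 0xF3; p carries 0xF3 in its top byte
 */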

/*
 * This is a simplified, slightly inefficient, non-randomized implementation
 * of an odd/even tagging model. Tags 0 and 15 are reserved.
 */
uint8_t kasan_tbi_odd_tags[] = {1, 3, 5, 7, 9, 11, 13};
uint8_t kasan_tbi_even_tags[] = {2, 4, 6, 8, 10, 12, 14};
uint8_t kasan_tbi_odd_index = 0;
uint8_t kasan_tbi_even_index = 0;

static uint8_t
kasan_tbi_odd_tag(void)
{
	uint8_t tag = kasan_tbi_odd_tags[kasan_tbi_odd_index++ %
	    sizeof(kasan_tbi_odd_tags)];

	return tag | 0xF0;
}

static uint8_t
kasan_tbi_even_tag(void)
{
	uint8_t tag = kasan_tbi_even_tags[kasan_tbi_even_index++ %
	    sizeof(kasan_tbi_even_tags)];

	return tag | 0xF0;
}
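
/*
 * Example sequences: successive kasan_tbi_odd_tag() calls return
 * 0xF1, 0xF3, 0xF5, ... while kasan_tbi_even_tag() returns
 * 0xF2, 0xF4, 0xF6, ... The low-nibble parity differs between the two
 * pools, so allocations drawn from alternating pools never share a tag
 * with their immediate neighbor.
 */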

static vm_offset_t
kasan_tbi_do_tag_zone_object(vm_offset_t addr, vm_offset_t elem_size, uint8_t tag, boolean_t zxcpu)
{
	vm_offset_t retaddr = kasan_tbi_tag_range(addr, elem_size, tag);
	/*
	 * If the allocation comes from a per-CPU zone, extend the tag to all
	 * the adjacent per-CPU instances.
	 */
	if (zxcpu) {
		zpercpu_foreach_cpu(index) {
			(void)kasan_tbi_tag_range(addr + ptoa(index), elem_size, tag);
		}
	}

	return retaddr;
}

void
kasan_tbi_copy_tags(vm_offset_t new_addr, vm_offset_t old_addr, vm_size_t size)
{
	assert((new_addr & KASAN_GRANULE_MASK) == 0);
	assert((old_addr & KASAN_GRANULE_MASK) == 0);
	assert((size & KASAN_GRANULE_MASK) == 0);

	uint8_t *new_shadow = SHADOW_FOR_ADDRESS(new_addr);
	uint8_t *old_shadow = SHADOW_FOR_ADDRESS(old_addr);
	uint8_t *old_end = SHADOW_FOR_ADDRESS(old_addr + size);

	__nosan_memcpy(new_shadow, old_shadow, old_end - old_shadow);
}

vm_offset_t
kasan_tbi_tag_zalloc(vm_offset_t addr, vm_size_t size, vm_size_t used, boolean_t zxcpu)
{
	uint8_t tag;

	if ((addr / size) % 2) {
		tag = kasan_tbi_odd_tag();
	} else {
		tag = kasan_tbi_even_tag();
	}

	used = kasan_granule_round(used);
	if (used < size) {
		kasan_tbi_tag_zfree(addr + used, size - used, zxcpu);
	}
	return kasan_tbi_do_tag_zone_object(addr, used, tag, zxcpu);
}
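
/*
 * Sketch of the parity selection (hypothetical zone of 64-byte elements):
 * elements at offsets 0x0, 0x40, 0x80, ... yield consecutive (addr / size)
 * indices, so neighboring elements draw from alternating odd/even pools
 * and can never share a tag. A linear overflow out of one element thus
 * always lands on a mismatching granule.
 *
 *	kasan_tbi_tag_zalloc(base + 0x00, 64, 64, false);
 *	kasan_tbi_tag_zalloc(base + 0x40, 64, 64, false); // opposite parity
 */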

vm_offset_t
kasan_tbi_tag_zalloc_default(vm_offset_t addr, vm_size_t size, boolean_t zxcpu)
{
	return kasan_tbi_do_tag_zone_object(addr, size, KASAN_TBI_DEFAULT_TAG, zxcpu);
}

vm_offset_t
kasan_tbi_tag_zfree(vm_offset_t addr, vm_offset_t elem_size, boolean_t zxcpu)
{
	return kasan_tbi_do_tag_zone_object(addr, elem_size, KASAN_TBI_ZALLOC_FREE_TAG, zxcpu);
}

void
__hwasan_tag_memory(uintptr_t p, unsigned char tag, uintptr_t sz)
{
	if (kasan_tbi_enabled) {
		(void)kasan_tbi_tag_range(p, sz, tag);
	}
}

static uint8_t tag_cycle = 0;

unsigned char
__hwasan_generate_tag(void)
{
	uint8_t tag;

	if (kasan_tbi_enabled) {
		tag = (tag_cycle++ & 0xF) | 0xF0;
	} else {
		tag = 0xFF;
	}
	return tag;
}
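
/*
 * HWASan-instrumented code (stack tagging in particular) obtains fresh
 * tags through this entry point; the counter above simply cycles through
 * 0xF0..0xFF. With KASAN-TBI disabled, handing back the untagged default
 * 0xFF keeps the resulting pointers inert.
 */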

/* Query the shadow table and tag the address accordingly */
vm_offset_t
kasan_tbi_fix_address_tag(vm_offset_t addr)
{
	uint8_t *shadow = SHADOW_FOR_ADDRESS(addr);
	return (uintptr_t)kasan_tbi_tag_ptr((long)addr, *shadow);
}

/* Single out accesses to the reserved free tag */
static violation_t
kasan_tbi_estimate_reason(uint8_t __unused access_tag, uint8_t stored_tag)
{
	if (stored_tag == KASAN_TBI_ZALLOC_FREE_TAG) {
		return REASON_MOD_AFTER_FREE;
	}

	return REASON_MOD_OOB;
}

void OS_NOINLINE
kasan_check_range(const void *a, size_t sz, access_t access)
{
	uintptr_t addr = (uintptr_t)a;

	if (!kasan_tbi_check_tag) {
		return;
	}

	/*
	 * The inlined check code expects to match the topmost 8 bits, while
	 * we only use four. Unconditionally set the other four to one.
	 */
	uint8_t tag = kasan_tbi_get_tag(addr) | 0xF0;

	uint8_t *shadow_first = SHADOW_FOR_ADDRESS(addr);
	uint8_t *shadow_last = SHADOW_FOR_ADDRESS(addr + P2ROUNDUP(sz, 16));

	for (uint8_t *p = shadow_first; p < shadow_last; p++) {
		if (tag == *p) {
			continue;
		}

		/* Tag mismatch, prepare the reporting */
		violation_t reason = kasan_tbi_estimate_reason(tag, *p);
		kasan_violation(addr, sz, access, reason);
	}
}
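
/*
 * Worked example (hypothetical tags): an 8-byte load through a pointer
 * tagged 0xF3 compares 0xF3 against the shadow byte of each granule it
 * touches. If a free has since retagged the memory to
 * KASAN_TBI_ZALLOC_FREE_TAG (0xF0), the mismatch reports
 * REASON_MOD_AFTER_FREE; any other stale tag reports REASON_MOD_OOB.
 */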