xref: /xnu-12377.1.9/san/coverage/kcov.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <string.h>
#include <stdbool.h>
#include <sys/sysctl.h>
#include <kern/cpu_number.h>
#include <kern/cpu_data.h>
#include <libkern/libkern.h>
#include <os/atomic_private.h>
#include <vm/pmap.h>
#include <machine/machine_routines.h>

#include <san/kcov.h>
#include <san/kcov_data.h>

#include <san/kcov_stksz.h>
#include <san/kcov_stksz_data.h>

#include <san/kcov_ksancov.h>
#include <san/kcov_ksancov_data.h>

/* Global count of active kcov clients; a nonzero value enables the sanitizer hook. */
static _Atomic unsigned int kcov_enabled = 0;


/*
 * Sysctl interface to the coverage sanitizer.
 */
SYSCTL_DECL(_kern_kcov);
SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "kcov");


/*
 * Coverage sanitizer bootstrap.
 *
 * The compiler inserts hooks into almost every basic block in the kernel, but it
 * is not safe to call the hook from some contexts. Making that safe directly
 * would require a precise denylist of all unsafe call sites, which carries a
 * high maintenance cost.
 *
 * To avoid this we bootstrap the coverage sanitizer in phases:
 *
 *   1. The kernel starts with the coverage sanitizer globally disabled. At this
 *      point the hook can safely access only global variables.
 *   2. The boot CPU allocates and configures its per-cpu data. From this point
 *      the hook can use per-cpu data through the current_* accessors, but only
 *      on the boot CPU.
 *
 *   ... From this point we can start recording on the boot CPU.
 *
 *   3. Additional CPUs are brought up by kexts. We rely on the fact that the
 *      default value of a per-cpu variable is 0. The assumption is that some
 *      other (already configured) CPU runs the bootstrap of the secondary CPU,
 *      which is safe. Once the secondary is configured, the bootstrap
 *      originator enables its coverage sanitizer by writing the secondary's
 *      per-cpu data.
 *
 *      To make this step safe, a denylist containing the CPU bootstrap code
 *      must be maintained so the hook does not fire from an unsupported
 *      context.
 *
 *   ... From this point all CPUs can execute the hook correctly.
 *
 * This allows stack size monitoring during early boot. For all other purposes
 * we simply boot with the global count set to 0 and wait for a client to
 * actually enable the sanitizer.
 */

/*
 * Enabling steps 1 & 2. Must be called *after* per-cpu data are set up.
 */
__startup_func
static void
kcov_init(void)
{
	/* The boot CPU is fully set up at this point, so just enable coverage tracking. */
	ksancov_init();
	current_kcov_data()->kcd_enabled = 1;
}
STARTUP(EARLY_BOOT, STARTUP_RANK_LAST, kcov_init);

/*
 * Step 3: secondary CPUs. Called on the bootstrap originator after the secondary is ready.
 */
void
kcov_start_cpu(int cpuid)
{
	/* No need to use atomics, as we don't need to be that precise here. */
	cpu_kcov_data(cpuid)->kcd_enabled = 1;
}

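/*
 * Reference-counted global enable: each active client increments the count when
 * it starts recording and decrements it when done. trace_prologue() bails out
 * while the count is zero.
 */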
void
kcov_enable(void)
{
	os_atomic_add(&kcov_enabled, 1, relaxed);
}

void
kcov_disable(void)
{
	os_atomic_sub(&kcov_enabled, 1, relaxed);
}


/*
 * Disable coverage sanitizer recording for the given thread.
 *
 * ktd_disabled is a per-thread suppression count: the hook body runs only while
 * it is zero, which prevents recursion when the hook calls back into
 * instrumented kernel code.
 */
static void
kcov_disable_thread(kcov_thread_data_t *data)
{
	data->ktd_disabled++;
}


/*
 * Re-enable coverage sanitizer recording for the given thread.
 */
static void
kcov_enable_thread(kcov_thread_data_t *data)
{
	data->ktd_disabled--;
}


/*
 * Called when the system enters the no-return panic code path. There is no
 * point in tracking stack usage there; it would only delay (and possibly
 * break) the coredump code.
 */
void
kcov_panic_disable(void)
{
	printf("KCOV: Disabling coverage tracking. System panicking.\n");
	/* Force-disable the sanitizer hook. */
	os_atomic_store(&kcov_enabled, 0, relaxed);
}


/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_init_thread(kcov_thread_data_t *data)
{
	data->ktd_disabled = 0;

	kcov_ksancov_init_thread(&data->ktd_device);
	kcov_stksz_init_thread(&data->ktd_stksz);
}

/* Shared prologue between the trace functions. */
static kcov_thread_data_t *
trace_prologue(void)
{
	/* Check the global count for the common case where no recording is enabled. */
	if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) {
		return NULL;
	}

	/*
	 * rdar://145659776
	 * If PAN is disabled we cannot safely re-enable preemption after disabling it.
	 * The generic way to handle this would be to check for PAN here and bail out:
	 * if (__improbable(__builtin_arm_rsr("pan") == 0))
	 *
	 * The issue with that solution is the performance cost of reading the MSR for each
	 * trace point, so PAN-disabled functions are included in the blacklist instead
	 * (see kcov-blacklist-arm64).
	 */

	/* Per-cpu area access. Must happen with interrupts/preemption disabled. */
	disable_preemption();

	if (!current_kcov_data()->kcd_enabled) {
		enable_preemption();
		return NULL;
	}

	/* No support for PPL. */
	if (pmap_in_ppl()) {
		enable_preemption();
		return NULL;
	}
	/* Interrupt context is not supported. */
	if (ml_at_interrupt_context()) {
		enable_preemption();
		return NULL;
	}

	thread_t th = current_thread();
	if (__improbable(th == THREAD_NULL)) {
		enable_preemption();
		return NULL;
	}

	/* This thread does not want to be traced. */
	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (__improbable(data->ktd_disabled != 0)) {
		enable_preemption();
		return NULL;
	}

	/* Re-enable preemption as we are no longer accessing per-cpu data. */
	enable_preemption();

	return data;
}

/*
 * This is the core of the coverage recording.
 *
 * The compiler emits a call to this hook at every place eligible for
 * instrumentation, so every modification here is risky: added code may run in
 * unexpected contexts (for example during per-cpu data access).
 *
 * Do not call anything unnecessary before kcov_disable_thread(), as that will
 * cause recursion. Update the denylist after any such change.
 *
 * Any complex code here affects overall performance. This function is called
 * for every edge in the kernel, which means multiple times during a single
 * function's execution.
 */
static void
trace_pc_guard(uint32_t __unused *guardp, void __unused *caller, uintptr_t __unused sp)
{
	kcov_ksancov_trace_guard(guardp, caller);

	kcov_thread_data_t *data = trace_prologue();
	if (data == NULL) {
		return;
	}

	/* It is now safe to call back into the kernel from this thread without recursing into the hook itself. */
	kcov_disable_thread(data);

	kcov_stksz_update_stack_size(current_thread(), data, caller, sp);
	kcov_ksancov_trace_pc(data, guardp, caller, sp);

	kcov_enable_thread(data);
}

/*
 * Coverage Sanitizer ABI implementation.
 *
 * The entry points below are called from instrumentation emitted by Clang's
 * SanitizerCoverage pass (e.g. -fsanitize-coverage=trace-pc-guard,trace-cmp).
 */


void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)
{
	/* No indirect call recording support at this moment. */
	return;
}


__attribute__((nodebug))
void
__sanitizer_cov_trace_pc(void)
{
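	/* Taking the address of a local variable approximates the current stack pointer. */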
	uintptr_t sp = (uintptr_t)&sp;
	trace_pc_guard(NULL, __builtin_return_address(0), sp);
}


__attribute__((nodebug))
void
__sanitizer_cov_trace_pc_guard(uint32_t __unused *guardp)
{
	uintptr_t sp = (uintptr_t)&sp;
	trace_pc_guard(guardp, __builtin_return_address(0), sp);
}


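/*
 * Module constructors call this with the [start, stop) range of the coverage
 * guards that SanitizerCoverage emitted for that binary.
 */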
void
__sanitizer_cov_trace_pc_guard_init(uint32_t __unused *start, uint32_t __unused *stop)
{
	kcov_ksancov_trace_pc_guard_init(start, stop);
}


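/*
 * Called with the compiler-emitted table of instrumented PCs (SanitizerCoverage's
 * pc-table option).
 */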
void
__sanitizer_cov_pcs_init(uintptr_t __unused *start, uintptr_t __unused *stop)
{
	kcov_ksancov_pcs_init(start, stop);
}

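/* Shared body for the __sanitizer_cov_trace_cmp* hooks below. */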
static void
trace_cmp(uint32_t __unused type, uint64_t __unused arg1, uint64_t __unused arg2, void __unused *caller)
{
	kcov_thread_data_t *data = trace_prologue();
	if (data == NULL) {
		return;
	}

	/* It is now safe to call back into the kernel from this thread without recursing into the hook itself. */
	kcov_disable_thread(data);

	kcov_ksancov_trace_cmp(data, type, arg1, arg2, caller);

	kcov_enable_thread(data);
}

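/*
 * Comparison hooks, emitted for each comparison when building with
 * SanitizerCoverage's trace-cmp option. The operand width (and, for the
 * const_cmp variants, the fact that one operand is a compile-time constant)
 * is encoded in the type word passed to trace_cmp().
 */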
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE1, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE2, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE4, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE8, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE1 | KCOV_CMP_CONST, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE2 | KCOV_CMP_CONST, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE4 | KCOV_CMP_CONST, arg1, arg2, __builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
	trace_cmp(KCOV_CMP_SIZE8 | KCOV_CMP_CONST, arg1, arg2, __builtin_return_address(0));
}

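/*
 * SanitizerCoverage lowers an instrumented switch into a call carrying a
 * descriptor array: cases[0] holds the number of case constants, cases[1] the
 * operand width in bits, and cases[2..] the case constants themselves. Each
 * case is recorded as a const comparison against val.
 */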
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
	void *ret = __builtin_return_address(0);

	uint32_t type;
	switch (cases[1]) {
	case 8:
		type = KCOV_CMP_SIZE1 | KCOV_CMP_CONST;
		break;
	case 16:
		type = KCOV_CMP_SIZE2 | KCOV_CMP_CONST;
		break;
	case 32:
		type = KCOV_CMP_SIZE4 | KCOV_CMP_CONST;
		break;
	case 64:
		type = KCOV_CMP_SIZE8 | KCOV_CMP_CONST;
		break;
	default:
		return;
	}

	uint64_t i;
	uint64_t count = cases[0];

	for (i = 0; i < count; i++) {
		trace_cmp(type, cases[i + 2], val, ret);
	}
}

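/*
 * Explicit hook for buffer/string comparisons. Unlike the __sanitizer_cov_*
 * entry points above, this is not emitted by the compiler; it is intended to
 * be called directly by kernel comparison helpers (memcmp/strcmp-style) that
 * want their operands recorded.
 */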
void
kcov_trace_cmp_func(void *caller_pc, uint32_t type, const void *s1, size_t s1len, const void *s2, size_t s2len, bool always_log)
{
	kcov_thread_data_t *data = trace_prologue();
	if (data == NULL) {
		return;
	}

	/* It is now safe to call back into the kernel from this thread without recursing into the hook itself. */
	kcov_disable_thread(data);

	kcov_ksancov_trace_cmp_func(data, type, s1, s1len, s2, s2len, caller_pc, always_log);

	kcov_enable_thread(data);
}