1 /*
2 * Copyright (c) 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <string.h>
29 #include <stdbool.h>
30 #include <sys/sysctl.h>
31 #include <kern/cpu_number.h>
32 #include <kern/cpu_data.h>
33 #include <libkern/libkern.h>
34 #include <os/atomic_private.h>
35 #include <vm/pmap.h>
36 #include <machine/machine_routines.h>
37
38 #include <san/kcov.h>
39 #include <san/kcov_data.h>
40
41 #include <san/kcov_stksz.h>
42 #include <san/kcov_stksz_data.h>
43
44 #include <san/kcov_ksancov.h>
45 #include <san/kcov_ksancov_data.h>
46
/*
 * Global flag that enables the sanitizer hook. Acts as a client refcount:
 * kcov_enable()/kcov_disable() increment/decrement it, and the hook records
 * only while it is nonzero. kcov_panic_disable() forces it back to 0.
 */
static _Atomic unsigned int kcov_enabled = 0;


/*
 * Sysctl interface to coverage sanitizer.
 * Registers the kern.kcov sysctl node; leaf OIDs hang off this node.
 */
SYSCTL_DECL(_kern_kcov);
SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "kcov");
56
57
58 /*
59 * Coverage sanitizer bootstrap.
60 *
 * A compiler will add hooks in almost any basic block in the kernel. However it is
 * not safe to call the hook from some of the contexts. Making this safe would require
 * a precise blacklist of all unsafe sources, which results in high maintenance costs.
 *
 * To avoid this we bootstrap the coverage sanitizer in phases:
66 *
 * 1. Kernel starts with a globally disabled coverage sanitizer. At this point the hook
 * can safely access only global variables.
 * 2. The boot cpu has allocated/configured per-cpu data. At this point the hook can
 * use per-cpu data by using current_* but only on the boot cpu.
71 *
72 * ... From this point we can start recording on boot cpu
73 *
 * 3. Additional CPUs are added by kext. We rely on the fact that the default value of a
 * per-cpu variable is 0. The assumption here is that some other (already configured)
 * cpu is running the bootstrap of the secondary CPU, which is safe. Once the secondary gets
 * configured, the bootstrap originator enables its coverage sanitizer by writing the
 * secondary's per-cpu data.
79 *
80 * To make this step safe, it is required to maintain blacklist that contains CPU
81 * bootstrap code to avoid firing hook from unsupported context.
82 *
83 * ... From this point all CPUs can execute the hook correctly.
84 *
85 * This allows stack size monitoring during early boot. For all other cases we simply
86 * boot with global set to 0 waiting for a client to actually enable sanitizer.
87 */
88
/*
 * 1. & 2. enabling step. Must be called *after* per-cpu data are set up.
 *
 * Runs once on the boot CPU during EARLY_BOOT (see the STARTUP registration
 * below) and flips the boot CPU's per-cpu kcd_enabled flag so the hook may
 * start using per-cpu data on this CPU.
 */
__startup_func
static void
kcov_init(void)
{
	/* Master CPU is fully setup at this point so just enable coverage tracking. */
	printf("KCOV: Enabling coverage tracking on cpu %d\n", cpu_number());
	/* Bring up the ksancov device state before enabling the per-cpu flag. */
	ksancov_init();
	current_kcov_data()->kcd_enabled = 1;
}
STARTUP(EARLY_BOOT, STARTUP_RANK_LAST, kcov_init);
102
/*
 * 3. secondary CPU. Called on the bootstrap originator after the secondary is ready.
 *
 * @param cpuid  cpu number of the freshly configured secondary CPU whose
 *               per-cpu recording flag should be turned on.
 */
void
kcov_start_cpu(int cpuid)
{
	printf("KCOV: Enabling coverage tracking on cpu %d\n", cpuid);
	/* No need to use atomics as we don't need to be so precise here. */
	cpu_kcov_data(cpuid)->kcd_enabled = 1;
}
113
114 void
kcov_enable(void)115 kcov_enable(void)
116 {
117 os_atomic_add(&kcov_enabled, 1, relaxed);
118 }
119
120 void
kcov_disable(void)121 kcov_disable(void)
122 {
123 os_atomic_sub(&kcov_enabled, 1, relaxed);
124 }
125
126
127 /*
128 * Disable coverage sanitizer recording for given thread.
129 */
130 static void
kcov_disable_thread(kcov_thread_data_t * data)131 kcov_disable_thread(kcov_thread_data_t *data)
132 {
133 data->ktd_disabled++;
134 }
135
136
137 /*
138 * Enable coverage sanitizer recording for given thread.
139 */
140 static void
kcov_enable_thread(kcov_thread_data_t * data)141 kcov_enable_thread(kcov_thread_data_t *data)
142 {
143 data->ktd_disabled--;
144 }
145
146
/*
 * Called when system enters panic code path with no return. There is no point in tracking
 * stack usage and delay (and possibly break) the coredump code.
 *
 * Unlike kcov_disable(), which decrements the client refcount, this clears it
 * unconditionally — no client can re-enable recording after a panic.
 */
void
kcov_panic_disable(void)
{
	printf("KCOV: Disabling coverage tracking. System panicking.\n");
	/* Force disable the sanitizer hook. */
	os_atomic_store(&kcov_enabled, 0, relaxed);
}
158
159
/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_init_thread(kcov_thread_data_t *data)
{
	/* New threads start with recording allowed (see kcov_disable_thread()). */
	data->ktd_disabled = 0;

	/* Initialize per-subsystem thread state: ksancov device and stack-size tracking. */
	kcov_ksancov_init_thread(&data->ktd_device);
	kcov_stksz_init_thread(&data->ktd_stksz);
}
169
170 /*
171 * This is the core of the coverage recording.
172 *
173 * A compiler inlines this function into every place eligible for instrumentation.
174 * Every modification is very risky as added code may be called from unexpected
175 * contexts (for example per-cpu data access).
176 *
177 * Do not call anything unnecessary before ksancov_disable() as that will cause
178 * recursion. Update blacklist after any such change.
179 *
180 * Every complex code here may have impact on the overall performance. This function
181 * is called for every edge in the kernel and that means multiple times through a
182 * single function execution.
183 */
184 static void
trace_pc_guard(uint32_t __unused * guardp,void __unused * caller,uintptr_t __unused sp)185 trace_pc_guard(uint32_t __unused *guardp, void __unused *caller, uintptr_t __unused sp)
186 {
187 kcov_ksancov_trace_guard(guardp, caller);
188
189 /* Check the global flag for the case no recording is enabled. */
190 if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) {
191 return;
192 }
193
194 /* Per-cpu area access. Must happen with disabled interrupts/preemtion. */
195 disable_preemption();
196
197 if (!current_kcov_data()->kcd_enabled) {
198 enable_preemption();
199 return;
200 }
201
202 /* No support for PPL. */
203 if (pmap_in_ppl()) {
204 enable_preemption();
205 return;
206 }
207 /* Interrupt context not supported. */
208 if (ml_at_interrupt_context()) {
209 enable_preemption();
210 return;
211 }
212
213 thread_t th = current_thread();
214 if (__improbable(th == THREAD_NULL)) {
215 enable_preemption();
216 return;
217 }
218
219 /* This thread does not want to record stack usage. */
220 kcov_thread_data_t *data = kcov_get_thread_data(th);
221 if (__improbable(data->ktd_disabled) != 0) {
222 enable_preemption();
223 return;
224 }
225
226 /* Enable preemption as we are no longer accessing per-cpu data. */
227 enable_preemption();
228
229 /* It is now safe to call back to kernel from this thread without recursing in the hook itself. */
230 kcov_disable_thread(data);
231
232 kcov_stksz_update_stack_size(th, data, caller, sp);
233 kcov_ksancov_trace_pc(data, guardp, caller, sp);
234
235 kcov_enable_thread(data);
236 }
237
238 /*
239 * Coverage Sanitizer ABI implementation.
240 */
241
242
243 void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)244 __sanitizer_cov_trace_pc_indirect(void * __unused callee)
245 {
246 /* No indirect call recording support at this moment. */
247 return;
248 }
249
250
/*
 * Guard-less SanitizerCoverage entry point; forwards to trace_pc_guard with no
 * guard slot. nodebug keeps the compiler from instrumenting/annotating this
 * thin shim itself.
 */
__attribute__((nodebug))
void
__sanitizer_cov_trace_pc(void)
{
	/* Approximate the current stack pointer with the address of a local. */
	uintptr_t sp = (uintptr_t)&sp;
	trace_pc_guard(NULL, __builtin_return_address(0), sp);
}
258
259
/*
 * Guarded SanitizerCoverage entry point; forwards the per-edge guard slot to
 * trace_pc_guard. nodebug keeps the compiler from instrumenting/annotating
 * this thin shim itself.
 */
__attribute__((nodebug))
void
__sanitizer_cov_trace_pc_guard(uint32_t __unused *guardp)
{
	/* Approximate the current stack pointer with the address of a local. */
	uintptr_t sp = (uintptr_t)&sp;
	trace_pc_guard(guardp, __builtin_return_address(0), sp);
}
267
268
/*
 * SanitizerCoverage guard-table constructor hook; called once per module with
 * the [start, stop) range of its guard slots. Forwarded to ksancov.
 */
void
__sanitizer_cov_trace_pc_guard_init(uint32_t __unused *start, uint32_t __unused *stop)
{
	kcov_ksancov_trace_pc_guard_init(start, stop);
}
274
275
/*
 * SanitizerCoverage PC-table constructor hook; called once per module with the
 * [start, stop) range of its PC table. Forwarded to ksancov.
 */
void
__sanitizer_cov_pcs_init(uintptr_t __unused *start, uintptr_t __unused *stop)
{
	kcov_ksancov_pcs_init(start, stop);
}
281