1 /*
2 * Copyright (c) 2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <string.h>
29 #include <stdbool.h>
30 #include <sys/sysctl.h>
31 #include <kern/cpu_number.h>
32 #include <kern/cpu_data.h>
33 #include <libkern/libkern.h>
34 #include <os/atomic_private.h>
35 #include <vm/pmap.h>
36 #include <machine/machine_routines.h>
37
38 #include <san/kcov.h>
39 #include <san/kcov_data.h>
40
41 #include <san/kcov_stksz.h>
42 #include <san/kcov_stksz_data.h>
43
44 #include <san/kcov_ksancov.h>
45 #include <san/kcov_ksancov_data.h>
46
/*
 * Global reference count of active sanitizer clients; the instrumentation hook
 * records only while it is nonzero. Incremented/decremented by kcov_enable()
 * and kcov_disable(), and force-cleared by kcov_panic_disable().
 */
static _Atomic unsigned int kcov_enabled = 0;
49
50
/*
 * Sysctl interface to coverage sanitizer.
 * Creates the kern.kcov node; sub-interfaces hang their OIDs off this node.
 */
SYSCTL_DECL(_kern_kcov);
SYSCTL_NODE(_kern, OID_AUTO, kcov, CTLFLAG_RW | CTLFLAG_LOCKED, 0, "kcov");
56
57
/*
 * Coverage sanitizer bootstrap.
 *
 * A compiler will add hooks in almost any basic block in the kernel. However it is
 * not safe to call the hook from some contexts. Making this safe would require a
 * precise blacklist of all unsafe sources, which results in high maintenance costs.
 *
 * To avoid this we bootstrap the coverage sanitizer in phases:
 *
 * 1. The kernel starts with a globally disabled coverage sanitizer. At this point the hook
 *    can safely access only global variables.
 * 2. The boot cpu has allocated/configured per-cpu data. At this point the hook can
 *    use per-cpu data through current_* but only on the boot cpu.
 *
 * ... From this point we can start recording on the boot cpu.
 *
 * 3. Additional CPUs are added by kext. We rely on the fact that the default value of a
 *    per-cpu variable is 0. The assumption here is that some other (already configured)
 *    cpu is running the bootstrap of the secondary CPU, which is safe. Once the secondary
 *    gets configured, the bootstrap originator enables its coverage sanitizer by writing
 *    the secondary's per-cpu data.
 *
 *    To make this step safe, it is required to maintain a blacklist that contains CPU
 *    bootstrap code, to avoid firing the hook from an unsupported context.
 *
 * ... From this point all CPUs can execute the hook correctly.
 *
 * This allows stack size monitoring during early boot. For all other cases we simply
 * boot with the global set to 0, waiting for a client to actually enable the sanitizer.
 */
88
/*
 * 1. & 2. enabling step. Must be called *after* per-cpu data are set up.
 *
 * Registered via STARTUP() to run at the end of EARLY_BOOT, i.e. once the boot
 * cpu's per-cpu area exists but before secondary CPUs come up.
 */
__startup_func
static void
kcov_init(void)
{
	/* Master CPU is fully setup at this point so just enable coverage tracking. */
	printf("KCOV: Enabling coverage tracking on cpu %d\n", cpu_number());
	/* Bring up the ksancov device state before the hook can start firing. */
	ksancov_init();
	/* Flip the boot cpu's per-cpu gate last; the hook checks this field. */
	current_kcov_data()->kcd_enabled = 1;
}
STARTUP(EARLY_BOOT, STARTUP_RANK_LAST, kcov_init);
102
/*
 * 3. secondary CPU. Called on bootstrap originator after secondary is ready.
 *
 * @param cpuid	logical id of the freshly configured secondary cpu whose
 *		per-cpu coverage gate should be opened.
 */
void
kcov_start_cpu(int cpuid)
{
	/* No need to use atomics as we don't need to be so precise here. */
	cpu_kcov_data(cpuid)->kcd_enabled = 1;
}
112
113 void
kcov_enable(void)114 kcov_enable(void)
115 {
116 os_atomic_add(&kcov_enabled, 1, relaxed);
117 }
118
119 void
kcov_disable(void)120 kcov_disable(void)
121 {
122 os_atomic_sub(&kcov_enabled, 1, relaxed);
123 }
124
125
126 /*
127 * Disable coverage sanitizer recording for given thread.
128 */
129 static void
kcov_disable_thread(kcov_thread_data_t * data)130 kcov_disable_thread(kcov_thread_data_t *data)
131 {
132 data->ktd_disabled++;
133 }
134
135
136 /*
137 * Enable coverage sanitizer recording for given thread.
138 */
139 static void
kcov_enable_thread(kcov_thread_data_t * data)140 kcov_enable_thread(kcov_thread_data_t *data)
141 {
142 data->ktd_disabled--;
143 }
144
145
/*
 * Called when system enters panic code path with no return. There is no point in tracking
 * stack usage and delay (and possibly break) the coredump code.
 *
 * The count is stored (not decremented) so the hook stays off regardless of how
 * many clients were active.
 */
void
kcov_panic_disable(void)
{
	printf("KCOV: Disabling coverage tracking. System panicking.\n");
	/* Force disable the sanitizer hook. */
	os_atomic_store(&kcov_enabled, 0, relaxed);
}
157
158
159 /* Initialize per-thread sanitizer data for each new kernel thread. */
160 void
kcov_init_thread(kcov_thread_data_t * data)161 kcov_init_thread(kcov_thread_data_t *data)
162 {
163 data->ktd_disabled = 0;
164
165 kcov_ksancov_init_thread(&data->ktd_device);
166 kcov_stksz_init_thread(&data->ktd_stksz);
167 }
168
/*
 * This is the core of the coverage recording.
 *
 * A compiler inlines this function into every place eligible for instrumentation.
 * Every modification is very risky as added code may be called from unexpected
 * contexts (for example per-cpu data access).
 *
 * Do not call anything unnecessary before kcov_disable_thread() as that will cause
 * recursion. Update the blacklist after any such change.
 *
 * Every complex piece of code here may have an impact on overall performance. This
 * function is called for every edge in the kernel, which means multiple times during
 * a single function execution.
 */
183 static void
trace_pc_guard(uint32_t __unused * guardp,void __unused * caller,uintptr_t __unused sp)184 trace_pc_guard(uint32_t __unused *guardp, void __unused *caller, uintptr_t __unused sp)
185 {
186 kcov_ksancov_trace_guard(guardp, caller);
187
188 /* Check the global flag for the case no recording is enabled. */
189 if (__probable(os_atomic_load(&kcov_enabled, relaxed) == 0)) {
190 return;
191 }
192
193 /* Per-cpu area access. Must happen with disabled interrupts/preemtion. */
194 disable_preemption();
195
196 if (!current_kcov_data()->kcd_enabled) {
197 enable_preemption();
198 return;
199 }
200
201 /* No support for PPL. */
202 if (pmap_in_ppl()) {
203 enable_preemption();
204 return;
205 }
206 /* Interrupt context not supported. */
207 if (ml_at_interrupt_context()) {
208 enable_preemption();
209 return;
210 }
211
212 thread_t th = current_thread();
213 if (__improbable(th == THREAD_NULL)) {
214 enable_preemption();
215 return;
216 }
217
218 /* This thread does not want to record stack usage. */
219 kcov_thread_data_t *data = kcov_get_thread_data(th);
220 if (__improbable(data->ktd_disabled) != 0) {
221 enable_preemption();
222 return;
223 }
224
225 /* Enable preemption as we are no longer accessing per-cpu data. */
226 enable_preemption();
227
228 /* It is now safe to call back to kernel from this thread without recursing in the hook itself. */
229 kcov_disable_thread(data);
230
231 kcov_stksz_update_stack_size(th, data, caller, sp);
232 kcov_ksancov_trace_pc(data, guardp, caller, sp);
233
234 kcov_enable_thread(data);
235 }
236
237 /*
238 * Coverage Sanitizer ABI implementation.
239 */
240
241
242 void
__sanitizer_cov_trace_pc_indirect(void * __unused callee)243 __sanitizer_cov_trace_pc_indirect(void * __unused callee)
244 {
245 /* No indirect call recording support at this moment. */
246 return;
247 }
248
249
250 __attribute__((nodebug))
251 void
__sanitizer_cov_trace_pc(void)252 __sanitizer_cov_trace_pc(void)
253 {
254 uintptr_t sp = (uintptr_t)&sp;
255 trace_pc_guard(NULL, __builtin_return_address(0), sp);
256 }
257
258
259 __attribute__((nodebug))
260 void
__sanitizer_cov_trace_pc_guard(uint32_t __unused * guardp)261 __sanitizer_cov_trace_pc_guard(uint32_t __unused *guardp)
262 {
263 uintptr_t sp = (uintptr_t)&sp;
264 trace_pc_guard(guardp, __builtin_return_address(0), sp);
265 }
266
267
/*
 * Called by compiler-emitted module constructors with the [start, stop) range
 * of this module's guard variables; forwarded to the ksancov sub-interface.
 */
void
__sanitizer_cov_trace_pc_guard_init(uint32_t __unused *start, uint32_t __unused *stop)
{
	kcov_ksancov_trace_pc_guard_init(start, stop);
}
273
274
/*
 * Called by compiler-emitted module constructors with the [start, stop) range
 * of the PC table; forwarded to the ksancov sub-interface.
 */
void
__sanitizer_cov_pcs_init(uintptr_t __unused *start, uintptr_t __unused *stop)
{
	kcov_ksancov_pcs_init(start, stop);
}
280