/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <vm/vm_kern_xnu.h>
#include <vm/vm_protos.h>
#include <vm/pmap.h>
#include <vm/vm_memory_entry_xnu.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h> /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h> /* must come after sys/stat.h */
#include <sys/sysctl.h>

#include <console/serial_protos.h>
#include <pexpert/pexpert.h> /* PE_parse_boot_argn */

#include <libkern/libkern.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/kernel_mach_header.h>
#include <os/atomic_private.h>
#include <os/log.h>
#include <os/overflow.h>

#include <san/kcov_data.h>
#include <san/kcov_ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

#define USE_PC_TABLE    0
#define KSANCOV_MAX_DEV 128
#define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);

/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;

/* Global flag that enables the sanitizer hook. */
static _Atomic unsigned int ksancov_enabled = 0;

/* Toggled after ksancov_init() */
static boolean_t ksancov_initialized = false;


/* Support for gated callbacks (referred to as "on demand", "od") */
static void kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop);

static LCK_MTX_DECLARE(ksancov_od_lck, &ksancov_lck_grp);

/* Bookkeeping structures for gated sancov instrumentation */
struct ksancov_od_module_entry {
	char     bundle[KMOD_MAX_NAME]; /* module bundle */
	uint32_t idx;                   /* index into entries/handles arrays */
};

struct ksancov_od_module_handle {
	uint32_t *start;      /* guards boundaries */
	uint32_t *stop;
	uint64_t *gate;       /* pointer to __DATA,__sancov_gate */
	uint64_t  text_start; /* .text section start, stripped and unslided address */
	uint64_t  text_end;   /* .text section end, stripped and unslided address */
};

static struct ksancov_od_module_entry *ksancov_od_module_entries = NULL;
static struct ksancov_od_module_handle *ksancov_od_module_handles = NULL;

/* number of entries/handles allocated */
static unsigned int ksancov_od_allocated_count = 0;

/* number of registered modules */
static unsigned int ksancov_od_modules_count = 0;
/* number of modules whose callbacks are currently enabled */
static unsigned int ksancov_od_enabled_count = 0;

/* Valid values for ksancov.on_demand= boot-arg */
#define KSANCOV_OD_SUPPORT 0x0010 // Enable runtime support
#define KSANCOV_OD_LOGGING 0x0020 // Enable logging (via os_log)

__options_decl(ksancov_od_config_t, uint32_t, {
	KSANCOV_OD_NONE           = 0,
	KSANCOV_OD_ENABLE_SUPPORT = 0x0010,
	KSANCOV_OD_ENABLE_LOGGING = 0x0020,
});
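
/*
 * Example (a hypothetical boot-args line, using the flag values above):
 * booting with ksancov.on_demand=0x30 selects KSANCOV_OD_ENABLE_SUPPORT |
 * KSANCOV_OD_ENABLE_LOGGING, i.e. on-demand support plus os_log logging,
 * while ksancov.on_demand=0 disables the on-demand machinery entirely.
 */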

/* configurable at boot; enabled by default */
static ksancov_od_config_t ksancov_od_config = KSANCOV_OD_ENABLE_SUPPORT;

static unsigned ksancov_od_support_enabled = 1;
static unsigned ksancov_od_logging_enabled = 0;

SYSCTL_DECL(_kern_kcov);
SYSCTL_ULONG(_kern_kcov, OID_AUTO, nedges, CTLFLAG_RD, &nedges, "");

SYSCTL_NODE(_kern_kcov, OID_AUTO, od, CTLFLAG_RD, 0, "od");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, config, CTLFLAG_RD, &ksancov_od_config, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, allocated_entries, CTLFLAG_RD, &ksancov_od_allocated_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, modules_count, CTLFLAG_RD, &ksancov_od_modules_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, enabled_count, CTLFLAG_RD, &ksancov_od_enabled_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, support_enabled, CTLFLAG_RD, &ksancov_od_support_enabled, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, logging_enabled, CTLFLAG_RW, &ksancov_od_logging_enabled, 0, "");

#define ksancov_od_log(...) do {                                \
	if (ksancov_od_logging_enabled) {                       \
	        os_log_debug(OS_LOG_DEFAULT, __VA_ARGS__);      \
	}                                                       \
} while (0)

__startup_func
void
ksancov_init(void)
{
	unsigned arg;

	/* handle ksancov boot-args */
	if (PE_parse_boot_argn("ksancov.on_demand", &arg, sizeof(arg))) {
		ksancov_od_config = (ksancov_od_config_t)arg;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_SUPPORT) {
		/* enable the runtime support for on-demand instrumentation */
		ksancov_od_support_enabled = 1;
		ksancov_od_allocated_count = 64;
		ksancov_od_module_entries = kalloc_type_tag(struct ksancov_od_module_entry,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
		ksancov_od_module_handles = kalloc_type_tag(struct ksancov_od_module_handle,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	} else {
		ksancov_od_support_enabled = 0;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_LOGGING) {
		ksancov_od_logging_enabled = 1;
	} else {
		ksancov_od_logging_enabled = 0;
	}

	ksancov_initialized = true;
}

/*
 * Coverage sanitizer per-thread routines.
 */

/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	*dev = NULL;
}


#define GUARD_SEEN     (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff
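
/*
 * Layout of a coverage guard word, as used by kcov_ksancov_trace_pc_guard_init()
 * and kcov_ksancov_trace_guard() below: the bits selected by GUARD_IDX_MASK
 * hold the 1-based edge number assigned at module load, and GUARD_SEEN is set
 * once the edge's unslid PC has been recorded in the shared edgemap, so that
 * lookup runs at most once per edge.
 */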

static void
trace_pc_guard_pcs(struct ksancov_dev *dev, uintptr_t pc)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
	entries[idx] = pc;
}

#if CONFIG_STKSZ
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uintptr_t pc, uint32_t stksize)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	entries[idx] = (ksancov_trace_stksize_ent_t){
		.pc = pc,
		.stksize = stksize
	};
}
#endif

static void
trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
{
	size_t idx = *guardp & GUARD_IDX_MASK;
	ksancov_counters_t *counters = dev->counters;

	/* saturating 8-bit add */
	if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
		counters->kc_hits[idx]++;
	}
}

void
kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
{
	/*
	 * Return as early as possible if we haven't had a chance to
	 * create the edge map yet.
	 *
	 * Note: this will also protect us from performing unnecessary
	 * operations (especially during early boot) which may result
	 * in increased maintenance burden for the instrumentation (see
	 * the comment about VM_KERNEL_UNSLIDE below).
	 */
	if (__probable(ksancov_edgemap == NULL)) {
		return;
	}

	if (guardp == NULL) {
		return;
	}

	uint32_t gd = *guardp;
	if (__improbable(gd && !(gd & GUARD_SEEN))) {
		size_t idx = gd & GUARD_IDX_MASK;
		if (idx < ksancov_edgemap->ke_nedges) {
			/*
			 * Since this code was originally introduced, VM_KERNEL_UNSLIDE
			 * evolved significantly, and it now expands to a series of
			 * function calls that check whether the address is slid, mask
			 * off tags and ultimately unslide the pointer.
			 *
			 * Therefore we need to make sure that we do not instrument any
			 * function in the closure of VM_KERNEL_UNSLIDE: this would cause
			 * a loop where the instrumentation callbacks end up calling into
			 * instrumented code.
			 */
			uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);

			ksancov_edgemap->ke_addrs[idx] = pc;
			*guardp |= GUARD_SEEN;
		}
	}
}

void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	uintptr_t pc;
	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * The coverage sanitizer is disabled for the code called below, so it
	 * can call back into the kernel without risking recursive
	 * instrumentation callbacks that would take the machine down.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as a no-op. This is not
		 * destructive to the kernel; the coverage sanitizer simply
		 * records nothing in that case.
		 */
		;
	}
}

void
kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	const size_t orig_nedges = nedges;

	/* assign a unique number to each guard */
	for (uint32_t *cur = start; cur != stop; cur++) {
		/* zero means that the guard has not been assigned */
		if (*cur == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*cur = (uint32_t)++nedges;
			}
		}
	}

	/* only invoke kcov_ksancov_bookmark_on_demand_module if we assigned new guards */
	if (nedges > orig_nedges) {
		kcov_ksancov_bookmark_on_demand_module(start, stop);
	}
}
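
/*
 * Background for the function above: with clang's
 * -fsanitize-coverage=trace-pc-guard, each instrumented module carries a
 * constructor that reports the bounds of its guard array through
 * __sanitizer_cov_trace_pc_guard_init(), which the kcov layer forwards here.
 * The hook can run more than once over the same range, which is why only
 * zero-valued (unassigned) guards are numbered.
 */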

void
kcov_ksancov_trace_cmp(kcov_thread_data_t *data, uint32_t type, uint64_t arg1, uint64_t arg2, void *caller)
{
	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* Check that comparison tracing is enabled. */
	if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) {
		return;
	}
	if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Treat all unsupported tracing modes as a no-op. This is not
	 * destructive to the kernel; the coverage sanitizer simply records
	 * nothing in that case.
	 */
	if (dev->cmps_mode != KS_CMPS_MODE_TRACE && dev->cmps_mode != KS_CMPS_MODE_TRACE_FUNC) {
		return;
	}

	if (__improbable(dev->cmps_sz < sizeof(ksancov_trace_t))) {
		return;
	}
	size_t max_entries = (dev->cmps_sz - sizeof(ksancov_trace_t)) / sizeof(ksancov_cmps_trace_ent_t);

	if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->cmps_trace->kt_head, relaxed);
	if (__improbable(idx >= max_entries)) {
		return;
	}

	uint64_t pc = (uint64_t)(VM_KERNEL_UNSLIDE(caller) - 1);

	ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)dev->cmps_trace->kt_entries;
	entries[idx].pc = pc;
	entries[idx].type = type;
	entries[idx].args[0] = arg1;
	entries[idx].args[1] = arg2;
}

void
kcov_ksancov_trace_cmp_func(kcov_thread_data_t *data, uint32_t type, const void *arg1, size_t len1, const void *arg2, size_t len2, void *caller, bool always_log)
{
	if (len1 + len2 > KSANCOV_CMPS_TRACE_FUNC_MAX_BYTES) {
		return;
	}

	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* Check that comparison tracing is enabled. */
	if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) {
		return;
	}
	if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Treat all unsupported tracing modes as a no-op. This is not
	 * destructive to the kernel; the coverage sanitizer simply records
	 * nothing in that case.
	 */
	if (dev->cmps_mode != KS_CMPS_MODE_TRACE_FUNC) {
		return;
	}

	if (__improbable(dev->cmps_sz < sizeof(ksancov_trace_t))) {
		return;
	}

	size_t max_entries = (dev->cmps_sz - sizeof(ksancov_trace_t)) / sizeof(ksancov_cmps_trace_ent_t);
	if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) {
		return; /* overflow */
	}

	uintptr_t addr = (uintptr_t)VM_KERNEL_UNSLIDE(caller);
	if (!addr) {
		return;
	}

	if (!always_log && !kcov_ksancov_must_instrument((uintptr_t)caller)) {
		return;
	}

	uint32_t space = (uint32_t)ksancov_cmps_trace_func_space(len1, len2);

	uint32_t idx = os_atomic_add_orig(&dev->cmps_trace->kt_head, space / sizeof(ksancov_cmps_trace_ent_t), relaxed);
	if (__improbable(idx >= max_entries)) {
		return;
	}

	uint64_t pc = (uint64_t)(addr - 1);

	ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)dev->cmps_trace->kt_entries;

	entries[idx].pc = pc;
	entries[idx].type = type;
	entries[idx].len1_func = (uint16_t)len1;
	entries[idx].len2_func = (uint16_t)len2;

	uint8_t *func_args = (uint8_t *)entries[idx].args;

	__builtin_memcpy(func_args, arg1, len1);
	__builtin_memcpy(&func_args[len1], arg2, len2);
}
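
/*
 * Note on the record written above: unlike the fixed-size entries logged by
 * kcov_ksancov_trace_cmp(), a function-comparison record also carries the raw
 * operand bytes, copied back to back after the header fields. The head index
 * is accordingly advanced by space / sizeof(ksancov_cmps_trace_ent_t) slots,
 * so one logical record may span several consecutive entry slots in the
 * shared buffer.
 */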


void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

static void
kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop)
{
	OSKextLoadedKextSummary summary = {};
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	uint64_t *gate_section = NULL;
	unsigned long gate_sz = 0;
	uint32_t idx = 0;

	if (!ksancov_od_support_enabled) {
		return;
	}

	if (OSKextGetLoadedKextSummaryForAddress(start, &summary) != KERN_SUCCESS) {
		return;
	}

	if (!ksancov_initialized) {
		ksancov_od_log("ksancov: Dropping %s pre-initialization\n", summary.name);
		return;
	}

	if (nedges >= KSANCOV_MAX_EDGES) {
		ksancov_od_log("ksancov: Dropping %s: maximum number of edges reached\n",
		    summary.name);
		return;
	}

	/*
	 * The __DATA,__sancov_gate section is where the compiler stores the 64-bit
	 * global variable that is used by the inline instrumentation to decide
	 * whether it should call into the runtime or not.
	 */
	gate_section = getsectdatafromheader((kernel_mach_header_t *)summary.address,
	    "__DATA", "__sancov_gate", &gate_sz);
	if (gate_sz == 0) {
		ksancov_od_log("ksancov: Dropping %s: not instrumented with gated callbacks\n",
		    summary.name);
		return;
	}

	lck_mtx_lock(&ksancov_od_lck);

	/* reallocate the bookkeeping structures if needed */
	if (ksancov_od_modules_count >= ksancov_od_allocated_count) {
		unsigned int old_ksancov_od_allocated_count = ksancov_od_allocated_count;
		ksancov_od_allocated_count += (ksancov_od_allocated_count / 2);

		ksancov_od_log("ksancov: Reallocating entries: %u -> %u\n",
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count);

		ksancov_od_module_entries = krealloc_type_tag(struct ksancov_od_module_entry,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_entries, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);

		ksancov_od_module_handles = krealloc_type_tag(struct ksancov_od_module_handle,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_handles, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);
	}

	/* this is the index of the entry we're going to fill in both arrays */
	idx = ksancov_od_modules_count++;

	entry = &ksancov_od_module_entries[idx];
	handle = &ksancov_od_module_handles[idx];

	handle->start = start;
	handle->stop = stop;
	handle->gate = gate_section;
	handle->text_start = (uint64_t)VM_KERNEL_UNSLIDE(summary.text_exec_address);
	handle->text_end = (uint64_t)VM_KERNEL_UNSLIDE(summary.text_exec_address + summary.text_exec_size);

	strlcpy(entry->bundle, summary.name, sizeof(entry->bundle));
	entry->idx = (uint32_t)idx;

	ksancov_od_log("ksancov: Bookmarked module %s (0x%lx - 0x%lx, %lu guards) [idx: %u]\n",
	    entry->bundle, (uintptr_t)handle->start, (uintptr_t)handle->stop,
	    handle->stop - handle->start, entry->idx);
	lck_mtx_unlock(&ksancov_od_lck);
}

bool
kcov_ksancov_must_instrument(uintptr_t addr)
{
	/*
	 * When the kernel itself is compiled with sanitizer coverage, addresses
	 * in kernel text are always instrumented. Otherwise, skip kernel
	 * addresses and focus on KEXTs only.
	 */
#if __has_feature(coverage_sanitizer)
	if (kernel_text_contains(addr)) {
		return true;
	}
#endif

	uintptr_t unslided_addr = (uintptr_t)VM_KERNEL_UNSLIDE(addr);
	if (!unslided_addr) {
		return false;
	}

	/*
	 * Check that the address is in a KEXT and that the on-demand gate is
	 * enabled.
	 * NOTE: We take no lock here, as we are reading:
	 * 1) ksancov_od_modules_count atomically; it can only increase
	 * 2) ksancov_od_module_handles[...], which are constant after being
	 *    added to the array, with only the gate field changing
	 * 3) the gate value atomically
	 */
	unsigned int modules_count = os_atomic_load(&ksancov_od_modules_count, relaxed);
	for (unsigned int idx = 0; idx < modules_count; idx++) {
		struct ksancov_od_module_handle *handle = &ksancov_od_module_handles[idx];
		if (unslided_addr >= handle->text_start && unslided_addr < handle->text_end && handle->gate) {
			return os_atomic_load(handle->gate, relaxed) != 0;
		}
	}

	return false;
}

/*
 * Coverage sanitizer pseudo-device code.
 */

static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d;

	d = kalloc_type_tag(struct ksancov_dev, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	d->mode = KS_MODE_NONE;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}

static void
free_dev(ksancov_dev_t d)
{
	if ((d->mode == KS_MODE_TRACE || d->mode == KS_MODE_STKSIZE) && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	if ((d->cmps_mode == KS_CMPS_MODE_TRACE || d->cmps_mode == KS_CMPS_MODE_TRACE_FUNC) && d->cmps_trace) {
		kmem_free(kernel_map, (uintptr_t)d->cmps_trace, d->cmps_sz);
	}
	if (d->testcases) {
		kmem_free(kernel_map, (uintptr_t)d->testcases,
		    sizeof(ksancov_serialized_testcases_t) + sizeof(ksancov_serialized_testcase_t) * d->testcases_count);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree_type(struct ksancov_dev, d);
}

static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
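
/*
 * ksancov_do_map() is the usual Mach pattern for sharing a wired kernel
 * buffer with a user process: wrap the buffer in a named memory entry
 * (MAP_MEM_VM_SHARE) and map that entry into the calling task, so the
 * instrumentation keeps writing through the kernel address while userspace
 * reads the same pages through its own mapping.
 */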

/*
 * Map the sancov buffer into the current process.
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	switch (d->mode) {
	case KS_MODE_STKSIZE:
	case KS_MODE_TRACE:
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
		break;
	case KS_MODE_COUNTERS:
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
		break;
	default:
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

/*
 * Map the edge -> pc mapping as read-only.
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size;

	if (ksancov_edgemap == NULL) {
		return EINVAL;
	}

	addr = (uintptr_t)ksancov_edgemap;
	size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uintptr_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;
	return 0;
}
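
/*
 * For orientation, a minimal user-space client would drive this device
 * roughly as follows (an illustrative sketch only; the authoritative ioctl
 * and structure definitions live in san/ksancov.h, and error handling is
 * omitted):
 *
 *	int fd = open("/dev/" KSANCOV_DEVNODE, O_RDWR);
 *	size_t maxpcs = 64 * 1024;
 *	ioctl(fd, KSANCOV_IOC_TRACE, &maxpcs);      // pick a tracing mode
 *	struct ksancov_buf_desc mc = {0};
 *	ioctl(fd, KSANCOV_IOC_MAP, &mc);            // map the trace buffer
 *	ioctl(fd, KSANCOV_IOC_START, 0);            // attach the current thread
 *	// toggle kh_enabled in the mapped header, run the workload,
 *	// then read the recorded PCs back out of the mapping
 *	close(fd);
 */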

/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	if (ksancov_edgemap == NULL) {
		uintptr_t buf;
		size_t sz = sizeof(struct ksancov_edgemap) + nedges * sizeof(uintptr_t);

		kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
		    KMA_DATA_SHARED | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
		if (kr) {
			printf("ksancov: failed to allocate edge addr map\n");
			lck_rw_unlock_exclusive(&ksancov_devs_lck);
			return ENOMEM;
		}

		ksancov_edgemap = (void *)buf;
		ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
		ksancov_edgemap->ke_nedges = (uint32_t)nedges;
	}

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}

static int
ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;

	if (mode == KS_MODE_TRACE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else if (mode == KS_MODE_STKSIZE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
	os_atomic_init(&trace->kt_head, 0);
	os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
	trace->kt_maxent = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = mode;

	return 0;
}

static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	ksancov_counters_t *counters = (ksancov_counters_t *)buf;
	counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
	counters->kc_nedges = ksancov_edgemap->ke_nedges;
	os_atomic_init(&counters->kc_hdr.kh_enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

/*
 * Attach a thread to a ksancov dev instance.
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}

extern void
thread_wait(
	thread_t  thread,
	boolean_t until_not_runnable);


/*
 * Disconnect a thread from its ksancov dev.
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}

static int
ksancov_cmps_trace_alloc(ksancov_dev_t d, ksancov_cmps_mode_t mode, size_t maxcmps)
{
	if (d->cmps_mode != KS_CMPS_MODE_NONE) {
		return EBUSY; /* cmps trace already created */
	}
	assert(d->cmps_trace == NULL);

	uintptr_t buf;
	size_t sz;

	if (mode == KS_CMPS_MODE_TRACE || mode == KS_CMPS_MODE_TRACE_FUNC) {
		if (os_mul_and_add_overflow(maxcmps, sizeof(ksancov_cmps_trace_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *cmps_trace = (struct ksancov_trace *)buf;
	cmps_trace->kt_hdr.kh_magic = KSANCOV_CMPS_TRACE_MAGIC;
	os_atomic_init(&cmps_trace->kt_head, 0);
	os_atomic_init(&cmps_trace->kt_hdr.kh_enabled, 0);
	cmps_trace->kt_maxent = (uint32_t)maxcmps;

	d->cmps_trace = cmps_trace;
	d->cmps_sz = sz;
	d->cmps_mode = mode;

	return 0;
}

/*
 * Map the sancov comparisons buffer into the current process.
 */
static int
ksancov_cmps_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->cmps_sz;

	switch (d->cmps_mode) {
	case KS_CMPS_MODE_TRACE:
	case KS_CMPS_MODE_TRACE_FUNC:
		if (!d->cmps_trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->cmps_trace;
		break;
	default:
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

static int
ksancov_testcases_alloc(ksancov_dev_t d, size_t testcases_count)
{
	if (d->testcases != NULL) {
		return EBUSY; /* testcases buffer already created */
	}

	if (testcases_count > KSANCOV_SERIALIZED_TESTCASES_MAX_COUNT) {
		return EINVAL;
	}

	uintptr_t buf;

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf,
	    sizeof(ksancov_serialized_testcases_t) + sizeof(ksancov_serialized_testcase_t) * testcases_count,
	    KMA_DATA_SHARED | KMA_ZERO, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	d->testcases = (ksancov_serialized_testcases_t *)buf;
	d->testcases->head = 0;
	d->testcases->inner_index = 0;
	d->testcases_count = (uint32_t)testcases_count;

	return 0;
}

static int
ksancov_testcases_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr = (uintptr_t)d->testcases;
	if (addr == 0) {
		return EINVAL;
	}

	size_t nbytes = sizeof(ksancov_serialized_testcases_t) + sizeof(ksancov_serialized_testcase_t) * d->testcases_count;

	void *buf = ksancov_do_map(addr, nbytes, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = nbytes;

	return 0;
}

extern void console_write_unbuffered(char);
extern void (*PE_kputc)(char c);
void
_doprnt(
	const char *fmt,
	va_list    *argp,
	void       (*putc)(char),
	int        radix) __printflike(1, 0);

/*
 * Report whether we can print directly to the serial console.
 * ksancov_on_panic_log() must print only to serial during a panic to avoid
 * overflowing the debug buffer.
 */
static int
ksancov_can_print_serial(void)
{
	/*
	 * PE_kputc points at the serial output routine when serial is enabled,
	 * and falls back to console_write_unbuffered (video console) otherwise.
	 * The serial output settings do not matter for the panic-log testcases:
	 * output always goes to serial when DB_PRT is set in the debug boot-arg
	 * (debug=0x2).
	 */
	return PE_kputc != NULL && PE_kputc != console_write_unbuffered && disable_serial_output == false;
}

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wformat-nonliteral"
static int
ksancov_serial_print(const char *fmt, ...)
{
	va_list listp;
	va_start(listp, fmt);
	_doprnt(fmt, &listp, PE_kputc, 16);
	va_end(listp);

	return 0;
}
#pragma clang diagnostic pop

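/*
 * Base64-encode `buffer` onto the serial console. Standard base64: every
 * 3-byte input block becomes 4 output characters, with '=' padding for a
 * short final block (e.g. the two bytes "ab" encode as "YWI="). Output is
 * staged through a small stack buffer so that ksancov_serial_print() runs
 * roughly once per kilobyte of output rather than once per block.
 */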
static void
ksancov_base64_serial_print(const uint8_t *buffer, size_t len)
{
#define BASE64_TMP_BUFFER_LEN 1024
	char tmp_buffer[BASE64_TMP_BUFFER_LEN + 1];
	size_t tmp_buffer_index = 0;
	size_t i = 0;

	static const char base64_chars[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
	while (i < len) {
		uint8_t block[3];
		size_t block_len = 0;

		size_t j;
		for (j = 0; j < 3 && i < len; ++j) {
			block[j] = buffer[i++];
			block_len++;
		}

		char encoded_block[4];
		encoded_block[0] = base64_chars[block[0] >> 2];
		encoded_block[1] = base64_chars[((block[0] & 0x03) << 4) | (block_len > 1 ? (block[1] >> 4) : 0)];
		encoded_block[2] = (block_len > 1) ? base64_chars[((block[1] & 0x0F) << 2) | (block_len > 2 ? (block[2] >> 6) : 0)] : '=';
		encoded_block[3] = (block_len > 2) ? base64_chars[block[2] & 0x3F] : '=';

		if (tmp_buffer_index + 4 > BASE64_TMP_BUFFER_LEN) {
			tmp_buffer[tmp_buffer_index] = '\0';
			ksancov_serial_print("%s", tmp_buffer);
			tmp_buffer_index = 0;
		}

		for (j = 0; j < 4; ++j) {
			tmp_buffer[tmp_buffer_index++] = encoded_block[j];
		}
	}

	if (tmp_buffer_index > 0) {
		tmp_buffer[tmp_buffer_index] = '\0';
		ksancov_serial_print("%s", tmp_buffer);
	}
}

/* Print every testcase in every ksancov device to serial. */
static int
ksancov_testcases_serial_log(bool take_locks)
{
	if (!ksancov_can_print_serial()) {
		return EBUSY;
	}

	bool print_banner = false;
	for (int dev_idx = 0; dev_idx < KSANCOV_MAX_DEV; dev_idx++) {
		ksancov_dev_t dev = ksancov_devs[dev_idx];
		if (dev == NULL) {
			continue;
		}
		if (take_locks) {
			lck_mtx_lock(&dev->lock);
		}

		if (dev->testcases == NULL) {
			if (take_locks) {
				lck_mtx_unlock(&dev->lock);
			}
			continue;
		}

		if (!print_banner) {
			/* print the marker when there is at least one ksancov dev with testcases */
			print_banner = true;
			ksancov_serial_print("Begin ksancov testcases dump\n");
		}

		ksancov_serial_print("Device %d head %d inner_index %lu attached %d\n", dev_idx, dev->testcases->head, dev->testcases->inner_index, dev->thread ? 1 : 0);
		for (int idx = 0; idx < dev->testcases_count; idx++) {
			size_t testcase_idx = (dev->testcases->head - 1 - idx + dev->testcases_count) % dev->testcases_count;
			ksancov_serialized_testcase_t *testcase = &dev->testcases->list[testcase_idx];
			size_t size = testcase->size % KSANCOV_SERIALIZED_TESTCASE_BYTES;

			ksancov_serial_print("Testcase %d size %llu\n", testcase_idx, size);
			ksancov_base64_serial_print(testcase->buffer, size);
			ksancov_serial_print("\n");
		}

		if (take_locks) {
			lck_mtx_unlock(&dev->lock);
		}
	}
	if (print_banner) {
		ksancov_serial_print("End ksancov testcases dump\n");
	}

	return 0;
}

/*
 * On panic, print the content of all testcase buffers to serial if there is
 * at least one thread under trace. This code is serialized because it is
 * called from print_all_panic_info().
 */
void
ksancov_on_panic_log(void)
{
	if (__probable(os_atomic_load(&ksancov_enabled, relaxed) == 0)) {
		return;
	}
	ksancov_testcases_serial_log(false);
}

static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here: nobody else holds a reference to it except
	 * this thread and the one that is going to be detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}
	if (d->cmps_mode != KS_CMPS_MODE_NONE && d->cmps_hdr != NULL) {
		os_atomic_store(&d->cmps_hdr->kh_enabled, 0, relaxed); /* stop tracing cmps */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}

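/*
 * Self-test fixture for coverage-guided fuzzers: a client invoking
 * KSANCOV_IOC_TESTPANIC reaches the panic below only by matching all 16
 * nibbles of the target constant, and each nested comparison that starts
 * passing exposes a new coverage edge, so a fuzzer consuming the trace
 * feedback should be able to converge on the value one nibble at a time.
 */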
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

static int
ksancov_handle_on_demand_cmd(struct ksancov_on_demand_msg *kmsg)
{
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	ksancov_on_demand_operation_t op = kmsg->operation;

	lck_mtx_lock(&ksancov_od_lck);

	if (op == KS_OD_GET_BUNDLE) {
		uint64_t pc = kmsg->pc;
		for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
			entry = &ksancov_od_module_entries[idx];
			handle = &ksancov_od_module_handles[idx];

			if (pc >= handle->text_start && pc < handle->text_end) {
				strncpy(kmsg->bundle, entry->bundle, sizeof(kmsg->bundle));
				lck_mtx_unlock(&ksancov_od_lck);
				return 0;
			}
		}

		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	int ret = 0;

	/* find the entry/handle for the module */
	for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
		entry = &ksancov_od_module_entries[idx];
		if (strncmp(entry->bundle, kmsg->bundle, sizeof(entry->bundle)) == 0) {
			handle = &ksancov_od_module_handles[idx];
			break;
		}
	}

	if (handle == NULL) {
		ksancov_od_log("ksancov: Could not find module '%s'\n", kmsg->bundle);
		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	switch (op) {
	case KS_OD_GET_GATE:
		/* Get whether on-demand instrumentation is enabled in a given module */
		if (handle->gate) {
			kmsg->gate = *handle->gate;
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_SET_GATE:
		/* Toggle callback invocation for a given module */
		if (handle->gate) {
			ksancov_od_log("ksancov: Setting gate for '%s': %llu\n",
			    kmsg->bundle, kmsg->gate);
			if (kmsg->gate != *handle->gate) {
				if (kmsg->gate) {
					ksancov_od_enabled_count++;
				} else {
					ksancov_od_enabled_count--;
				}
				*handle->gate = kmsg->gate;
			}
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_GET_RANGE:
		/* Get which range of the guards table covers the given module */
		ksancov_od_log("ksancov: Range for '%s': %u, %u\n",
		    kmsg->bundle, *handle->start, *(handle->stop - 1));
		kmsg->range.start = *handle->start & GUARD_IDX_MASK;
		kmsg->range.stop = *(handle->stop - 1) & GUARD_IDX_MASK;
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_mtx_unlock(&ksancov_od_lck);
	return ret;
}

static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_ON_DEMAND:
		ret = ksancov_handle_on_demand_cmd((struct ksancov_on_demand_msg *)data);
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	case KSANCOV_IOC_CMPS_TRACE:
	case KSANCOV_IOC_CMPS_TRACE_FUNC:
		lck_mtx_lock(&d->lock);
		ksancov_cmps_mode_t cmp_mode = (cmd == KSANCOV_IOC_CMPS_TRACE) ? KS_CMPS_MODE_TRACE : KS_CMPS_MODE_TRACE_FUNC;
		ret = ksancov_cmps_trace_alloc(d, cmp_mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_CMPS_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_cmps_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_TESTCASES:
		lck_mtx_lock(&d->lock);
		ret = ksancov_testcases_alloc(d, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_TESTCASES_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_testcases_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_TESTCASES_LOG:
		ret = ksancov_testcases_serial_log(true);
		break;

	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}

static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	return 0;
}