/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <vm/vm_kern_xnu.h>
#include <vm/vm_protos.h>
#include <vm/pmap.h>
#include <vm/vm_memory_entry_xnu.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h>           /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h>           /* must come after sys/stat.h */
#include <sys/sysctl.h>

#include <pexpert/pexpert.h>    /* PE_parse_boot_argn */

#include <libkern/libkern.h>
#include <libkern/OSKextLibPrivate.h>
#include <libkern/kernel_mach_header.h>
#include <os/atomic_private.h>
#include <os/log.h>
#include <os/overflow.h>

#include <san/kcov_data.h>
#include <san/kcov_ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

#define USE_PC_TABLE    0
#define KSANCOV_MAX_DEV 128
#define KSANCOV_MAX_PCS (1024U * 64)    /* default to 256k buffer => 64k pcs */

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);

/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;

/* Global flag that enables the sanitizer hook. */
static _Atomic unsigned int ksancov_enabled = 0;

/* Toggled after ksancov_init() */
static boolean_t ksancov_initialized = false;

/* Support for gated callbacks (referred to as "on demand", "od") */
static void kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop);

static LCK_MTX_DECLARE(ksancov_od_lck, &ksancov_lck_grp);

/* Bookkeeping structures for gated sancov instrumentation */
struct ksancov_od_module_entry {
	char     bundle[KMOD_MAX_NAME]; /* module bundle */
	uint32_t idx;                   /* index into entries/handles arrays */
};

struct ksancov_od_module_handle {
	uint32_t *start;      /* guards boundaries */
	uint32_t *stop;
	uint64_t *gate;       /* pointer to __DATA,__sancov_gate */
	uint64_t  text_start; /* .text section start, stripped and unslided address */
	uint64_t  text_end;   /* .text section end, stripped and unslided address */
};

static struct ksancov_od_module_entry *ksancov_od_module_entries = NULL;
static struct ksancov_od_module_handle *ksancov_od_module_handles = NULL;

/* number of entries/handles allocated */
static unsigned int ksancov_od_allocated_count = 0;

/* number of registered modules */
static unsigned int ksancov_od_modules_count = 0;
/* number of modules whose callbacks are currently enabled */
static unsigned int ksancov_od_enabled_count = 0;

/* Valid values for the ksancov.on_demand= boot-arg */
#define KSANCOV_OD_SUPPORT 0x0010 // Enable runtime support
#define KSANCOV_OD_LOGGING 0x0020 // Enable logging (via os_log)

__options_decl(ksancov_od_config_t, uint32_t, {
	KSANCOV_OD_NONE           = 0,
	KSANCOV_OD_ENABLE_SUPPORT = 0x0010,
	KSANCOV_OD_ENABLE_LOGGING = 0x0020,
});
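
/*
 * For example, booting with ksancov.on_demand=0x30 selects both
 * KSANCOV_OD_ENABLE_SUPPORT and KSANCOV_OD_ENABLE_LOGGING; ksancov_init()
 * below parses this boot-arg.
 */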

/* configurable at boot; enabled by default */
static ksancov_od_config_t ksancov_od_config = KSANCOV_OD_ENABLE_SUPPORT;

static unsigned ksancov_od_support_enabled = 1;
static unsigned ksancov_od_logging_enabled = 0;

SYSCTL_DECL(_kern_kcov);
SYSCTL_ULONG(_kern_kcov, OID_AUTO, nedges, CTLFLAG_RD, &nedges, "");

SYSCTL_NODE(_kern_kcov, OID_AUTO, od, CTLFLAG_RD, 0, "od");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, config, CTLFLAG_RD, &ksancov_od_config, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, allocated_entries, CTLFLAG_RD, &ksancov_od_allocated_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, modules_count, CTLFLAG_RD, &ksancov_od_modules_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, enabled_count, CTLFLAG_RD, &ksancov_od_enabled_count, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, support_enabled, CTLFLAG_RD, &ksancov_od_support_enabled, 0, "");
SYSCTL_UINT(_kern_kcov_od, OID_AUTO, logging_enabled, CTLFLAG_RW, &ksancov_od_logging_enabled, 0, "");
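
/*
 * These are published as kern.kcov.od.* (e.g. `sysctl kern.kcov.od.modules_count`
 * from the shell); only logging_enabled is writable at runtime (CTLFLAG_RW).
 */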

#define ksancov_od_log(...) do { \
	if (ksancov_od_logging_enabled) { \
		os_log_debug(OS_LOG_DEFAULT, __VA_ARGS__); \
	} \
} while (0)

__startup_func
void
ksancov_init(void)
{
	unsigned arg;

	/* handle ksancov boot-args */
	if (PE_parse_boot_argn("ksancov.on_demand", &arg, sizeof(arg))) {
		ksancov_od_config = (ksancov_od_config_t)arg;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_SUPPORT) {
		/* enable the runtime support for on-demand instrumentation */
		ksancov_od_support_enabled = 1;
		ksancov_od_allocated_count = 64;
		ksancov_od_module_entries = kalloc_type_tag(struct ksancov_od_module_entry,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
		ksancov_od_module_handles = kalloc_type_tag(struct ksancov_od_module_handle,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	} else {
		ksancov_od_support_enabled = 0;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_LOGGING) {
		ksancov_od_logging_enabled = 1;
	} else {
		ksancov_od_logging_enabled = 0;
	}

	ksancov_initialized = true;
}

/*
 * Coverage sanitizer per-thread routines.
 */

/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	*dev = NULL;
}


#define GUARD_SEEN     (uint32_t)0x80000000
#define GUARD_IDX_MASK (uint32_t)0x0fffffff
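
/*
 * A guard word encodes both an edge index and a "seen" bit: for example,
 * 0x00000005 is edge 5 whose PC has not been recorded yet, while 0x80000005
 * is edge 5 already entered into the edge map.
 */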

static void
trace_pc_guard_pcs(struct ksancov_dev *dev, uintptr_t pc)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
	entries[idx] = pc;
}

#if CONFIG_STKSZ
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uintptr_t pc, uint32_t stksize)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	entries[idx] = (ksancov_trace_stksize_ent_t){
		.pc = pc,
		.stksize = stksize
	};
}
#endif

static void
trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
{
	size_t idx = *guardp & GUARD_IDX_MASK;
	ksancov_counters_t *counters = dev->counters;

	/* saturating 8-bit add */
	if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
		counters->kc_hits[idx]++;
	}
}

void
kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
{
	/*
	 * Return as early as possible if we haven't had a chance to
	 * create the edge map yet.
	 *
	 * Note: this also protects us from performing unnecessary
	 * operations (especially during early boot) which could increase
	 * the maintenance burden for the instrumentation (see the comment
	 * about VM_KERNEL_UNSLIDE below).
	 */
	if (__probable(ksancov_edgemap == NULL)) {
		return;
	}

	if (guardp == NULL) {
		return;
	}

	uint32_t gd = *guardp;
	if (__improbable(gd && !(gd & GUARD_SEEN))) {
		size_t idx = gd & GUARD_IDX_MASK;
		if (idx < ksancov_edgemap->ke_nedges) {
			/*
			 * Since this code was originally introduced, VM_KERNEL_UNSLIDE
			 * has evolved significantly: it now expands to a series of
			 * function calls that check whether the address is slid, mask
			 * off tags and ultimately unslide the pointer.
			 *
			 * We therefore need to make sure that no function in the
			 * closure of VM_KERNEL_UNSLIDE is itself instrumented, as that
			 * would create a loop in which the instrumentation callbacks
			 * end up calling into instrumented code.
			 */
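			/*
			 * Note also that caller is a return address; subtracting 1
			 * records a PC inside the call/branch that reached this
			 * edge, rather than the instruction after it.
			 */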
			uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);

			ksancov_edgemap->ke_addrs[idx] = pc;
			*guardp |= GUARD_SEEN;
		}
	}
}

void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	uintptr_t pc;
	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * The coverage sanitizer is disabled in the code called below, so it can
	 * call back into the kernel without the risk of taking the machine down
	 * with recursive instrumentation calls.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as a no-op. This is not
		 * destructive to the kernel; the coverage sanitizer simply
		 * records nothing in that case.
		 */
		;
	}
}

void
kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	const size_t orig_nedges = nedges;

	/* assign a unique number to each guard */
	for (uint32_t *cur = start; cur != stop; cur++) {
		/* zero means that the guard has not been assigned */
		if (*cur == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*cur = (uint32_t)++nedges;
			}
		}
	}

	/* only invoke kcov_ksancov_bookmark_on_demand_module if we assigned new guards */
	if (nedges > orig_nedges) {
		kcov_ksancov_bookmark_on_demand_module(start, stop);
	}
}

void
kcov_ksancov_trace_cmp(kcov_thread_data_t *data, uint32_t type, uint64_t arg1, uint64_t arg2, void *caller)
{
	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* Check that comparison tracing is enabled. */
	if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) {
		return;
	}
	if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Treat all unsupported tracing modes as a no-op. This is not
	 * destructive to the kernel; the coverage sanitizer simply records
	 * nothing in that case.
	 */
	if (dev->cmps_mode != KS_CMPS_MODE_TRACE && dev->cmps_mode != KS_CMPS_MODE_TRACE_FUNC) {
		return;
	}

	if (__improbable(dev->cmps_sz < sizeof(ksancov_trace_t))) {
		return;
	}
	size_t max_entries = (dev->cmps_sz - sizeof(ksancov_trace_t)) / sizeof(ksancov_cmps_trace_ent_t);

	if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->cmps_trace->kt_head, relaxed);
	if (__improbable(idx >= max_entries)) {
		return;
	}

	uint64_t pc = (uint64_t)(VM_KERNEL_UNSLIDE(caller) - 1);

	ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)dev->cmps_trace->kt_entries;
	entries[idx].pc = pc;
	entries[idx].type = type;
	entries[idx].args[0] = arg1;
	entries[idx].args[1] = arg2;
}

void
kcov_ksancov_trace_cmp_func(kcov_thread_data_t *data, uint32_t type, const void *arg1, size_t len1, const void *arg2, size_t len2, void *caller, bool always_log)
{
	if (len1 + len2 > KSANCOV_CMPS_TRACE_FUNC_MAX_BYTES) {
		return;
	}

	ksancov_dev_t dev = data->ktd_device;

	/* Check that coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* Check that comparison tracing is enabled. */
	if (os_atomic_load(&dev->cmps_hdr, relaxed) == NULL) {
		return;
	}
	if (os_atomic_load(&dev->cmps_hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Treat all unsupported tracing modes as a no-op. This is not
	 * destructive to the kernel; the coverage sanitizer simply records
	 * nothing in that case.
	 */
	if (dev->cmps_mode != KS_CMPS_MODE_TRACE_FUNC) {
		return;
	}

	if (__improbable(dev->cmps_sz < sizeof(ksancov_trace_t))) {
		return;
	}

	size_t max_entries = (dev->cmps_sz - sizeof(ksancov_trace_t)) / sizeof(ksancov_cmps_trace_ent_t);
	if (os_atomic_load(&dev->cmps_trace->kt_head, relaxed) >= max_entries) {
		return; /* overflow */
	}

	uintptr_t addr = (uintptr_t)VM_KERNEL_UNSLIDE(caller);
	if (!addr) {
		return;
	}

	if (!always_log && !kcov_ksancov_must_instrument((uintptr_t)caller)) {
		return;
	}

	uint32_t space = (uint32_t)ksancov_cmps_trace_func_space(len1, len2);

	uint32_t idx = os_atomic_add_orig(&dev->cmps_trace->kt_head, space / sizeof(ksancov_cmps_trace_ent_t), relaxed);
	if (__improbable(idx >= max_entries)) {
		return;
	}

	uint64_t pc = (uint64_t)(addr - 1);

	ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)dev->cmps_trace->kt_entries;

	entries[idx].pc = pc;
	entries[idx].type = type;
	entries[idx].len1_func = (uint16_t)len1;
	entries[idx].len2_func = (uint16_t)len2;

	uint8_t *func_args = (uint8_t *)entries[idx].args;

	__builtin_memcpy(func_args, arg1, len1);
	__builtin_memcpy(&func_args[len1], arg2, len2);
}


void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

static void
kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop)
{
	OSKextLoadedKextSummary summary = {};
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	uint64_t *gate_section = NULL;
	unsigned long gate_sz = 0;
	uint32_t idx = 0;

	if (!ksancov_od_support_enabled) {
		return;
	}

	if (OSKextGetLoadedKextSummaryForAddress(start, &summary) != KERN_SUCCESS) {
		return;
	}

	if (!ksancov_initialized) {
		ksancov_od_log("ksancov: Dropping %s pre-initialization\n", summary.name);
		return;
	}

	if (nedges >= KSANCOV_MAX_EDGES) {
		ksancov_od_log("ksancov: Dropping %s: maximum number of edges reached\n",
		    summary.name);
		return;
	}

	/*
	 * The __DATA,__sancov_gate section is where the compiler stores the 64-bit
	 * global variable that is used by the inline instrumentation to decide
	 * whether it should call into the runtime or not.
	 */
	gate_section = getsectdatafromheader((kernel_mach_header_t *)summary.address,
	    "__DATA", "__sancov_gate", &gate_sz);
	if (gate_sz == 0) {
		ksancov_od_log("ksancov: Dropping %s: not instrumented with gated callbacks\n",
		    summary.name);
		return;
	}

	lck_mtx_lock(&ksancov_od_lck);

	/* reallocate the bookkeeping structures if needed */
	if (ksancov_od_modules_count >= ksancov_od_allocated_count) {
		unsigned int old_ksancov_od_allocated_count = ksancov_od_allocated_count;
		ksancov_od_allocated_count += (ksancov_od_allocated_count / 2);

		ksancov_od_log("ksancov: Reallocating entries: %u -> %u\n",
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count);

		ksancov_od_module_entries = krealloc_type_tag(struct ksancov_od_module_entry,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_entries, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);

		ksancov_od_module_handles = krealloc_type_tag(struct ksancov_od_module_handle,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_handles, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);
	}

	/* this is the index of the entry we're going to fill in both arrays */
	idx = ksancov_od_modules_count++;

	entry = &ksancov_od_module_entries[idx];
	handle = &ksancov_od_module_handles[idx];

	handle->start = start;
	handle->stop = stop;
	handle->gate = gate_section;
	handle->text_start = (uint64_t)VM_KERNEL_UNSLIDE(summary.text_exec_address);
	handle->text_end = (uint64_t)VM_KERNEL_UNSLIDE(summary.text_exec_address + summary.text_exec_size);

	strlcpy(entry->bundle, summary.name, sizeof(entry->bundle));
	entry->idx = (uint32_t)idx;

	ksancov_od_log("ksancov: Bookmarked module %s (0x%lx - 0x%lx, %lu guards) [idx: %u]\n",
	    entry->bundle, (uintptr_t)handle->start, (uintptr_t)handle->stop,
	    handle->stop - handle->start, entry->idx);
	lck_mtx_unlock(&ksancov_od_lck);
}

bool
kcov_ksancov_must_instrument(uintptr_t addr)
{
	/*
	 * If the kernel itself was not compiled with sanitizer coverage, skip
	 * addresses inside the kernel and focus on KEXTs only.
	 */
#if __has_feature(coverage_sanitizer)
	if (kernel_text_contains(addr)) {
		return true;
	}
#endif

	uintptr_t unslided_addr = (uintptr_t)VM_KERNEL_UNSLIDE(addr);
	if (!unslided_addr) {
		return false;
	}

	/*
	 * Check that the address is in a KEXT and that the on-demand gate is
	 * enabled. NOTE: no lock is taken here because we only read:
	 * 1) ksancov_od_modules_count atomically; it can only increase
	 * 2) ksancov_od_module_handles[...], which are constant after being
	 *    added to the array, with only the gate field changing
	 * 3) the gate value atomically
	 */
	unsigned int modules_count = os_atomic_load(&ksancov_od_modules_count, relaxed);
	for (unsigned int idx = 0; idx < modules_count; idx++) {
		struct ksancov_od_module_handle *handle = &ksancov_od_module_handles[idx];
		if (unslided_addr >= handle->text_start && unslided_addr < handle->text_end && handle->gate) {
			return os_atomic_load(handle->gate, relaxed) != 0;
		}
	}

	return false;
}

/*
 * Coverage sanitizer pseudo-device code.
 */

static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d;

	d = kalloc_type_tag(struct ksancov_dev, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	d->mode = KS_MODE_NONE;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}

static void
free_dev(ksancov_dev_t d)
{
	if ((d->mode == KS_MODE_TRACE || d->mode == KS_MODE_STKSIZE) && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	if ((d->cmps_mode == KS_CMPS_MODE_TRACE || d->cmps_mode == KS_CMPS_MODE_TRACE_FUNC) && d->cmps_trace) {
		kmem_free(kernel_map, (uintptr_t)d->cmps_trace, d->cmps_sz);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree_type(struct ksancov_dev, d);
}

static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
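
/*
 * A minimal sketch of how the wrappers below use this, sharing a trace
 * buffer read/write with the calling process:
 *
 *	void *uaddr = ksancov_do_map((uintptr_t)d->trace, d->sz,
 *	    VM_PROT_READ | VM_PROT_WRITE);
 *
 * Because the user mapping is backed by the same memory entry
 * (MAP_MEM_VM_SHARE) as the kernel buffer, kh_enabled toggles and kt_head
 * updates made on either side are visible to the other without copies.
 */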

/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	switch (d->mode) {
	case KS_MODE_STKSIZE:
	case KS_MODE_TRACE:
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
		break;
	case KS_MODE_COUNTERS:
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
		break;
	default:
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size;

	if (ksancov_edgemap == NULL) {
		return EINVAL;
	}

	addr = (uintptr_t)ksancov_edgemap;
	size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uintptr_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;
	return 0;
}

/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	if (ksancov_edgemap == NULL) {
		uintptr_t buf;
		size_t sz = sizeof(struct ksancov_edgemap) + nedges * sizeof(uintptr_t);

		kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
		    KMA_DATA_SHARED | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
		if (kr) {
			printf("ksancov: failed to allocate edge addr map\n");
			/* don't leave a dev registered for a failed open */
			ksancov_devs[minor_num] = NULL;
			free_dev(d);
			lck_rw_unlock_exclusive(&ksancov_devs_lck);
			return ENOMEM;
		}

		ksancov_edgemap = (void *)buf;
		ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
		ksancov_edgemap->ke_nedges = (uint32_t)nedges;
	}

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}

static int
ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;

	if (mode == KS_MODE_TRACE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else if (mode == KS_MODE_STKSIZE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
	os_atomic_init(&trace->kt_head, 0);
	os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
	trace->kt_maxent = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = mode;

	return 0;
}
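
/*
 * Sizing sketch (assumes ksancov_trace_pc_ent_t is 4 bytes, consistent with
 * the "256k buffer => 64k pcs" note next to KSANCOV_MAX_PCS): a KS_MODE_TRACE
 * request with maxpcs = 64K yields sz = sizeof(struct ksancov_trace) +
 * 64K * 4, i.e. a small header plus a 256KB entry array, with the arithmetic
 * checked by os_mul_and_add_overflow().
 */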

static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	ksancov_counters_t *counters = (ksancov_counters_t *)buf;
	counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
	counters->kc_nedges = ksancov_edgemap->ke_nedges;
	os_atomic_init(&counters->kc_hdr.kh_enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}

extern void
thread_wait(
	thread_t  thread,
	boolean_t until_not_runnable);


/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}

static int
ksancov_cmps_trace_alloc(ksancov_dev_t d, ksancov_cmps_mode_t mode, size_t maxcmps)
{
	if (d->cmps_mode != KS_CMPS_MODE_NONE) {
		return EBUSY; /* cmps trace already created */
	}
	assert(d->cmps_trace == NULL);

	uintptr_t buf;
	size_t sz;

	if (mode == KS_CMPS_MODE_TRACE || mode == KS_CMPS_MODE_TRACE_FUNC) {
		if (os_mul_and_add_overflow(maxcmps, sizeof(ksancov_cmps_trace_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA_SHARED | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *cmps_trace = (struct ksancov_trace *)buf;
	cmps_trace->kt_hdr.kh_magic = KSANCOV_CMPS_TRACE_MAGIC;
	os_atomic_init(&cmps_trace->kt_head, 0);
	os_atomic_init(&cmps_trace->kt_hdr.kh_enabled, 0);
	cmps_trace->kt_maxent = (uint32_t)maxcmps;

	d->cmps_trace = cmps_trace;
	d->cmps_sz = sz;
	d->cmps_mode = mode;

	return 0;
}

/*
 * map the sancov comparisons buffer into the current process
 */
static int
ksancov_cmps_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->cmps_sz;

	switch (d->cmps_mode) {
	case KS_CMPS_MODE_TRACE:
	case KS_CMPS_MODE_TRACE_FUNC:
		if (!d->cmps_trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->cmps_trace;
		break;
	default:
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here: nothing else holds a reference to it
	 * except this thread and the thread that gets detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}
	if (d->cmps_mode != KS_CMPS_MODE_NONE && d->cmps_hdr != NULL) {
		os_atomic_store(&d->cmps_hdr->kh_enabled, 0, relaxed); /* stop tracing cmps */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}

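/*
 * Deliberately hard-to-hit panic target for testing the tracer: each nibble
 * of the guess is compared in its own branch, so a coverage-guided fuzzer
 * can discover the 64-bit magic value four bits at a time instead of having
 * to guess all of it at once.
 */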
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

static int
ksancov_handle_on_demand_cmd(struct ksancov_on_demand_msg *kmsg)
{
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	ksancov_on_demand_operation_t op = kmsg->operation;

	lck_mtx_lock(&ksancov_od_lck);

	if (op == KS_OD_GET_BUNDLE) {
		uint64_t pc = kmsg->pc;
		for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
			entry = &ksancov_od_module_entries[idx];
			handle = &ksancov_od_module_handles[idx];

			if (pc >= handle->text_start && pc < handle->text_end) {
				strncpy(kmsg->bundle, entry->bundle, sizeof(kmsg->bundle));
				lck_mtx_unlock(&ksancov_od_lck);
				return 0;
			}
		}

		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	int ret = 0;

	/* find the entry/handle for the module */
	for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
		entry = &ksancov_od_module_entries[idx];
		if (strncmp(entry->bundle, kmsg->bundle, sizeof(entry->bundle)) == 0) {
			handle = &ksancov_od_module_handles[idx];
			break;
		}
	}

	if (handle == NULL) {
		ksancov_od_log("ksancov: Could not find module '%s'\n", kmsg->bundle);
		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	switch (op) {
	case KS_OD_GET_GATE:
		/* Get whether on-demand instrumentation is enabled in a given module */
		if (handle->gate) {
			kmsg->gate = *handle->gate;
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_SET_GATE:
		/* Toggle callback invocation for a given module */
		if (handle->gate) {
			ksancov_od_log("ksancov: Setting gate for '%s': %llu\n",
			    kmsg->bundle, kmsg->gate);
			if (kmsg->gate != *handle->gate) {
				if (kmsg->gate) {
					ksancov_od_enabled_count++;
				} else {
					ksancov_od_enabled_count--;
				}
				*handle->gate = kmsg->gate;
			}
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_GET_RANGE:
		/* Get which range of the guards table covers the given module */
		ksancov_od_log("ksancov: Range for '%s': %u, %u\n",
		    kmsg->bundle, *handle->start, *(handle->stop - 1));
		kmsg->range.start = *handle->start & GUARD_IDX_MASK;
		kmsg->range.stop = *(handle->stop - 1) & GUARD_IDX_MASK;
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_mtx_unlock(&ksancov_od_lck);
	return ret;
}
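
/*
 * A hypothetical userspace exchange with the handler above (the bundle
 * identifier is illustrative only):
 *
 *	struct ksancov_on_demand_msg msg = {
 *		.operation = KS_OD_SET_GATE,
 *		.gate = 1,
 *	};
 *	strlcpy(msg.bundle, "com.example.driver", sizeof(msg.bundle));
 *	ioctl(fd, KSANCOV_IOC_ON_DEMAND, &msg);  // open the module's gate
 *
 * KS_OD_GET_BUNDLE works in the opposite direction: set msg.pc to a
 * recorded (unslid) PC and read the owning module back from msg.bundle.
 */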

static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_ON_DEMAND:
		ret = ksancov_handle_on_demand_cmd((struct ksancov_on_demand_msg *)data);
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	case KSANCOV_IOC_CMPS_TRACE:
	case KSANCOV_IOC_CMPS_TRACE_FUNC:
		lck_mtx_lock(&d->lock);
		ksancov_cmps_mode_t cmp_mode = (cmd == KSANCOV_IOC_CMPS_TRACE) ? KS_CMPS_MODE_TRACE : KS_CMPS_MODE_TRACE_FUNC;
		ret = ksancov_cmps_trace_alloc(d, cmp_mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_CMPS_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_cmps_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}
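
/*
 * End-to-end sketch of the expected userspace flow (hypothetical user code,
 * not part of this file; it assumes the KSANCOV_IOC_* numbers and the
 * ksancov_buf_desc/ksancov_trace_t definitions shared with userspace):
 *
 *	int fd = open("/dev/" KSANCOV_DEVNODE, O_RDWR);
 *	size_t maxpcs = 64 * 1024;
 *	ioctl(fd, KSANCOV_IOC_TRACE, &maxpcs);     // allocate the trace buffer
 *
 *	struct ksancov_buf_desc mcmd = {0};
 *	ioctl(fd, KSANCOV_IOC_MAP, &mcmd);         // map it into this task
 *	ksancov_trace_t *trace = (void *)mcmd.ptr;
 *
 *	uintptr_t unused = 0;                      // arg ignored by the handler above
 *	ioctl(fd, KSANCOV_IOC_START, &unused);     // attach the current thread
 *	__atomic_store_n(&trace->kt_hdr.kh_enabled, 1, __ATOMIC_RELAXED);
 *	// ... exercise kernel code, then read trace->kt_head entries ...
 *	__atomic_store_n(&trace->kt_hdr.kh_enabled, 0, __ATOMIC_RELAXED);
 */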

static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	return 0;
}