/*
 * Copyright (c) 2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <string.h>
#include <stdbool.h>

#include <kern/assert.h>
#include <kern/cpu_data.h>
#include <kern/debug.h>
#include <kern/locks.h>
#include <kern/kalloc.h>
#include <kern/startup.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/zalloc.h>

#include <vm/vm_kern.h>
#include <vm/vm_protos.h>
#include <vm/pmap.h>

#include <mach/mach_vm.h>
#include <mach/mach_types.h>
#include <mach/mach_port.h>
#include <mach/vm_map.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_param.h>

#include <sys/stat.h>           /* dev_t */
#include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
#include <sys/conf.h>           /* must come after sys/stat.h */

#include <libkern/libkern.h>
#include <os/atomic_private.h>
#include <os/overflow.h>

#include <san/kcov_data.h>
#include <san/kcov_ksancov.h>

/* header mess... */
struct uthread;
typedef struct uthread * uthread_t;

#include <sys/sysproto.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>

#define USE_PC_TABLE     0
#define KSANCOV_MAX_DEV  64
#define KSANCOV_MAX_PCS  (1024U * 64)   /* default to 256k buffer => 64k pcs */

extern boolean_t ml_at_interrupt_context(void);
extern boolean_t ml_get_interrupts_enabled(void);

static void ksancov_detach(ksancov_dev_t);

static int dev_major;
static size_t nedges = 0;
static uint32_t __unused npcs = 0;

static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);

/* array of devices indexed by devnode minor */
static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
static struct ksancov_edgemap *ksancov_edgemap;

/* Global flag that enables the sanitizer hook. */
static _Atomic unsigned int ksancov_enabled = 0;

/*
 * Coverage sanitizer per-thread routines.
 */

/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	*dev = NULL;
}

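/*
 * Guard word layout, as used by the hooks below: the low bits covered by
 * GUARD_IDX_MASK hold the edge index assigned at guard-init time, and the
 * top bit (GUARD_SEEN) marks guards whose PC has already been recorded in
 * the edge map.
 */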
#define GUARD_SEEN      (uint32_t)0x80000000
#define GUARD_IDX_MASK  (uint32_t)0x0fffffff

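/* Append a single PC to the trace buffer; entries past maxpcs are dropped. */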
static void
trace_pc_guard_pcs(struct ksancov_dev *dev, uint32_t pc)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
	entries[idx] = pc;
}

#if CONFIG_STKSZ
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uint32_t pc, uint32_t stksize)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	entries[idx].pc = pc;
	entries[idx].stksize = stksize;
}
#endif

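/* Bump the 8-bit hit counter for this guard's edge, saturating at KSANCOV_MAX_HITS. */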
static void
trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
{
	size_t idx = *guardp & GUARD_IDX_MASK;
	ksancov_counters_t *counters = dev->counters;

	/* saturating 8-bit add */
	if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
		counters->kc_hits[idx]++;
	}
}

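/*
 * Record the unslid PC for a guard's edge in the global edge map the first
 * time that edge is hit, then mark the guard with GUARD_SEEN.
 */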
void
kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
{
	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);

	if (guardp == NULL) {
		return;
	}

	uint32_t gd = *guardp;
	if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
		size_t idx = gd & GUARD_IDX_MASK;
		if (idx < ksancov_edgemap->ke_nedges) {
			ksancov_edgemap->ke_addrs[idx] = pc;
			*guardp |= GUARD_SEEN;
		}
	}
}

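/*
 * Main tracing hook, invoked on every instrumented edge. Records the unslid
 * PC into the buffer of the device attached to this thread, according to
 * the device's tracing mode.
 */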
void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);
	ksancov_dev_t dev = data->ktd_device;

	/* Check whether coverage recording is enabled for this thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * The coverage sanitizer is disabled in the code called below. This
	 * allows calling back into the kernel without the risk of killing the
	 * machine through recursive instrumentation calls.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as a no-op. This is not
		 * destructive for the kernel itself; the coverage sanitizer simply
		 * records nothing in that case.
		 */
		;
	}
}

void
kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
{
	/* assign a unique number to each guard */
	for (; start != stop; start++) {
		if (*start == 0) {
			if (nedges < KSANCOV_MAX_EDGES) {
				*start = (uint32_t)++nedges;
			}
		}
	}
}

void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}

/*
 * Coverage sanitizer pseudo-device code.
 */

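/*
 * A sketch of the intended userspace flow (hypothetical user code, shown
 * for orientation only; see ksancov_ioctl() below for the supported
 * commands):
 *
 *	int fd = open("/dev/" KSANCOV_DEVNODE, O_RDWR);
 *	size_t max = KSANCOV_MAX_PCS;
 *	ioctl(fd, KSANCOV_IOC_TRACE, &max);      // pick a tracing mode
 *	struct ksancov_buf_desc mc = { 0 };
 *	ioctl(fd, KSANCOV_IOC_MAP, &mc);         // map the trace buffer
 *	ioctl(fd, KSANCOV_IOC_START, 0);         // attach the current thread
 *	// ...set kh_enabled in the mapped header, run the code under test...
 */
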
static ksancov_dev_t
create_dev(dev_t dev)
{
	ksancov_dev_t d;

	d = kalloc_type(struct ksancov_dev, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	d->mode = KS_MODE_NONE;
	d->maxpcs = KSANCOV_MAX_PCS;
	d->dev = dev;
	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);

	return d;
}

static void
free_dev(ksancov_dev_t d)
{
	/* both trace modes share the d->trace buffer */
	if ((d->mode == KS_MODE_TRACE || d->mode == KS_MODE_STKSIZE) && d->trace) {
		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
	}
	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
	kfree_type(struct ksancov_dev, d);
}

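/*
 * Wrap a kernel buffer in a named memory entry and map it shared into the
 * calling task's address space. Returns the user address, or NULL on
 * failure.
 */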
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}

/*
 * map the sancov buffer into the current process
 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	switch (d->mode) {
	case KS_MODE_STKSIZE:
	case KS_MODE_TRACE:
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
		break;
	case KS_MODE_COUNTERS:
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
		break;
	default:
		return EINVAL; /* not configured */
	}

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}

/*
 * map the edge -> pc mapping as read-only
 */
static int
ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr = (uintptr_t)ksancov_edgemap;
	size_t size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uint32_t);

	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;
	return 0;
}

/*
 * Device node management
 */

static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}

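/*
 * Allocate the shared trace buffer for KS_MODE_TRACE or KS_MODE_STKSIZE,
 * sized for maxpcs entries plus the trace header.
 */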
static int
ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;

	if (mode == KS_MODE_TRACE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else if (mode == KS_MODE_STKSIZE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
	trace->kt_offset = VM_MIN_KERNEL_ADDRESS;
	os_atomic_init(&trace->kt_head, 0);
	os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
	trace->kt_maxent = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = mode;

	return 0;
}

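/*
 * Allocate the shared counters buffer, with one 8-bit hit counter per edge
 * currently in the edge map.
 */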
static int
ksancov_counters_alloc(ksancov_dev_t d)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->counters == NULL);

	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
	    VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	ksancov_counters_t *counters = (ksancov_counters_t *)buf;
	counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
	counters->kc_nedges = ksancov_edgemap->ke_nedges;
	os_atomic_init(&counters->kc_hdr.kh_enabled, 0);

	d->counters = counters;
	d->sz = sz;
	d->mode = KS_MODE_COUNTERS;

	return 0;
}

/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}

extern void
thread_wait(
	thread_t        thread,
	boolean_t       until_not_runnable);


/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}

static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here: nothing else holds a reference to it except
	 * this thread and the thread that is about to be detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}

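/*
 * Test target for coverage-guided fuzzing: each nibble of the guess is
 * compared in its own nested branch, so a fuzzer following new edges can
 * discover the magic value nibble by nibble and eventually trigger the
 * panic.
 */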
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}

static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}

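/*
 * devfs clone hook: hand out the lowest free minor number on open, so each
 * open of /dev/ksancov gets its own device instance.
 */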
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1;
}

static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};

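/*
 * Register the ksancov character device and allocate the global edge map
 * shared by all device instances.
 */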
int
ksancov_init_dev(void)
{
	dev_major = cdevsw_add(-1, &ksancov_cdev);
	if (dev_major < 0) {
		printf("ksancov: failed to allocate major device node\n");
		return -1;
	}

	dev_t dev = makedev(dev_major, 0);
	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
	    ksancov_dev_clone, KSANCOV_DEVNODE);
	if (!node) {
		printf("ksancov: failed to create device node\n");
		return -1;
	}

	/* This could be moved to the first use of /dev/ksancov to save memory */
	uintptr_t buf;
	size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);

	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (kr) {
		printf("ksancov: failed to allocate edge addr map\n");
		return -1;
	}

	ksancov_edgemap = (void *)buf;
	ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
	ksancov_edgemap->ke_nedges = (uint32_t)nedges;
	ksancov_edgemap->ke_offset = VM_MIN_KERNEL_ADDRESS;

	return 0;
}