1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <stdbool.h>
31
32 #include <kern/assert.h>
33 #include <kern/cpu_data.h>
34 #include <kern/debug.h>
35 #include <kern/locks.h>
36 #include <kern/kalloc.h>
37 #include <kern/startup.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/zalloc.h>
41
42 #include <vm/vm_kern.h>
43 #include <vm/vm_protos.h>
44 #include <vm/pmap.h>
45
46 #include <mach/mach_vm.h>
47 #include <mach/mach_types.h>
48 #include <mach/mach_port.h>
49 #include <mach/vm_map.h>
50 #include <mach/vm_param.h>
51 #include <mach/machine/vm_param.h>
52
53 #include <sys/stat.h> /* dev_t */
54 #include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
55 #include <sys/conf.h> /* must come after sys/stat.h */
56
57 #include <libkern/libkern.h>
58 #include <os/atomic_private.h>
59 #include <os/overflow.h>
60
61 #include <san/kcov_data.h>
62 #include <san/kcov_ksancov.h>
63
64 /* header mess... */
65 struct uthread;
66 typedef struct uthread * uthread_t;
67
68 #include <sys/sysproto.h>
69 #include <sys/queue.h>
70 #include <sys/sysctl.h>
71 #include <sys/kdebug.h>
72
73 #define USE_PC_TABLE 0
74 #define KSANCOV_MAX_DEV 64
75 #define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */
76
77 extern boolean_t ml_at_interrupt_context(void);
78 extern boolean_t ml_get_interrupts_enabled(void);
79
80 static void ksancov_detach(ksancov_dev_t);
81
82 static int dev_major;
83 static size_t nedges = 0;
84 static uint32_t __unused npcs = 0;
85
86 static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
87 static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);
88
89 /* array of devices indexed by devnode minor */
90 static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
91 static struct ksancov_edgemap *ksancov_edgemap;
92
93 /* Global flag that enables the sanitizer hook. */
94 static _Atomic unsigned int ksancov_enabled = 0;
95
96 /*
97 * Coverage sanitizer per-thread routines.
98 */
99
100 /* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	/* No coverage device is attached until the thread opts in (see ksancov_attach()). */
	*dev = NULL;
}
106
107
108 #define GUARD_SEEN (uint32_t)0x80000000
109 #define GUARD_IDX_MASK (uint32_t)0x0fffffff
110
/*
 * Append one PC to the shared trace buffer in KS_MODE_TRACE.
 * Lock-free: a slot is reserved by atomically bumping kt_head.
 */
static void
trace_pc_guard_pcs(struct ksancov_dev *dev, uint32_t pc)
{
	/* Cheap early-out once the ring is full; avoids bumping the head forever. */
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	/*
	 * Reserve a slot. Concurrent writers may still push kt_head past
	 * maxpcs between the check above and here, so re-validate the index
	 * we actually got before writing.
	 */
	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
	entries[idx] = pc;
}
126
#if CONFIG_STKSZ
/*
 * Append one (PC, stack-usage) pair to the shared trace buffer in
 * KS_MODE_STKSIZE. Same slot-reservation scheme as trace_pc_guard_pcs().
 */
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uint32_t pc, uint32_t stksize)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	/* Re-check the reserved index; racing writers can overshoot maxpcs. */
	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	entries[idx].pc = pc;
	entries[idx].stksize = stksize;
}
#endif
145
/*
 * Bump the per-edge hit counter for this guard in KS_MODE_COUNTERS.
 * NOTE(review): idx is not bounds-checked against kc_nedges here; this relies
 * on guard numbering in kcov_ksancov_trace_pc_guard_init() staying below
 * KSANCOV_MAX_EDGES — confirm the counters buffer is always sized to match.
 */
static void
trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
{
	size_t idx = *guardp & GUARD_IDX_MASK;
	ksancov_counters_t *counters = dev->counters;

	/* saturating 8bit add */
	if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
		counters->kc_hits[idx]++;
	}
}
157
158 void
kcov_ksancov_trace_guard(uint32_t * guardp,void * caller)159 kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
160 {
161 uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);
162
163 if (guardp == NULL) {
164 return;
165 }
166
167 uint32_t gd = *guardp;
168 if (__improbable(gd && !(gd & GUARD_SEEN) && ksancov_edgemap)) {
169 size_t idx = gd & GUARD_IDX_MASK;
170 if (idx < ksancov_edgemap->ke_nedges) {
171 ksancov_edgemap->ke_addrs[idx] = pc;
172 *guardp |= GUARD_SEEN;
173 }
174 }
175 }
176
/*
 * Main sanitizer hook: record a coverage event for the current thread.
 * Called on every instrumented edge, so the common (not-traced) path must
 * bail out as early and cheaply as possible.
 */
void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	/* Compact the return address to a 32-bit offset from the kernel base. */
	uint32_t pc = (uint32_t)(VM_KERNEL_UNSLIDE(caller) - VM_MIN_KERNEL_ADDRESS - 1);
	ksancov_dev_t dev = data->ktd_device;

	/* Check that we have coverage recording enabled for a thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* Userspace pauses/resumes recording by flipping kh_enabled in the shared header. */
	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Coverage sanitizer is disabled in the code called below. This allows calling back to the kernel without
	 * the risk of killing machine with recursive calls.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as no-op. It is not destructive for the kernel itself just
		 * coverage sanitiser will not record anything in such case.
		 */
		;
	}
}
217
218 void
kcov_ksancov_trace_pc_guard_init(uint32_t * start,uint32_t * stop)219 kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
220 {
221 /* assign a unique number to each guard */
222 for (; start != stop; start++) {
223 if (*start == 0) {
224 if (nedges < KSANCOV_MAX_EDGES) {
225 *start = (uint32_t)++nedges;
226 }
227 }
228 }
229 }
230
/*
 * Compiler-inserted PC-table constructor. Compiled out by default
 * (USE_PC_TABLE == 0); guard-based instrumentation carries the coverage
 * information instead.
 */
void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	/* Borrow an unused bit of the flags word to mark processed entries. */
	static const uintptr_t pc_table_seen_flag = 0x100;

	/* The table is a sequence of (pc, flags) pairs, hence the stride of 2. */
	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}
258
259 /*
260 * Coverage sanitizer pseudo-device code.
261 */
262
263 static ksancov_dev_t
create_dev(dev_t dev)264 create_dev(dev_t dev)
265 {
266 ksancov_dev_t d;
267
268 d = kalloc_type(struct ksancov_dev, Z_WAITOK | Z_ZERO | Z_NOFAIL);
269 d->mode = KS_MODE_NONE;
270 d->maxpcs = KSANCOV_MAX_PCS;
271 d->dev = dev;
272 lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);
273
274 return d;
275 }
276
277 static void
free_dev(ksancov_dev_t d)278 free_dev(ksancov_dev_t d)
279 {
280 if (d->mode == KS_MODE_TRACE && d->trace) {
281 kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
282 } else if (d->mode == KS_MODE_COUNTERS && d->counters) {
283 kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
284 }
285 lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
286 kfree_type(struct ksancov_dev, d);
287 }
288
/*
 * Share a kernel buffer [base, base+sz) with the current user task.
 * Returns the user-visible address, or NULL on failure.
 */
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	/*
	 * Wrap the kernel range in a memory entry. MAP_MEM_VM_SHARE shares the
	 * existing pages with the new mapping rather than copying them.
	 */
	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	/* Map the entry anywhere in the calling task's address space. */
	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
334
335 /*
336 * map the sancov buffer into the current process
337 */
static int
ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
{
	uintptr_t addr;
	size_t size = d->sz;

	/* Both trace modes share the d->trace buffer; counters use d->counters. */
	switch (d->mode) {
	case KS_MODE_STKSIZE:
	case KS_MODE_TRACE:
		if (!d->trace) {
			return EINVAL;
		}
		addr = (uintptr_t)d->trace;
		break;
	case KS_MODE_COUNTERS:
		if (!d->counters) {
			return EINVAL;
		}
		addr = (uintptr_t)d->counters;
		break;
	default:
		return EINVAL; /* not configured */
	}

	/* Read/write: userspace both consumes entries and toggles kh_enabled. */
	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
	if (buf == NULL) {
		return ENOMEM;
	}

	*bufp = (uintptr_t)buf;
	*sizep = size;

	return 0;
}
372
373 /*
374 * map the edge -> pc mapping as read-only
375 */
376 static int
ksancov_map_edgemap(uintptr_t * bufp,size_t * sizep)377 ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
378 {
379 uintptr_t addr = (uintptr_t)ksancov_edgemap;
380 size_t size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uint32_t);
381
382 void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
383 if (buf == NULL) {
384 return ENOMEM;
385 }
386
387 *bufp = (uintptr_t)buf;
388 *sizep = size;
389 return 0;
390 }
391
392 /*
393 * Device node management
394 */
395
396 static int
ksancov_open(dev_t dev,int flags,int devtype,proc_t p)397 ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
398 {
399 #pragma unused(flags,devtype,p)
400 const int minor_num = minor(dev);
401
402 if (minor_num >= KSANCOV_MAX_DEV) {
403 return EBUSY;
404 }
405
406 lck_rw_lock_exclusive(&ksancov_devs_lck);
407
408 if (ksancov_devs[minor_num]) {
409 lck_rw_unlock_exclusive(&ksancov_devs_lck);
410 return EBUSY;
411 }
412
413 ksancov_dev_t d = create_dev(dev);
414 if (!d) {
415 lck_rw_unlock_exclusive(&ksancov_devs_lck);
416 return ENOMEM;
417 }
418 ksancov_devs[minor_num] = d;
419
420 lck_rw_unlock_exclusive(&ksancov_devs_lck);
421
422 return 0;
423 }
424
/*
 * Allocate the shared trace buffer for KS_MODE_TRACE or KS_MODE_STKSIZE.
 * maxpcs is caller (userspace) controlled; the size math is overflow-checked
 * and an oversized request simply fails the allocation.
 */
static int
ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
{
	if (d->mode != KS_MODE_NONE) {
		return EBUSY; /* trace/counters already created */
	}
	assert(d->trace == NULL);

	uintptr_t buf;
	size_t sz;

	/* sz = header + maxpcs * entry-size, with overflow detection. */
	if (mode == KS_MODE_TRACE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else if (mode == KS_MODE_STKSIZE) {
		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
		    sizeof(struct ksancov_trace), &sz)) {
			return EINVAL;
		}
	} else {
		return EINVAL;
	}

	/* allocate the shared memory buffer */
	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
	    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
	if (kr != KERN_SUCCESS) {
		return ENOMEM;
	}

	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
	trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
	/* Recorded PCs are offsets from this base (see kcov_ksancov_trace_pc()). */
	trace->kt_offset = VM_MIN_KERNEL_ADDRESS;
	os_atomic_init(&trace->kt_head, 0);
	os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
	/* NOTE(review): truncates if maxpcs > UINT32_MAX; in practice the
	 * kmem_alloc above would already have failed for such a size. */
	trace->kt_maxent = (uint32_t)maxpcs;

	d->trace = trace;
	d->sz = sz;
	d->maxpcs = maxpcs;
	d->mode = mode;

	return 0;
}
471
472 static int
ksancov_counters_alloc(ksancov_dev_t d)473 ksancov_counters_alloc(ksancov_dev_t d)
474 {
475 if (d->mode != KS_MODE_NONE) {
476 return EBUSY; /* trace/counters already created */
477 }
478 assert(d->counters == NULL);
479
480 uintptr_t buf;
481 size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);
482
483 /* allocate the shared memory buffer */
484 kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
485 KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
486 if (kr != KERN_SUCCESS) {
487 return ENOMEM;
488 }
489
490 ksancov_counters_t *counters = (ksancov_counters_t *)buf;
491 counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
492 counters->kc_nedges = ksancov_edgemap->ke_nedges;
493 os_atomic_init(&counters->kc_hdr.kh_enabled, 0);
494
495 d->counters = counters;
496 d->sz = sz;
497 d->mode = KS_MODE_COUNTERS;
498
499 return 0;
500 }
501
502 /*
503 * attach a thread to a ksancov dev instance
504 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	/* A dev traces at most one thread; displace any previously attached one. */
	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	d->thread = th;
	/* Hold a thread reference for as long as the device points at it. */
	thread_reference(d->thread);

	/* Publish the device to the hook, then enable the global hook (counted). */
	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}
535
536 extern void
537 thread_wait(
538 thread_t thread,
539 boolean_t until_not_runnable);
540
541
542 /*
543 * disconnect thread from ksancov dev
544 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	/* Drop the global hook count taken in ksancov_attach(). */
	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}
573
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	/* Unpublish the device first so no new ioctl can look it up. */
	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here as there is and will be no one having its
	 * reference except for this thread and the one which is going to
	 * be detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}
604
/*
 * Coverage self-test: panics iff the caller guesses all 16 nibbles of the
 * target value. The comparisons are deliberately nested one nibble at a
 * time so a coverage-guided fuzzer can discover the value incrementally —
 * each correct nibble unlocks exactly one new edge. Do not "simplify" the
 * nesting into a loop; the branch structure is the point.
 */
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

/* X(n): does nibble n of the guess match nibble n of the target? */
#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}
646
/*
 * ioctl dispatcher for /dev/ksancov. The devs rwlock is held shared across
 * the whole call so ksancov_close() cannot free the instance underneath us;
 * per-device state changes additionally take d->lock.
 */
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor(dev)];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		/* Allocate the PC (or PC+stacksize) trace buffer; size from userspace. */
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		/* Allocate the per-edge hit-counter buffer. */
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		/* Map the previously allocated buffer into the calling process. */
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		/* Global read-only edge map — no per-device state, so no d->lock. */
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		/* Attach the calling thread and start tracing it. */
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}
706
/*
 * devfs clone callback: pick the first unused minor for a new open.
 * NOTE(review): ksancov_devs[] is scanned here without ksancov_devs_lck;
 * two concurrent opens can be handed the same minor — presumably
 * ksancov_open() resolving the race under the lock (EBUSY) is relied on.
 * Confirm against devfs clone-open semantics.
 */
static int
ksancov_dev_clone(dev_t dev, int action)
{
#pragma unused(dev)
	if (action == DEVFS_CLONE_ALLOC) {
		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
			if (ksancov_devs[i] == NULL) {
				return i;
			}
		}
	} else if (action == DEVFS_CLONE_FREE) {
		return 0;
	}

	return -1; /* no free minor, or unknown action */
}
723
/*
 * Character device switch for /dev/ksancov.
 * Only open/close/ioctl are implemented; all other entry points are the
 * standard error stubs.
 */
static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};
739
740 int
ksancov_init_dev(void)741 ksancov_init_dev(void)
742 {
743 dev_major = cdevsw_add(-1, &ksancov_cdev);
744 if (dev_major < 0) {
745 printf("ksancov: failed to allocate major device node\n");
746 return -1;
747 }
748
749 dev_t dev = makedev(dev_major, 0);
750 void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
751 ksancov_dev_clone, KSANCOV_DEVNODE);
752 if (!node) {
753 printf("ksancov: failed to create device node\n");
754 return -1;
755 }
756
757 /* This could be moved to the first use of /dev/ksancov to save memory */
758 uintptr_t buf;
759 size_t sz = sizeof(struct ksancov_edgemap) + KSANCOV_MAX_EDGES * sizeof(uint32_t);
760
761 kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
762 KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
763 if (kr) {
764 printf("ksancov: failed to allocate edge addr map\n");
765 return -1;
766 }
767
768 ksancov_edgemap = (void *)buf;
769 ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
770 ksancov_edgemap->ke_nedges = (uint32_t)nedges;
771 ksancov_edgemap->ke_offset = VM_MIN_KERNEL_ADDRESS;
772
773 return 0;
774 }
775