1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <string.h>
30 #include <stdbool.h>
31
32 #include <kern/assert.h>
33 #include <kern/cpu_data.h>
34 #include <kern/debug.h>
35 #include <kern/locks.h>
36 #include <kern/kalloc.h>
37 #include <kern/startup.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/zalloc.h>
41
42 #include <vm/vm_kern.h>
43 #include <vm/vm_protos.h>
44 #include <vm/pmap.h>
45
46 #include <mach/mach_vm.h>
47 #include <mach/mach_types.h>
48 #include <mach/mach_port.h>
49 #include <mach/vm_map.h>
50 #include <mach/vm_param.h>
51 #include <mach/machine/vm_param.h>
52
53 #include <sys/stat.h> /* dev_t */
54 #include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
55 #include <sys/conf.h> /* must come after sys/stat.h */
56 #include <sys/sysctl.h>
57
58 #include <pexpert/pexpert.h> /* PE_parse_boot_argn */
59
60 #include <libkern/libkern.h>
61 #include <libkern/OSKextLibPrivate.h>
62 #include <libkern/kernel_mach_header.h>
63 #include <os/atomic_private.h>
64 #include <os/log.h>
65 #include <os/overflow.h>
66
67 #include <san/kcov_data.h>
68 #include <san/kcov_ksancov.h>
69
70 /* header mess... */
71 struct uthread;
72 typedef struct uthread * uthread_t;
73
74 #include <sys/sysproto.h>
75 #include <sys/queue.h>
76 #include <sys/sysctl.h>
77 #include <sys/kdebug.h>
78
79 #define USE_PC_TABLE 0
80 #define KSANCOV_MAX_DEV 64
81 #define KSANCOV_MAX_PCS (1024U * 64) /* default to 256k buffer => 64k pcs */
82
83 extern boolean_t ml_at_interrupt_context(void);
84 extern boolean_t ml_get_interrupts_enabled(void);
85
86 static void ksancov_detach(ksancov_dev_t);
87
88 static int dev_major;
89 static size_t nedges = 0;
90 static uint32_t __unused npcs = 0;
91
92 static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
93 static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);
94
95 /* array of devices indexed by devnode minor */
96 static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
97 static struct ksancov_edgemap *ksancov_edgemap;
98
99 /* Global flag that enables the sanitizer hook. */
100 static _Atomic unsigned int ksancov_enabled = 0;
101
102 /* Toggled after ksancov_init() */
103 static boolean_t ksancov_initialized = false;
104
105
106 /* Support for gated callbacks (referred to as "on demand", "od") */
107 static void kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop);
108
109 static LCK_MTX_DECLARE(ksancov_od_lck, &ksancov_lck_grp);
110
111 /* Bookkeeping structures for gated sancov instrumentation */
112 struct ksancov_od_module_entry {
113 char bundle[KMOD_MAX_NAME]; /* module bundle */
114 uint32_t idx; /* index into entries/handles arrays */
115 };
116
117 struct ksancov_od_module_handle {
118 uint32_t *start; /* guards boundaries */
119 uint32_t *stop;
120 uint64_t *gate; /* pointer to __DATA,__sancov_gate*/
121 };
122
123 static struct ksancov_od_module_entry *ksancov_od_module_entries = NULL;
124 static struct ksancov_od_module_handle *ksancov_od_module_handles = NULL;
125
126 /* number of entries/handles allocated */
127 static unsigned int ksancov_od_allocated_count = 0;
128
129 /* number of registered modules */
130 static unsigned int ksancov_od_modules_count = 0;
131 /* number of modules whose callbacks are currently enabled */
132 static unsigned int ksancov_od_enabled_count = 0;
133
134 /* Valid values for ksancov.on_demand= boot-arg */
135 #define KSANCOV_OD_SUPPORT 0x0010 // Enable runtime support
136 #define KSANCOV_OD_LOGGING 0x0020 // Enable logging (via os_log)
137
138 __options_decl(ksancov_od_config_t, uint32_t, {
139 KSANCOV_OD_NONE = 0,
140 KSANCOV_OD_ENABLE_SUPPORT = 0x0010,
141 KSANCOV_OD_ENABLE_LOGGING = 0x0020,
142 });
143
144 /* configurable at boot; enabled by default */
145 static ksancov_od_config_t ksancov_od_config = KSANCOV_OD_ENABLE_SUPPORT;
146
147 static unsigned ksancov_od_support_enabled = 1;
148 static unsigned ksancov_od_logging_enabled = 0;
149
150 SYSCTL_DECL(_kern_kcov);
151 SYSCTL_ULONG(_kern_kcov, OID_AUTO, nedges, CTLFLAG_RD, &nedges, "");
152
153 SYSCTL_NODE(_kern_kcov, OID_AUTO, od, CTLFLAG_RD, 0, "od");
154 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, config, CTLFLAG_RD, &ksancov_od_config, 0, "");
155 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, allocated_entries, CTLFLAG_RD, &ksancov_od_allocated_count, 0, "");
156 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, modules_count, CTLFLAG_RD, &ksancov_od_modules_count, 0, "");
157 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, enabled_count, CTLFLAG_RD, &ksancov_od_enabled_count, 0, "");
158 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, support_enabled, CTLFLAG_RD, &ksancov_od_support_enabled, 0, "");
159 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, logging_enabled, CTLFLAG_RW, &ksancov_od_logging_enabled, 0, "");
160
161 #define ksancov_od_log(...) do { \
162 if (ksancov_od_logging_enabled) { \
163 os_log_debug(OS_LOG_DEFAULT, __VA_ARGS__); \
164 } \
165 } while (0)
166
/*
 * One-time startup initialization for ksancov.
 *
 * Reads the "ksancov.on_demand" boot-arg (which overrides the compiled-in
 * default config), pre-allocates the on-demand module bookkeeping arrays
 * when support is enabled (Z_WAITOK_ZERO_NOFAIL: allocation cannot fail),
 * and mirrors the config bits into the sysctl-visible flags.  Finally
 * sets ksancov_initialized so module registrations are accepted from
 * here on (see kcov_ksancov_bookmark_on_demand_module).
 */
__startup_func
void
ksancov_init(void)
{
	unsigned arg;

	/* handle ksancov boot-args */
	if (PE_parse_boot_argn("ksancov.on_demand", &arg, sizeof(arg))) {
		ksancov_od_config = (ksancov_od_config_t)arg;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_SUPPORT) {
		/* enable the runtime support for on-demand instrumentation */
		ksancov_od_support_enabled = 1;
		ksancov_od_allocated_count = 64;
		ksancov_od_module_entries = kalloc_type_tag(struct ksancov_od_module_entry,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
		ksancov_od_module_handles = kalloc_type_tag(struct ksancov_od_module_handle,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	} else {
		ksancov_od_support_enabled = 0;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_LOGGING) {
		ksancov_od_logging_enabled = 1;
	} else {
		ksancov_od_logging_enabled = 0;
	}

	ksancov_initialized = true;
}
198
199 /*
200 * Coverage sanitizer per-thread routines.
201 */
202
/*
 * Initialize per-thread sanitizer data for each new kernel thread.
 * A fresh thread starts with no ksancov device attached, i.e. coverage
 * recording is disabled until KSANCOV_IOC_START attaches a device.
 */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	*dev = NULL;
}
209
210
211 #define GUARD_SEEN (uint32_t)0x80000000
212 #define GUARD_IDX_MASK (uint32_t)0x0fffffff
213
214 static void
trace_pc_guard_pcs(struct ksancov_dev * dev,uintptr_t pc)215 trace_pc_guard_pcs(struct ksancov_dev *dev, uintptr_t pc)
216 {
217 if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
218 return; /* overflow */
219 }
220
221 uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
222 if (__improbable(idx >= dev->maxpcs)) {
223 return;
224 }
225
226 ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
227 entries[idx] = pc;
228 }
229
#if CONFIG_STKSZ
/*
 * Append a (pc, stksize) pair to the trace buffer (KS_MODE_STKSIZE).
 * Same lock-free reservation scheme as trace_pc_guard_pcs: pre-check,
 * atomically claim a slot, then re-check before writing.
 */
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uintptr_t pc, uint32_t stksize)
{
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	/*
	 * A compound literal is required here: assigning a bare brace-enclosed
	 * initializer list to an lvalue is C++ syntax, not valid C.
	 */
	entries[idx] = (ksancov_trace_stksize_ent_t){
		.pc = pc,
		.stksize = stksize
	};
}
#endif
250
251 static void
trace_pc_guard_counter(struct ksancov_dev * dev,uint32_t * guardp)252 trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
253 {
254 size_t idx = *guardp & GUARD_IDX_MASK;
255 ksancov_counters_t *counters = dev->counters;
256
257 /* saturating 8bit add */
258 if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
259 counters->kc_hits[idx]++;
260 }
261 }
262
/*
 * Compiler guard callback: the first time a guard fires, record the
 * mapping from its edge index to the (unslid) caller PC in the shared
 * edge map, and mark the guard as seen.  Runs on every instrumented
 * edge, so the common paths must bail out as cheaply as possible.
 */
void
kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
{
	/*
	 * Return as early as possible if we haven't had a chance to
	 * create the edge map yet.
	 *
	 * Note: this will also protect us from performing unnecessary
	 * operations (especially during early boot) which may result
	 * in increased maintenance burden for the instrumentation (see
	 * the comment about VM_KERNEL_UNSLIDE below).
	 */
	if (__probable(ksancov_edgemap == NULL)) {
		return;
	}

	if (guardp == NULL) {
		return;
	}

	/*
	 * Since this code was originally introduced, VM_KERNEL_UNSLIDE
	 * evolved significantly, and it now expands to a series of
	 * function calls that check whether the address is slid, mask
	 * off tags and ultimately unslide the pointer.
	 *
	 * Therefore we need to make sure that we do not instrument any function
	 * in the closure of VM_KERNEL_UNSLIDE: this would cause a loop where the
	 * instrumentation callbacks end up calling into instrumented code.
	 *
	 */
	/* caller is a return address; -1 backs it up into the call instruction */
	uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);

	/* record the PC once per guard; GUARD_SEEN prevents repeat writes */
	uint32_t gd = *guardp;
	if (__improbable(gd && !(gd & GUARD_SEEN))) {
		size_t idx = gd & GUARD_IDX_MASK;
		if (idx < ksancov_edgemap->ke_nedges) {
			ksancov_edgemap->ke_addrs[idx] = pc;
			*guardp |= GUARD_SEEN;
		}
	}
}
305
/*
 * Per-edge tracing callback: record a coverage event for the current
 * thread, dispatching on the attached device's mode.
 *
 * `data` is the per-thread kcov state, `guardp` the compiler guard for
 * this edge, `caller` the (slid) return address of the instrumented
 * call site, `sp` the stack pointer (unused here).  No-op unless a
 * device is attached to this thread and recording was enabled via the
 * shared header (kh_enabled).
 */
void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	/* caller is a return address; -1 backs it up into the call instruction */
	uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
	ksancov_dev_t dev = data->ktd_device;

	/* Check that we have coverage recording enabled for a thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Coverage sanitizer is disabled in the code called below. This allows calling back to the kernel without
	 * the risk of killing machine with recursive calls.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as no-op. It is not destructive for the kernel itself just
		 * coverage sanitiser will not record anything in such case.
		 */
		;
	}
}
346
347 void
kcov_ksancov_trace_pc_guard_init(uint32_t * start,uint32_t * stop)348 kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
349 {
350 const size_t orig_nedges = nedges;
351
352 /* assign a unique number to each guard */
353 for (uint32_t *cur = start; cur != stop; cur++) {
354 /* zero means that the guard has not been assigned */
355 if (*cur == 0) {
356 if (nedges < KSANCOV_MAX_EDGES) {
357 *cur = (uint32_t)++nedges;
358 }
359 }
360 }
361
362 /* only invoke kcov_ksancov_bookmark_on_demand_module if we assigned new guards */
363 if (nedges > orig_nedges) {
364 kcov_ksancov_bookmark_on_demand_module(start, stop);
365 }
366 }
367
/*
 * Compiler callback for the emitted PC table: an array of (pc, flags)
 * pairs, one per instrumented edge.  Support is compiled out by default
 * (USE_PC_TABLE == 0), making this a no-op.
 */
void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	/* unused bit in the flags word, repurposed as a "visited" marker */
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		/*
		 * NOTE(review): `edge_addrs` is not defined in this file; this
		 * branch is currently compiled out — confirm the symbol exists
		 * before enabling USE_PC_TABLE.
		 */
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}
395
/*
 * Record a newly instrumented module (kext) for on-demand coverage.
 *
 * Looks up the loaded-kext summary for the module containing `start`,
 * requires the module to carry a __DATA,__sancov_gate section (the
 * 64-bit gate the inline instrumentation checks before calling into the
 * runtime), and appends an entry/handle pair to the bookkeeping arrays,
 * growing them by 50% under ksancov_od_lck when full.  Drops the module
 * silently (with optional logging) if support is off, ksancov_init has
 * not run yet, the edge budget is exhausted, or there is no gate.
 */
static void
kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop)
{
	OSKextLoadedKextSummary summary = {};
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	uint64_t *gate_section = NULL;
	unsigned long gate_sz = 0;
	uint32_t idx = 0;

	if (!ksancov_od_support_enabled) {
		return;
	}

	if (OSKextGetLoadedKextSummaryForAddress(start, &summary) != KERN_SUCCESS) {
		return;
	}

	if (!ksancov_initialized) {
		ksancov_od_log("ksancov: Dropping %s pre-initialization\n", summary.name);
		return;
	}

	if (nedges >= KSANCOV_MAX_EDGES) {
		ksancov_od_log("ksancov: Dropping %s: maximum number of edges reached\n",
		    summary.name);
		return;
	}

	/*
	 * The __DATA,__sancov_gate section is where the compiler stores the 64-bit
	 * global variable that is used by the inline instrumentation to decide
	 * whether it should call into the runtime or not.
	 */
	gate_section = getsectdatafromheader((kernel_mach_header_t *)summary.address,
	    "__DATA", "__sancov_gate", &gate_sz);
	if (gate_sz == 0) {
		ksancov_od_log("ksancov: Dropping %s: not instrumented with gated callbacks\n",
		    summary.name);
		return;
	}

	lck_mtx_lock(&ksancov_od_lck);

	/* reallocate the bookkeeping structures if needed */
	if (ksancov_od_modules_count >= ksancov_od_allocated_count) {
		unsigned int old_ksancov_od_allocated_count = ksancov_od_allocated_count;
		ksancov_od_allocated_count += (ksancov_od_allocated_count / 2);

		ksancov_od_log("ksancov: Reallocating entries: %u -> %u\n",
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count);

		/*
		 * NOTE(review): Z_REALLOCF frees the old allocation on failure,
		 * in which case krealloc_type_tag returns NULL and the stores
		 * below would dereference it.  Confirm whether allocation
		 * failure is possible/tolerable here or a NULL check is needed.
		 */
		ksancov_od_module_entries = krealloc_type_tag(struct ksancov_od_module_entry,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_entries, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);

		ksancov_od_module_handles = krealloc_type_tag(struct ksancov_od_module_handle,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_handles, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);
	}

	/* this is the index of the entry we're going to fill in both arrays */
	idx = ksancov_od_modules_count++;

	entry = &ksancov_od_module_entries[idx];
	handle = &ksancov_od_module_handles[idx];

	handle->start = start;
	handle->stop = stop;
	handle->gate = gate_section;

	strlcpy(entry->bundle, summary.name, sizeof(entry->bundle));
	entry->idx = (uint32_t)idx;

	ksancov_od_log("ksancov: Bookmarked module %s (0x%lx - 0x%lx, %lu guards) [idx: %u]\n",
	    entry->bundle, (uintptr_t)handle->start, (uintptr_t)handle->stop,
	    handle->stop - handle->start, entry->idx);
	lck_mtx_unlock(&ksancov_od_lck);
}
478
479 /*
480 * Coverage sanitizer pseudo-device code.
481 */
482
483 static ksancov_dev_t
create_dev(dev_t dev)484 create_dev(dev_t dev)
485 {
486 ksancov_dev_t d;
487
488 d = kalloc_type_tag(struct ksancov_dev, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
489 d->mode = KS_MODE_NONE;
490 d->maxpcs = KSANCOV_MAX_PCS;
491 d->dev = dev;
492 lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);
493
494 return d;
495 }
496
497 static void
free_dev(ksancov_dev_t d)498 free_dev(ksancov_dev_t d)
499 {
500 if (d->mode == KS_MODE_TRACE && d->trace) {
501 kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
502 } else if (d->mode == KS_MODE_COUNTERS && d->counters) {
503 kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
504 }
505 lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
506 kfree_type(struct ksancov_dev, d);
507 }
508
/*
 * Share the kernel buffer [base, base+sz) into the calling task's
 * address space with protection `prot`.  Returns the user-visible
 * address, or NULL on failure.
 */
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	/* wrap the kernel pages in a memory entry we can hand to vm_map */
	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	/* map it anywhere in the current task, shared with the kernel copy */
	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
552
553 /*
554 * map the sancov buffer into the current process
555 */
556 static int
ksancov_map(ksancov_dev_t d,uintptr_t * bufp,size_t * sizep)557 ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
558 {
559 uintptr_t addr;
560 size_t size = d->sz;
561
562 switch (d->mode) {
563 case KS_MODE_STKSIZE:
564 case KS_MODE_TRACE:
565 if (!d->trace) {
566 return EINVAL;
567 }
568 addr = (uintptr_t)d->trace;
569 break;
570 case KS_MODE_COUNTERS:
571 if (!d->counters) {
572 return EINVAL;
573 }
574 addr = (uintptr_t)d->counters;
575 break;
576 default:
577 return EINVAL; /* not configured */
578 }
579
580 void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
581 if (buf == NULL) {
582 return ENOMEM;
583 }
584
585 *bufp = (uintptr_t)buf;
586 *sizep = size;
587
588 return 0;
589 }
590
591 /*
592 * map the edge -> pc mapping as read-only
593 */
594 static int
ksancov_map_edgemap(uintptr_t * bufp,size_t * sizep)595 ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
596 {
597 uintptr_t addr;
598 size_t size;
599
600 if (ksancov_edgemap == NULL) {
601 return EINVAL;
602 }
603
604 addr = (uintptr_t)ksancov_edgemap;
605 size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uintptr_t);
606
607 void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
608 if (buf == NULL) {
609 return ENOMEM;
610 }
611
612 *bufp = (uintptr_t)buf;
613 *sizep = size;
614 return 0;
615 }
616
617 /*
618 * Device node management
619 */
620
/*
 * open(2) handler: claim the minor number by allocating a fresh device
 * instance, and lazily allocate the global edge map on first open (by
 * then all boot-time modules have registered their guards, so nedges is
 * final for the permanent allocation).  One open per minor at a time.
 */
static int
ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);

	/* one concurrent open per device node */
	if (ksancov_devs[minor_num]) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return EBUSY;
	}

	ksancov_dev_t d = create_dev(dev);
	if (!d) {
		lck_rw_unlock_exclusive(&ksancov_devs_lck);
		return ENOMEM;
	}
	ksancov_devs[minor_num] = d;

	/* first open ever: allocate the shared guard-index -> PC map */
	if (ksancov_edgemap == NULL) {
		uintptr_t buf;
		size_t sz = sizeof(struct ksancov_edgemap) + nedges * sizeof(uintptr_t);

		kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
		    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
		if (kr) {
			printf("ksancov: failed to allocate edge addr map\n");
			lck_rw_unlock_exclusive(&ksancov_devs_lck);
			return ENOMEM;
		}

		ksancov_edgemap = (void *)buf;
		ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
		ksancov_edgemap->ke_nedges = (uint32_t)nedges;
	}

	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	return 0;
}
666
667 static int
ksancov_trace_alloc(ksancov_dev_t d,ksancov_mode_t mode,size_t maxpcs)668 ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
669 {
670 if (d->mode != KS_MODE_NONE) {
671 return EBUSY; /* trace/counters already created */
672 }
673 assert(d->trace == NULL);
674
675 uintptr_t buf;
676 size_t sz;
677
678 if (mode == KS_MODE_TRACE) {
679 if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
680 sizeof(struct ksancov_trace), &sz)) {
681 return EINVAL;
682 }
683 } else if (mode == KS_MODE_STKSIZE) {
684 if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
685 sizeof(struct ksancov_trace), &sz)) {
686 return EINVAL;
687 }
688 } else {
689 return EINVAL;
690 }
691
692 /* allocate the shared memory buffer */
693 kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
694 VM_KERN_MEMORY_DIAG);
695 if (kr != KERN_SUCCESS) {
696 return ENOMEM;
697 }
698
699 struct ksancov_trace *trace = (struct ksancov_trace *)buf;
700 trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
701 os_atomic_init(&trace->kt_head, 0);
702 os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
703 trace->kt_maxent = (uint32_t)maxpcs;
704
705 d->trace = trace;
706 d->sz = sz;
707 d->maxpcs = maxpcs;
708 d->mode = mode;
709
710 return 0;
711 }
712
713 static int
ksancov_counters_alloc(ksancov_dev_t d)714 ksancov_counters_alloc(ksancov_dev_t d)
715 {
716 if (d->mode != KS_MODE_NONE) {
717 return EBUSY; /* trace/counters already created */
718 }
719 assert(d->counters == NULL);
720
721 uintptr_t buf;
722 size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);
723
724 /* allocate the shared memory buffer */
725 kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
726 VM_KERN_MEMORY_DIAG);
727 if (kr != KERN_SUCCESS) {
728 return ENOMEM;
729 }
730
731 ksancov_counters_t *counters = (ksancov_counters_t *)buf;
732 counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
733 counters->kc_nedges = ksancov_edgemap->ke_nedges;
734 os_atomic_init(&counters->kc_hdr.kh_enabled, 0);
735
736 d->counters = counters;
737 d->sz = sz;
738 d->mode = KS_MODE_COUNTERS;
739
740 return 0;
741 }
742
/*
 * attach a thread to a ksancov dev instance
 *
 * Only the calling thread may attach itself (th must be
 * current_thread()).  If the device is already attached to another
 * thread, that thread is detached first.  On success the device becomes
 * the thread's coverage sink, the global hook counter is bumped, and
 * the kcov hooks are enabled.  Called with d->lock held (ioctl path).
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	/* steal the device from any previously attached thread */
	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	/* hold a thread reference for as long as we stay attached */
	d->thread = th;
	thread_reference(d->thread);

	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}
776
777 extern void
778 thread_wait(
779 thread_t thread,
780 boolean_t until_not_runnable);
781
782
/*
 * disconnect thread from ksancov dev
 *
 * Clears the thread's device pointer so the instrumentation hooks stop
 * seeing the device; when detaching a remote thread, waits until it is
 * not runnable so no hook can still be touching the device.  Then drops
 * the global hook counter and the thread reference taken at attach.
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}
814
/*
 * close(2) handler: remove the device from the minor table (making it
 * undiscoverable), stop any in-progress tracing, detach the attached
 * thread if any, and free the device and its buffers.
 */
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here as there is and will be no one having its
	 * reference except for this thread and the one which is going to
	 * be detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}
849
/*
 * Self-test target for coverage-guided fuzzing: panics only when every
 * nibble of `guess` matches the hard-coded constant.  The deliberately
 * nested if-chain gives the instrumentation one new edge per matching
 * nibble, letting a fuzzer discover the constant incrementally.  Do not
 * "simplify" this into a loop or a single comparison — the branch
 * structure is the point.  `guess` is volatile to keep the compiler
 * from folding the comparisons.
 */
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

/* true iff nibble n of guess matches nibble n of tgt */
#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}
891
/*
 * Handle a KSANCOV_IOC_ON_DEMAND request: look up the module named in
 * the message and, under ksancov_od_lck, get/set its instrumentation
 * gate or report the range of guard IDs assigned to it.  Returns EINVAL
 * for an unknown module, a gate-less handle, or an unknown operation.
 */
static int
ksancov_handle_on_demand_cmd(struct ksancov_on_demand_msg *kmsg)
{
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	ksancov_on_demand_operation_t op = kmsg->operation;
	int ret = 0;

	lck_mtx_lock(&ksancov_od_lck);

	/* find the entry/handle to the module */
	for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
		entry = &ksancov_od_module_entries[idx];
		if (strncmp(entry->bundle, kmsg->bundle, sizeof(entry->bundle)) == 0) {
			handle = &ksancov_od_module_handles[idx];
			break;
		}
	}

	if (handle == NULL) {
		ksancov_od_log("ksancov: Could not find module '%s'\n", kmsg->bundle);
		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	switch (op) {
	case KS_OD_GET_GATE:
		/* Get whether on-demand instrumentation is enabled in a given module */
		if (handle->gate) {
			kmsg->gate = *handle->gate;
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_SET_GATE:
		/* Toggle callback invocation for a given module */
		if (handle->gate) {
			ksancov_od_log("ksancov: Setting gate for '%s': %llu\n",
			    kmsg->bundle, kmsg->gate);
			/* keep the enabled-modules count in sync with gate transitions */
			if (kmsg->gate != *handle->gate) {
				if (kmsg->gate) {
					ksancov_od_enabled_count++;
				} else {
					ksancov_od_enabled_count--;
				}
				*handle->gate = kmsg->gate;
			}
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_GET_RANGE:
		/*
		 * Get which range of the guards table covers the given module.
		 * Note: these are the guard IDs stored in the first and last
		 * guard slots, not addresses.
		 */
		ksancov_od_log("ksancov: Range for '%s': %u, %u\n",
		    kmsg->bundle, *handle->start, *(handle->stop - 1));
		kmsg->range.start = *handle->start;
		kmsg->range.stop = *(handle->stop - 1);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_mtx_unlock(&ksancov_od_lck);
	return ret;
}
958
/*
 * ioctl(2) handler: dispatch device configuration and control commands.
 * The devices table is held shared for the whole call so the device
 * cannot be closed underneath us; per-device mutations additionally
 * take the per-device mutex.
 */
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL; /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		/* configure PC tracing (with or without stack sizes) */
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		/* map this device's buffer into the caller's address space */
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		/* the edge map is global and immutable once created: no d->lock */
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		/* attach the calling thread and enable the hooks */
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_ON_DEMAND:
		ret = ksancov_handle_on_demand_cmd((struct ksancov_on_demand_msg *)data);
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}
1027
1028 static int
ksancov_dev_clone(dev_t dev,int action)1029 ksancov_dev_clone(dev_t dev, int action)
1030 {
1031 #pragma unused(dev)
1032 if (action == DEVFS_CLONE_ALLOC) {
1033 for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
1034 if (ksancov_devs[i] == NULL) {
1035 return i;
1036 }
1037 }
1038 } else if (action == DEVFS_CLONE_FREE) {
1039 return 0;
1040 }
1041
1042 return -1;
1043 }
1044
/*
 * Character device switch for the ksancov device node.  Only open,
 * close and ioctl are implemented; all other entry points return
 * errors via the eno_* stubs.
 */
static const struct cdevsw
ksancov_cdev = {
	.d_open = ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};
1060
1061 int
ksancov_init_dev(void)1062 ksancov_init_dev(void)
1063 {
1064 dev_major = cdevsw_add(-1, &ksancov_cdev);
1065 if (dev_major < 0) {
1066 printf("ksancov: failed to allocate major device node\n");
1067 return -1;
1068 }
1069
1070 dev_t dev = makedev(dev_major, 0);
1071 void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
1072 ksancov_dev_clone, KSANCOV_DEVNODE);
1073 if (!node) {
1074 printf("ksancov: failed to create device node\n");
1075 return -1;
1076 }
1077
1078 return 0;
1079 }
1080