xref: /xnu-10002.41.9/san/coverage/kcov_ksancov.c (revision 699cd48037512bf4380799317ca44ca453c82f57)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
#include <stdint.h>
#include <string.h>
#include <stdbool.h>
31 
32 #include <kern/assert.h>
33 #include <kern/cpu_data.h>
34 #include <kern/debug.h>
35 #include <kern/locks.h>
36 #include <kern/kalloc.h>
37 #include <kern/startup.h>
38 #include <kern/task.h>
39 #include <kern/thread.h>
40 #include <kern/zalloc.h>
41 
42 #include <vm/vm_kern.h>
43 #include <vm/vm_protos.h>
44 #include <vm/pmap.h>
45 
46 #include <mach/mach_vm.h>
47 #include <mach/mach_types.h>
48 #include <mach/mach_port.h>
49 #include <mach/vm_map.h>
50 #include <mach/vm_param.h>
51 #include <mach/machine/vm_param.h>
52 
53 #include <sys/stat.h> /* dev_t */
54 #include <miscfs/devfs/devfs.h> /* must come after sys/stat.h */
55 #include <sys/conf.h> /* must come after sys/stat.h */
56 #include <sys/sysctl.h>
57 
58 #include <pexpert/pexpert.h> /* PE_parse_boot_argn */
59 
60 #include <libkern/libkern.h>
61 #include <libkern/OSKextLibPrivate.h>
62 #include <libkern/kernel_mach_header.h>
63 #include <os/atomic_private.h>
64 #include <os/log.h>
65 #include <os/overflow.h>
66 
67 #include <san/kcov_data.h>
68 #include <san/kcov_ksancov.h>
69 
70 /* header mess... */
71 struct uthread;
72 typedef struct uthread * uthread_t;
73 
74 #include <sys/sysproto.h>
75 #include <sys/queue.h>
76 #include <sys/sysctl.h>
77 #include <sys/kdebug.h>
78 
79 #define USE_PC_TABLE 0
80 #define KSANCOV_MAX_DEV 64
81 #define KSANCOV_MAX_PCS (1024U * 64)  /* default to 256k buffer => 64k pcs */
82 
83 extern boolean_t ml_at_interrupt_context(void);
84 extern boolean_t ml_get_interrupts_enabled(void);
85 
86 static void ksancov_detach(ksancov_dev_t);
87 
88 static int dev_major;
89 static size_t nedges = 0;
90 static uint32_t __unused npcs = 0;
91 
92 static LCK_GRP_DECLARE(ksancov_lck_grp, "ksancov_lck_grp");
93 static LCK_RW_DECLARE(ksancov_devs_lck, &ksancov_lck_grp);
94 
95 /* array of devices indexed by devnode minor */
96 static ksancov_dev_t ksancov_devs[KSANCOV_MAX_DEV];
97 static struct ksancov_edgemap *ksancov_edgemap;
98 
99 /* Global flag that enables the sanitizer hook. */
100 static _Atomic unsigned int ksancov_enabled = 0;
101 
102 /* Toggled after ksancov_init() */
103 static boolean_t ksancov_initialized = false;
104 
105 
106 /* Support for gated callbacks (referred to as "on demand", "od") */
107 static void kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop);
108 
109 static LCK_MTX_DECLARE(ksancov_od_lck, &ksancov_lck_grp);
110 
111 /* Bookkeeping structures for gated sancov instrumentation */
112 struct ksancov_od_module_entry {
113 	char     bundle[KMOD_MAX_NAME]; /* module bundle */
114 	uint32_t idx; /* index into entries/handles arrays */
115 };
116 
117 struct ksancov_od_module_handle {
118 	uint32_t *start; /* guards boundaries */
119 	uint32_t *stop;
120 	uint64_t *gate; /* pointer to __DATA,__sancov_gate*/
121 };
122 
123 static struct ksancov_od_module_entry *ksancov_od_module_entries = NULL;
124 static struct ksancov_od_module_handle *ksancov_od_module_handles = NULL;
125 
126 /* number of entries/handles allocated */
127 static unsigned int ksancov_od_allocated_count = 0;
128 
129 /* number of registered modules */
130 static unsigned int ksancov_od_modules_count = 0;
131 /* number of modules whose callbacks are currently enabled */
132 static unsigned int ksancov_od_enabled_count = 0;
133 
134 /* Valid values for ksancov.on_demand= boot-arg */
135 #define KSANCOV_OD_SUPPORT  0x0010 // Enable runtime support
136 #define KSANCOV_OD_LOGGING  0x0020 // Enable logging (via os_log)
137 
138 __options_decl(ksancov_od_config_t, uint32_t, {
139 	KSANCOV_OD_NONE = 0,
140 	KSANCOV_OD_ENABLE_SUPPORT = 0x0010,
141 	KSANCOV_OD_ENABLE_LOGGING = 0x0020,
142 });
143 
144 /* configurable at boot; enabled by default */
145 static ksancov_od_config_t ksancov_od_config = KSANCOV_OD_ENABLE_SUPPORT;
146 
147 static unsigned ksancov_od_support_enabled = 1;
148 static unsigned ksancov_od_logging_enabled = 0;
149 
150 SYSCTL_DECL(_kern_kcov);
151 SYSCTL_ULONG(_kern_kcov, OID_AUTO, nedges, CTLFLAG_RD, &nedges, "");
152 
153 SYSCTL_NODE(_kern_kcov, OID_AUTO, od, CTLFLAG_RD, 0, "od");
154 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, config, CTLFLAG_RD, &ksancov_od_config, 0, "");
155 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, allocated_entries, CTLFLAG_RD, &ksancov_od_allocated_count, 0, "");
156 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, modules_count, CTLFLAG_RD, &ksancov_od_modules_count, 0, "");
157 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, enabled_count, CTLFLAG_RD, &ksancov_od_enabled_count, 0, "");
158 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, support_enabled, CTLFLAG_RD, &ksancov_od_support_enabled, 0, "");
159 SYSCTL_UINT(_kern_kcov_od, OID_AUTO, logging_enabled, CTLFLAG_RW, &ksancov_od_logging_enabled, 0, "");
160 
161 #define ksancov_od_log(...) do { \
162 	        if (ksancov_od_logging_enabled) { \
163 	                os_log_debug(OS_LOG_DEFAULT, __VA_ARGS__); \
164 	        } \
165 	} while (0)
166 
/*
 * Early-boot initialization: parse the ksancov.on_demand boot-arg and
 * pre-allocate the bookkeeping arrays for gated ("on demand") modules.
 */
__startup_func
void
ksancov_init(void)
{
	unsigned arg;

	/* handle ksancov boot-args */
	if (PE_parse_boot_argn("ksancov.on_demand", &arg, sizeof(arg))) {
		ksancov_od_config = (ksancov_od_config_t)arg;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_SUPPORT) {
		/* enable the runtime support for on-demand instrumentation */
		ksancov_od_support_enabled = 1;
		/* initial capacity; grown later by kcov_ksancov_bookmark_on_demand_module() */
		ksancov_od_allocated_count = 64;
		ksancov_od_module_entries = kalloc_type_tag(struct ksancov_od_module_entry,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
		ksancov_od_module_handles = kalloc_type_tag(struct ksancov_od_module_handle,
		    ksancov_od_allocated_count, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
	} else {
		ksancov_od_support_enabled = 0;
	}

	if (ksancov_od_config & KSANCOV_OD_ENABLE_LOGGING) {
		ksancov_od_logging_enabled = 1;
	} else {
		ksancov_od_logging_enabled = 0;
	}

	/* gates module bookmarking until the arrays above exist */
	ksancov_initialized = true;
}
198 
199 /*
200  * Coverage sanitizer per-thread routines.
201  */
202 
/* Initialize per-thread sanitizer data for each new kernel thread. */
void
kcov_ksancov_init_thread(ksancov_dev_t *dev)
{
	/* new threads start detached from any ksancov device */
	*dev = NULL;
}
209 
210 
211 #define GUARD_SEEN     (uint32_t)0x80000000
212 #define GUARD_IDX_MASK (uint32_t)0x0fffffff
213 
/*
 * Append a visited pc to the device's trace buffer (KS_MODE_TRACE).
 * Lock-free: concurrent writers race on kt_head with relaxed atomics,
 * and entries past maxpcs are silently dropped (full buffer == stop).
 */
static void
trace_pc_guard_pcs(struct ksancov_dev *dev, uintptr_t pc)
{
	/* cheap pre-check so a full buffer does not keep bumping kt_head */
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	/* claim a slot; a racing writer may still push us past the end... */
	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return; /* ...so re-validate the claimed index */
	}

	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)dev->trace->kt_entries;
	entries[idx] = pc;
}
229 
#if CONFIG_STKSZ
/*
 * Append a (pc, stack size) entry to the device's trace buffer
 * (KS_MODE_STKSIZE). Same lock-free slot-claiming scheme as
 * trace_pc_guard_pcs(): overflowing entries are silently dropped.
 */
static void
trace_pc_guard_pcs_stk(struct ksancov_dev *dev, uintptr_t pc, uint32_t stksize)
{
	/* cheap pre-check so a full buffer does not keep bumping kt_head */
	if (os_atomic_load(&dev->trace->kt_head, relaxed) >= dev->maxpcs) {
		return; /* overflow */
	}

	/* claim a slot, then re-validate it in case a racer overshot */
	uint32_t idx = os_atomic_inc_orig(&dev->trace->kt_head, relaxed);
	if (__improbable(idx >= dev->maxpcs)) {
		return;
	}

	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)dev->trace->kt_entries;
	/*
	 * Struct assignment requires a compound literal with an explicit type
	 * (C99 6.5.2.5); a bare brace initializer here is not valid C.
	 */
	entries[idx] = (ksancov_trace_stksize_ent_t){
		.pc = pc,
		.stksize = stksize
	};
}
#endif
250 
/*
 * Bump the per-edge hit counter for this guard (KS_MODE_COUNTERS).
 * Counters are 8-bit and saturate at KSANCOV_MAX_HITS.
 *
 * NOTE(review): idx is only masked with GUARD_IDX_MASK, not bounds-checked
 * against counters->kc_nedges; a guard assigned after the counters buffer
 * was sized (e.g. by a late-loading kext) would presumably index past the
 * buffer — verify against the guard-assignment path.
 */
static void
trace_pc_guard_counter(struct ksancov_dev *dev, uint32_t *guardp)
{
	size_t idx = *guardp & GUARD_IDX_MASK;
	ksancov_counters_t *counters = dev->counters;

	/* saturating 8bit add */
	if (counters->kc_hits[idx] < KSANCOV_MAX_HITS) {
		counters->kc_hits[idx]++;
	}
}
262 
/*
 * Record the unslid pc for a guard the first time that guard fires,
 * populating the global edge -> pc map. Called from the sanitizer hook.
 */
void
kcov_ksancov_trace_guard(uint32_t *guardp, void *caller)
{
	/* -1 to point into the call instruction rather than the return site */
	uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);

	if (guardp == NULL) {
		return;
	}

	/* the edge map is only allocated once a ksancov device has been opened */
	if (__probable(ksancov_edgemap == NULL)) {
		return;
	}

	uint32_t gd = *guardp;
	/* first sighting of this guard: store its pc and mark it seen */
	if (__improbable(gd && !(gd & GUARD_SEEN))) {
		size_t idx = gd & GUARD_IDX_MASK;
		if (idx < ksancov_edgemap->ke_nedges) {
			ksancov_edgemap->ke_addrs[idx] = pc;
			*guardp |= GUARD_SEEN;
		}
	}
}
285 
/*
 * Main sanitizer coverage hook: record coverage for the current thread if
 * it is attached to a ksancov device and userspace has enabled tracing.
 */
void
kcov_ksancov_trace_pc(kcov_thread_data_t *data, uint32_t *guardp, void *caller, uintptr_t sp)
{
#pragma unused(sp)
	/* -1 to land inside the call instruction rather than at the return site */
	uintptr_t pc = (uintptr_t)(VM_KERNEL_UNSLIDE(caller) - 1);
	ksancov_dev_t dev = data->ktd_device;

	/* Check that we have coverage recording enabled for a thread. */
	if (__probable(dev == NULL)) {
		return;
	}

	/* userspace toggles kh_enabled in the shared header to start/stop tracing */
	if (os_atomic_load(&dev->hdr->kh_enabled, relaxed) == 0) {
		return;
	}

	/*
	 * Coverage sanitizer is disabled in the code called below. This allows calling back to the kernel without
	 * the risk of killing machine with recursive calls.
	 */
	switch (dev->mode) {
	case KS_MODE_TRACE:
		trace_pc_guard_pcs(dev, pc);
		break;
#if CONFIG_STKSZ
	case KS_MODE_STKSIZE:
		trace_pc_guard_pcs_stk(dev, pc, data->ktd_stksz.kst_stksz);
		break;
#endif
	case KS_MODE_COUNTERS:
		trace_pc_guard_counter(dev, guardp);
		break;
	default:
		/*
		 * Treat all unsupported tracing modes as no-op. It is not destructive for the kernel itself just
		 * coverage sanitiser will not record anything in such case.
		 */
		;
	}
}
326 
327 void
kcov_ksancov_trace_pc_guard_init(uint32_t * start,uint32_t * stop)328 kcov_ksancov_trace_pc_guard_init(uint32_t *start, uint32_t *stop)
329 {
330 	const size_t orig_nedges = nedges;
331 
332 	/* assign a unique number to each guard */
333 	for (uint32_t *cur = start; cur != stop; cur++) {
334 		/* zero means that the guard has not been assigned */
335 		if (*cur == 0) {
336 			if (nedges < KSANCOV_MAX_EDGES) {
337 				*cur = (uint32_t)++nedges;
338 			}
339 		}
340 	}
341 
342 	/* only invoke kcov_ksancov_bookmark_on_demand_module if we assigned new guards */
343 	if (nedges > orig_nedges) {
344 		kcov_ksancov_bookmark_on_demand_module(start, stop);
345 	}
346 }
347 
/*
 * Sanitizer pc-table hook (-fsanitize-coverage pc-table). Compiled out by
 * default (USE_PC_TABLE == 0), in which case this is a no-op.
 */
void
kcov_ksancov_pcs_init(uintptr_t *start, uintptr_t *stop)
{
#if USE_PC_TABLE
	/* the table is (pc, flags) pairs, hence the stride of 2 */
	static const uintptr_t pc_table_seen_flag = 0x100;

	for (; start < stop; start += 2) {
		uintptr_t pc = start[0];
		uintptr_t flags = start[1];

		/*
		 * This function gets called multiple times on the same range, so mark the
		 * ones we've seen using unused bits in the flags field.
		 */
		if (flags & pc_table_seen_flag) {
			continue;
		}

		start[1] |= pc_table_seen_flag;
		assert(npcs < KSANCOV_MAX_EDGES - 1);
		edge_addrs[++npcs] = pc;
	}
#else
	(void)start;
	(void)stop;
#endif
}
375 
/*
 * Record a newly initialized instrumented module so its gated ("on demand")
 * sancov callbacks can later be toggled via KSANCOV_IOC_ON_DEMAND.
 * Called from kcov_ksancov_trace_pc_guard_init() when new guards were issued.
 */
static void
kcov_ksancov_bookmark_on_demand_module(uint32_t *start, uint32_t *stop)
{
	OSKextLoadedKextSummary summary = {};
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	uint64_t *gate_section = NULL;
	unsigned long gate_sz = 0;
	uint32_t idx = 0;

	if (!ksancov_od_support_enabled) {
		return;
	}

	/* identify which loaded kext owns this guard range */
	if (OSKextGetLoadedKextSummaryForAddress(start, &summary) != KERN_SUCCESS) {
		return;
	}

	/* the bookkeeping arrays are only allocated by ksancov_init() */
	if (!ksancov_initialized) {
		ksancov_od_log("ksancov: Dropping %s pre-initialization\n", summary.name);
		return;
	}

	if (nedges >= KSANCOV_MAX_EDGES) {
		ksancov_od_log("ksancov: Dropping %s: maximum number of edges reached\n",
		    summary.name);
		return;
	}

	/*
	 * The __DATA,__sancov_gate section is where the compiler stores the 64-bit
	 * global variable that is used by the inline instrumentation to decide
	 * whether it should call into the runtime or not.
	 */
	gate_section = getsectdatafromheader((kernel_mach_header_t *)summary.address,
	    "__DATA", "__sancov_gate", &gate_sz);
	if (gate_sz == 0) {
		ksancov_od_log("ksancov: Dropping %s: not instrumented with gated callbacks\n",
		    summary.name);
		return;
	}

	lck_mtx_lock(&ksancov_od_lck);

	/* reallocate the bookkeeping structures if needed (grow by 50%) */
	if (ksancov_od_modules_count >= ksancov_od_allocated_count) {
		unsigned int old_ksancov_od_allocated_count = ksancov_od_allocated_count;
		ksancov_od_allocated_count += (ksancov_od_allocated_count / 2);

		ksancov_od_log("ksancov: Reallocating entries: %u -> %u\n",
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count);

		/*
		 * NOTE(review): krealloc_type_tag with Z_REALLOCF frees the old
		 * allocation and returns NULL on failure, and the result is not
		 * checked here — presumably acceptable because Z_WAITOK
		 * allocations of this size effectively never fail; verify.
		 */
		ksancov_od_module_entries = krealloc_type_tag(struct ksancov_od_module_entry,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_entries, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);

		ksancov_od_module_handles = krealloc_type_tag(struct ksancov_od_module_handle,
		    old_ksancov_od_allocated_count,
		    ksancov_od_allocated_count,
		    ksancov_od_module_handles, Z_WAITOK_ZERO | Z_REALLOCF, VM_KERN_MEMORY_DIAG);
	}

	/* this is the index of the entry we're going to fill in both arrays */
	idx = ksancov_od_modules_count++;

	entry = &ksancov_od_module_entries[idx];
	handle = &ksancov_od_module_handles[idx];

	handle->start = start;
	handle->stop = stop;
	handle->gate = gate_section;

	strlcpy(entry->bundle, summary.name, sizeof(entry->bundle));
	entry->idx = (uint32_t)idx;

	ksancov_od_log("ksancov: Bookmarked module %s (0x%lx - 0x%lx, %lu guards) [idx: %u]\n",
	    entry->bundle, (uintptr_t)handle->start, (uintptr_t)handle->stop,
	    handle->stop - handle->start, entry->idx);
	lck_mtx_unlock(&ksancov_od_lck);
}
458 
459 /*
460  * Coverage sanitizer pseudo-device code.
461  */
462 
463 static ksancov_dev_t
create_dev(dev_t dev)464 create_dev(dev_t dev)
465 {
466 	ksancov_dev_t d;
467 
468 	d = kalloc_type_tag(struct ksancov_dev, Z_WAITOK_ZERO_NOFAIL, VM_KERN_MEMORY_DIAG);
469 	d->mode = KS_MODE_NONE;
470 	d->maxpcs = KSANCOV_MAX_PCS;
471 	d->dev = dev;
472 	lck_mtx_init(&d->lock, &ksancov_lck_grp, LCK_ATTR_NULL);
473 
474 	return d;
475 }
476 
477 static void
free_dev(ksancov_dev_t d)478 free_dev(ksancov_dev_t d)
479 {
480 	if (d->mode == KS_MODE_TRACE && d->trace) {
481 		kmem_free(kernel_map, (uintptr_t)d->trace, d->sz);
482 	} else if (d->mode == KS_MODE_COUNTERS && d->counters) {
483 		kmem_free(kernel_map, (uintptr_t)d->counters, d->sz);
484 	}
485 	lck_mtx_destroy(&d->lock, &ksancov_lck_grp);
486 	kfree_type(struct ksancov_dev, d);
487 }
488 
/*
 * Share the kernel buffer [base, base+sz) with the calling user process.
 * Returns the user-space address on success, or NULL on failure.
 */
static void *
ksancov_do_map(uintptr_t base, size_t sz, vm_prot_t prot)
{
	kern_return_t kr;
	mach_port_t mem_entry = MACH_PORT_NULL;
	mach_vm_address_t user_addr = 0;
	memory_object_size_t size = sz;

	/* wrap the kernel range in a memory entry that vm_map can consume */
	kr = mach_make_memory_entry_64(kernel_map,
	    &size,
	    (mach_vm_offset_t)base,
	    MAP_MEM_VM_SHARE | prot,
	    &mem_entry,
	    MACH_PORT_NULL);
	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	/* map the entry anywhere into the caller's address space, shared */
	kr = mach_vm_map_kernel(get_task_map(current_task()),
	    &user_addr,
	    size,
	    0,
	    VM_MAP_KERNEL_FLAGS_ANYWHERE(),
	    mem_entry,
	    0,
	    FALSE,
	    prot,
	    prot,
	    VM_INHERIT_SHARE);

	/*
	 * At this point, either vm_map() has taken a reference on the memory entry
	 * and we can release our local reference, or the map failed and the entry
	 * needs to be freed.
	 */
	mach_memory_entry_port_release(mem_entry);

	if (kr != KERN_SUCCESS) {
		return NULL;
	}

	return (void *)user_addr;
}
532 
533 /*
534  * map the sancov buffer into the current process
535  */
536 static int
ksancov_map(ksancov_dev_t d,uintptr_t * bufp,size_t * sizep)537 ksancov_map(ksancov_dev_t d, uintptr_t *bufp, size_t *sizep)
538 {
539 	uintptr_t addr;
540 	size_t size = d->sz;
541 
542 	switch (d->mode) {
543 	case KS_MODE_STKSIZE:
544 	case KS_MODE_TRACE:
545 		if (!d->trace) {
546 			return EINVAL;
547 		}
548 		addr = (uintptr_t)d->trace;
549 		break;
550 	case KS_MODE_COUNTERS:
551 		if (!d->counters) {
552 			return EINVAL;
553 		}
554 		addr = (uintptr_t)d->counters;
555 		break;
556 	default:
557 		return EINVAL; /* not configured */
558 	}
559 
560 	void *buf = ksancov_do_map(addr, size, VM_PROT_READ | VM_PROT_WRITE);
561 	if (buf == NULL) {
562 		return ENOMEM;
563 	}
564 
565 	*bufp = (uintptr_t)buf;
566 	*sizep = size;
567 
568 	return 0;
569 }
570 
571 /*
572  * map the edge -> pc mapping as read-only
573  */
574 static int
ksancov_map_edgemap(uintptr_t * bufp,size_t * sizep)575 ksancov_map_edgemap(uintptr_t *bufp, size_t *sizep)
576 {
577 	uintptr_t addr;
578 	size_t size;
579 
580 	if (ksancov_edgemap == NULL) {
581 		return EINVAL;
582 	}
583 
584 	addr = (uintptr_t)ksancov_edgemap;
585 	size = sizeof(ksancov_edgemap_t) + ksancov_edgemap->ke_nedges * sizeof(uintptr_t);
586 
587 	void *buf = ksancov_do_map(addr, size, VM_PROT_READ);
588 	if (buf == NULL) {
589 		return ENOMEM;
590 	}
591 
592 	*bufp = (uintptr_t)buf;
593 	*sizep = size;
594 	return 0;
595 }
596 
597 /*
598  * Device node management
599  */
600 
601 static int
ksancov_open(dev_t dev,int flags,int devtype,proc_t p)602 ksancov_open(dev_t dev, int flags, int devtype, proc_t p)
603 {
604 #pragma unused(flags,devtype,p)
605 	const int minor_num = minor(dev);
606 
607 	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
608 		return ENXIO;
609 	}
610 
611 	lck_rw_lock_exclusive(&ksancov_devs_lck);
612 
613 	if (ksancov_devs[minor_num]) {
614 		lck_rw_unlock_exclusive(&ksancov_devs_lck);
615 		return EBUSY;
616 	}
617 
618 	ksancov_dev_t d = create_dev(dev);
619 	if (!d) {
620 		lck_rw_unlock_exclusive(&ksancov_devs_lck);
621 		return ENOMEM;
622 	}
623 	ksancov_devs[minor_num] = d;
624 
625 	if (ksancov_edgemap == NULL) {
626 		uintptr_t buf;
627 		size_t sz = sizeof(struct ksancov_edgemap) + nedges * sizeof(uintptr_t);
628 
629 		kern_return_t kr = kmem_alloc(kernel_map, &buf, sz,
630 		    KMA_DATA | KMA_ZERO | KMA_PERMANENT, VM_KERN_MEMORY_DIAG);
631 		if (kr) {
632 			printf("ksancov: failed to allocate edge addr map\n");
633 			lck_rw_unlock_exclusive(&ksancov_devs_lck);
634 			return ENOMEM;
635 		}
636 
637 		ksancov_edgemap = (void *)buf;
638 		ksancov_edgemap->ke_magic = KSANCOV_EDGEMAP_MAGIC;
639 		ksancov_edgemap->ke_nedges = (uint32_t)nedges;
640 	}
641 
642 	lck_rw_unlock_exclusive(&ksancov_devs_lck);
643 
644 	return 0;
645 }
646 
647 static int
ksancov_trace_alloc(ksancov_dev_t d,ksancov_mode_t mode,size_t maxpcs)648 ksancov_trace_alloc(ksancov_dev_t d, ksancov_mode_t mode, size_t maxpcs)
649 {
650 	if (d->mode != KS_MODE_NONE) {
651 		return EBUSY; /* trace/counters already created */
652 	}
653 	assert(d->trace == NULL);
654 
655 	uintptr_t buf;
656 	size_t sz;
657 
658 	if (mode == KS_MODE_TRACE) {
659 		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_pc_ent_t),
660 		    sizeof(struct ksancov_trace), &sz)) {
661 			return EINVAL;
662 		}
663 	} else if (mode == KS_MODE_STKSIZE) {
664 		if (os_mul_and_add_overflow(maxpcs, sizeof(ksancov_trace_stksize_ent_t),
665 		    sizeof(struct ksancov_trace), &sz)) {
666 			return EINVAL;
667 		}
668 	} else {
669 		return EINVAL;
670 	}
671 
672 	/* allocate the shared memory buffer */
673 	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
674 	    VM_KERN_MEMORY_DIAG);
675 	if (kr != KERN_SUCCESS) {
676 		return ENOMEM;
677 	}
678 
679 	struct ksancov_trace *trace = (struct ksancov_trace *)buf;
680 	trace->kt_hdr.kh_magic = (mode == KS_MODE_TRACE) ? KSANCOV_TRACE_MAGIC : KSANCOV_STKSIZE_MAGIC;
681 	os_atomic_init(&trace->kt_head, 0);
682 	os_atomic_init(&trace->kt_hdr.kh_enabled, 0);
683 	trace->kt_maxent = (uint32_t)maxpcs;
684 
685 	d->trace = trace;
686 	d->sz = sz;
687 	d->maxpcs = maxpcs;
688 	d->mode = mode;
689 
690 	return 0;
691 }
692 
693 static int
ksancov_counters_alloc(ksancov_dev_t d)694 ksancov_counters_alloc(ksancov_dev_t d)
695 {
696 	if (d->mode != KS_MODE_NONE) {
697 		return EBUSY; /* trace/counters already created */
698 	}
699 	assert(d->counters == NULL);
700 
701 	uintptr_t buf;
702 	size_t sz = sizeof(struct ksancov_counters) + ksancov_edgemap->ke_nedges * sizeof(uint8_t);
703 
704 	/* allocate the shared memory buffer */
705 	kern_return_t kr = kmem_alloc(kernel_map, &buf, sz, KMA_DATA | KMA_ZERO,
706 	    VM_KERN_MEMORY_DIAG);
707 	if (kr != KERN_SUCCESS) {
708 		return ENOMEM;
709 	}
710 
711 	ksancov_counters_t *counters = (ksancov_counters_t *)buf;
712 	counters->kc_hdr.kh_magic = KSANCOV_COUNTERS_MAGIC;
713 	counters->kc_nedges = ksancov_edgemap->ke_nedges;
714 	os_atomic_init(&counters->kc_hdr.kh_enabled, 0);
715 
716 	d->counters = counters;
717 	d->sz = sz;
718 	d->mode = KS_MODE_COUNTERS;
719 
720 	return 0;
721 }
722 
/*
 * attach a thread to a ksancov dev instance
 */
static int
ksancov_attach(ksancov_dev_t d, thread_t th)
{
	if (d->mode == KS_MODE_NONE) {
		return EINVAL; /* not configured */
	}

	if (th != current_thread()) {
		/* can only attach to self presently */
		return EINVAL;
	}

	kcov_thread_data_t *data = kcov_get_thread_data(th);
	if (data->ktd_device) {
		return EBUSY; /* one dev per thread */
	}

	/* a device traces at most one thread; displace any previous one */
	if (d->thread != THREAD_NULL) {
		ksancov_detach(d);
	}

	/* hold a thread reference for the lifetime of the attachment */
	d->thread = th;
	thread_reference(d->thread);

	/* publish the device to the hook, then arm it globally and per-thread */
	os_atomic_store(&data->ktd_device, d, relaxed);
	os_atomic_add(&ksancov_enabled, 1, relaxed);
	kcov_enable();

	return 0;
}
756 
757 extern void
758 thread_wait(
759 	thread_t        thread,
760 	boolean_t       until_not_runnable);
761 
762 
/*
 * disconnect thread from ksancov dev
 */
static void
ksancov_detach(ksancov_dev_t d)
{
	if (d->thread == THREAD_NULL) {
		/* no thread attached */
		return;
	}

	/* disconnect dev from thread */
	kcov_thread_data_t *data = kcov_get_thread_data(d->thread);
	if (data->ktd_device != NULL) {
		assert(data->ktd_device == d);
		os_atomic_store(&data->ktd_device, NULL, relaxed);
	}

	if (d->thread != current_thread()) {
		/* wait until it's safe to yank */
		thread_wait(d->thread, TRUE);
	}

	/* undo the global hook-enable count taken in ksancov_attach() */
	assert(ksancov_enabled >= 1);
	os_atomic_sub(&ksancov_enabled, 1, relaxed);
	kcov_disable();

	/* drop our thread reference */
	thread_deallocate(d->thread);
	d->thread = THREAD_NULL;
}
794 
/*
 * Close entry point: unpublish the device, stop tracing, detach its
 * thread and free all of its resources.
 */
static int
ksancov_close(dev_t dev, int flags, int devtype, proc_t p)
{
#pragma unused(flags,devtype,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	/* unpublish first so no new open/ioctl can find the device */
	lck_rw_lock_exclusive(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	ksancov_devs[minor_num] = NULL; /* dev no longer discoverable */
	lck_rw_unlock_exclusive(&ksancov_devs_lck);

	/*
	 * No need to lock d here as there is and will be no one having its
	 * reference except for this thread and the one which is going to
	 * be detached below.
	 */

	if (!d) {
		return ENXIO;
	}

	if (d->mode != KS_MODE_NONE && d->hdr != NULL) {
		os_atomic_store(&d->hdr->kh_enabled, 0, relaxed); /* stop tracing */
	}

	ksancov_detach(d);
	free_dev(d);

	return 0;
}
829 
/*
 * Fuzzing self-test: panics iff `guess` equals a magic 64-bit value.
 * The comparison is deliberately split into 16 nested per-nibble branches
 * so a coverage-guided fuzzer receives incremental feedback for every
 * nibble it matches; do not "simplify" this into one comparison or a loop.
 */
static void
ksancov_testpanic(volatile uint64_t guess)
{
	const uint64_t tgt = 0xf85de3b12891c817UL;

/* X(n): does nibble n of guess match nibble n of tgt? */
#define X(n) ((tgt & (0xfUL << (4*n))) == (guess & (0xfUL << (4*n))))

	if (X(0)) {
		if (X(1)) {
			if (X(2)) {
				if (X(3)) {
					if (X(4)) {
						if (X(5)) {
							if (X(6)) {
								if (X(7)) {
									if (X(8)) {
										if (X(9)) {
											if (X(10)) {
												if (X(11)) {
													if (X(12)) {
														if (X(13)) {
															if (X(14)) {
																if (X(15)) {
																	panic("ksancov: found test value");
																}
															}
														}
													}
												}
											}
										}
									}
								}
							}
						}
					}
				}
			}
		}
	}
}
871 
/*
 * Handle a KSANCOV_IOC_ON_DEMAND request: look up the named module and
 * get/set its instrumentation gate, or report its guard-id range.
 * Returns 0 on success or an errno.
 */
static int
ksancov_handle_on_demand_cmd(struct ksancov_on_demand_msg *kmsg)
{
	struct ksancov_od_module_entry *entry = NULL;
	struct ksancov_od_module_handle *handle = NULL;
	ksancov_on_demand_operation_t op = kmsg->operation;
	int ret = 0;

	lck_mtx_lock(&ksancov_od_lck);

	/* find the entry/handle to the module */
	for (unsigned int idx = 0; idx < ksancov_od_modules_count; idx++) {
		entry = &ksancov_od_module_entries[idx];
		if (strncmp(entry->bundle, kmsg->bundle, sizeof(entry->bundle)) == 0) {
			handle = &ksancov_od_module_handles[idx];
			break;
		}
	}

	if (handle == NULL) {
		ksancov_od_log("ksancov: Could not find module '%s'\n", kmsg->bundle);
		lck_mtx_unlock(&ksancov_od_lck);
		return EINVAL;
	}

	switch (op) {
	case KS_OD_GET_GATE:
		/* Get whether on-demand instrumentation is enabled in a given module */
		if (handle->gate) {
			kmsg->gate = *handle->gate;
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_SET_GATE:
		/* Toggle callback invocation for a given module */
		if (handle->gate) {
			ksancov_od_log("ksancov: Setting gate for '%s': %llu\n",
			    kmsg->bundle, kmsg->gate);
			/* keep the enabled-module count in sync with gate transitions */
			if (kmsg->gate != *handle->gate) {
				if (kmsg->gate) {
					ksancov_od_enabled_count++;
				} else {
					ksancov_od_enabled_count--;
				}
				*handle->gate = kmsg->gate;
			}
		} else {
			ret = EINVAL;
		}
		break;
	case KS_OD_GET_RANGE:
		/*
		 * Get which range of the guards table covers the given module.
		 * NOTE(review): reading *(handle->stop - 1) assumes the module
		 * has at least one guard; bookmarking only happens when guards
		 * were assigned, so this presumably holds — verify.
		 */
		ksancov_od_log("ksancov: Range for '%s': %u, %u\n",
		    kmsg->bundle, *handle->start, *(handle->stop - 1));
		kmsg->range.start = *handle->start;
		kmsg->range.stop = *(handle->stop - 1);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_mtx_unlock(&ksancov_od_lck);
	return ret;
}
938 
/*
 * Ioctl entry point: dispatch ksancov commands for one device. The devs
 * lock is held shared across the call so the device cannot be torn down
 * underneath us; per-device state changes take the device mutex.
 */
static int
ksancov_ioctl(dev_t dev, unsigned long cmd, caddr_t _data, int fflag, proc_t p)
{
#pragma unused(fflag,p)
	const int minor_num = minor(dev);

	if (minor_num < 0 || minor_num >= KSANCOV_MAX_DEV) {
		return ENXIO;
	}

	struct ksancov_buf_desc *mcmd;
	void *data = (void *)_data;

	lck_rw_lock_shared(&ksancov_devs_lck);
	ksancov_dev_t d = ksancov_devs[minor_num];
	if (!d) {
		lck_rw_unlock_shared(&ksancov_devs_lck);
		return EINVAL;         /* dev not open */
	}

	int ret = 0;

	switch (cmd) {
	case KSANCOV_IOC_TRACE:
	case KSANCOV_IOC_STKSIZE:
		/* configure a pc-trace or stack-size-trace buffer */
		lck_mtx_lock(&d->lock);
		ksancov_mode_t mode = (cmd == KSANCOV_IOC_TRACE) ? KS_MODE_TRACE : KS_MODE_STKSIZE;
		ret = ksancov_trace_alloc(d, mode, *(size_t *)data);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_COUNTERS:
		/* configure a per-edge hit-counter buffer */
		lck_mtx_lock(&d->lock);
		ret = ksancov_counters_alloc(d);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP:
		/* map the configured buffer into the calling process */
		mcmd = (struct ksancov_buf_desc *)data;
		lck_mtx_lock(&d->lock);
		ret = ksancov_map(d, &mcmd->ptr, &mcmd->sz);
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_MAP_EDGEMAP:
		/* map the global edge -> pc table (read-only, no dev lock needed) */
		mcmd = (struct ksancov_buf_desc *)data;
		ret = ksancov_map_edgemap(&mcmd->ptr, &mcmd->sz);
		break;
	case KSANCOV_IOC_START:
		/* attach the calling thread and start collecting coverage */
		lck_mtx_lock(&d->lock);
		ret = ksancov_attach(d, current_thread());
		lck_mtx_unlock(&d->lock);
		break;
	case KSANCOV_IOC_NEDGES:
		*(size_t *)data = nedges;
		break;
	case KSANCOV_IOC_ON_DEMAND:
		ret = ksancov_handle_on_demand_cmd((struct ksancov_on_demand_msg *)data);
		break;
	case KSANCOV_IOC_TESTPANIC:
		ksancov_testpanic(*(uint64_t *)data);
		break;
	default:
		ret = EINVAL;
		break;
	}

	lck_rw_unlock_shared(&ksancov_devs_lck);

	return ret;
}
1007 
1008 static int
ksancov_dev_clone(dev_t dev,int action)1009 ksancov_dev_clone(dev_t dev, int action)
1010 {
1011 #pragma unused(dev)
1012 	if (action == DEVFS_CLONE_ALLOC) {
1013 		for (int i = 0; i < KSANCOV_MAX_DEV; i++) {
1014 			if (ksancov_devs[i] == NULL) {
1015 				return i;
1016 			}
1017 		}
1018 	} else if (action == DEVFS_CLONE_FREE) {
1019 		return 0;
1020 	}
1021 
1022 	return -1;
1023 }
1024 
/*
 * Character device switch for /dev/ksancov: only open, close and ioctl
 * are implemented; every other entry point is the standard error stub.
 */
static const struct cdevsw
    ksancov_cdev = {
	.d_open =  ksancov_open,
	.d_close = ksancov_close,
	.d_ioctl = ksancov_ioctl,

	.d_read = eno_rdwrt,
	.d_write = eno_rdwrt,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_select = eno_select,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_type = 0
};
1040 
1041 int
ksancov_init_dev(void)1042 ksancov_init_dev(void)
1043 {
1044 	dev_major = cdevsw_add(-1, &ksancov_cdev);
1045 	if (dev_major < 0) {
1046 		printf("ksancov: failed to allocate major device node\n");
1047 		return -1;
1048 	}
1049 
1050 	dev_t dev = makedev(dev_major, 0);
1051 	void *node = devfs_make_node_clone(dev, DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0666,
1052 	    ksancov_dev_clone, KSANCOV_DEVNODE);
1053 	if (!node) {
1054 		printf("ksancov: failed to create device node\n");
1055 		return -1;
1056 	}
1057 
1058 	return 0;
1059 }
1060