1 /*
2 * Copyright (c) 2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifndef _KSANCOV_H_
30 #define _KSANCOV_H_
31
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <strings.h>
#include <sys/ioccom.h>
#include <sys/ioctl.h>
#include <unistd.h>
39
40 #define KSANCOV_DEVNODE "ksancov"
41 #define KSANCOV_PATH "/dev/" KSANCOV_DEVNODE
42
43 /* Set mode */
44 #define KSANCOV_IOC_TRACE _IOW('K', 1, size_t) /* number of pcs */
45 #define KSANCOV_IOC_COUNTERS _IO('K', 2)
46 #define KSANCOV_IOC_STKSIZE _IOW('K', 3, size_t) /* number of pcs */
47
48 /* Establish a shared mapping of the coverage buffer. */
49 #define KSANCOV_IOC_MAP _IOWR('K', 8, struct ksancov_buf_desc)
50
51 /* Establish a shared mapping of the edge address buffer. */
52 #define KSANCOV_IOC_MAP_EDGEMAP _IOWR('K', 9, struct ksancov_buf_desc)
53
54 /* Log the current thread */
55 #define KSANCOV_IOC_START _IOW('K', 10, uintptr_t)
56 #define KSANCOV_IOC_NEDGES _IOR('K', 50, size_t)
57 #define KSANCOV_IOC_TESTPANIC _IOW('K', 20, uint64_t)
58
59 /* Operations related to on-demand instrumentation */
60 #define KSANCOV_IOC_ON_DEMAND _IOWR('K', 60, struct ksancov_on_demand_msg)
61
62 /* Set comparison log mode */
63 #define KSANCOV_IOC_CMPS_TRACE _IOW('K', 70, size_t) /* number of cmps */
64 #define KSANCOV_IOC_CMPS_TRACE_FUNC _IOW('K', 71, size_t) /* number of cmps */
65
66 /* Establish a shared mapping of the comparisons buffer. */
67 #define KSANCOV_IOC_CMPS_MAP _IOWR('K', 90, struct ksancov_buf_desc)
68
69 /*
70 * shared kernel-user mapping
71 */
72
73 #define KSANCOV_MAX_EDGES (1 << 24)
74 #define KSANCOV_MAX_HITS UINT8_MAX
75 #define KSANCOV_TRACE_MAGIC (uint32_t)0x5AD17F5BU
76 #define KSANCOV_COUNTERS_MAGIC (uint32_t)0x5AD27F6BU
77 #define KSANCOV_EDGEMAP_MAGIC (uint32_t)0x5AD37F7BU
78 #define KSANCOV_STKSIZE_MAGIC (uint32_t)0x5AD47F8BU
79 #define KSANCOV_CMPS_TRACE_MAGIC (uint32_t)0x5AD47F9BU
80
81 /*
82 * ioctl
83 */
84
/*
 * In/out argument for the KSANCOV_IOC_MAP / KSANCOV_IOC_MAP_EDGEMAP /
 * KSANCOV_IOC_CMPS_MAP ioctls: describes the shared memory region that
 * the kernel mapped into the calling process.
 */
struct ksancov_buf_desc {
	uintptr_t ptr; /* ptr to shared buffer [out] */
	size_t sz;     /* size of shared buffer [out] */
};
89
/*
 * Supported coverage modes (selected via the "Set mode" ioctls above).
 */
typedef enum {
	KS_MODE_NONE,     /* no mode selected yet */
	KS_MODE_TRACE,    /* PC trace buffer (KSANCOV_IOC_TRACE) */
	KS_MODE_COUNTERS, /* per-edge hit counters (KSANCOV_IOC_COUNTERS) */
	KS_MODE_STKSIZE,  /* PC + stack-size trace (KSANCOV_IOC_STKSIZE) */
	KS_MODE_MAX       /* sentinel: number of modes, not a real mode */
} ksancov_mode_t;
100
/*
 * A header that is always present in every ksancov mode shared memory structure.
 */
typedef struct ksancov_header {
	uint32_t kh_magic;           /* one of the KSANCOV_*_MAGIC values; identifies the mode */
	_Atomic uint32_t kh_enabled; /* non-zero => coverage collection is on (see ksancov_start/stop) */
} ksancov_header_t;
108
/*
 * TRACE mode data structure.
 */

/*
 * All trace based tools share this structure. The element type of
 * kt_entries depends on the mode (see ksancov_trace_pc_ent_t and
 * ksancov_trace_stksize_ent_t below).
 */
typedef struct ksancov_trace {
	ksancov_header_t kt_hdr; /* header (must be always first) */
	uint32_t kt_maxent;      /* Maximum entries in this shared buffer. */
	_Atomic uint32_t kt_head; /* Pointer to the first unused element. */
	uint64_t kt_entries[];   /* Trace entries in this buffer. */
} ksancov_trace_t;
122
/* PC tracing only records PCs: one word-sized entry per edge hit. */
typedef uintptr_t ksancov_trace_pc_ent_t;
125
/* STKSIZE tracing records PCs and stack size. */
typedef struct ksancov_trace_stksize_entry {
	uintptr_t pc;     /* PC */
	uint32_t stksize; /* associated stack size */
} ksancov_trace_stksize_ent_t;
131
/*
 * COUNTERS mode data structure.
 */
typedef struct ksancov_counters {
	ksancov_header_t kc_hdr; /* common header (must be first) */
	uint32_t kc_nedges;      /* total number of edges */
	uint8_t kc_hits[];       /* hits on each edge (8bit saturating, capped at KSANCOV_MAX_HITS) */
} ksancov_counters_t;
140
/*
 * Edge to PC mapping, shared via KSANCOV_IOC_MAP_EDGEMAP. Note: this
 * structure starts with its own magic, not a ksancov_header_t.
 */
typedef struct ksancov_edgemap {
	uint32_t ke_magic;   /* KSANCOV_EDGEMAP_MAGIC */
	uint32_t ke_nedges;  /* number of entries in ke_addrs */
	uintptr_t ke_addrs[]; /* address of each edge relative to 'offset' (presumably a kernel slide base — not defined in this header) */
} ksancov_edgemap_t;
149
/*
 * Supported comparison logging modes (selected via the CMPS ioctls above).
 */
typedef enum {
	KS_CMPS_MODE_NONE,       /* comparison logging disabled */
	KS_CMPS_MODE_TRACE,      /* log cmp instructions only */
	KS_CMPS_MODE_TRACE_FUNC, /* also log comparison functions (memcmp, strcmp, ...) */
	KS_CMPS_MODE_MAX         /* sentinel: number of modes, not a real mode */
} ksancov_cmps_mode_t;
159
/*
 * Comparison type values, stored in the 'type' field of a comparison
 * trace entry.
 */
enum {
	/* set when one comparison operand is a compile-time constant */
	KCOV_CMP_CONST = 1,

	/* operand width of a cmp instruction */
	KCOV_CMP_SIZE1 = 0,
	KCOV_CMP_SIZE2 = 2,
	KCOV_CMP_SIZE4 = 4,
	KCOV_CMP_SIZE8 = 6,

	/* comparison functions; any type >= KCOV_CMP_FUNC_MEMCMP is a function */
	KCOV_CMP_FUNC_MEMCMP = 32,
	KCOV_CMP_FUNC_STRCMP = 34,
	KCOV_CMP_FUNC_STRNCMP = 36,
	KCOV_CMP_FUNC_STRBUFCMP = 38,
};

/* Mask clearing bits 0 and 3 (i.e. the KCOV_CMP_CONST flag) of a type value. */
#define KCOV_CMP_SIZE_MASK 0xfffffff6
/*
 * True when the type denotes a comparison function rather than a cmp
 * instruction. The argument is parenthesized so that low-precedence
 * expressions (e.g. a ternary) passed as 'type' bind correctly.
 */
#define KCOV_CMP_IS_FUNC(type) ((type) >= KCOV_CMP_FUNC_MEMCMP)
176
/*
 * CMPS TRACE mode tracks comparison values. Packed so the layout is
 * identical between kernel and userspace; the static_assert in
 * ksancov_cmps_trace_func_space() pins the expected size.
 */
typedef struct __attribute__((__packed__)) ksancov_cmps_trace_entry {
	uint64_t pc;        /* PC of the comparison */
	uint32_t type;      /* KCOV_CMP_* value describing the comparison */
	uint16_t len1_func; /* byte length of the first function argument (function cmps only) */
	uint16_t len2_func; /* byte length of the second function argument (function cmps only) */
	union {
		uint64_t args[2];      /* cmp instruction arguments */
		uint8_t args_func[0];  /* cmp function arguments (variadic); zero-length-array extension, payload follows the header */
	};
} ksancov_cmps_trace_ent_t;
188
189 /* Calculate the total space that a ksancov_cmps_trace_ent_t tracing a function takes */
190 static inline size_t
ksancov_cmps_trace_func_space(size_t len1_func,size_t len2_func)191 ksancov_cmps_trace_func_space(size_t len1_func, size_t len2_func)
192 {
193 static_assert(sizeof(ksancov_cmps_trace_ent_t) == sizeof(uint64_t) * 3 + sizeof(uint32_t) + sizeof(uint16_t) * 2, "ksancov_cmps_trace_ent_t invalid size");
194
195 size_t size = sizeof(uint64_t) + sizeof(uint32_t) + sizeof(uint16_t) * 2; // header
196 size += len1_func + len2_func;
197 size_t rem = size % sizeof(ksancov_cmps_trace_ent_t);
198 if (rem == 0) {
199 return size;
200 }
201 return size + sizeof(ksancov_cmps_trace_ent_t) - rem;
202 }
203
204 static inline uint8_t *
ksancov_cmps_trace_func_arg1(ksancov_cmps_trace_ent_t * entry)205 ksancov_cmps_trace_func_arg1(ksancov_cmps_trace_ent_t *entry)
206 {
207 return entry->args_func;
208 }
209
210 static inline uint8_t *
ksancov_cmps_trace_func_arg2(ksancov_cmps_trace_ent_t * entry)211 ksancov_cmps_trace_func_arg2(ksancov_cmps_trace_ent_t *entry)
212 {
213 uint8_t* func_args = entry->args_func;
214 return &func_args[entry->len1_func];
215 }
216
/*
 * On-demand related functionalities: operations understood by the
 * KSANCOV_IOC_ON_DEMAND ioctl.
 */
typedef enum {
	KS_OD_GET_GATE = 1,  /* read a module's coverage gate value */
	KS_OD_SET_GATE = 2,  /* write a module's coverage gate value */
	KS_OD_GET_RANGE = 3, /* query a module's guard index range */
} ksancov_on_demand_operation_t;
225
/*
 * In/out argument for KSANCOV_IOC_ON_DEMAND: selects a module by bundle
 * ID and carries the operation-specific payload.
 */
struct ksancov_on_demand_msg {
	char bundle[/*KMOD_MAX_NAME*/ 64];       /* module bundle ID, NUL-terminated */
	ksancov_on_demand_operation_t operation; /* KS_OD_* operation to perform */
	union {
		uint64_t gate;  /* gate value [in for SET_GATE, out for GET_GATE] */
		struct {
			uint32_t start; /* first guard index [out] */
			uint32_t stop;  /* last guard index [out] */
		} range;
	};
};
237
238 /*
239 * ksancov userspace API
240 *
241 * Usage:
242 * 1) open the ksancov device
243 * 2) set the coverage mode
244 * 3) map the coverage buffer
245 * 4) start the trace on a thread
246 * 5) flip the enable bit
247 */
248
/*
 * Open the ksancov device node.
 *
 * Returns the open(2) file descriptor, or -1 on failure with errno set.
 */
static inline int
ksancov_open(void)
{
	return open(KSANCOV_PATH, 0); /* flags 0 == read-only */
}
254
255 static inline int
ksancov_map(int fd,uintptr_t * buf,size_t * sz)256 ksancov_map(int fd, uintptr_t *buf, size_t *sz)
257 {
258 int ret;
259 struct ksancov_buf_desc mc = {0};
260
261 assert(buf != NULL);
262
263 ret = ioctl(fd, KSANCOV_IOC_MAP, &mc);
264 if (ret == -1) {
265 return errno;
266 }
267
268 *buf = mc.ptr;
269 if (sz) {
270 *sz = mc.sz;
271 }
272
273 ksancov_header_t *hdr = (ksancov_header_t *)mc.ptr;
274 assert(hdr->kh_magic == KSANCOV_TRACE_MAGIC ||
275 hdr->kh_magic == KSANCOV_COUNTERS_MAGIC ||
276 hdr->kh_magic == KSANCOV_STKSIZE_MAGIC);
277
278 return 0;
279 }
280
281 static inline int
ksancov_map_edgemap(int fd,uintptr_t * buf,size_t * sz)282 ksancov_map_edgemap(int fd, uintptr_t *buf, size_t *sz)
283 {
284 int ret;
285 struct ksancov_buf_desc mc = {0};
286
287 assert(buf != NULL);
288
289 ret = ioctl(fd, KSANCOV_IOC_MAP_EDGEMAP, &mc);
290 if (ret == -1) {
291 return errno;
292 }
293
294 *buf = mc.ptr;
295 if (sz) {
296 *sz = mc.sz;
297 }
298
299 ksancov_edgemap_t *emap = (ksancov_edgemap_t *)mc.ptr;
300 assert(emap->ke_magic == KSANCOV_EDGEMAP_MAGIC);
301
302 return 0;
303 }
304
305 static inline size_t
ksancov_nedges(int fd)306 ksancov_nedges(int fd)
307 {
308 size_t nedges;
309 int ret = ioctl(fd, KSANCOV_IOC_NEDGES, &nedges);
310 if (ret == -1) {
311 return SIZE_MAX;
312 }
313 return nedges;
314 }
315
316 static inline int
ksancov_mode_trace(int fd,size_t entries)317 ksancov_mode_trace(int fd, size_t entries)
318 {
319 int ret;
320 ret = ioctl(fd, KSANCOV_IOC_TRACE, &entries);
321 if (ret == -1) {
322 return errno;
323 }
324 return 0;
325 }
326
327 static inline int
ksancov_mode_stksize(int fd,size_t entries)328 ksancov_mode_stksize(int fd, size_t entries)
329 {
330 int ret;
331 ret = ioctl(fd, KSANCOV_IOC_STKSIZE, &entries);
332 if (ret == -1) {
333 return errno;
334 }
335 return 0;
336 }
337
338 static inline int
ksancov_mode_counters(int fd)339 ksancov_mode_counters(int fd)
340 {
341 int ret;
342 ret = ioctl(fd, KSANCOV_IOC_COUNTERS);
343 if (ret == -1) {
344 return errno;
345 }
346 return 0;
347 }
348
349 static inline int
ksancov_thread_self(int fd)350 ksancov_thread_self(int fd)
351 {
352 int ret;
353 uintptr_t th = 0;
354 ret = ioctl(fd, KSANCOV_IOC_START, &th);
355 if (ret == -1) {
356 return errno;
357 }
358 return 0;
359 }
360
361 static inline int
ksancov_start(void * buf)362 ksancov_start(void *buf)
363 {
364 ksancov_header_t *hdr = (ksancov_header_t *)buf;
365 atomic_store_explicit(&hdr->kh_enabled, 1, memory_order_relaxed);
366 return 0;
367 }
368
369 static inline int
ksancov_stop(void * buf)370 ksancov_stop(void *buf)
371 {
372 ksancov_header_t *hdr = (ksancov_header_t *)buf;
373 atomic_store_explicit(&hdr->kh_enabled, 0, memory_order_relaxed);
374 return 0;
375 }
376
377 static inline int
ksancov_reset(void * buf)378 ksancov_reset(void *buf)
379 {
380 ksancov_header_t *hdr = (ksancov_header_t *)buf;
381 if (hdr->kh_magic == KSANCOV_TRACE_MAGIC || hdr->kh_magic == KSANCOV_STKSIZE_MAGIC || hdr->kh_magic == KSANCOV_CMPS_TRACE_MAGIC) {
382 ksancov_trace_t *trace = (ksancov_trace_t *)buf;
383 atomic_store_explicit(&trace->kt_head, 0, memory_order_relaxed);
384 } else if (hdr->kh_magic == KSANCOV_COUNTERS_MAGIC) {
385 ksancov_counters_t *counters = (ksancov_counters_t *)buf;
386 bzero(counters->kc_hits, counters->kc_nedges);
387 } else {
388 return EINVAL;
389 }
390 return 0;
391 }
392
393 static inline uintptr_t
ksancov_edge_addr(ksancov_edgemap_t * kemap,size_t idx)394 ksancov_edge_addr(ksancov_edgemap_t *kemap, size_t idx)
395 {
396 assert(kemap);
397 if (idx >= kemap->ke_nedges) {
398 return 0;
399 }
400 return kemap->ke_addrs[idx];
401 }
402
403 static inline size_t
ksancov_trace_max_ent(ksancov_trace_t * trace)404 ksancov_trace_max_ent(ksancov_trace_t *trace)
405 {
406 assert(trace);
407 return trace->kt_maxent;
408 }
409
410 static inline size_t
ksancov_trace_head(ksancov_trace_t * trace)411 ksancov_trace_head(ksancov_trace_t *trace)
412 {
413 assert(trace);
414 size_t maxent = trace->kt_maxent;
415 size_t head = atomic_load_explicit(&trace->kt_head, memory_order_acquire);
416 return head < maxent ? head : maxent;
417 }
418
419 static inline uintptr_t
ksancov_trace_entry(ksancov_trace_t * trace,size_t i)420 ksancov_trace_entry(ksancov_trace_t *trace, size_t i)
421 {
422 assert(trace);
423 assert(trace->kt_hdr.kh_magic == KSANCOV_TRACE_MAGIC);
424 if (i >= trace->kt_head) {
425 return 0;
426 }
427
428 ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)trace->kt_entries;
429 return entries[i];
430 }
431
432 static inline uintptr_t
ksancov_stksize_pc(ksancov_trace_t * trace,size_t i)433 ksancov_stksize_pc(ksancov_trace_t *trace, size_t i)
434 {
435 assert(trace);
436 assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
437 if (i >= trace->kt_head) {
438 return 0;
439 }
440
441 ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
442 return entries[i].pc;
443 }
444
445 static inline uint32_t
ksancov_stksize_size(ksancov_trace_t * trace,size_t i)446 ksancov_stksize_size(ksancov_trace_t *trace, size_t i)
447 {
448 assert(trace);
449 assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
450 if (i >= trace->kt_head) {
451 return 0;
452 }
453
454 ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
455 return entries[i].stksize;
456 }
457
458 static inline int
ksancov_cmps_mode_trace(int fd,size_t entries,bool trace_functions)459 ksancov_cmps_mode_trace(int fd, size_t entries, bool trace_functions)
460 {
461 int ret;
462 if (trace_functions) {
463 ret = ioctl(fd, KSANCOV_IOC_CMPS_TRACE_FUNC, &entries);
464 } else {
465 ret = ioctl(fd, KSANCOV_IOC_CMPS_TRACE, &entries);
466 }
467 if (ret == -1) {
468 return errno;
469 }
470 return 0;
471 }
472
473 static inline int
ksancov_cmps_map(int fd,uintptr_t * buf,size_t * sz)474 ksancov_cmps_map(int fd, uintptr_t *buf, size_t *sz)
475 {
476 int ret;
477 struct ksancov_buf_desc mc = {0};
478
479 assert(buf != NULL);
480
481 ret = ioctl(fd, KSANCOV_IOC_CMPS_MAP, &mc);
482 if (ret == -1) {
483 return errno;
484 }
485
486 *buf = mc.ptr;
487 if (sz) {
488 *sz = mc.sz;
489 }
490
491 ksancov_header_t *hdr = (ksancov_header_t *)mc.ptr;
492 assert(hdr->kh_magic == KSANCOV_CMPS_TRACE_MAGIC);
493
494 return 0;
495 }
496
497 static inline ksancov_cmps_trace_ent_t*
ksancov_cmps_trace_entry(ksancov_trace_t * trace,size_t i)498 ksancov_cmps_trace_entry(ksancov_trace_t *trace, size_t i)
499 {
500 assert(trace);
501 assert(trace->kt_hdr.kh_magic == KSANCOV_CMPS_TRACE_MAGIC);
502 if (i >= trace->kt_head) {
503 return NULL;
504 }
505
506 ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)trace->kt_entries;
507 return &entries[i];
508 }
509
510 /*
511 * On-demand control API
512 */
513
514 static inline int
_ksancov_on_demand_operation(int fd,const char * bundle,ksancov_on_demand_operation_t op,struct ksancov_on_demand_msg * msg)515 _ksancov_on_demand_operation(int fd, const char *bundle, ksancov_on_demand_operation_t op, struct ksancov_on_demand_msg *msg)
516 {
517 int ret;
518
519 msg->operation = op;
520 strlcpy(msg->bundle, bundle, sizeof(msg->bundle));
521
522 ret = ioctl(fd, KSANCOV_IOC_ON_DEMAND, msg);
523 if (ret == -1) {
524 return errno;
525 }
526
527 return ret;
528 }
529
530 /*
531 * Retrieve the value of the gate for a given module bundle ID.
532 */
533 static inline int
ksancov_on_demand_get_gate(int fd,const char * bundle,uint64_t * gate)534 ksancov_on_demand_get_gate(int fd, const char *bundle, uint64_t *gate)
535 {
536 assert(gate);
537
538 struct ksancov_on_demand_msg msg;
539 int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_GATE, &msg);
540 if (ret == 0) {
541 *gate = msg.gate;
542 }
543 return ret;
544 }
545
546 /*
547 * Set the value of the gate for a given module bundle ID.
548 *
549 * Any non-zero value enables the invocation of the sanitizer coverage callbacks
550 * inserted in the specified module.
551 */
552 static inline int
ksancov_on_demand_set_gate(int fd,const char * bundle,uint64_t value)553 ksancov_on_demand_set_gate(int fd, const char *bundle, uint64_t value)
554 {
555 struct ksancov_on_demand_msg msg = {};
556 msg.gate = value;
557 return _ksancov_on_demand_operation(fd, bundle, KS_OD_SET_GATE, &msg);
558 }
559
560 /*
561 * Get the guards range for a specified module.
562 */
563 static inline int
ksancov_on_demand_get_range(int fd,const char * bundle,uint32_t * start,uint32_t * stop)564 ksancov_on_demand_get_range(int fd, const char *bundle, uint32_t *start, uint32_t *stop)
565 {
566 assert(start && stop);
567
568 struct ksancov_on_demand_msg msg = {};
569 int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_RANGE, &msg);
570 if (ret == 0) {
571 *start = msg.range.start;
572 *stop = msg.range.stop;
573 }
574 return ret;
575 }
576
577 #endif /* _KSANCOV_H_ */
578