xref: /xnu-12377.41.6/san/tools/ksancov.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KSANCOV_H_
30 #define _KSANCOV_H_
31 
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <strings.h>
#include <sys/ioccom.h>
#include <sys/ioctl.h>
#include <unistd.h>
39 
40 #define KSANCOV_DEVNODE "ksancov"
41 #define KSANCOV_PATH "/dev/" KSANCOV_DEVNODE
42 
/* Set mode (mutually exclusive; pick one before mapping the buffer). */
#define KSANCOV_IOC_TRACE            _IOW('K', 1, size_t) /* number of pcs */
#define KSANCOV_IOC_COUNTERS         _IO('K', 2)
#define KSANCOV_IOC_STKSIZE          _IOW('K', 3, size_t) /* number of pcs */

/* Establish a shared mapping of the coverage buffer. */
#define KSANCOV_IOC_MAP              _IOWR('K', 8, struct ksancov_buf_desc)

/* Establish a shared mapping of the edge address buffer. */
#define KSANCOV_IOC_MAP_EDGEMAP      _IOWR('K', 9, struct ksancov_buf_desc)

/* Log the current thread */
#define KSANCOV_IOC_START            _IOW('K', 10, uintptr_t)
/* Query the total number of instrumented edges. */
#define KSANCOV_IOC_NEDGES           _IOR('K', 50, size_t)
/* Trigger a test panic (debugging aid). */
#define KSANCOV_IOC_TESTPANIC        _IOW('K', 20, uint64_t)

/* Operations related to on-demand instrumentation */
#define KSANCOV_IOC_ON_DEMAND        _IOWR('K', 60, struct ksancov_on_demand_msg)

/* Set comparison log mode */
#define KSANCOV_IOC_CMPS_TRACE       _IOW('K', 70, size_t) /* number of cmps */
#define KSANCOV_IOC_CMPS_TRACE_FUNC  _IOW('K', 71, size_t) /* number of cmps */

/* Establish a shared mapping of the comparisons buffer. */
#define KSANCOV_IOC_CMPS_MAP         _IOWR('K', 90, struct ksancov_buf_desc)

/* Testcases buffer */
#define KSANCOV_IOC_TESTCASES       _IOW('K', 100, size_t) /* number of testcases */
#define KSANCOV_IOC_TESTCASES_MAP   _IOWR('K', 101, struct ksancov_buf_desc)
#define KSANCOV_IOC_TESTCASES_LOG   _IO('K', 102)
73 
/*
 * shared kernel-user mapping
 */

/* Upper bound on instrumented edges supported by the driver. */
#define KSANCOV_MAX_EDGES         (1 << 24)
/* Per-edge counters are 8-bit and saturate at this value. */
#define KSANCOV_MAX_HITS          UINT8_MAX
/* Magic values identifying the layout of a shared mapping (see kh_magic). */
#define KSANCOV_TRACE_MAGIC       (uint32_t)0x5AD17F5BU
#define KSANCOV_COUNTERS_MAGIC    (uint32_t)0x5AD27F6BU
#define KSANCOV_EDGEMAP_MAGIC     (uint32_t)0x5AD37F7BU
#define KSANCOV_STKSIZE_MAGIC     (uint32_t)0x5AD47F8BU
#define KSANCOV_CMPS_TRACE_MAGIC  (uint32_t)0x5AD47F9BU
85 
86 /*
87  * ioctl
88  */
89 
/* Filled in by KSANCOV_IOC_MAP-style ioctls to describe the shared mapping. */
struct ksancov_buf_desc {
	uintptr_t ptr;  /* ptr to shared buffer [out] */
	size_t sz;      /* size of shared buffer [out] */
};
94 
/*
 * Supported coverage modes.
 */
typedef enum {
	KS_MODE_NONE,       /* no mode selected yet */
	KS_MODE_TRACE,      /* PC trace (KSANCOV_IOC_TRACE) */
	KS_MODE_COUNTERS,   /* per-edge hit counters (KSANCOV_IOC_COUNTERS) */
	KS_MODE_STKSIZE,    /* PC + stack-size trace (KSANCOV_IOC_STKSIZE) */
	KS_MODE_MAX
} ksancov_mode_t;
105 
/*
 * A header that is always present in every ksancov mode shared memory structure.
 */
typedef struct ksancov_header {
	uint32_t         kh_magic;   /* one of the KSANCOV_*_MAGIC values */
	_Atomic uint32_t kh_enabled; /* collection on/off; toggled by ksancov_start()/ksancov_stop() */
} ksancov_header_t;
113 
/*
 * TRACE mode data structure.
 */

/*
 * All trace based tools share this structure.
 */
typedef struct ksancov_trace {
	ksancov_header_t kt_hdr;         /* header (must be always first) */
	uint32_t         kt_maxent;      /* Maximum entries in this shared buffer. */
	_Atomic uint32_t kt_head;        /* Pointer to the first unused element. */
	uint64_t         kt_entries[];   /* Trace entries in this buffer. */
} ksancov_trace_t;
127 
/* PC tracing only records PCs. */
typedef uintptr_t ksancov_trace_pc_ent_t;

/* STKSIZE tracing records PCs and stack size. */
typedef struct ksancov_trace_stksize_entry {
	uintptr_t pc;                      /* PC */
	uint32_t  stksize;                 /* associated stack size */
} ksancov_trace_stksize_ent_t;
136 
/*
 * COUNTERS mode data structure.
 */
typedef struct ksancov_counters {
	ksancov_header_t kc_hdr;
	uint32_t         kc_nedges;       /* total number of edges */
	uint8_t          kc_hits[];       /* hits on each edge (8bit saturating) */
} ksancov_counters_t;
145 
/*
 * Edge to PC mapping.
 */
typedef struct ksancov_edgemap {
	uint32_t  ke_magic;               /* KSANCOV_EDGEMAP_MAGIC */
	uint32_t  ke_nedges;              /* number of valid entries in ke_addrs */
	uintptr_t ke_addrs[];             /* address of each edge relative to 'offset' */
} ksancov_edgemap_t;
154 
/*
 * Supported comparison logging modes.
 */
typedef enum {
	KS_CMPS_MODE_NONE,        /* comparison logging disabled */
	KS_CMPS_MODE_TRACE,       /* log cmp instructions only */
	KS_CMPS_MODE_TRACE_FUNC,  /* also log cmp functions (memcmp/strcmp/...) */
	KS_CMPS_MODE_MAX
} ksancov_cmps_mode_t;
164 
/* Comparison type values */
enum {
	KCOV_CMP_CONST = 1,             /* flag: one operand is a constant */
	KCOV_CMP_SIZE1 = 0,             /* 1-byte compare */
	KCOV_CMP_SIZE2 = 2,             /* 2-byte compare */
	KCOV_CMP_SIZE4 = 4,             /* 4-byte compare */
	KCOV_CMP_SIZE8 = 6,             /* 8-byte compare */

	/* comparison-function records (variadic argument payload) */
	KCOV_CMP_FUNC_MEMCMP = 32,
	KCOV_CMP_FUNC_STRCMP = 34,
	KCOV_CMP_FUNC_STRNCMP = 36,
	KCOV_CMP_FUNC_STRBUFCMP = 38,
};

/* Mask applied to cmp `type` values (clears bits 0 and 3); presumably strips
 * the KCOV_CMP_CONST flag when extracting the size — verify against kernel use. */
#define KCOV_CMP_SIZE_MASK 0xfffffff6
/* True when `type` denotes a comparison-function record rather than a plain
 * cmp instruction. Argument parenthesized for macro hygiene so expressions
 * like `a ? b : c` evaluate as a whole before the comparison. */
#define KCOV_CMP_IS_FUNC(type) ((type) >= KCOV_CMP_FUNC_MEMCMP)
181 
/* CMPS TRACE mode tracks comparison values */
typedef struct __attribute__((__packed__)) ksancov_cmps_trace_entry {
	uint64_t pc;          /* PC of the comparison */
	uint32_t type;        /* KCOV_CMP_* value */
	uint16_t len1_func;   /* length of first function argument (function records) */
	uint16_t len2_func;   /* length of second function argument (function records) */
	union {
		uint64_t args[2];              /* cmp instruction arguments */
		/* [0] (not a flexible array) because flexible members are not
		 * allowed inside unions; payload follows the fixed header. */
		uint8_t args_func[0];          /* cmp function arguments (variadic) */
	};
} ksancov_cmps_trace_ent_t;
193 
194 /* Calculate the total space that a ksancov_cmps_trace_ent_t tracing a function takes */
195 static inline size_t
ksancov_cmps_trace_func_space(size_t len1_func,size_t len2_func)196 ksancov_cmps_trace_func_space(size_t len1_func, size_t len2_func)
197 {
198 	static_assert(sizeof(ksancov_cmps_trace_ent_t) == sizeof(uint64_t) * 3 + sizeof(uint32_t) + sizeof(uint16_t) * 2, "ksancov_cmps_trace_ent_t invalid size");
199 
200 	size_t size = sizeof(uint64_t) + sizeof(uint32_t) + sizeof(uint16_t) * 2; // header
201 	size += len1_func + len2_func;
202 	size_t rem = size % sizeof(ksancov_cmps_trace_ent_t);
203 	if (rem == 0) {
204 		return size;
205 	}
206 	return size + sizeof(ksancov_cmps_trace_ent_t) - rem;
207 }
208 
209 static inline uint8_t *
ksancov_cmps_trace_func_arg1(ksancov_cmps_trace_ent_t * entry)210 ksancov_cmps_trace_func_arg1(ksancov_cmps_trace_ent_t *entry)
211 {
212 	return entry->args_func;
213 }
214 
215 static inline uint8_t *
ksancov_cmps_trace_func_arg2(ksancov_cmps_trace_ent_t * entry)216 ksancov_cmps_trace_func_arg2(ksancov_cmps_trace_ent_t *entry)
217 {
218 	uint8_t* func_args = entry->args_func;
219 	return &func_args[entry->len1_func];
220 }
221 
/* Fixed capacity of one serialized testcase payload. */
#define KSANCOV_SERIALIZED_TESTCASE_BYTES 16777216 // 16MiB
/* Maximum number of testcases retained in the circular buffer. */
#define KSANCOV_SERIALIZED_TESTCASES_MAX_COUNT 100

typedef struct ksancov_serialized_testcase {
	uint32_t size;   /* number of valid bytes in buffer */
	uint8_t  buffer[KSANCOV_SERIALIZED_TESTCASE_BYTES];
} ksancov_serialized_testcase_t;
229 
/*
 * Store the latest executed testcases in kernel to dump on panic.
 */
typedef struct ksancov_serialized_testcases {
	uint32_t head;         /* current head of the circular buffer */
	uint32_t inner_index;  /* current inner index in the head testcase (e.g. current call being executed) */
	ksancov_serialized_testcase_t list[];  /* testcases circular buffer */
} ksancov_serialized_testcases_t;
238 
/*
 * On-demand related functionalities
 */
typedef enum {
	KS_OD_GET_GATE = 1,   /* read a module's coverage gate value */
	KS_OD_SET_GATE = 2,   /* write a module's coverage gate value */
	KS_OD_GET_RANGE = 3,  /* query a module's guard range */
} ksancov_on_demand_operation_t;
247 
/* Message exchanged with KSANCOV_IOC_ON_DEMAND. */
struct ksancov_on_demand_msg {
	char bundle[/*KMOD_MAX_NAME*/ 64];       /* target module bundle ID */
	ksancov_on_demand_operation_t operation; /* KS_OD_* selector */
	union {
		uint64_t gate;        /* gate value [in/out depending on operation] */
		struct {
			uint32_t start;   /* first guard index [out] */
			uint32_t stop;    /* last guard index [out] */
		} range;
	};
};
259 
260 /*
261  * ksancov userspace API
262  *
263  * Usage:
264  * 1) open the ksancov device
265  * 2) set the coverage mode
266  * 3) map the coverage buffer
267  * 4) start the trace on a thread
268  * 5) flip the enable bit
269  */
270 
271 static inline int
ksancov_open(void)272 ksancov_open(void)
273 {
274 	return open(KSANCOV_PATH, 0);
275 }
276 
277 static inline int
ksancov_map(int fd,uintptr_t * buf,size_t * sz)278 ksancov_map(int fd, uintptr_t *buf, size_t *sz)
279 {
280 	int ret;
281 	struct ksancov_buf_desc mc = {0};
282 
283 	assert(buf != NULL);
284 
285 	ret = ioctl(fd, KSANCOV_IOC_MAP, &mc);
286 	if (ret == -1) {
287 		return errno;
288 	}
289 
290 	*buf = mc.ptr;
291 	if (sz) {
292 		*sz = mc.sz;
293 	}
294 
295 	ksancov_header_t *hdr = (ksancov_header_t *)mc.ptr;
296 	assert(hdr->kh_magic == KSANCOV_TRACE_MAGIC ||
297 	    hdr->kh_magic == KSANCOV_COUNTERS_MAGIC ||
298 	    hdr->kh_magic == KSANCOV_STKSIZE_MAGIC);
299 
300 	return 0;
301 }
302 
303 static inline int
ksancov_map_edgemap(int fd,uintptr_t * buf,size_t * sz)304 ksancov_map_edgemap(int fd, uintptr_t *buf, size_t *sz)
305 {
306 	int ret;
307 	struct ksancov_buf_desc mc = {0};
308 
309 	assert(buf != NULL);
310 
311 	ret = ioctl(fd, KSANCOV_IOC_MAP_EDGEMAP, &mc);
312 	if (ret == -1) {
313 		return errno;
314 	}
315 
316 	*buf = mc.ptr;
317 	if (sz) {
318 		*sz = mc.sz;
319 	}
320 
321 	ksancov_edgemap_t *emap = (ksancov_edgemap_t *)mc.ptr;
322 	assert(emap->ke_magic == KSANCOV_EDGEMAP_MAGIC);
323 
324 	return 0;
325 }
326 
327 static inline size_t
ksancov_nedges(int fd)328 ksancov_nedges(int fd)
329 {
330 	size_t nedges;
331 	int ret = ioctl(fd, KSANCOV_IOC_NEDGES, &nedges);
332 	if (ret == -1) {
333 		return SIZE_MAX;
334 	}
335 	return nedges;
336 }
337 
338 static inline int
ksancov_mode_trace(int fd,size_t entries)339 ksancov_mode_trace(int fd, size_t entries)
340 {
341 	int ret;
342 	ret = ioctl(fd, KSANCOV_IOC_TRACE, &entries);
343 	if (ret == -1) {
344 		return errno;
345 	}
346 	return 0;
347 }
348 
349 static inline int
ksancov_mode_stksize(int fd,size_t entries)350 ksancov_mode_stksize(int fd, size_t entries)
351 {
352 	int ret;
353 	ret = ioctl(fd, KSANCOV_IOC_STKSIZE, &entries);
354 	if (ret == -1) {
355 		return errno;
356 	}
357 	return 0;
358 }
359 
360 static inline int
ksancov_mode_counters(int fd)361 ksancov_mode_counters(int fd)
362 {
363 	int ret;
364 	ret = ioctl(fd, KSANCOV_IOC_COUNTERS);
365 	if (ret == -1) {
366 		return errno;
367 	}
368 	return 0;
369 }
370 
371 static inline int
ksancov_thread_self(int fd)372 ksancov_thread_self(int fd)
373 {
374 	int ret;
375 	uintptr_t th = 0;
376 	ret = ioctl(fd, KSANCOV_IOC_START, &th);
377 	if (ret == -1) {
378 		return errno;
379 	}
380 	return 0;
381 }
382 
383 static inline int
ksancov_start(void * buf)384 ksancov_start(void *buf)
385 {
386 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
387 	atomic_store_explicit(&hdr->kh_enabled, 1, memory_order_relaxed);
388 	return 0;
389 }
390 
391 static inline int
ksancov_stop(void * buf)392 ksancov_stop(void *buf)
393 {
394 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
395 	atomic_store_explicit(&hdr->kh_enabled, 0, memory_order_relaxed);
396 	return 0;
397 }
398 
399 static inline int
ksancov_reset(void * buf)400 ksancov_reset(void *buf)
401 {
402 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
403 	if (hdr->kh_magic == KSANCOV_TRACE_MAGIC || hdr->kh_magic == KSANCOV_STKSIZE_MAGIC || hdr->kh_magic == KSANCOV_CMPS_TRACE_MAGIC) {
404 		ksancov_trace_t *trace = (ksancov_trace_t *)buf;
405 		atomic_store_explicit(&trace->kt_head, 0, memory_order_relaxed);
406 	} else if (hdr->kh_magic == KSANCOV_COUNTERS_MAGIC) {
407 		ksancov_counters_t *counters = (ksancov_counters_t *)buf;
408 		bzero(counters->kc_hits, counters->kc_nedges);
409 	} else {
410 		return EINVAL;
411 	}
412 	return 0;
413 }
414 
415 static inline uintptr_t
ksancov_edge_addr(ksancov_edgemap_t * kemap,size_t idx)416 ksancov_edge_addr(ksancov_edgemap_t *kemap, size_t idx)
417 {
418 	assert(kemap);
419 	if (idx >= kemap->ke_nedges) {
420 		return 0;
421 	}
422 	return kemap->ke_addrs[idx];
423 }
424 
425 static inline size_t
ksancov_trace_max_ent(ksancov_trace_t * trace)426 ksancov_trace_max_ent(ksancov_trace_t *trace)
427 {
428 	assert(trace);
429 	return trace->kt_maxent;
430 }
431 
432 static inline size_t
ksancov_trace_head(ksancov_trace_t * trace)433 ksancov_trace_head(ksancov_trace_t *trace)
434 {
435 	assert(trace);
436 	size_t maxent = trace->kt_maxent;
437 	size_t head = atomic_load_explicit(&trace->kt_head, memory_order_acquire);
438 	return head < maxent ? head : maxent;
439 }
440 
441 static inline uintptr_t
ksancov_trace_entry(ksancov_trace_t * trace,size_t i)442 ksancov_trace_entry(ksancov_trace_t *trace, size_t i)
443 {
444 	assert(trace);
445 	assert(trace->kt_hdr.kh_magic == KSANCOV_TRACE_MAGIC);
446 	if (i >= trace->kt_head) {
447 		return 0;
448 	}
449 
450 	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)trace->kt_entries;
451 	return entries[i];
452 }
453 
454 static inline uintptr_t
ksancov_stksize_pc(ksancov_trace_t * trace,size_t i)455 ksancov_stksize_pc(ksancov_trace_t *trace, size_t i)
456 {
457 	assert(trace);
458 	assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
459 	if (i >= trace->kt_head) {
460 		return 0;
461 	}
462 
463 	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
464 	return entries[i].pc;
465 }
466 
467 static inline uint32_t
ksancov_stksize_size(ksancov_trace_t * trace,size_t i)468 ksancov_stksize_size(ksancov_trace_t *trace, size_t i)
469 {
470 	assert(trace);
471 	assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
472 	if (i >= trace->kt_head) {
473 		return 0;
474 	}
475 
476 	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
477 	return entries[i].stksize;
478 }
479 
480 static inline int
ksancov_cmps_mode_trace(int fd,size_t entries,bool trace_functions)481 ksancov_cmps_mode_trace(int fd, size_t entries, bool trace_functions)
482 {
483 	int ret;
484 	if (trace_functions) {
485 		ret = ioctl(fd, KSANCOV_IOC_CMPS_TRACE_FUNC, &entries);
486 	} else {
487 		ret = ioctl(fd, KSANCOV_IOC_CMPS_TRACE, &entries);
488 	}
489 	if (ret == -1) {
490 		return errno;
491 	}
492 	return 0;
493 }
494 
495 static inline int
ksancov_cmps_map(int fd,uintptr_t * buf,size_t * sz)496 ksancov_cmps_map(int fd, uintptr_t *buf, size_t *sz)
497 {
498 	int ret;
499 	struct ksancov_buf_desc mc = {0};
500 
501 	assert(buf != NULL);
502 
503 	ret = ioctl(fd, KSANCOV_IOC_CMPS_MAP, &mc);
504 	if (ret == -1) {
505 		return errno;
506 	}
507 
508 	*buf = mc.ptr;
509 	if (sz) {
510 		*sz = mc.sz;
511 	}
512 
513 	ksancov_header_t *hdr = (ksancov_header_t *)mc.ptr;
514 	assert(hdr->kh_magic == KSANCOV_CMPS_TRACE_MAGIC);
515 
516 	return 0;
517 }
518 
519 static inline ksancov_cmps_trace_ent_t*
ksancov_cmps_trace_entry(ksancov_trace_t * trace,size_t i)520 ksancov_cmps_trace_entry(ksancov_trace_t *trace, size_t i)
521 {
522 	assert(trace);
523 	assert(trace->kt_hdr.kh_magic == KSANCOV_CMPS_TRACE_MAGIC);
524 	if (i >= trace->kt_head) {
525 		return NULL;
526 	}
527 
528 	ksancov_cmps_trace_ent_t *entries = (ksancov_cmps_trace_ent_t *)trace->kt_entries;
529 	return &entries[i];
530 }
531 
532 static inline int
ksancov_testcases(int fd,size_t num_testcases)533 ksancov_testcases(int fd, size_t num_testcases)
534 {
535 	int ret = ioctl(fd, KSANCOV_IOC_TESTCASES, &num_testcases);
536 	if (ret == -1) {
537 		return errno;
538 	}
539 	return 0;
540 }
541 
542 static inline int
ksancov_testcases_map(int fd,uintptr_t * buf,size_t * sz)543 ksancov_testcases_map(int fd, uintptr_t *buf, size_t *sz)
544 {
545 	int ret;
546 	struct ksancov_buf_desc mc = {0};
547 
548 	assert(buf != NULL);
549 
550 	ret = ioctl(fd, KSANCOV_IOC_TESTCASES_MAP, &mc);
551 	if (ret == -1) {
552 		return errno;
553 	}
554 
555 	*buf = mc.ptr;
556 	if (sz) {
557 		*sz = mc.sz;
558 	}
559 
560 	return 0;
561 }
562 
563 static inline int
ksancov_testcases_log(int fd)564 ksancov_testcases_log(int fd)
565 {
566 	int ret;
567 	ret = ioctl(fd, KSANCOV_IOC_TESTCASES_LOG);
568 	if (ret == -1) {
569 		return errno;
570 	}
571 	return 0;
572 }
573 
574 /*
575  * On-demand control API
576  */
577 
578 static inline int
_ksancov_on_demand_operation(int fd,const char * bundle,ksancov_on_demand_operation_t op,struct ksancov_on_demand_msg * msg)579 _ksancov_on_demand_operation(int fd, const char *bundle, ksancov_on_demand_operation_t op, struct ksancov_on_demand_msg *msg)
580 {
581 	int ret;
582 
583 	msg->operation = op;
584 	strlcpy(msg->bundle, bundle, sizeof(msg->bundle));
585 
586 	ret = ioctl(fd, KSANCOV_IOC_ON_DEMAND, msg);
587 	if (ret == -1) {
588 		return errno;
589 	}
590 
591 	return ret;
592 }
593 
594 /*
595  * Retrieve the value of the gate for a given module bundle ID.
596  */
597 static inline int
ksancov_on_demand_get_gate(int fd,const char * bundle,uint64_t * gate)598 ksancov_on_demand_get_gate(int fd, const char *bundle, uint64_t *gate)
599 {
600 	assert(gate);
601 
602 	struct ksancov_on_demand_msg msg;
603 	int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_GATE, &msg);
604 	if (ret == 0) {
605 		*gate = msg.gate;
606 	}
607 	return ret;
608 }
609 
610 /*
611  * Set the value of the gate for a given module bundle ID.
612  *
613  * Any non-zero value enables the invocation of the sanitizer coverage callbacks
614  * inserted in the specified module.
615  */
616 static inline int
ksancov_on_demand_set_gate(int fd,const char * bundle,uint64_t value)617 ksancov_on_demand_set_gate(int fd, const char *bundle, uint64_t value)
618 {
619 	struct ksancov_on_demand_msg msg = {};
620 	msg.gate = value;
621 	return _ksancov_on_demand_operation(fd, bundle, KS_OD_SET_GATE, &msg);
622 }
623 
624 /*
625  * Get the guards range for a specified module.
626  */
627 static inline int
ksancov_on_demand_get_range(int fd,const char * bundle,uint32_t * start,uint32_t * stop)628 ksancov_on_demand_get_range(int fd, const char *bundle, uint32_t *start, uint32_t *stop)
629 {
630 	assert(start && stop);
631 
632 	struct ksancov_on_demand_msg msg = {};
633 	int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_RANGE, &msg);
634 	if (ret == 0) {
635 		*start = msg.range.start;
636 		*stop = msg.range.stop;
637 	}
638 	return ret;
639 }
640 
641 #endif /* _KSANCOV_H_ */
642