xref: /xnu-11417.140.69/san/tools/ksancov.h (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _KSANCOV_H_
30 #define _KSANCOV_H_
31 
#include <assert.h>
#include <errno.h>
#include <fcntl.h>
#include <stdatomic.h>
#include <stdint.h>
#include <strings.h>
#include <sys/ioccom.h>
#include <sys/ioctl.h>
#include <unistd.h>
39 
#define KSANCOV_DEVNODE "ksancov"
#define KSANCOV_PATH "/dev/" KSANCOV_DEVNODE

/* Set mode (see ksancov_mode_t; choose one before mapping the buffer). */
#define KSANCOV_IOC_TRACE        _IOW('K', 1, size_t) /* number of pcs */
#define KSANCOV_IOC_COUNTERS     _IO('K', 2)
#define KSANCOV_IOC_STKSIZE      _IOW('K', 3, size_t) /* number of pcs */

/* Establish a shared mapping of the coverage buffer. */
#define KSANCOV_IOC_MAP          _IOWR('K', 8, struct ksancov_buf_desc)

/* Establish a shared mapping of the edge address buffer. */
#define KSANCOV_IOC_MAP_EDGEMAP  _IOWR('K', 9, struct ksancov_buf_desc)

/* Log the current thread */
#define KSANCOV_IOC_START        _IOW('K', 10, uintptr_t)
/* Query the total number of instrumented edges. */
#define KSANCOV_IOC_NEDGES       _IOR('K', 50, size_t)

/* kext-related operations */
#define KSANCOV_IOC_ON_DEMAND    _IOWR('K', 60, struct ksancov_on_demand_msg)

/*
 * shared kernel-user mapping
 */

#define KSANCOV_MAX_EDGES       (1 << 24)
#define KSANCOV_MAX_HITS        UINT8_MAX     /* counters saturate at 255 */
#define KSANCOV_TRACE_MAGIC     (uint32_t)0x5AD17F5BU
#define KSANCOV_COUNTERS_MAGIC  (uint32_t)0x5AD27F6BU
#define KSANCOV_EDGEMAP_MAGIC   (uint32_t)0x5AD37F7BU
#define KSANCOV_STKSIZE_MAGIC   (uint32_t)0x5AD47F8BU
71 
72 /*
73  * ioctl
74  */
75 
/*
 * Mapping descriptor for KSANCOV_IOC_MAP / KSANCOV_IOC_MAP_EDGEMAP;
 * both fields are filled in by the kernel.
 */
struct ksancov_buf_desc {
	uintptr_t ptr;  /* ptr to shared buffer [out] */
	size_t sz;      /* size of shared buffer [out] */
};
80 
/*
 * Supported coverage modes.
 */
typedef enum {
	KS_MODE_NONE,        /* no mode selected yet */
	KS_MODE_TRACE,       /* PC trace (KSANCOV_IOC_TRACE) */
	KS_MODE_COUNTERS,    /* per-edge hit counters (KSANCOV_IOC_COUNTERS) */
	KS_MODE_STKSIZE,     /* PC + stack-size trace (KSANCOV_IOC_STKSIZE) */
	KS_MODE_MAX
} ksancov_mode_t;
91 
/*
 * A header that is always present in every ksancov mode shared memory structure.
 *
 * kh_magic identifies the layout that follows (one of the KSANCOV_*_MAGIC
 * values); kh_enabled is the collection on/off switch toggled from
 * userspace via ksancov_start()/ksancov_stop().
 */
typedef struct ksancov_header {
	uint32_t         kh_magic;
	_Atomic uint32_t kh_enabled;
} ksancov_header_t;
99 
/*
 * TRACE mode data structure.
 */

/*
 * All trace based tools share this structure.
 *
 * The kernel appends entries at kt_head. Note that kt_head can run past
 * kt_maxent; readers clamp it against kt_maxent (see ksancov_trace_head).
 */
typedef struct ksancov_trace {
	ksancov_header_t kt_hdr;         /* header (must be always first) */
	uint32_t         kt_maxent;      /* Maximum entries in this shared buffer. */
	_Atomic uint32_t kt_head;        /* Pointer to the first unused element. */
	uint64_t         kt_entries[];   /* Trace entries in this buffer. */
} ksancov_trace_t;
113 
/* PC tracing only records PCs. */
typedef uintptr_t ksancov_trace_pc_ent_t;

/*
 * STKSIZE tracing records PCs and stack size.
 * Both entry types are overlaid on ksancov_trace_t.kt_entries.
 */
typedef struct ksancov_trace_stksize_entry {
	uintptr_t pc;                      /* PC */
	uint32_t  stksize;                 /* associated stack size */
} ksancov_trace_stksize_ent_t;
122 
/*
 * COUNTERS mode data structure.
 *
 * One 8-bit saturating counter per instrumented edge (caps at
 * KSANCOV_MAX_HITS).
 */
typedef struct ksancov_counters {
	ksancov_header_t kc_hdr;
	uint32_t         kc_nedges;       /* total number of edges */
	uint8_t          kc_hits[];       /* hits on each edge (8bit saturating) */
} ksancov_counters_t;
131 
/*
 * Edge to PC mapping.
 *
 * Shared via KSANCOV_IOC_MAP_EDGEMAP. Unlike the coverage buffers, this
 * structure carries its own ke_magic instead of a ksancov_header_t.
 */
typedef struct ksancov_edgemap {
	uint32_t  ke_magic;
	uint32_t  ke_nedges;
	uintptr_t ke_addrs[];             /* address of each edge relative to 'offset' */
} ksancov_edgemap_t;
140 
/*
 * On-demand related functionalities
 *
 * Operation selector for KSANCOV_IOC_ON_DEMAND (carried in
 * struct ksancov_on_demand_msg).
 */
typedef enum {
	KS_OD_GET_GATE = 1,     /* read a module's coverage gate */
	KS_OD_SET_GATE = 2,     /* write a module's coverage gate */
	KS_OD_GET_RANGE = 3,    /* query a module's guard range */
} ksancov_on_demand_operation_t;
149 
/* Request/reply message for KSANCOV_IOC_ON_DEMAND. */
struct ksancov_on_demand_msg {
	char bundle[/*KMOD_MAX_NAME*/ 64];       /* target kext bundle ID */
	ksancov_on_demand_operation_t operation; /* one of ksancov_on_demand_operation_t */
	union {
		uint64_t gate;                   /* KS_OD_GET_GATE / KS_OD_SET_GATE */
		struct {
			uint32_t start;
			uint32_t stop;
		} range;                         /* KS_OD_GET_RANGE */
	};
};
161 
162 /*
163  * ksancov userspace API
164  *
165  * Usage:
166  * 1) open the ksancov device
167  * 2) set the coverage mode
168  * 3) map the coverage buffer
169  * 4) start the trace on a thread
170  * 5) flip the enable bit
171  */
172 
173 static inline int
ksancov_open(void)174 ksancov_open(void)
175 {
176 	return open(KSANCOV_PATH, 0);
177 }
178 
179 static inline int
ksancov_map(int fd,uintptr_t * buf,size_t * sz)180 ksancov_map(int fd, uintptr_t *buf, size_t *sz)
181 {
182 	int ret;
183 	struct ksancov_buf_desc mc = {0};
184 
185 	ret = ioctl(fd, KSANCOV_IOC_MAP, &mc);
186 	if (ret == -1) {
187 		return errno;
188 	}
189 
190 	*buf = mc.ptr;
191 	if (sz) {
192 		*sz = mc.sz;
193 	}
194 
195 	ksancov_header_t *hdr = (ksancov_header_t *)mc.ptr;
196 	assert(hdr->kh_magic == KSANCOV_TRACE_MAGIC ||
197 	    hdr->kh_magic == KSANCOV_COUNTERS_MAGIC ||
198 	    hdr->kh_magic == KSANCOV_STKSIZE_MAGIC);
199 
200 	return 0;
201 }
202 
203 static inline int
ksancov_map_edgemap(int fd,uintptr_t * buf,size_t * sz)204 ksancov_map_edgemap(int fd, uintptr_t *buf, size_t *sz)
205 {
206 	int ret;
207 	struct ksancov_buf_desc mc = {0};
208 
209 	ret = ioctl(fd, KSANCOV_IOC_MAP_EDGEMAP, &mc);
210 	if (ret == -1) {
211 		return errno;
212 	}
213 
214 	*buf = mc.ptr;
215 	if (sz) {
216 		*sz = mc.sz;
217 	}
218 
219 	ksancov_edgemap_t *emap = (ksancov_edgemap_t *)mc.ptr;
220 	assert(emap->ke_magic == KSANCOV_EDGEMAP_MAGIC);
221 
222 	return 0;
223 }
224 
225 static inline size_t
ksancov_nedges(int fd)226 ksancov_nedges(int fd)
227 {
228 	size_t nedges;
229 	int ret = ioctl(fd, KSANCOV_IOC_NEDGES, &nedges);
230 	if (ret == -1) {
231 		return SIZE_MAX;
232 	}
233 	return nedges;
234 }
235 
236 static inline int
ksancov_mode_trace(int fd,size_t entries)237 ksancov_mode_trace(int fd, size_t entries)
238 {
239 	int ret;
240 	ret = ioctl(fd, KSANCOV_IOC_TRACE, &entries);
241 	if (ret == -1) {
242 		return errno;
243 	}
244 	return 0;
245 }
246 
247 static inline int
ksancov_mode_stksize(int fd,size_t entries)248 ksancov_mode_stksize(int fd, size_t entries)
249 {
250 	int ret;
251 	ret = ioctl(fd, KSANCOV_IOC_STKSIZE, &entries);
252 	if (ret == -1) {
253 		return errno;
254 	}
255 	return 0;
256 }
257 
258 static inline int
ksancov_mode_counters(int fd)259 ksancov_mode_counters(int fd)
260 {
261 	int ret;
262 	ret = ioctl(fd, KSANCOV_IOC_COUNTERS);
263 	if (ret == -1) {
264 		return errno;
265 	}
266 	return 0;
267 }
268 
269 static inline int
ksancov_thread_self(int fd)270 ksancov_thread_self(int fd)
271 {
272 	int ret;
273 	uintptr_t th = 0;
274 	ret = ioctl(fd, KSANCOV_IOC_START, &th);
275 	if (ret == -1) {
276 		return errno;
277 	}
278 	return 0;
279 }
280 
281 static inline int
ksancov_start(void * buf)282 ksancov_start(void *buf)
283 {
284 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
285 	atomic_store_explicit(&hdr->kh_enabled, 1, memory_order_relaxed);
286 	return 0;
287 }
288 
289 static inline int
ksancov_stop(void * buf)290 ksancov_stop(void *buf)
291 {
292 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
293 	atomic_store_explicit(&hdr->kh_enabled, 0, memory_order_relaxed);
294 	return 0;
295 }
296 
297 static inline int
ksancov_reset(void * buf)298 ksancov_reset(void *buf)
299 {
300 	ksancov_header_t *hdr = (ksancov_header_t *)buf;
301 	if (hdr->kh_magic == KSANCOV_TRACE_MAGIC || hdr->kh_magic == KSANCOV_STKSIZE_MAGIC) {
302 		ksancov_trace_t *trace = (ksancov_trace_t *)buf;
303 		atomic_store_explicit(&trace->kt_head, 0, memory_order_relaxed);
304 	} else if (hdr->kh_magic == KSANCOV_COUNTERS_MAGIC) {
305 		ksancov_counters_t *counters = (ksancov_counters_t *)buf;
306 		bzero(counters->kc_hits, counters->kc_nedges);
307 	} else {
308 		return EINVAL;
309 	}
310 	return 0;
311 }
312 
313 static inline uintptr_t
ksancov_edge_addr(ksancov_edgemap_t * kemap,size_t idx)314 ksancov_edge_addr(ksancov_edgemap_t *kemap, size_t idx)
315 {
316 	assert(kemap);
317 	if (idx >= kemap->ke_nedges) {
318 		return 0;
319 	}
320 	return kemap->ke_addrs[idx];
321 }
322 
323 static inline size_t
ksancov_trace_max_ent(ksancov_trace_t * trace)324 ksancov_trace_max_ent(ksancov_trace_t *trace)
325 {
326 	assert(trace);
327 	return trace->kt_maxent;
328 }
329 
330 static inline size_t
ksancov_trace_head(ksancov_trace_t * trace)331 ksancov_trace_head(ksancov_trace_t *trace)
332 {
333 	assert(trace);
334 	size_t maxent = trace->kt_maxent;
335 	size_t head = atomic_load_explicit(&trace->kt_head, memory_order_acquire);
336 	return head < maxent ? head : maxent;
337 }
338 
339 static inline uintptr_t
ksancov_trace_entry(ksancov_trace_t * trace,size_t i)340 ksancov_trace_entry(ksancov_trace_t *trace, size_t i)
341 {
342 	assert(trace);
343 	assert(trace->kt_hdr.kh_magic == KSANCOV_TRACE_MAGIC);
344 	if (i >= trace->kt_head) {
345 		return 0;
346 	}
347 
348 	ksancov_trace_pc_ent_t *entries = (ksancov_trace_pc_ent_t *)trace->kt_entries;
349 	return entries[i];
350 }
351 
352 static inline uintptr_t
ksancov_stksize_pc(ksancov_trace_t * trace,size_t i)353 ksancov_stksize_pc(ksancov_trace_t *trace, size_t i)
354 {
355 	assert(trace);
356 	assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
357 	if (i >= trace->kt_head) {
358 		return 0;
359 	}
360 
361 	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
362 	return entries[i].pc;
363 }
364 
365 static inline uint32_t
ksancov_stksize_size(ksancov_trace_t * trace,size_t i)366 ksancov_stksize_size(ksancov_trace_t *trace, size_t i)
367 {
368 	assert(trace);
369 	assert(trace->kt_hdr.kh_magic == KSANCOV_STKSIZE_MAGIC);
370 	if (i >= trace->kt_head) {
371 		return 0;
372 	}
373 
374 	ksancov_trace_stksize_ent_t *entries = (ksancov_trace_stksize_ent_t *)trace->kt_entries;
375 	return entries[i].stksize;
376 }
377 
378 /*
379  * On-demand control API
380  */
381 
382 static inline int
_ksancov_on_demand_operation(int fd,const char * bundle,ksancov_on_demand_operation_t op,struct ksancov_on_demand_msg * msg)383 _ksancov_on_demand_operation(int fd, const char *bundle, ksancov_on_demand_operation_t op, struct ksancov_on_demand_msg *msg)
384 {
385 	int ret;
386 
387 	msg->operation = op;
388 	strlcpy(msg->bundle, bundle, sizeof(msg->bundle));
389 
390 	ret = ioctl(fd, KSANCOV_IOC_ON_DEMAND, msg);
391 	if (ret == -1) {
392 		return errno;
393 	}
394 
395 	return ret;
396 }
397 
398 /*
399  * Retrieve the value of the gate for a given module bundle ID.
400  */
401 static inline int
ksancov_on_demand_get_gate(int fd,const char * bundle,uint64_t * gate)402 ksancov_on_demand_get_gate(int fd, const char *bundle, uint64_t *gate)
403 {
404 	assert(gate);
405 
406 	struct ksancov_on_demand_msg msg;
407 	int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_GATE, &msg);
408 	if (ret == 0) {
409 		*gate = msg.gate;
410 	}
411 	return ret;
412 }
413 
414 /*
415  * Set the value of the gate for a given module bundle ID.
416  *
417  * Any non-zero value enables the invocation of the sanitizer coverage callbacks
418  * inserted in the specified module.
419  */
420 static inline int
ksancov_on_demand_set_gate(int fd,const char * bundle,uint64_t value)421 ksancov_on_demand_set_gate(int fd, const char *bundle, uint64_t value)
422 {
423 	struct ksancov_on_demand_msg msg = {};
424 	msg.gate = value;
425 	return _ksancov_on_demand_operation(fd, bundle, KS_OD_SET_GATE, &msg);
426 }
427 
428 /*
429  * Get the guards range for a specified module.
430  */
431 static inline int
ksancov_on_demand_get_range(int fd,const char * bundle,uint32_t * start,uint32_t * stop)432 ksancov_on_demand_get_range(int fd, const char *bundle, uint32_t *start, uint32_t *stop)
433 {
434 	assert(start && stop);
435 
436 	struct ksancov_on_demand_msg msg = {};
437 	int ret = _ksancov_on_demand_operation(fd, bundle, KS_OD_GET_RANGE, &msg);
438 	if (ret == 0) {
439 		*start = msg.range.start;
440 		*stop = msg.range.stop;
441 	}
442 	return ret;
443 }
444 
445 #endif /* _KSANCOV_H_ */
446