xref: /xnu-11215.1.10/bsd/net/bpf_filter.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1990, 1991, 1993
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * This code is derived from the Stanford/CMU enet packet filter,
33  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
34  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
35  * Berkeley Laboratory.
36  *
37  * Redistribution and use in source and binary forms, with or without
38  * modification, are permitted provided that the following conditions
39  * are met:
40  * 1. Redistributions of source code must retain the above copyright
41  *    notice, this list of conditions and the following disclaimer.
42  * 2. Redistributions in binary form must reproduce the above copyright
43  *    notice, this list of conditions and the following disclaimer in the
44  *    documentation and/or other materials provided with the distribution.
45  * 3. All advertising materials mentioning features or use of this software
46  *    must display the following acknowledgement:
47  *	This product includes software developed by the University of
48  *	California, Berkeley and its contributors.
49  * 4. Neither the name of the University nor the names of its contributors
50  *    may be used to endorse or promote products derived from this software
51  *    without specific prior written permission.
52  *
53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63  * SUCH DAMAGE.
64  *
65  *      @(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
66  *
67  * $FreeBSD: src/sys/net/bpf_filter.c,v 1.17 1999/12/29 04:38:31 peter Exp $
68  */
69 
70 #include <sys/param.h>
71 #include <string.h>
72 
73 #ifdef sun
74 #include <netinet/in.h>
75 #endif
76 
77 #ifdef KERNEL
78 #include <sys/mbuf.h>
79 #include <net/sockaddr_utils.h>
80 #endif
81 #include <net/bpf.h>
82 #ifdef KERNEL
83 
84 extern unsigned int bpf_maxbufsize;
85 
86 static inline u_int32_t
get_word_from_buffers(u_char * __indexable cp,u_char * __indexable np,size_t num_from_cp)87 get_word_from_buffers(u_char *__indexable cp, u_char *__indexable np, size_t num_from_cp)
88 {
89 	u_int32_t       val;
90 
91 	switch (num_from_cp) {
92 	case 1:
93 		val = ((u_int32_t)cp[0] << 24) |
94 		    ((u_int32_t)np[0] << 16) |
95 		    ((u_int32_t)np[1] << 8)  |
96 		    (u_int32_t)np[2];
97 		break;
98 
99 	case 2:
100 		val = ((u_int32_t)cp[0] << 24) |
101 		    ((u_int32_t)cp[1] << 16) |
102 		    ((u_int32_t)np[0] << 8) |
103 		    (u_int32_t)np[1];
104 		break;
105 	default:
106 		val = ((u_int32_t)cp[0] << 24) |
107 		    ((u_int32_t)cp[1] << 16) |
108 		    ((u_int32_t)cp[2] << 8) |
109 		    (u_int32_t)np[0];
110 		break;
111 	}
112 	return val;
113 }
114 
/*
 * Locate byte offset *k_p within the logical packet formed by the
 * caller-supplied header (hdr/hdrlen) followed by the mbuf chain *m_p.
 *
 * On success, returns a pointer to the byte at that offset and stores
 * in *len_p the number of bytes in the buffer that contains it (the
 * header length, or the holding mbuf's m_len).  When the byte lives in
 * the chain, *m_p is advanced to the mbuf FOLLOWING the one that holds
 * it (in case the caller's read must continue there) and *k_p is
 * rewritten as the offset relative to that holding mbuf's data.
 *
 * Returns NULL when the offset lies past the end of the chain.
 */
static u_char *__indexable
m_hdr_offset(struct mbuf **m_p, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 * k_p,
    size_t * len_p)
{
	size_t len;
	u_char *cp;
	bpf_u_int32 k = *k_p;

	if (k >= hdrlen) {
		struct mbuf *m = *m_p;

		/* there's no header or the offset we want is past the header */
		k -= hdrlen;

		/* walk the chain until we reach the mbuf containing offset k */
		len = m->m_len;
		while (k >= len) {
			k -= len;
			m = m->m_next;
			if (m == NULL) {
				/* offset is beyond the end of the packet */
				return NULL;
			}
			len = m->m_len;
		}
		cp = mtod(m, u_char *) + k;

		/* return next mbuf, in case it's needed */
		*m_p = m->m_next;

		/* update the offset */
		*k_p = k;
	} else {
		/* offset falls inside the prepended header */
		len = hdrlen;
		cp = (u_char *)hdr + k;
	}
	*len_p = len;
	return cp;
}
152 
/*
 * Extract a 32-bit word in network byte order at offset k of the
 * header + mbuf-chain packet, handling a word that straddles two
 * adjacent mbufs.  On failure, sets *err to 1 and returns 0.
 */
static u_int32_t
m_xword(struct mbuf *m, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
{
	size_t len;
	u_char *cp, *np;

	cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len);
	if (cp == NULL) {
		goto bad;
	}
	if (len - k >= 4) {
		/* all four bytes are contiguous in this buffer */
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	/*
	 * Word straddles buffers: the next mbuf must exist and supply
	 * the remaining 4 - (len - k) bytes.
	 */
	if (m == 0 || m->m_len + len - k < 4) {
		goto bad;
	}
	*err = 0;
	np = mtod(m, u_char *);
	return get_word_from_buffers(cp, np, len - k);

bad:
	*err = 1;
	return 0;
}
178 
179 static uint16_t
m_xhalf(struct mbuf * m,void * __sized_by (hdrlen)hdr,size_t hdrlen,bpf_u_int32 k,int * err)180 m_xhalf(struct mbuf *m, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
181 {
182 	size_t len;
183 	u_char *cp;
184 
185 	cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len);
186 	if (cp == NULL) {
187 		goto bad;
188 	}
189 	if (len - k >= 2) {
190 		*err = 0;
191 		return EXTRACT_SHORT(cp);
192 	}
193 	if (m == 0) {
194 		goto bad;
195 	}
196 	*err = 0;
197 	return (uint16_t)((cp[0] << 8) | mtod(m, u_char *)[0]);
198 bad:
199 	*err = 1;
200 	return 0;
201 }
202 
203 static u_int8_t
m_xbyte(struct mbuf * m,void * __sized_by (hdrlen)hdr,size_t hdrlen,bpf_u_int32 k,int * err)204 m_xbyte(struct mbuf *m, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
205 {
206 	size_t len;
207 	u_char *cp;
208 
209 	cp = m_hdr_offset(&m, hdr, hdrlen, &k, &len);
210 	if (cp == NULL) {
211 		goto bad;
212 	}
213 	*err = 0;
214 	return *cp;
215 bad:
216 	*err = 1;
217 	return 0;
218 }
219 
220 #if SKYWALK
221 
222 #include <skywalk/os_skywalk_private.h>
223 
224 static void *__indexable
buflet_get_address(kern_buflet_t buflet)225 buflet_get_address(kern_buflet_t buflet)
226 {
227 	uint8_t *addr;
228 	uint32_t offset;
229 	uint32_t limit;
230 
231 	limit = kern_buflet_get_data_limit(buflet);
232 	addr = __unsafe_forge_bidi_indexable(uint8_t *,
233 	    kern_buflet_get_data_address(buflet),
234 	    limit);
235 	if (addr == NULL) {
236 		return NULL;
237 	}
238 	offset = kern_buflet_get_data_offset(buflet);
239 	return __unsafe_forge_bidi_indexable(uint8_t *,
240 	           addr + offset,
241 	           limit - offset);
242 }
243 
/*
 * Locate byte offset *k_p within the logical packet formed by the
 * caller-supplied header (hdr/hdrlen) followed by the buflets of the
 * skywalk packet p.
 *
 * On success, returns a pointer to the byte at that offset, stores in
 * *len_p the number of bytes in the buffer that contains it, and in
 * *buflet_p the buflet holding it (NULL when the byte lives in the
 * header).  When the byte lives in a buflet, *k_p is rewritten as the
 * offset relative to that buflet's data.
 *
 * Returns NULL when the offset lies past the last buflet.
 */
static u_char *__indexable
p_hdr_offset(kern_packet_t p, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 * k_p,
    size_t * len_p, kern_buflet_t * buflet_p)
{
	u_char          *cp = NULL;
	bpf_u_int32     k = *k_p;
	size_t          len;
	kern_buflet_t __single  buflet = NULL;

	if (k >= hdrlen) {
		/* there's no header or the offset we want is past the header */
		k -= hdrlen;
		/* walk the buflet chain until the one containing offset k */
		for (;;) {
			buflet = kern_packet_get_next_buflet(p, buflet);
			if (buflet == NULL) {
				break;
			}
			len = kern_buflet_get_data_length(buflet);
			if (k < len) {
				break;
			}
			k -= len;
		}
		if (buflet == NULL) {
			/* offset is beyond the end of the packet */
			return NULL;
		}
		cp = (u_char *)buflet_get_address(buflet) + k;
		/* update the offset */
		*k_p = k;
	} else {
		/* offset falls inside the prepended header */
		len = hdrlen;
		cp = (u_char *)hdr + k;
	}
	*len_p = len;
	*buflet_p = buflet;
	return cp;
}
280 
/*
 * Extract a 32-bit word in network byte order at offset k of the
 * header + skywalk packet, handling a word that straddles two
 * adjacent buflets.  On failure, sets *err to 1 and returns 0.
 */
static u_int32_t
p_xword(kern_packet_t p, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
{
	kern_buflet_t __single buflet = NULL;
	u_char          *cp;
	size_t          len = 0;
	u_char          *np;

	cp = p_hdr_offset(p, hdr, hdrlen, &k, &len, &buflet);
	if (cp == NULL) {
		goto bad;
	}
	if ((len - k) >= 4) {
		/* all four bytes are contiguous in this buffer */
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	/*
	 * Word straddles buffers: the next buflet must exist and supply
	 * the remaining 4 - (len - k) bytes.
	 */
	buflet = kern_packet_get_next_buflet(p, buflet);
	if (buflet == NULL ||
	    (kern_buflet_get_data_length(buflet) + len - k) < 4) {
		goto bad;
	}
	*err = 0;
	np = (u_char *)buflet_get_address(buflet);
	return get_word_from_buffers(cp, np, len - k);

bad:
	*err = 1;
	return 0;
}
310 
/*
 * Extract a 16-bit halfword in network byte order at offset k of the
 * header + skywalk packet, handling a halfword that straddles two
 * adjacent buflets.  On failure, sets *err to 1 and returns 0.
 */
static uint16_t
p_xhalf(kern_packet_t p, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
{
	kern_buflet_t __single buflet = NULL;
	u_char          *cp;
	size_t          len = 0;
	u_char          *np;

	cp = p_hdr_offset(p, hdr, hdrlen, &k, &len, &buflet);
	if (cp == NULL) {
		goto bad;
	}
	if ((len - k) >= 2) {
		/* both bytes are contiguous in this buffer */
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	/*
	 * Halfword straddles into the next buflet: it must exist and
	 * hold at least one byte of data.
	 */
	buflet = kern_packet_get_next_buflet(p, buflet);
	if (buflet == NULL || kern_buflet_get_data_length(buflet) == 0) {
		goto bad;
	}
	np = (u_char *)buflet_get_address(buflet);
	*err = 0;
	return (uint16_t)((cp[0] << 8) | np[0]);
bad:
	*err = 1;
	return 0;
}
338 
339 static u_int8_t
p_xbyte(kern_packet_t p,void * __sized_by (hdrlen)hdr,size_t hdrlen,bpf_u_int32 k,int * err)340 p_xbyte(kern_packet_t p, void *__sized_by(hdrlen) hdr, size_t hdrlen, bpf_u_int32 k, int *err)
341 {
342 	kern_buflet_t __single buflet = NULL;
343 	u_char          *cp;
344 	size_t          len = 0;
345 
346 	cp = p_hdr_offset(p, hdr, hdrlen, &k, &len, &buflet);
347 	if (cp == NULL) {
348 		goto bad;
349 	}
350 	*err = 0;
351 	return *cp;
352 bad:
353 	*err = 1;
354 	return 0;
355 }
356 
357 #endif /* SKYWALK */
358 
359 static u_int32_t
bp_xword(struct bpf_packet * bp,bpf_u_int32 k,int * err)360 bp_xword(struct bpf_packet *bp, bpf_u_int32 k, int *err)
361 {
362 	size_t hdrlen = bp->bpfp_header_length;
363 	void *hdr = bp->bpfp_header;
364 
365 	switch (bp->bpfp_type) {
366 	case BPF_PACKET_TYPE_MBUF:
367 		return m_xword(bp->bpfp_mbuf, hdr, hdrlen, k, err);
368 #if SKYWALK
369 	case BPF_PACKET_TYPE_PKT:
370 		return p_xword(bp->bpfp_pkt, hdr, hdrlen, k, err);
371 #endif /* SKYWALK */
372 	default:
373 		break;
374 	}
375 	*err = 1;
376 	return 0;
377 }
378 
379 static u_int16_t
bp_xhalf(struct bpf_packet * bp,bpf_u_int32 k,int * err)380 bp_xhalf(struct bpf_packet *bp, bpf_u_int32 k, int *err)
381 {
382 	size_t hdrlen = bp->bpfp_header_length;
383 	void *hdr = bp->bpfp_header;
384 
385 	switch (bp->bpfp_type) {
386 	case BPF_PACKET_TYPE_MBUF:
387 		return m_xhalf(bp->bpfp_mbuf, hdr, hdrlen, k, err);
388 #if SKYWALK
389 	case BPF_PACKET_TYPE_PKT:
390 		return p_xhalf(bp->bpfp_pkt, hdr, hdrlen, k, err);
391 #endif /* SKYWALK */
392 	default:
393 		break;
394 	}
395 	*err = 1;
396 	return 0;
397 }
398 
399 static u_int8_t
bp_xbyte(struct bpf_packet * bp,bpf_u_int32 k,int * err)400 bp_xbyte(struct bpf_packet *bp, bpf_u_int32 k, int *err)
401 {
402 	size_t hdrlen = bp->bpfp_header_length;
403 	void *hdr = bp->bpfp_header;
404 
405 	switch (bp->bpfp_type) {
406 	case BPF_PACKET_TYPE_MBUF:
407 		return m_xbyte(bp->bpfp_mbuf, hdr, hdrlen, k, err);
408 #if SKYWALK
409 	case BPF_PACKET_TYPE_PKT:
410 		return p_xbyte(bp->bpfp_pkt, hdr, hdrlen, k, err);
411 #endif /* SKYWALK */
412 	default:
413 		break;
414 	}
415 	*err = 1;
416 	return 0;
417 }
418 
419 #endif
420 
421 /*
422  * Execute the filter program starting at pc on the packet p
423  * wirelen is the length of the original packet
424  * buflen is the amount of data present
425  */
426 u_int
bpf_filter(const struct bpf_insn * __counted_by (pc_len)pc_orig,u_int pc_len,u_char * __sized_by (sizeof (struct bpf_packet))p,u_int wirelen,u_int buflen)427 bpf_filter(const struct bpf_insn *__counted_by(pc_len) pc_orig, u_int pc_len,
428     u_char *__sized_by(sizeof(struct bpf_packet)) p, u_int wirelen, u_int buflen)
429 {
430 	u_int32_t A = 0, X = 0;
431 	bpf_u_int32 k;
432 	int32_t mem[BPF_MEMWORDS];
433 	const struct bpf_insn *pc = pc_orig;
434 #ifdef KERNEL
435 	int merr;
436 	struct bpf_packet * bp = (struct bpf_packet *)(void *)p;
437 #endif /* KERNEL */
438 	/* Ignore warning without -fbounds-safety. */
439 	(void)pc_len;
440 
441 	bzero(mem, sizeof(mem));
442 
443 	if (pc == 0) {
444 		/*
445 		 * No filter means accept all.
446 		 */
447 		return (u_int) - 1;
448 	}
449 
450 	--pc;
451 	while (1) {
452 		++pc;
453 		switch (pc->code) {
454 		default:
455 #ifdef KERNEL
456 			return 0;
457 #else /* KERNEL */
458 			abort();
459 #endif /* KERNEL */
460 		case BPF_RET | BPF_K:
461 			return (u_int)pc->k;
462 
463 		case BPF_RET | BPF_A:
464 			return (u_int)A;
465 
466 		case BPF_LD | BPF_W | BPF_ABS:
467 			k = pc->k;
468 			if (k > buflen || sizeof(int32_t) > buflen - k) {
469 #ifdef KERNEL
470 				if (buflen != 0) {
471 					return 0;
472 				}
473 				A = bp_xword(bp, k, &merr);
474 				if (merr != 0) {
475 					return 0;
476 				}
477 				continue;
478 #else /* KERNEL */
479 				return 0;
480 #endif /* KERNEL */
481 			}
482 #if BPF_ALIGN
483 			if (((intptr_t)(p + k) & 3) != 0) {
484 				A = EXTRACT_LONG(&p[k]);
485 			} else
486 #endif /* BPF_ALIGN */
487 			A = ntohl(*(int32_t *)(void *)(p + k));
488 			continue;
489 
490 		case BPF_LD | BPF_H | BPF_ABS:
491 			k = pc->k;
492 			if (k > buflen || sizeof(int16_t) > buflen - k) {
493 #ifdef KERNEL
494 				if (buflen != 0) {
495 					return 0;
496 				}
497 				A = bp_xhalf(bp, k, &merr);
498 				if (merr != 0) {
499 					return 0;
500 				}
501 				continue;
502 #else /* KERNEL */
503 				return 0;
504 #endif /* KERNEL */
505 			}
506 			A = EXTRACT_SHORT(&p[k]);
507 			continue;
508 
509 		case BPF_LD | BPF_B | BPF_ABS:
510 			k = pc->k;
511 			if (k >= buflen) {
512 #ifdef KERNEL
513 				if (buflen != 0) {
514 					return 0;
515 				}
516 				A = bp_xbyte(bp, k, &merr);
517 				if (merr != 0) {
518 					return 0;
519 				}
520 				continue;
521 #else /* KERNEL */
522 				return 0;
523 #endif /* KERNEL */
524 			}
525 			A = p[k];
526 			continue;
527 
528 		case BPF_LD | BPF_W | BPF_LEN:
529 			A = wirelen;
530 			continue;
531 
532 		case BPF_LDX | BPF_W | BPF_LEN:
533 			X = wirelen;
534 			continue;
535 
536 		case BPF_LD | BPF_W | BPF_IND:
537 			k = X + pc->k;
538 			if (pc->k > buflen || X > buflen - pc->k ||
539 			    sizeof(int32_t) > buflen - k) {
540 #ifdef KERNEL
541 				if (buflen != 0) {
542 					return 0;
543 				}
544 				A = bp_xword(bp, k, &merr);
545 				if (merr != 0) {
546 					return 0;
547 				}
548 				continue;
549 #else /* KERNEL */
550 				return 0;
551 #endif /* KERNEL */
552 			}
553 #if BPF_ALIGN
554 			if (((intptr_t)(p + k) & 3) != 0) {
555 				A = EXTRACT_LONG(&p[k]);
556 			} else
557 #endif /* BPF_ALIGN */
558 			A = ntohl(*(int32_t *)(void *)(p + k));
559 			continue;
560 
561 		case BPF_LD | BPF_H | BPF_IND:
562 			k = X + pc->k;
563 			if (X > buflen || pc->k > buflen - X ||
564 			    sizeof(int16_t) > buflen - k) {
565 #ifdef KERNEL
566 				if (buflen != 0) {
567 					return 0;
568 				}
569 				A = bp_xhalf(bp, k, &merr);
570 				if (merr != 0) {
571 					return 0;
572 				}
573 				continue;
574 #else /* KERNEL */
575 				return 0;
576 #endif /* KERNEL */
577 			}
578 			A = EXTRACT_SHORT(&p[k]);
579 			continue;
580 
581 		case BPF_LD | BPF_B | BPF_IND:
582 			k = X + pc->k;
583 			if (pc->k >= buflen || X >= buflen - pc->k) {
584 #ifdef KERNEL
585 				if (buflen != 0) {
586 					return 0;
587 				}
588 				A = bp_xbyte(bp, k, &merr);
589 				if (merr != 0) {
590 					return 0;
591 				}
592 				continue;
593 #else /* KERNEL */
594 				return 0;
595 #endif /* KERNEL */
596 			}
597 			A = p[k];
598 			continue;
599 
600 		case BPF_LDX | BPF_MSH | BPF_B:
601 			k = pc->k;
602 			if (k >= buflen) {
603 #ifdef KERNEL
604 				if (buflen != 0) {
605 					return 0;
606 				}
607 				X = bp_xbyte(bp, k, &merr);
608 				if (merr != 0) {
609 					return 0;
610 				}
611 				X = (X & 0xf) << 2;
612 				continue;
613 #else
614 				return 0;
615 #endif
616 			}
617 			X = (p[pc->k] & 0xf) << 2;
618 			continue;
619 
620 		case BPF_LD | BPF_IMM:
621 			A = pc->k;
622 			continue;
623 
624 		case BPF_LDX | BPF_IMM:
625 			X = pc->k;
626 			continue;
627 
628 		case BPF_LD | BPF_MEM:
629 			if (pc->k >= BPF_MEMWORDS) {
630 				return 0;
631 			}
632 			A = mem[pc->k];
633 			continue;
634 
635 		case BPF_LDX | BPF_MEM:
636 			if (pc->k >= BPF_MEMWORDS) {
637 				return 0;
638 			}
639 			X = mem[pc->k];
640 			continue;
641 
642 		case BPF_ST:
643 			if (pc->k >= BPF_MEMWORDS) {
644 				return 0;
645 			}
646 			mem[pc->k] = A;
647 			continue;
648 
649 		case BPF_STX:
650 			if (pc->k >= BPF_MEMWORDS) {
651 				return 0;
652 			}
653 			mem[pc->k] = X;
654 			continue;
655 
656 		case BPF_JMP | BPF_JA:
657 			pc += pc->k;
658 			continue;
659 
660 		case BPF_JMP | BPF_JGT | BPF_K:
661 			pc += (A > pc->k) ? pc->jt : pc->jf;
662 			continue;
663 
664 		case BPF_JMP | BPF_JGE | BPF_K:
665 			pc += (A >= pc->k) ? pc->jt : pc->jf;
666 			continue;
667 
668 		case BPF_JMP | BPF_JEQ | BPF_K:
669 			pc += (A == pc->k) ? pc->jt : pc->jf;
670 			continue;
671 
672 		case BPF_JMP | BPF_JSET | BPF_K:
673 			pc += (A & pc->k) ? pc->jt : pc->jf;
674 			continue;
675 
676 		case BPF_JMP | BPF_JGT | BPF_X:
677 			pc += (A > X) ? pc->jt : pc->jf;
678 			continue;
679 
680 		case BPF_JMP | BPF_JGE | BPF_X:
681 			pc += (A >= X) ? pc->jt : pc->jf;
682 			continue;
683 
684 		case BPF_JMP | BPF_JEQ | BPF_X:
685 			pc += (A == X) ? pc->jt : pc->jf;
686 			continue;
687 
688 		case BPF_JMP | BPF_JSET | BPF_X:
689 			pc += (A & X) ? pc->jt : pc->jf;
690 			continue;
691 
692 		case BPF_ALU | BPF_ADD | BPF_X:
693 			A += X;
694 			continue;
695 
696 		case BPF_ALU | BPF_SUB | BPF_X:
697 			A -= X;
698 			continue;
699 
700 		case BPF_ALU | BPF_MUL | BPF_X:
701 			A *= X;
702 			continue;
703 
704 		case BPF_ALU | BPF_DIV | BPF_X:
705 			if (X == 0) {
706 				return 0;
707 			}
708 			A /= X;
709 			continue;
710 
711 		case BPF_ALU | BPF_AND | BPF_X:
712 			A &= X;
713 			continue;
714 
715 		case BPF_ALU | BPF_OR | BPF_X:
716 			A |= X;
717 			continue;
718 
719 		case BPF_ALU | BPF_LSH | BPF_X:
720 			A <<= X;
721 			continue;
722 
723 		case BPF_ALU | BPF_RSH | BPF_X:
724 			A >>= X;
725 			continue;
726 
727 		case BPF_ALU | BPF_ADD | BPF_K:
728 			A += pc->k;
729 			continue;
730 
731 		case BPF_ALU | BPF_SUB | BPF_K:
732 			A -= pc->k;
733 			continue;
734 
735 		case BPF_ALU | BPF_MUL | BPF_K:
736 			A *= pc->k;
737 			continue;
738 
739 		case BPF_ALU | BPF_DIV | BPF_K:
740 			A /= pc->k;
741 			continue;
742 
743 		case BPF_ALU | BPF_AND | BPF_K:
744 			A &= pc->k;
745 			continue;
746 
747 		case BPF_ALU | BPF_OR | BPF_K:
748 			A |= pc->k;
749 			continue;
750 
751 		case BPF_ALU | BPF_LSH | BPF_K:
752 			A <<= pc->k;
753 			continue;
754 
755 		case BPF_ALU | BPF_RSH | BPF_K:
756 			A >>= pc->k;
757 			continue;
758 
759 		case BPF_ALU | BPF_NEG:
760 			A = -A;
761 			continue;
762 
763 		case BPF_MISC | BPF_TAX:
764 			X = A;
765 			continue;
766 
767 		case BPF_MISC | BPF_TXA:
768 			A = X;
769 			continue;
770 		}
771 	}
772 }
773 
774 #ifdef KERNEL
775 /*
776  * Return true if the 'fcode' is a valid filter program.
777  * The constraints are that each jump be forward and to a valid
778  * code, that memory accesses are within valid ranges (to the
779  * extent that this can be checked statically; loads of packet data
780  * have to be, and are, also checked at run time), and that
781  * the code terminates with either an accept or reject.
782  *
783  * The kernel needs to be able to verify an application's filter code.
784  * Otherwise, a bogus program could easily crash the system.
785  */
786 int
bpf_validate(const struct bpf_insn * __counted_by (len)f,int len)787 bpf_validate(const struct bpf_insn *__counted_by(len) f, int len)
788 {
789 	u_int i, from;
790 	const struct bpf_insn *p;
791 
792 	if (len < 1 || len > BPF_MAXINSNS) {
793 		return 0;
794 	}
795 
796 	for (i = 0; i < ((u_int)len); ++i) {
797 		p = &f[i];
798 		switch (BPF_CLASS(p->code)) {
799 		/*
800 		 * Check that memory operations use valid addresses
801 		 */
802 		case BPF_LD:
803 		case BPF_LDX:
804 			switch (BPF_MODE(p->code)) {
805 			case BPF_IMM:
806 				break;
807 			case BPF_ABS:
808 			case BPF_IND:
809 			case BPF_MSH:
810 				/*
811 				 * More strict check with actual packet length
812 				 * is done runtime.
813 				 */
814 				if (p->k >= bpf_maxbufsize) {
815 					return 0;
816 				}
817 				break;
818 			case BPF_MEM:
819 				if (p->k >= BPF_MEMWORDS) {
820 					return 0;
821 				}
822 				break;
823 			case BPF_LEN:
824 				break;
825 			default:
826 				return 0;
827 			}
828 			break;
829 		case BPF_ST:
830 		case BPF_STX:
831 			if (p->k >= BPF_MEMWORDS) {
832 				return 0;
833 			}
834 			break;
835 		case BPF_ALU:
836 			switch (BPF_OP(p->code)) {
837 			case BPF_ADD:
838 			case BPF_SUB:
839 			case BPF_MUL:
840 			case BPF_OR:
841 			case BPF_AND:
842 			case BPF_LSH:
843 			case BPF_RSH:
844 			case BPF_NEG:
845 				break;
846 			case BPF_DIV:
847 				/*
848 				 * Check for constant division by 0
849 				 */
850 				if (BPF_SRC(p->code) == BPF_K && p->k == 0) {
851 					return 0;
852 				}
853 				break;
854 			default:
855 				return 0;
856 			}
857 			break;
858 		case BPF_JMP:
859 			/*
860 			 * Check that jumps are within the code block,
861 			 * and that unconditional branches don't go
862 			 * backwards as a result of an overflow.
863 			 * Unconditional branches have a 32-bit offset,
864 			 * so they could overflow; we check to make
865 			 * sure they don't. Conditional branches have
866 			 * an 8-bit offset, and the from address is
867 			 * less than equal to BPF_MAXINSNS, and we assume that
868 			 * BPF_MAXINSNS is sufficiently small that adding 255
869 			 * to it won't overlflow
870 			 *
871 			 * We know that len is <= BPF_MAXINSNS, and we
872 			 * assume that BPF_MAXINSNS is less than the maximum
873 			 * size of a u_int, so that i+1 doesn't overflow
874 			 */
875 			from = i + 1;
876 			switch (BPF_OP(p->code)) {
877 			case BPF_JA:
878 				if (from + p->k < from || from + p->k >= ((u_int)len)) {
879 					return 0;
880 				}
881 				break;
882 			case BPF_JEQ:
883 			case BPF_JGT:
884 			case BPF_JGE:
885 			case BPF_JSET:
886 				if (from + p->jt >= ((u_int)len) || from + p->jf >= ((u_int)len)) {
887 					return 0;
888 				}
889 				break;
890 			default:
891 				return 0;
892 			}
893 			break;
894 		case BPF_RET:
895 			break;
896 		case BPF_MISC:
897 			break;
898 		default:
899 			return 0;
900 		}
901 	}
902 	return BPF_CLASS(f[len - 1].code) == BPF_RET;
903 }
904 #endif
905