1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1990, 1991, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * This code is derived from the Stanford/CMU enet packet filter,
33 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
34 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
35 * Berkeley Laboratory.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in the
44 * documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 * must display the following acknowledgement:
47 * This product includes software developed by the University of
48 * California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 * may be used to endorse or promote products derived from this software
51 * without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 * @(#)bpf.c 8.2 (Berkeley) 3/28/94
66 *
67 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
68 */
69 /*
70 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
71 * support for mandatory and extensible security protections. This notice
72 * is included in support of clause 2.2 (b) of the Apple Public License,
73 * Version 2.0.
74 */
75
76 #include "bpf.h"
77
78 #ifndef __GNUC__
79 #define inline
80 #else
81 #define inline __inline
82 #endif
83
84 #include <sys/param.h>
85 #include <sys/systm.h>
86 #include <sys/conf.h>
87 #include <sys/malloc.h>
88 #include <sys/mbuf.h>
89 #include <sys/time.h>
90 #include <sys/proc.h>
91 #include <sys/signalvar.h>
92 #include <sys/filio.h>
93 #include <sys/sockio.h>
94 #include <sys/ttycom.h>
95 #include <sys/filedesc.h>
96 #include <sys/uio_internal.h>
97 #include <sys/file_internal.h>
98 #include <sys/event.h>
99
100 #include <sys/poll.h>
101
102 #include <sys/socket.h>
103 #include <sys/socketvar.h>
104 #include <sys/vnode.h>
105
106 #include <net/if.h>
107 #include <net/bpf.h>
108 #include <net/bpfdesc.h>
109
110 #include <netinet/in.h>
111 #include <netinet/ip.h>
112 #include <netinet/ip6.h>
113 #include <netinet/in_pcb.h>
114 #include <netinet/in_var.h>
115 #include <netinet/ip_var.h>
116 #include <netinet/tcp.h>
117 #include <netinet/tcp_var.h>
118 #include <netinet/udp.h>
119 #include <netinet/udp_var.h>
120 #include <netinet/if_ether.h>
121 #include <netinet/isakmp.h>
122 #include <netinet6/esp.h>
123 #include <sys/kernel.h>
124 #include <sys/sysctl.h>
125 #include <net/firewire.h>
126
127 #include <miscfs/devfs/devfs.h>
128 #include <net/dlil.h>
129 #include <net/pktap.h>
130
131 #include <kern/assert.h>
132 #include <kern/locks.h>
133 #include <kern/thread_call.h>
134 #include <libkern/section_keywords.h>
135
136 #include <os/log.h>
137
138 #include <IOKit/IOBSD.h>
139
140
141 extern int tvtohz(struct timeval *);
142 extern char *proc_name_address(void *p);
143
144 #define BPF_BUFSIZE 4096
145
146 #define PRINET 26 /* interruptible */
147
148 #define ISAKMP_HDR_SIZE (sizeof(struct isakmp) + sizeof(struct isakmp_gen))
149 #define ESP_HDR_SIZE sizeof(struct newesp)
150
151 #define BPF_WRITE_LEEWAY 18 /* space for link layer header */
152
153 #define BPF_WRITE_MAX 0x1000000 /* 16 MB arbitrary value */
154
155 typedef void (*pktcopyfunc_t)(const void *, void *, size_t);
156
157 /*
158 * The default read buffer size is patchable.
159 */
160 static unsigned int bpf_bufsize = BPF_BUFSIZE;
161 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
162 &bpf_bufsize, 0, "");
163
164 __private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
165 static int sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS;
166 SYSCTL_PROC(_debug, OID_AUTO, bpf_maxbufsize, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
167 &bpf_maxbufsize, 0,
168 sysctl_bpf_maxbufsize, "I", "Default BPF max buffer size");
169
170 extern const int copysize_limit_panic;
171 #define BPF_BUFSIZE_CAP (copysize_limit_panic >> 1)
172 static int sysctl_bpf_bufsize_cap SYSCTL_HANDLER_ARGS;
173 SYSCTL_PROC(_debug, OID_AUTO, bpf_bufsize_cap, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
174 0, 0,
175 sysctl_bpf_bufsize_cap, "I", "Upper limit on BPF max buffer size");
176
177 #define BPF_MAX_DEVICES 256
178 static unsigned int bpf_maxdevices = BPF_MAX_DEVICES;
179 SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RD | CTLFLAG_LOCKED,
180 &bpf_maxdevices, 0, "");
181
182 /*
183 * bpf_wantpktap controls the default visibility of DLT_PKTAP.
184 * On OS X it is off by default, so a process needs to issue the
185 * BIOCSWANTPKTAP ioctl explicitly to be able to use DLT_PKTAP.
186 */
187 #if !XNU_TARGET_OS_OSX
188 static unsigned int bpf_wantpktap = 1;
189 #else /* XNU_TARGET_OS_OSX */
190 static unsigned int bpf_wantpktap = 0;
191 #endif /* XNU_TARGET_OS_OSX */
192 SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED,
193 &bpf_wantpktap, 0, "");
194
195 static int bpf_debug = 0;
196 SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
197 &bpf_debug, 0, "");
198
199 static unsigned long bpf_trunc_overflow = 0;
200 SYSCTL_ULONG(_debug, OID_AUTO, bpf_trunc_overflow, CTLFLAG_RD | CTLFLAG_LOCKED,
201 &bpf_trunc_overflow, "");
202
203 static int bpf_hdr_comp_enable = 1;
204 SYSCTL_INT(_debug, OID_AUTO, bpf_hdr_comp_enable, CTLFLAG_RW | CTLFLAG_LOCKED,
205 &bpf_hdr_comp_enable, 1, "");
206
207 static int sysctl_bpf_stats SYSCTL_HANDLER_ARGS;
208 SYSCTL_PROC(_debug, OID_AUTO, bpf_stats, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
209 0, 0,
210 sysctl_bpf_stats, "S", "BPF statistics");
211
212 /*
213 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
214 * bpf_dtab holds pointers to the descriptors, indexed by minor device #
215 */
216 static struct bpf_if *bpf_iflist;
217 /*
218 * BSD now stores the bpf_d in the dev_t which is a struct
219 * on their system. Our dev_t is an int, so we still store
220 * the bpf_d in a separate table indexed by minor device #.
221 *
222 * The value stored in bpf_dtab[n] represents one of three states:
223 * NULL: device not opened
224 * BPF_DEV_RESERVED: device opening or closing
225 * other: device <n> opened with pointer to storage
226 */
227 #define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1)
228 static struct bpf_d **bpf_dtab = NULL;
229 static unsigned int bpf_dtab_size = 0;
230 static unsigned int nbpfilter = 0;
231 static unsigned bpf_bpfd_cnt = 0;
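/*
 * Editor's note: a minimal sketch (not part of the original source) of how a
 * lookup distinguishes the three bpf_dtab[] slot states described above. The
 * helper name is hypothetical; the real code open-codes these checks.
 */
#if 0 /* illustrative only */
static struct bpf_d *
bpf_dtab_lookup(unsigned int minor_num)
{
	struct bpf_d *d;

	if (minor_num >= nbpfilter) {
		return NULL;			/* no such device node */
	}
	d = bpf_dtab[minor_num];
	if (d == NULL || d == BPF_DEV_RESERVED) {
		return NULL;			/* not opened, or opening/closing */
	}
	return d;				/* opened: pointer to storage */
}
#endif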
232
233 static LCK_GRP_DECLARE(bpf_mlock_grp, "bpf");
234 static LCK_MTX_DECLARE(bpf_mlock_data, &bpf_mlock_grp);
235 static lck_mtx_t *const bpf_mlock = &bpf_mlock_data;
236
237 static int bpf_allocbufs(struct bpf_d *);
238 static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
239 static int bpf_detachd(struct bpf_d *d);
240 static void bpf_freed(struct bpf_d *);
241 static int bpf_setif(struct bpf_d *, ifnet_t ifp, bool, bool, bool);
242 static void bpf_timed_out(void *, void *);
243 static void bpf_wakeup(struct bpf_d *);
244 static uint32_t get_pkt_trunc_len(struct bpf_packet *);
245 static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int);
246 static void reset_d(struct bpf_d *);
247 static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long);
248 static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *);
249 static int bpf_setdlt(struct bpf_d *, u_int);
250 static int bpf_set_traffic_class(struct bpf_d *, int);
251 static void bpf_set_packet_service_class(struct mbuf *, int);
252
253 static void bpf_acquire_d(struct bpf_d *);
254 static void bpf_release_d(struct bpf_d *);
255
256 static int bpf_devsw_installed;
257
258 void bpf_init(void *unused);
259 static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m);
260
261 /*
262 * Darwin differs from BSD here: the following are static
263 * on BSD and not static on Darwin.
264 */
265 d_open_t bpfopen;
266 d_close_t bpfclose;
267 d_read_t bpfread;
268 d_write_t bpfwrite;
269 ioctl_fcn_t bpfioctl;
270 select_fcn_t bpfselect;
271
272 /* Darwin's cdevsw struct differs slightly from BSD's */
273 #define CDEV_MAJOR 23
274 static const struct cdevsw bpf_cdevsw = {
275 .d_open = bpfopen,
276 .d_close = bpfclose,
277 .d_read = bpfread,
278 .d_write = bpfwrite,
279 .d_ioctl = bpfioctl,
280 .d_stop = eno_stop,
281 .d_reset = eno_reset,
282 .d_ttys = NULL,
283 .d_select = bpfselect,
284 .d_mmap = eno_mmap,
285 .d_strategy = eno_strat,
286 .d_reserved_1 = eno_getc,
287 .d_reserved_2 = eno_putc,
288 .d_type = 0
289 };
290
291 #define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)
292
293 static int
294 bpf_copy_uio_to_mbuf_packet(struct uio *auio, int bytes_to_copy, struct mbuf *top)
295 {
296 int error = 0;
297
298 for (struct mbuf *m = top; m != NULL; m = m->m_next) {
299 int mlen;
300
301 if (m->m_flags & M_EXT) {
302 mlen = m->m_ext.ext_size - (int)M_LEADINGSPACE(m);
303 } else if (m->m_flags & M_PKTHDR) {
304 mlen = MHLEN - (int)M_LEADINGSPACE(m);
305 } else {
306 mlen = MLEN - (int)M_LEADINGSPACE(m);
307 }
308 int copy_len = imin((int)mlen, bytes_to_copy);
309
310 error = uiomove(mtod(m, caddr_t), (int)copy_len, auio);
311 if (error != 0) {
312 os_log(OS_LOG_DEFAULT, "bpf_copy_uio_to_mbuf_packet: len %d error %d",
313 copy_len, error);
314 goto done;
315 }
316 m->m_len = copy_len;
317 top->m_pkthdr.len += copy_len;
318
319 if (bytes_to_copy > copy_len) {
320 bytes_to_copy -= copy_len;
321 } else {
322 break;
323 }
324 }
325 done:
326 return error;
327 }
328
329 static int
330 bpf_movein(struct uio *uio, int copy_len, struct bpf_d *d, struct mbuf **mp,
331 struct sockaddr *sockp)
332 {
333 struct mbuf *m = NULL;
334 int error;
335 int len;
336 uint8_t sa_family;
337 int hlen = 0;
338 struct ifnet *ifp = d->bd_bif->bif_ifp;
339 int linktype = (int)d->bd_bif->bif_dlt;
340
341 switch (linktype) {
342 #if SLIP
343 case DLT_SLIP:
344 sa_family = AF_INET;
345 hlen = 0;
346 break;
347 #endif /* SLIP */
348
349 case DLT_EN10MB:
350 sa_family = AF_UNSPEC;
351 /* XXX Would MAXLINKHDR be better? */
352 hlen = sizeof(struct ether_header);
353 break;
354
355 #if FDDI
356 case DLT_FDDI:
357 #if defined(__FreeBSD__) || defined(__bsdi__)
358 sa_family = AF_IMPLINK;
359 hlen = 0;
360 #else
361 sa_family = AF_UNSPEC;
362 /* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
363 hlen = 24;
364 #endif
365 break;
366 #endif /* FDDI */
367
368 case DLT_RAW:
369 case DLT_NULL:
370 sa_family = AF_UNSPEC;
371 hlen = 0;
372 break;
373
374 #ifdef __FreeBSD__
375 case DLT_ATM_RFC1483:
376 /*
377 * en atm driver requires 4-byte atm pseudo header.
378 * though it isn't standard, vpi:vci needs to be
379 * specified anyway.
380 */
381 sa_family = AF_UNSPEC;
382 hlen = 12; /* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
383 break;
384 #endif
385
386 case DLT_PPP:
387 sa_family = AF_UNSPEC;
388 hlen = 4; /* This should match PPP_HDRLEN */
389 break;
390
391 case DLT_APPLE_IP_OVER_IEEE1394:
392 sa_family = AF_UNSPEC;
393 hlen = sizeof(struct firewire_header);
394 break;
395
396 case DLT_IEEE802_11: /* IEEE 802.11 wireless */
397 sa_family = AF_IEEE80211;
398 hlen = 0;
399 break;
400
401 case DLT_IEEE802_11_RADIO:
402 sa_family = AF_IEEE80211;
403 hlen = 0;
404 break;
405
406 default:
407 return EIO;
408 }
409
410 if (sockp) {
411 /*
412 * Build a sockaddr based on the data link layer type.
413 * We do this at this level because the ethernet header
414 * is copied directly into the data field of the sockaddr.
415 * In the case of SLIP, there is no header and the packet
416 * is forwarded as is.
417 * Also, we are careful to leave room at the front of the mbuf
418 * for the link level header.
419 */
420 if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
421 return EIO;
422 }
423 sockp->sa_family = sa_family;
424 } else {
425 /*
426 * We're directly sending the packet data supplied by
427 * the user; we don't need to make room for the link
428 * header, and don't need the header length value any
429 * more, so set it to 0.
430 */
431 hlen = 0;
432 }
433
434 len = (int)uio_resid(uio);
435 if (len < copy_len) {
436 os_log(OS_LOG_DEFAULT, "bpfwrite: len %d if %s less than copy_len %d",
437 (unsigned)len, ifp->if_xname, copy_len);
438 return EMSGSIZE;
439 }
440 len = copy_len;
441 if (len < hlen || (unsigned)len > BPF_WRITE_MAX) {
442 os_log(OS_LOG_DEFAULT, "bpfwrite: bad len %d if %s",
443 (unsigned)len, ifp->if_xname);
444 return EMSGSIZE;
445 }
446 if (d->bd_write_size_max != 0) {
447 if ((len - hlen) > (d->bd_write_size_max + BPF_WRITE_LEEWAY)) {
448 os_log(OS_LOG_DEFAULT, "bpfwrite: len %u - hlen %u too big if %s write_size_max %u",
449 (unsigned)len, (unsigned)hlen, ifp->if_xname, d->bd_write_size_max);
450 }
451 } else if ((len - hlen) > (ifp->if_mtu + BPF_WRITE_LEEWAY)) {
452 os_log(OS_LOG_DEFAULT, "bpfwrite: len %u - hlen %u too big if %s mtu %u",
453 (unsigned)len, (unsigned)hlen, ifp->if_xname, ifp->if_mtu);
454 return EMSGSIZE;
455 }
456
457 /* drop lock while allocating mbuf and copying data */
458 lck_mtx_unlock(bpf_mlock);
459
460 error = mbuf_allocpacket(MBUF_WAITOK, len, NULL, &m);
461 if (error != 0) {
462 os_log(OS_LOG_DEFAULT,
463 "bpfwrite mbuf_allocpacket len %d error %d", len, error);
464 goto bad;
465 }
466 /*
467 * Make room for link header -- the packet length is 0 at this stage
468 */
469 if (hlen != 0) {
470 m->m_data += hlen; /* leading space */
471 error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
472 if (error) {
473 os_log(OS_LOG_DEFAULT,
474 "bpfwrite uiomove hlen %d error %d", hlen, error);
475 goto bad;
476 }
477 len -= hlen;
478 }
479 /*
480 * bpf_copy_uio_to_mbuf_packet() sets the length of each mbuf and adds it to
481 * the total packet length
482 */
483 error = bpf_copy_uio_to_mbuf_packet(uio, len, m);
484 if (error != 0) {
485 os_log(OS_LOG_DEFAULT,
486 "bpfwrite bpf_copy_uio_to_mbuf_packet error %d", error);
487 goto bad;
488 }
489
490 /* Check for multicast destination */
491 switch (linktype) {
492 case DLT_EN10MB: {
493 struct ether_header *eh;
494
495 eh = mtod(m, struct ether_header *);
496 if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
497 if (_ether_cmp(etherbroadcastaddr,
498 eh->ether_dhost) == 0) {
499 m->m_flags |= M_BCAST;
500 } else {
501 m->m_flags |= M_MCAST;
502 }
503 }
504 break;
505 }
506 }
507 *mp = m;
508
509 lck_mtx_lock(bpf_mlock);
510 return 0;
511 bad:
512 if (m != NULL) {
513 m_freem(m);
514 }
515 lck_mtx_lock(bpf_mlock);
516 return error;
517 }
518
519 static int
520 bpf_movein_batch(struct uio *uio, struct bpf_d *d, struct mbuf **mp,
521 struct sockaddr *sockp)
522 {
523 int error = 0;
524 user_ssize_t resid;
525 int count = 0;
526 struct mbuf *last = NULL;
527
528 *mp = NULL;
529 while ((resid = uio_resid(uio)) >= sizeof(struct bpf_hdr)) {
530 struct bpf_hdr bpfhdr = {};
531 int bpf_hdr_min_len = offsetof(struct bpf_hdr, bh_hdrlen) + sizeof(bpfhdr.bh_hdrlen);
532 int padding_len;
533
534 error = uiomove((caddr_t)&bpfhdr, bpf_hdr_min_len, uio);
535 if (error != 0) {
536 os_log(OS_LOG_DEFAULT, "bpf_movein_batch uiomove error %d", error);
537 break;
538 }
539 /*
540 * Buffer validation:
541 * - ignore bh_tstamp
542 * - bh_hdrlen must fit
543 * - bh_caplen and bh_datalen must be equal
544 */
545 if (bpfhdr.bh_hdrlen < bpf_hdr_min_len) {
546 error = EINVAL;
547 os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_hdrlen %u too small",
548 bpfhdr.bh_hdrlen);
549 break;
550 }
551 if (bpfhdr.bh_caplen != bpfhdr.bh_datalen) {
552 error = EINVAL;
553 os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_caplen %u != bh_datalen %u",
554 bpfhdr.bh_caplen, bpfhdr.bh_datalen);
555 break;
556 }
557 if (bpfhdr.bh_hdrlen > resid) {
558 error = EINVAL;
559 os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_hdrlen %u too large",
560 bpfhdr.bh_hdrlen);
561 break;
562 }
563
564 /*
565 * Ignore additional bytes in the header
566 */
567 padding_len = bpfhdr.bh_hdrlen - bpf_hdr_min_len;
568 if (padding_len > 0) {
569 uio_update(uio, padding_len);
570 }
571
572 /* skip empty packets */
573 if (bpfhdr.bh_caplen > 0) {
574 struct mbuf *m;
575
576 /*
577 * For the time being, assume all packets have the same destination
578 */
579 error = bpf_movein(uio, bpfhdr.bh_caplen, d, &m, sockp);
580 if (error != 0) {
581 os_log(OS_LOG_DEFAULT, "bpf_movein_batch bpf_movein error %d",
582 error);
583 break;
584 }
585 count += 1;
586
587 if (last == NULL) {
588 *mp = m;
589 } else {
590 last->m_nextpkt = m;
591 }
592 last = m;
593 }
594
595 /*
596 * Each BPF packet is padded for alignment
597 */
598 padding_len = BPF_WORDALIGN(bpfhdr.bh_hdrlen + bpfhdr.bh_caplen) - (bpfhdr.bh_hdrlen + bpfhdr.bh_caplen);
599 if (padding_len > 0) {
600 uio_update(uio, padding_len);
601 }
602 }
603
604 if (error != 0) {
605 if (*mp != NULL) {
606 m_freem_list(*mp);
607 *mp = NULL;
608 }
609 }
610 return error;
611 }
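/*
 * Editor's note: a hedged user-space sketch (not part of the source) of the
 * record layout bpf_movein_batch() expects once batch mode is enabled with
 * BIOCSBATCHWRITE: a struct bpf_hdr with bh_caplen == bh_datalen, the frame
 * bytes, then padding to BPF_WORDALIGN. Names such as "fd" and "frame" are
 * assumptions for illustration.
 */
#if 0 /* illustrative only */
#include <net/bpf.h>
#include <string.h>
#include <unistd.h>

static ssize_t
write_one_batched_record(int fd, const void *frame, u_int frame_len)
{
	char buf[4096];
	struct bpf_hdr *hp = (struct bpf_hdr *)(void *)buf;

	memset(hp, 0, sizeof(*hp));		/* bh_tstamp is ignored by the kernel */
	hp->bh_hdrlen = sizeof(struct bpf_hdr);
	hp->bh_caplen = frame_len;		/* must equal bh_datalen */
	hp->bh_datalen = frame_len;
	memcpy(buf + hp->bh_hdrlen, frame, frame_len);

	/* each record is padded for alignment, mirroring the kernel loop above */
	return write(fd, buf, BPF_WORDALIGN(hp->bh_hdrlen + frame_len));
}
#endif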
612
613 /*
614 * The dynamic addition of a new device node must block all processes that
615 * are opening the last device so that no process will get an unexpected
616 * ENOENT
617 */
618 static void
619 bpf_make_dev_t(int maj)
620 {
621 static int bpf_growing = 0;
622 unsigned int cur_size = nbpfilter, i;
623
624 if (nbpfilter >= BPF_MAX_DEVICES) {
625 return;
626 }
627
628 while (bpf_growing) {
629 /* Wait until new device has been created */
630 (void) tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0);
631 }
632 if (nbpfilter > cur_size) {
633 /* other thread grew it already */
634 return;
635 }
636 bpf_growing = 1;
637
638 /* need to grow bpf_dtab first */
639 if (nbpfilter == bpf_dtab_size) {
640 unsigned int new_dtab_size;
641 struct bpf_d **new_dtab = NULL;
642
643 new_dtab_size = bpf_dtab_size + NBPFILTER;
644 new_dtab = krealloc_type(struct bpf_d *,
645 bpf_dtab_size, new_dtab_size, bpf_dtab, Z_WAITOK | Z_ZERO);
646 if (new_dtab == 0) {
647 os_log_error(OS_LOG_DEFAULT, "bpf_make_dev_t: malloc bpf_dtab failed");
648 goto done;
649 }
650 bpf_dtab = new_dtab;
651 bpf_dtab_size = new_dtab_size;
652 }
653 i = nbpfilter++;
654 (void) devfs_make_node(makedev(maj, i),
655 DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
656 "bpf%d", i);
657 done:
658 bpf_growing = 0;
659 wakeup((caddr_t)&bpf_growing);
660 }
661
662 /*
663 * Attach file to the bpf interface, i.e. make d listen on bp.
664 */
665 static errno_t
666 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
667 {
668 int first = bp->bif_dlist == NULL;
669 int error = 0;
670
671 /*
672 * Point d at bp, and add d to the interface's list of listeners.
673 * Finally, point the driver's bpf cookie at the interface so
674 * it will divert packets to bpf.
675 */
676 d->bd_bif = bp;
677 d->bd_next = bp->bif_dlist;
678 bp->bif_dlist = d;
679 bpf_bpfd_cnt++;
680
681 /*
682 * Take a reference on the device even if an error is returned
683 * because we keep the device in the interface's list of listeners
684 */
685 bpf_acquire_d(d);
686
687 if (first) {
688 /* Find the default bpf entry for this ifp */
689 if (bp->bif_ifp->if_bpf == NULL) {
690 struct bpf_if *tmp, *primary = NULL;
691
692 for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) {
693 if (tmp->bif_ifp == bp->bif_ifp) {
694 primary = tmp;
695 break;
696 }
697 }
698 bp->bif_ifp->if_bpf = primary;
699 }
700 /* Only call dlil_set_bpf_tap for primary dlt */
701 if (bp->bif_ifp->if_bpf == bp) {
702 dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT,
703 bpf_tap_callback);
704 }
705
706 if (bp->bif_tap != NULL) {
707 error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt,
708 BPF_TAP_INPUT_OUTPUT);
709 }
710 }
711
712 /*
713 * Reset the detach flags in case we previously detached an interface
714 */
715 d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED);
716
717 if (bp->bif_dlt == DLT_PKTAP) {
718 d->bd_flags |= BPF_FINALIZE_PKTAP;
719 } else {
720 d->bd_flags &= ~BPF_FINALIZE_PKTAP;
721 }
722 return error;
723 }
724
725 /*
726 * Detach a file from its interface.
727 *
728 * Return 1 if it was closed by some thread, 0 otherwise
729 */
730 static int
731 bpf_detachd(struct bpf_d *d)
732 {
733 struct bpf_d **p;
734 struct bpf_if *bp;
735 struct ifnet *ifp;
736 uint32_t dlt;
737 bpf_tap_func disable_tap;
738 uint8_t bd_promisc;
739
740 int bpf_closed = d->bd_flags & BPF_CLOSING;
741 /*
742 * Some other thread already detached
743 */
744 if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) {
745 goto done;
746 }
747 /*
748 * This thread is doing the detach
749 */
750 d->bd_flags |= BPF_DETACHING;
751
752 ifp = d->bd_bif->bif_ifp;
753 bp = d->bd_bif;
754
755 /* Remove d from the interface's descriptor list. */
756 p = &bp->bif_dlist;
757 while (*p != d) {
758 p = &(*p)->bd_next;
759 if (*p == 0) {
760 panic("bpf_detachd: descriptor not in list");
761 }
762 }
763 *p = (*p)->bd_next;
764 bpf_bpfd_cnt--;
765 disable_tap = NULL;
766 if (bp->bif_dlist == 0) {
767 /*
768 * Let the driver know that there are no more listeners.
769 */
770 /* Only call dlil_set_bpf_tap for primary dlt */
771 if (bp->bif_ifp->if_bpf == bp) {
772 dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL);
773 }
774
775 disable_tap = bp->bif_tap;
776 if (disable_tap) {
777 dlt = bp->bif_dlt;
778 }
779
780 for (bp = bpf_iflist; bp; bp = bp->bif_next) {
781 if (bp->bif_ifp == ifp && bp->bif_dlist != 0) {
782 break;
783 }
784 }
785 if (bp == NULL) {
786 ifp->if_bpf = NULL;
787 }
788 }
789 d->bd_bif = NULL;
790 /*
791 * Check if this descriptor had requested promiscuous mode.
792 * If so, turn it off.
793 */
794 bd_promisc = d->bd_promisc;
795 d->bd_promisc = 0;
796
797 lck_mtx_unlock(bpf_mlock);
798 if (bd_promisc) {
799 if (ifnet_set_promiscuous(ifp, 0)) {
800 /*
801 * Something is really wrong if we were able to put
802 * the driver into promiscuous mode, but can't
803 * take it out.
804 * Most likely the network interface is gone.
805 */
806 os_log_error(OS_LOG_DEFAULT,
807 "%s: bpf%d ifnet_set_promiscuous %s failed",
808 __func__, d->bd_dev_minor, if_name(ifp));
809 }
810 }
811
812 if (disable_tap) {
813 disable_tap(ifp, dlt, BPF_TAP_DISABLE);
814 }
815 lck_mtx_lock(bpf_mlock);
816
817 /*
818 * Wake up other threads that are waiting for this thread to finish
819 * detaching
820 */
821 d->bd_flags &= ~BPF_DETACHING;
822 d->bd_flags |= BPF_DETACHED;
823
824 /* Refresh the local variable as d could have been modified */
825 bpf_closed = d->bd_flags & BPF_CLOSING;
826
827 os_log(OS_LOG_DEFAULT, "bpf%d%s detached from %s fcount %llu dcount %llu",
828 d->bd_dev_minor, bpf_closed ? " closed and" : "", if_name(ifp),
829 d->bd_fcount, d->bd_dcount);
830
831 /*
832 * Note that we've kept the reference because we may have dropped
833 * the lock when turning off promiscuous mode
834 */
835 bpf_release_d(d);
836 done:
837 /*
838 * Let the caller know the bpf_d is closed
839 */
840 if (bpf_closed) {
841 return 1;
842 } else {
843 return 0;
844 }
845 }
846
847 /*
848 * Start asynchronous timer, if necessary.
849 * Must be called with bpf_mlock held.
850 */
851 static void
852 bpf_start_timer(struct bpf_d *d)
853 {
854 uint64_t deadline;
855 struct timeval tv;
856
857 if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
858 tv.tv_sec = d->bd_rtout / hz;
859 tv.tv_usec = (d->bd_rtout % hz) * tick;
860
861 clock_interval_to_deadline(
862 (uint32_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec,
863 NSEC_PER_USEC, &deadline);
864 /*
865 * The state is BPF_IDLE, so the timer hasn't
866 * been started yet, and hasn't gone off yet;
867 * there is no thread call scheduled, so this
868 * won't change the schedule.
869 *
870 * XXX - what if, by the time it gets entered,
871 * the deadline has already passed?
872 */
873 thread_call_enter_delayed(d->bd_thread_call, deadline);
874 d->bd_state = BPF_WAITING;
875 }
876 }
877
878 /*
879 * Cancel asynchronous timer.
880 * Must be called with bpf_mlock held.
881 */
882 static boolean_t
883 bpf_stop_timer(struct bpf_d *d)
884 {
885 /*
886 * If the timer has already gone off, this does nothing.
887 * Our caller is expected to set d->bd_state to BPF_IDLE,
888 * with the bpf_mlock, after we are called. bpf_timed_out()
889 * also grabs bpf_mlock, so, if the timer has gone off and
890 * bpf_timed_out() hasn't finished, it's waiting for the
891 * lock; when this thread releases the lock, it will
892 * find the state is BPF_IDLE, and just release the
893 * lock and return.
894 */
895 return thread_call_cancel(d->bd_thread_call);
896 }
897
898 void
899 bpf_acquire_d(struct bpf_d *d)
900 {
901 void *lr_saved = __builtin_return_address(0);
902
903 LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);
904
905 d->bd_refcnt += 1;
906
907 d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved;
908 d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST;
909 }
910
911 void
912 bpf_release_d(struct bpf_d *d)
913 {
914 void *lr_saved = __builtin_return_address(0);
915
916 LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);
917
918 if (d->bd_refcnt <= 0) {
919 panic("%s: %p refcnt <= 0", __func__, d);
920 }
921
922 d->bd_refcnt -= 1;
923
924 d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved;
925 d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST;
926
927 if (d->bd_refcnt == 0) {
928 /* Assert the device is detached */
929 if ((d->bd_flags & BPF_DETACHED) == 0) {
930 panic("%s: %p BPF_DETACHED not set", __func__, d);
931 }
932
933 kfree_type(struct bpf_d, d);
934 }
935 }
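/*
 * Editor's note: the paired acquire/release above also record the last few
 * caller return addresses in small rings (BPF_REF_HIST entries) so a refcount
 * underflow or leak can be attributed post-mortem. A generic miniature of the
 * pattern, with hypothetical names (not part of the source):
 */
#if 0 /* illustrative only */
#define REF_HIST 4			/* hypothetical; xnu uses BPF_REF_HIST */

struct ref_debug {
	int	refcnt;
	void	*ref_lr[REF_HIST];	/* last few callers of ref_acquire() */
	int	next_lr;
};

static void
ref_acquire(struct ref_debug *r)
{
	r->refcnt += 1;
	/* remember who took the reference, overwriting the oldest entry */
	r->ref_lr[r->next_lr] = __builtin_return_address(0);
	r->next_lr = (r->next_lr + 1) % REF_HIST;
}
#endif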
936
937 /*
938 * Open bpf device. Returns ENXIO for illegal minor device number,
939 * EBUSY if file is open by another process.
940 */
941 /* ARGSUSED */
942 int
943 bpfopen(dev_t dev, int flags, __unused int fmt,
944 struct proc *p)
945 {
946 struct bpf_d *d;
947
948 lck_mtx_lock(bpf_mlock);
949 if ((unsigned int) minor(dev) >= nbpfilter) {
950 lck_mtx_unlock(bpf_mlock);
951 return ENXIO;
952 }
953 /*
954 * New device nodes are created on demand when opening the last one.
955 * The programming model is for processes to loop on the minor starting
956 * at 0 as long as EBUSY is returned. The loop stops when either the
957 * open succeeds or an error other than EBUSY is returned. That means
958 * that bpf_make_dev_t() must block all processes that are opening the
959 * last node. If not all processes are blocked, they could unexpectedly
960 * get ENOENT and abort their opening loop.
961 */
962 if ((unsigned int) minor(dev) == (nbpfilter - 1)) {
963 bpf_make_dev_t(major(dev));
964 }
965
966 /*
967 * Each minor can be opened by only one process. If the requested
968 * minor is in use, return EBUSY.
969 *
970 * Important: bpfopen() and bpfclose() have to check and set the status
971 * of a device in the same locking context, otherwise the device may be
972 * leaked because the vnode use count will be unexpectedly greater than 1
973 * when close() is called.
974 */
975 if (bpf_dtab[minor(dev)] == NULL) {
976 /* Reserve while opening */
977 bpf_dtab[minor(dev)] = BPF_DEV_RESERVED;
978 } else {
979 lck_mtx_unlock(bpf_mlock);
980 return EBUSY;
981 }
982 d = kalloc_type(struct bpf_d, Z_WAITOK | Z_ZERO);
983 if (d == NULL) {
984 /* this really is a catastrophic failure */
985 os_log_error(OS_LOG_DEFAULT,
986 "bpfopen: bpf%d kalloc_type bpf_d failed", minor(dev));
987 bpf_dtab[minor(dev)] = NULL;
988 lck_mtx_unlock(bpf_mlock);
989 return ENOMEM;
990 }
991
992 /* Mark "in use" and do most initialization. */
993 bpf_acquire_d(d);
994 d->bd_bufsize = bpf_bufsize;
995 d->bd_sig = SIGIO;
996 d->bd_direction = BPF_D_INOUT;
997 d->bd_oflags = flags;
998 d->bd_state = BPF_IDLE;
999 d->bd_traffic_class = SO_TC_BE;
1000 d->bd_flags |= BPF_DETACHED;
1001 if (bpf_wantpktap) {
1002 d->bd_flags |= BPF_WANT_PKTAP;
1003 } else {
1004 d->bd_flags &= ~BPF_WANT_PKTAP;
1005 }
1006
1007 d->bd_thread_call = thread_call_allocate(bpf_timed_out, d);
1008 if (d->bd_thread_call == NULL) {
1009 os_log_error(OS_LOG_DEFAULT, "bpfopen: bpf%d malloc thread call failed",
1010 minor(dev));
1011 bpf_dtab[minor(dev)] = NULL;
1012 bpf_release_d(d);
1013 lck_mtx_unlock(bpf_mlock);
1014
1015 return ENOMEM;
1016 }
1017 d->bd_opened_by = p;
1018 uuid_generate(d->bd_uuid);
1019 d->bd_pid = proc_pid(p);
1020
1021 d->bd_dev_minor = minor(dev);
1022 bpf_dtab[minor(dev)] = d; /* Mark opened */
1023 lck_mtx_unlock(bpf_mlock);
1024
1025 if (bpf_debug) {
1026 os_log(OS_LOG_DEFAULT, "bpf%u opened by %s.%u",
1027 d->bd_dev_minor, proc_name_address(p), d->bd_pid);
1028 }
1029 return 0;
1030 }
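/*
 * Editor's note: a hedged user-space sketch (not part of the source) of the
 * programming model described above: probe /dev/bpfN starting at 0 and skip
 * minors that return EBUSY. The bound of 256 mirrors BPF_MAX_DEVICES.
 */
#if 0 /* illustrative only */
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

static int
open_first_free_bpf(void)
{
	char path[32];
	int fd = -1;

	for (int i = 0; i < 256; i++) {
		snprintf(path, sizeof(path), "/dev/bpf%d", i);
		fd = open(path, O_RDWR);
		if (fd >= 0 || errno != EBUSY) {
			break;		/* success, or an error other than EBUSY */
		}
	}
	return fd;
}
#endif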
1031
1032 /*
1033 * Close the descriptor by detaching it from its interface,
1034 * deallocating its buffers, and marking it free.
1035 */
1036 /* ARGSUSED */
1037 int
1038 bpfclose(dev_t dev, __unused int flags, __unused int fmt,
1039 __unused struct proc *p)
1040 {
1041 struct bpf_d *d;
1042
1043 /* Take BPF lock to ensure no other thread is using the device */
1044 lck_mtx_lock(bpf_mlock);
1045
1046 d = bpf_dtab[minor(dev)];
1047 if (d == NULL || d == BPF_DEV_RESERVED) {
1048 lck_mtx_unlock(bpf_mlock);
1049 return ENXIO;
1050 }
1051
1052 /*
1053 * Other threads may call bpf_detachd() if we drop the bpf_mlock
1054 */
1055 d->bd_flags |= BPF_CLOSING;
1056
1057 if (bpf_debug != 0) {
1058 os_log(OS_LOG_DEFAULT, "%s: bpf%d",
1059 __func__, d->bd_dev_minor);
1060 }
1061
1062 bpf_dtab[minor(dev)] = BPF_DEV_RESERVED; /* Reserve while closing */
1063
1064 /*
1065 * Deal with any in-progress timeouts.
1066 */
1067 switch (d->bd_state) {
1068 case BPF_IDLE:
1069 /*
1070 * Not waiting for a timeout, and no timeout happened.
1071 */
1072 break;
1073
1074 case BPF_WAITING:
1075 /*
1076 * Waiting for a timeout.
1077 * Cancel any timer that has yet to go off,
1078 * and mark the state as "closing".
1079 * Then drop the lock to allow any timers that
1080 * *have* gone off to run to completion, and wait
1081 * for them to finish.
1082 */
1083 if (!bpf_stop_timer(d)) {
1084 /*
1085 * There was no pending call, so the call must
1086 * have been in progress. Wait for the call to
1087 * complete; we have to drop the lock while
1088 * waiting, to let the in-progress call complete
1089 */
1090 d->bd_state = BPF_DRAINING;
1091 while (d->bd_state == BPF_DRAINING) {
1092 msleep((caddr_t)d, bpf_mlock, PRINET,
1093 "bpfdraining", NULL);
1094 }
1095 }
1096 d->bd_state = BPF_IDLE;
1097 break;
1098
1099 case BPF_TIMED_OUT:
1100 /*
1101 * Timer went off, and the timeout routine finished.
1102 */
1103 d->bd_state = BPF_IDLE;
1104 break;
1105
1106 case BPF_DRAINING:
1107 /*
1108 * Another thread is blocked on a close waiting for
1109 * a timeout to finish.
1110 * This "shouldn't happen", as the first thread to enter
1111 * bpfclose() will set bpf_dtab[minor(dev)] to BPF_DEV_RESERVED, and
1112 * all subsequent threads should see that and fail with
1113 * ENXIO.
1114 */
1115 panic("Two threads blocked in a BPF close");
1116 break;
1117 }
1118
1119 if (d->bd_bif) {
1120 bpf_detachd(d);
1121 }
1122 selthreadclear(&d->bd_sel);
1123 thread_call_free(d->bd_thread_call);
1124
1125 while (d->bd_hbuf_read || d->bd_hbuf_write) {
1126 msleep((caddr_t)d, bpf_mlock, PRINET, "bpfclose", NULL);
1127 }
1128
1129 if (bpf_debug) {
1130 os_log(OS_LOG_DEFAULT,
1131 "bpf%u closed by %s.%u dcount %llu fcount %llu ccount %llu",
1132 d->bd_dev_minor, proc_name_address(p), d->bd_pid,
1133 d->bd_dcount, d->bd_fcount, d->bd_bcs.bcs_count_compressed_prefix);
1134 }
1135
1136 bpf_freed(d);
1137
1138 /* Mark free in same context as bpfopen comes to check */
1139 bpf_dtab[minor(dev)] = NULL; /* Mark closed */
1140
1141 bpf_release_d(d);
1142
1143 lck_mtx_unlock(bpf_mlock);
1144
1145 return 0;
1146 }
1147
1148 #define BPF_SLEEP bpf_sleep
1149
1150 static int
1151 bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
1152 {
1153 u_int64_t abstime = 0;
1154
1155 if (timo != 0) {
1156 clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
1157 }
1158
1159 return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime);
1160 }
1161
1162 static void
1163 bpf_finalize_pktap(struct bpf_hdr *hp, struct pktap_header *pktaphdr)
1164 {
1165 if (pktaphdr->pth_flags & PTH_FLAG_V2_HDR) {
1166 struct pktap_v2_hdr *pktap_v2_hdr;
1167
1168 pktap_v2_hdr = (struct pktap_v2_hdr *)pktaphdr;
1169
1170 if (pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP) {
1171 pktap_v2_finalize_proc_info(pktap_v2_hdr);
1172 }
1173 } else {
1174 if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) {
1175 pktap_finalize_proc_info(pktaphdr);
1176 }
1177
1178 if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
1179 hp->bh_tstamp.tv_sec = pktaphdr->pth_tstamp.tv_sec;
1180 hp->bh_tstamp.tv_usec = pktaphdr->pth_tstamp.tv_usec;
1181 }
1182 }
1183 }
1184
1185 /*
1186 * Rotate the packet buffers in descriptor d. Move the store buffer
1187 * into the hold slot, and the free buffer into the store slot.
1188 * Zero the length of the new store buffer.
1189 *
1190 * Note: in head-drop mode, the hold buffer can be dropped, so the first packet of the
1191 * store buffer cannot be compressed as it would otherwise refer to deleted data
1192 * in a dropped hold buffer that the reader process does not know about
1193 */
1194 #define ROTATE_BUFFERS(d) do { \
1195 if (d->bd_hbuf_read) \
1196 panic("rotating bpf buffers during read"); \
1197 (d)->bd_hbuf = (d)->bd_sbuf; \
1198 (d)->bd_hlen = (d)->bd_slen; \
1199 (d)->bd_hcnt = (d)->bd_scnt; \
1200 (d)->bd_sbuf = (d)->bd_fbuf; \
1201 (d)->bd_slen = 0; \
1202 (d)->bd_scnt = 0; \
1203 (d)->bd_fbuf = NULL; \
1204 if ((d)->bd_headdrop != 0) \
1205 (d)->bd_prev_slen = 0; \
1206 } while(false)
1207
1208 /*
1209 * bpfread - read next chunk of packets from buffers
1210 */
1211 int
1212 bpfread(dev_t dev, struct uio *uio, int ioflag)
1213 {
1214 struct bpf_d *d;
1215 caddr_t hbuf;
1216 int timed_out, hbuf_len;
1217 int error;
1218 int flags;
1219
1220 lck_mtx_lock(bpf_mlock);
1221
1222 d = bpf_dtab[minor(dev)];
1223 if (d == NULL || d == BPF_DEV_RESERVED ||
1224 (d->bd_flags & BPF_CLOSING) != 0) {
1225 lck_mtx_unlock(bpf_mlock);
1226 return ENXIO;
1227 }
1228
1229 bpf_acquire_d(d);
1230
1231 /*
1232 * Restrict the application to use a buffer the same size
1233 * as the kernel buffers.
1234 */
1235 if (uio_resid(uio) != d->bd_bufsize) {
1236 bpf_release_d(d);
1237 lck_mtx_unlock(bpf_mlock);
1238 return EINVAL;
1239 }
1240
1241 if (d->bd_state == BPF_WAITING) {
1242 bpf_stop_timer(d);
1243 }
1244
1245 timed_out = (d->bd_state == BPF_TIMED_OUT);
1246 d->bd_state = BPF_IDLE;
1247
1248 while (d->bd_hbuf_read) {
1249 msleep((caddr_t)d, bpf_mlock, PRINET, "bpfread", NULL);
1250 }
1251
1252 if ((d->bd_flags & BPF_CLOSING) != 0) {
1253 bpf_release_d(d);
1254 lck_mtx_unlock(bpf_mlock);
1255 return ENXIO;
1256 }
1257 /*
1258 * If the hold buffer is empty, then do a timed sleep, which
1259 * ends when the timeout expires or when enough packets
1260 * have arrived to fill the store buffer.
1261 */
1262 while (d->bd_hbuf == 0) {
1263 if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY)) &&
1264 d->bd_slen != 0) {
1265 /*
1266 * We're in immediate mode, or are reading
1267 * in non-blocking mode, or a timer was
1268 * started before the read (e.g., by select()
1269 * or poll()) and has expired and a packet(s)
1270 * either arrived since the previous
1271 * read or arrived while we were asleep.
1272 * Rotate the buffers and return what's here.
1273 */
1274 ROTATE_BUFFERS(d);
1275 break;
1276 }
1277
1278 /*
1279 * No data is available, check to see if the bpf device
1280 * is still pointed at a real interface. If not, return
1281 * ENXIO so that the userland process knows to rebind
1282 * it before using it again.
1283 */
1284 if (d->bd_bif == NULL) {
1285 bpf_release_d(d);
1286 lck_mtx_unlock(bpf_mlock);
1287 return ENXIO;
1288 }
1289 if (ioflag & IO_NDELAY) {
1290 bpf_release_d(d);
1291 lck_mtx_unlock(bpf_mlock);
1292 return EWOULDBLOCK;
1293 }
1294 error = BPF_SLEEP(d, PRINET | PCATCH, "bpf", d->bd_rtout);
1295 /*
1296 * Make sure device is still opened
1297 */
1298 if ((d->bd_flags & BPF_CLOSING) != 0) {
1299 bpf_release_d(d);
1300 lck_mtx_unlock(bpf_mlock);
1301 return ENXIO;
1302 }
1303
1304 while (d->bd_hbuf_read) {
1305 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_read",
1306 NULL);
1307 }
1308
1309 if ((d->bd_flags & BPF_CLOSING) != 0) {
1310 bpf_release_d(d);
1311 lck_mtx_unlock(bpf_mlock);
1312 return ENXIO;
1313 }
1314
1315 if (error == EINTR || error == ERESTART) {
1316 if (d->bd_hbuf != NULL) {
1317 /*
1318 * Because we msleep, the hold buffer might
1319 * be filled when we wake up. Avoid rotating
1320 * in this case.
1321 */
1322 break;
1323 }
1324 if (d->bd_slen != 0) {
1325 /*
1326 * Sometimes we may be interrupted often and
1327 * the sleep above will not timeout.
1328 * Regardless, we should rotate the buffers
1329 * if there's any new data pending and
1330 * return it.
1331 */
1332 ROTATE_BUFFERS(d);
1333 break;
1334 }
1335 bpf_release_d(d);
1336 lck_mtx_unlock(bpf_mlock);
1337 if (error == ERESTART) {
1338 os_log(OS_LOG_DEFAULT, "%s: bpf%d ERESTART to EINTR",
1339 __func__, d->bd_dev_minor);
1340 error = EINTR;
1341 }
1342 return error;
1343 }
1344 if (error == EWOULDBLOCK) {
1345 /*
1346 * On a timeout, return what's in the buffer,
1347 * which may be nothing. If there is something
1348 * in the store buffer, we can rotate the buffers.
1349 */
1350 if (d->bd_hbuf) {
1351 /*
1352 * We filled up the buffer in between
1353 * getting the timeout and arriving
1354 * here, so we don't need to rotate.
1355 */
1356 break;
1357 }
1358
1359 if (d->bd_slen == 0) {
1360 bpf_release_d(d);
1361 lck_mtx_unlock(bpf_mlock);
1362 return 0;
1363 }
1364 ROTATE_BUFFERS(d);
1365 break;
1366 }
1367 }
1368 /*
1369 * At this point, we know we have something in the hold slot.
1370 */
1371
1372 /*
1373 * Set the hold buffer read flag so we do not
1374 * rotate the buffers until the hold buffer
1375 * read is complete. This also avoids issues resulting
1376 * from page faults during disk sleep (<rdar://problem/13436396>).
1377 */
1378 d->bd_hbuf_read = true;
1379 hbuf = d->bd_hbuf;
1380 hbuf_len = d->bd_hlen;
1381 flags = d->bd_flags;
1382 d->bd_bcs.bcs_total_read += d->bd_hcnt;
1383 lck_mtx_unlock(bpf_mlock);
1384
1385 /*
1386 * Before we move data to userland, we fill out the extended
1387 * header fields.
1388 */
1389 if (flags & BPF_EXTENDED_HDR) {
1390 char *p;
1391
1392 p = hbuf;
1393 while (p < hbuf + hbuf_len) {
1394 struct bpf_hdr_ext *ehp;
1395 uint32_t flowid;
1396 struct so_procinfo soprocinfo;
1397 int found = 0;
1398
1399 ehp = (struct bpf_hdr_ext *)(void *)p;
1400 if ((flowid = ehp->bh_flowid) != 0) {
1401 if (ehp->bh_flags & BPF_HDR_EXT_FLAGS_TCP) {
1402 ehp->bh_flags &= ~BPF_HDR_EXT_FLAGS_TCP;
1403 found = inp_findinpcb_procinfo(&tcbinfo,
1404 flowid, &soprocinfo);
1405 } else if (ehp->bh_flags == BPF_HDR_EXT_FLAGS_UDP) {
1406 ehp->bh_flags &= ~BPF_HDR_EXT_FLAGS_UDP;
1407 found = inp_findinpcb_procinfo(&udbinfo,
1408 flowid, &soprocinfo);
1409 }
1410 if (found == 1) {
1411 ehp->bh_pid = soprocinfo.spi_pid;
1412 strlcpy(&ehp->bh_comm[0], &soprocinfo.spi_proc_name[0], sizeof(ehp->bh_comm));
1413 }
1414 ehp->bh_flowid = 0;
1415 }
1416
1417 if ((flags & BPF_FINALIZE_PKTAP) != 0 && ehp->bh_complen == 0) {
1418 struct pktap_header *pktaphdr;
1419
1420 pktaphdr = (struct pktap_header *)(void *)
1421 (p + BPF_WORDALIGN(ehp->bh_hdrlen));
1422
1423 bpf_finalize_pktap((struct bpf_hdr *) ehp,
1424 pktaphdr);
1425 }
1426 p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen);
1427 }
1428 } else if (flags & BPF_FINALIZE_PKTAP) {
1429 char *p;
1430
1431 p = hbuf;
1432
1433 while (p < hbuf + hbuf_len) {
1434 struct bpf_hdr *hp;
1435 struct pktap_header *pktaphdr;
1436
1437 hp = (struct bpf_hdr *)(void *)p;
1438
1439 /*
1440 * Cannot finalize a compressed pktap header as we may not have
1441 * all the fields present
1442 */
1443 if (d->bd_flags & BPF_COMP_ENABLED) {
1444 struct bpf_comp_hdr *hcp;
1445
1446 hcp = (struct bpf_comp_hdr *)(void *)p;
1447
1448 if (hcp->bh_complen != 0) {
1449 p += BPF_WORDALIGN(hcp->bh_hdrlen + hcp->bh_caplen);
1450 continue;
1451 }
1452 }
1453
1454 pktaphdr = (struct pktap_header *)(void *)
1455 (p + BPF_WORDALIGN(hp->bh_hdrlen));
1456
1457 bpf_finalize_pktap(hp, pktaphdr);
1458
1459 p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
1460 }
1461 }
1462
1463 /*
1464 * Move data from hold buffer into user space.
1465 * We know the entire buffer is transferred since
1466 * we checked above that the read buffer is bpf_bufsize bytes.
1467 */
1468 error = uiomove(hbuf, hbuf_len, uio);
1469
1470 lck_mtx_lock(bpf_mlock);
1471 /*
1472 * Make sure device is still opened
1473 */
1474 if ((d->bd_flags & BPF_CLOSING) != 0) {
1475 bpf_release_d(d);
1476 lck_mtx_unlock(bpf_mlock);
1477 return ENXIO;
1478 }
1479
1480 d->bd_hbuf_read = false;
1481 d->bd_fbuf = d->bd_hbuf;
1482 d->bd_hbuf = NULL;
1483 d->bd_hlen = 0;
1484 d->bd_hcnt = 0;
1485 wakeup((caddr_t)d);
1486
1487 bpf_release_d(d);
1488 lck_mtx_unlock(bpf_mlock);
1489 return error;
1490 }
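/*
 * Editor's note: a hedged user-space sketch (not part of the source) of the
 * matching read loop, assuming the default struct bpf_hdr layout (no
 * BPF_EXTENDED_HDR or header compression). It mirrors the kernel's
 * BPF_WORDALIGN stepping above; "cb" is a hypothetical per-packet callback.
 */
#if 0 /* illustrative only */
#include <net/bpf.h>
#include <unistd.h>

static void
read_and_walk(int fd, char *buf, size_t bufsize,
    void (*cb)(const char *pkt, u_int caplen))
{
	/* the read size must be exactly the kernel buffer size (checked above) */
	ssize_t n = read(fd, buf, bufsize);

	for (char *p = buf; n > 0 && p < buf + n;) {
		const struct bpf_hdr *hp = (const struct bpf_hdr *)(const void *)p;

		cb(p + hp->bh_hdrlen, hp->bh_caplen);
		p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
	}
}
#endif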
1491
1492 /*
1493 * If there are processes sleeping on this descriptor, wake them up.
1494 */
1495 static void
1496 bpf_wakeup(struct bpf_d *d)
1497 {
1498 if (d->bd_state == BPF_WAITING) {
1499 bpf_stop_timer(d);
1500 d->bd_state = BPF_IDLE;
1501 }
1502 wakeup((caddr_t)d);
1503 if (d->bd_async && d->bd_sig && d->bd_sigio) {
1504 pgsigio(d->bd_sigio, d->bd_sig);
1505 }
1506
1507 selwakeup(&d->bd_sel);
1508 if ((d->bd_flags & BPF_KNOTE)) {
1509 KNOTE(&d->bd_sel.si_note, 1);
1510 }
1511 }
1512
1513 static void
1514 bpf_timed_out(void *arg, __unused void *dummy)
1515 {
1516 struct bpf_d *d = (struct bpf_d *)arg;
1517
1518 lck_mtx_lock(bpf_mlock);
1519 if (d->bd_state == BPF_WAITING) {
1520 /*
1521 * There's a select or kqueue waiting for this; if there's
1522 * now stuff to read, wake it up.
1523 */
1524 d->bd_state = BPF_TIMED_OUT;
1525 if (d->bd_slen != 0) {
1526 bpf_wakeup(d);
1527 }
1528 } else if (d->bd_state == BPF_DRAINING) {
1529 /*
1530 * A close is waiting for this to finish.
1531 * Mark it as finished, and wake the close up.
1532 */
1533 d->bd_state = BPF_IDLE;
1534 bpf_wakeup(d);
1535 }
1536 lck_mtx_unlock(bpf_mlock);
1537 }
1538
1539 /* keep in sync with bpf_movein above: */
1540 #define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
1541
1542 int
1543 bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag)
1544 {
1545 struct bpf_d *d;
1546 struct ifnet *ifp;
1547 struct mbuf *m = NULL;
1548 int error = 0;
1549 char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
1550 int bif_dlt;
1551 int bd_hdrcmplt;
1552 bpf_send_func bif_send;
1553
1554 lck_mtx_lock(bpf_mlock);
1555
1556 while (true) {
1557 d = bpf_dtab[minor(dev)];
1558 if (d == NULL || d == BPF_DEV_RESERVED ||
1559 (d->bd_flags & BPF_CLOSING) != 0) {
1560 lck_mtx_unlock(bpf_mlock);
1561 return ENXIO;
1562 }
1563
1564 if (d->bd_hbuf_write) {
1565 msleep((caddr_t)d, bpf_mlock, PRINET, "bpfwrite",
1566 NULL);
1567 } else {
1568 break;
1569 }
1570 }
1571 d->bd_hbuf_write = true;
1572
1573 bpf_acquire_d(d);
1574
1575 ++d->bd_wcount;
1576
1577 if (d->bd_bif == NULL) {
1578 error = ENXIO;
1579 goto done;
1580 }
1581
1582 ifp = d->bd_bif->bif_ifp;
1583
1584 if (IFNET_IS_MANAGEMENT(ifp) &&
1585 IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT) == false) {
1586 ++d->bd_wdcount;
1587 bpf_release_d(d);
1588 lck_mtx_unlock(bpf_mlock);
1589 return ENETDOWN;
1590 }
1591
1592 if ((ifp->if_flags & IFF_UP) == 0) {
1593 error = ENETDOWN;
1594 goto done;
1595 }
1596 int resid = (int)uio_resid(uio);
1597 if (resid <= 0) {
1598 error = resid == 0 ? 0 : EINVAL;
1599 os_log(OS_LOG_DEFAULT, "bpfwrite: resid %d error %d", resid, error);
1600 goto done;
1601 }
1602 SA(dst_buf)->sa_len = sizeof(dst_buf);
1603
1604 /*
1605 * getting variables onto the stack before dropping the lock
1606 */
1607 bif_dlt = (int)d->bd_bif->bif_dlt;
1608 bd_hdrcmplt = d->bd_hdrcmplt;
1609 bool batch_write = (d->bd_flags & BPF_BATCH_WRITE) ? true : false;
1610
1611 if (batch_write) {
1612 error = bpf_movein_batch(uio, d, &m, bd_hdrcmplt ? NULL : SA(dst_buf));
1613 if (error != 0) {
1614 goto done;
1615 }
1616 } else {
1617 error = bpf_movein(uio, resid, d, &m, bd_hdrcmplt ? NULL : SA(dst_buf));
1618 if (error != 0) {
1619 goto done;
1620 }
1621 bpf_set_packet_service_class(m, d->bd_traffic_class);
1622 }
1623
1624 /* verify the device is still open */
1625 if ((d->bd_flags & BPF_CLOSING) != 0) {
1626 error = ENXIO;
1627 goto done;
1628 }
1629
1630 if (d->bd_bif == NULL || d->bd_bif->bif_ifp != ifp) {
1631 error = ENXIO;
1632 goto done;
1633 }
1634
1635 bif_send = d->bd_bif->bif_send;
1636
1637 lck_mtx_unlock(bpf_mlock);
1638
1639 if (bd_hdrcmplt) {
1640 if (bif_send) {
1641 /*
1642 * Send one packet at a time: the driver frees the mbuf,
1643 * but we need to take care of the leftovers
1644 */
1645 while (m != NULL && error == 0) {
1646 struct mbuf *next = m->m_nextpkt;
1647
1648 m->m_nextpkt = NULL;
1649 error = bif_send(ifp, bif_dlt, m);
1650 m = next;
1651 }
1652 } else {
1653 error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL);
1654 /* Make sure we do not double free */
1655 m = NULL;
1656 }
1657 } else {
1658 error = dlil_output(ifp, PF_INET, m, NULL,
1659 SA(dst_buf), 0, NULL);
1660 /* Make sure we do not double free */
1661 m = NULL;
1662 }
1663
1664 lck_mtx_lock(bpf_mlock);
1665 done:
1666 if (error != 0 && m != NULL) {
1667 ++d->bd_wdcount;
1668 }
1669 if (m != NULL) {
1670 m_freem_list(m);
1671 }
1672 d->bd_hbuf_write = false;
1673 wakeup((caddr_t)d);
1674 bpf_release_d(d);
1675 lck_mtx_unlock(bpf_mlock);
1676
1677 return error;
1678 }
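/*
 * Editor's note: a hedged user-space sketch (not part of the source) of a
 * simple non-batched write: bind with BIOCSETIF, declare that writes carry a
 * complete link-layer header with BIOCSHDRCMPLT, then write one frame.
 */
#if 0 /* illustrative only */
#include <net/bpf.h>
#include <net/if.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

static int
send_frame(int fd, const char *ifname, const void *frame, size_t len)
{
	struct ifreq ifr;
	u_int one = 1;

	memset(&ifr, 0, sizeof(ifr));
	strlcpy(ifr.ifr_name, ifname, sizeof(ifr.ifr_name));
	if (ioctl(fd, BIOCSETIF, &ifr) == -1) {
		return -1;
	}
	if (ioctl(fd, BIOCSHDRCMPLT, &one) == -1) {
		return -1;
	}
	return (int)write(fd, frame, len);
}
#endif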
1679
1680 /*
1681 * Reset a descriptor by flushing its packet buffer and clearing the
1682 * receive and drop counts.
1683 */
1684 static void
1685 reset_d(struct bpf_d *d)
1686 {
1687 if (d->bd_hbuf_read) {
1688 panic("resetting buffers during read");
1689 }
1690
1691 if (d->bd_hbuf) {
1692 /* Free the hold buffer. */
1693 d->bd_fbuf = d->bd_hbuf;
1694 d->bd_hbuf = NULL;
1695 }
1696 d->bd_slen = 0;
1697 d->bd_hlen = 0;
1698 d->bd_scnt = 0;
1699 d->bd_hcnt = 0;
1700 d->bd_rcount = 0;
1701 d->bd_dcount = 0;
1702 d->bd_fcount = 0;
1703 d->bd_wcount = 0;
1704 d->bd_wdcount = 0;
1705
1706 d->bd_prev_slen = 0;
1707 }
1708
1709 static struct bpf_d *
1710 bpf_get_device_from_uuid(uuid_t uuid)
1711 {
1712 unsigned int i;
1713
1714 for (i = 0; i < nbpfilter; i++) {
1715 struct bpf_d *d = bpf_dtab[i];
1716
1717 if (d == NULL || d == BPF_DEV_RESERVED ||
1718 (d->bd_flags & BPF_CLOSING) != 0) {
1719 continue;
1720 }
1721 if (uuid_compare(uuid, d->bd_uuid) == 0) {
1722 return d;
1723 }
1724 }
1725
1726 return NULL;
1727 }
1728
1729 /*
1730 * The BIOCSETUP command "atomically" attaches to the interface and
1731 * copies the buffers from another descriptor. This minimizes the risk
1732 * of missing packets because it is done while holding
1733 * the BPF global lock
1734 */
1735 static int
1736 bpf_setup(struct bpf_d *d_to, uuid_t uuid_from, ifnet_t ifp)
1737 {
1738 struct bpf_d *d_from;
1739 int error = 0;
1740
1741 LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);
1742
1743 /*
1744 * Sanity checks
1745 */
1746 d_from = bpf_get_device_from_uuid(uuid_from);
1747 if (d_from == NULL) {
1748 error = ENOENT;
1749 os_log_error(OS_LOG_DEFAULT,
1750 "%s: uuids not found error %d",
1751 __func__, error);
1752 return error;
1753 }
1754 if (d_from->bd_opened_by != d_to->bd_opened_by) {
1755 error = EACCES;
1756 os_log_error(OS_LOG_DEFAULT,
1757 "%s: processes not matching error %d",
1758 __func__, error);
1759 return error;
1760 }
1761
1762 /*
1763 * Prevent any read or write while copying
1764 */
1765 while (d_to->bd_hbuf_read || d_to->bd_hbuf_write) {
1766 msleep((caddr_t)d_to, bpf_mlock, PRINET, __func__, NULL);
1767 }
1768 d_to->bd_hbuf_read = true;
1769 d_to->bd_hbuf_write = true;
1770
1771 while (d_from->bd_hbuf_read || d_from->bd_hbuf_write) {
1772 msleep((caddr_t)d_from, bpf_mlock, PRINET, __func__, NULL);
1773 }
1774 d_from->bd_hbuf_read = true;
1775 d_from->bd_hbuf_write = true;
1776
1777 /*
1778 * Verify the devices have not been closed
1779 */
1780 if (d_to->bd_flags & BPF_CLOSING) {
1781 error = ENXIO;
1782 os_log_error(OS_LOG_DEFAULT,
1783 "%s: d_to is closing error %d",
1784 __func__, error);
1785 goto done;
1786 }
1787 if (d_from->bd_flags & BPF_CLOSING) {
1788 error = ENXIO;
1789 os_log_error(OS_LOG_DEFAULT,
1790 "%s: d_from is closing error %d",
1791 __func__, error);
1792 goto done;
1793 }
1794
1795 /*
1796 * For now require the same buffer size
1797 */
1798 if (d_from->bd_bufsize != d_to->bd_bufsize) {
1799 error = EINVAL;
1800 os_log_error(OS_LOG_DEFAULT,
1801 "%s: bufsizes not matching error %d",
1802 __func__, error);
1803 goto done;
1804 }
1805
1806 /*
1807 * Copy relevant options and flags
1808 */
1809 d_to->bd_flags = d_from->bd_flags & (BPF_EXTENDED_HDR | BPF_WANT_PKTAP |
1810 BPF_FINALIZE_PKTAP | BPF_TRUNCATE | BPF_PKTHDRV2 |
1811 BPF_COMP_REQ | BPF_COMP_ENABLED);
1812
1813 d_to->bd_headdrop = d_from->bd_headdrop;
1814
1815 /*
1816 * Allocate and copy the buffers
1817 */
1818 error = bpf_allocbufs(d_to);
1819 if (error != 0) {
1820 goto done;
1821 }
1822
1823 /*
1824 * Make sure the buffers are setup as expected by bpf_setif()
1825 */
1826 ASSERT(d_to->bd_hbuf == NULL);
1827 ASSERT(d_to->bd_sbuf != NULL);
1828 ASSERT(d_to->bd_fbuf != NULL);
1829
1830 /*
1831 * Copy the buffers and update the pointers and counts
1832 */
1833 memcpy(d_to->bd_sbuf, d_from->bd_sbuf, d_from->bd_slen);
1834 d_to->bd_slen = d_from->bd_slen;
1835 d_to->bd_scnt = d_from->bd_scnt;
1836
1837 if (d_from->bd_hbuf != NULL) {
1838 d_to->bd_hbuf = d_to->bd_fbuf;
1839 d_to->bd_fbuf = NULL;
1840 memcpy(d_to->bd_hbuf, d_from->bd_hbuf, d_from->bd_hlen);
1841 }
1842 d_to->bd_hlen = d_from->bd_hlen;
1843 d_to->bd_hcnt = d_from->bd_hcnt;
1844
1845 if (d_to->bd_flags & BPF_COMP_REQ) {
1846 ASSERT(d_to->bd_prev_sbuf != NULL);
1847 ASSERT(d_to->bd_prev_fbuf != NULL);
1848
1849 d_to->bd_prev_slen = d_from->bd_prev_slen;
1850 ASSERT(d_to->bd_prev_slen <= BPF_HDR_COMP_LEN_MAX);
1851 memcpy(d_to->bd_prev_sbuf, d_from->bd_prev_sbuf, BPF_HDR_COMP_LEN_MAX);
1852 }
1853
1854 d_to->bd_bcs = d_from->bd_bcs;
1855
1856 /*
1857 * Attach to the interface:
1858 * - don't reset the buffers
1859 * - we already prevent reads and writes
1860 * - the buffers are already allocated
1861 */
1862 error = bpf_setif(d_to, ifp, false, true, true);
1863 if (error != 0) {
1864 os_log_error(OS_LOG_DEFAULT,
1865 "%s: bpf_setif() failed error %d",
1866 __func__, error);
1867 goto done;
1868 }
1869 done:
1870 d_from->bd_hbuf_read = false;
1871 d_from->bd_hbuf_write = false;
1872 wakeup((caddr_t)d_from);
1873
1874 d_to->bd_hbuf_read = false;
1875 d_to->bd_hbuf_write = false;
1876 wakeup((caddr_t)d_to);
1877
1878 return error;
1879 }
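/*
 * Editor's note: a hedged sketch (not part of the source) of the intended
 * user-space flow, assuming the BIOCGETUUID/BIOCSETUP ioctls and struct
 * bpf_setup_args (bsa_uuid, bsa_if_name) declared in net/bpf.h: fetch the
 * source descriptor's UUID, then atomically attach the new descriptor and
 * inherit the buffered packets.
 */
#if 0 /* illustrative only */
#include <net/bpf.h>
#include <string.h>
#include <sys/ioctl.h>
#include <uuid/uuid.h>

static int
clone_capture(int fd_from, int fd_to, const char *ifname)
{
	uuid_t uuid;
	struct bpf_setup_args bsa;

	if (ioctl(fd_from, BIOCGETUUID, &uuid) == -1) {
		return -1;
	}
	memset(&bsa, 0, sizeof(bsa));
	uuid_copy(bsa.bsa_uuid, uuid);
	strlcpy(bsa.bsa_if_name, ifname, sizeof(bsa.bsa_if_name));
	return ioctl(fd_to, BIOCSETUP, &bsa);
}
#endif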
1880
1881 #if DEVELOPMENT || DEBUG
1882 #define BPF_IOC_LIST \
1883 X(FIONREAD) \
1884 X(SIOCGIFADDR) \
1885 X(BIOCGBLEN) \
1886 X(BIOCSBLEN) \
1887 X(BIOCSETF32) \
1888 X(BIOCSETFNR32) \
1889 X(BIOCSETF64) \
1890 X(BIOCSETFNR64) \
1891 X(BIOCFLUSH) \
1892 X(BIOCPROMISC) \
1893 X(BIOCGDLT) \
1894 X(BIOCGDLTLIST) \
1895 X(BIOCSDLT) \
1896 X(BIOCGETIF) \
1897 X(BIOCSETIF) \
1898 X(BIOCSRTIMEOUT32) \
1899 X(BIOCSRTIMEOUT64) \
1900 X(BIOCGRTIMEOUT32) \
1901 X(BIOCGRTIMEOUT64) \
1902 X(BIOCGSTATS) \
1903 X(BIOCIMMEDIATE) \
1904 X(BIOCVERSION) \
1905 X(BIOCGHDRCMPLT) \
1906 X(BIOCSHDRCMPLT) \
1907 X(BIOCGSEESENT) \
1908 X(BIOCSSEESENT) \
1909 X(BIOCSETTC) \
1910 X(BIOCGETTC) \
1911 X(FIONBIO) \
1912 X(FIOASYNC) \
1913 X(BIOCSRSIG) \
1914 X(BIOCGRSIG) \
1915 X(BIOCSEXTHDR) \
1916 X(BIOCGIFATTACHCOUNT) \
1917 X(BIOCGWANTPKTAP) \
1918 X(BIOCSWANTPKTAP) \
1919 X(BIOCSHEADDROP) \
1920 X(BIOCGHEADDROP) \
1921 X(BIOCSTRUNCATE) \
1922 X(BIOCGETUUID) \
1923 X(BIOCSETUP) \
1924 X(BIOCSPKTHDRV2) \
1925 X(BIOCGHDRCOMP) \
1926 X(BIOCSHDRCOMP) \
1927 X(BIOCGHDRCOMPSTATS) \
1928 X(BIOCGHDRCOMPON) \
1929 X(BIOCGDIRECTION) \
1930 X(BIOCSDIRECTION) \
1931 X(BIOCSWRITEMAX) \
1932 X(BIOCGWRITEMAX) \
1933 X(BIOCGBATCHWRITE) \
1934 X(BIOCSBATCHWRITE)
1935
1936 static void
1937 log_bpf_ioctl_str(struct bpf_d *d, u_long cmd)
1938 {
1939 const char *p = NULL;
1940 char str[32];
1941
1942 #define X(x) case x: { p = #x ; printf("%s\n", p); break; }
1943 switch (cmd) {
1944 BPF_IOC_LIST
1945 }
1946 #undef X
1947 if (p == NULL) {
1948 snprintf(str, sizeof(str), "0x%08x", (unsigned int)cmd);
1949 p = str;
1950 }
1951 os_log(OS_LOG_DEFAULT, "bpfioctl bpf%u %s",
1952 d->bd_dev_minor, p);
1953 }
1954 #endif /* DEVELOPMENT || DEBUG */
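/*
 * Editor's note: BPF_IOC_LIST above is an "X macro": the list is written once
 * and expanded under different definitions of X(). A self-contained miniature
 * of the technique, with hypothetical names (not part of the source):
 */
#if 0 /* illustrative only */
#define COLOR_LIST X(RED) X(GREEN) X(BLUE)

#define X(c) COLOR_##c,
enum color { COLOR_LIST };		/* COLOR_RED, COLOR_GREEN, COLOR_BLUE */
#undef X

#define X(c) case COLOR_##c: return #c;
static const char *
color_name(enum color c)
{
	switch (c) {
		COLOR_LIST		/* one stringifying case per list entry */
	}
	return "unknown";
}
#undef X
#endif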
1955
1956 /*
1957 * FIONREAD Check for read packet available.
1958 * SIOCGIFADDR Get interface address - convenient hook to driver.
1959 * BIOCGBLEN Get buffer len [for read()].
1960 * BIOCSETF Set ethernet read filter.
1961 * BIOCFLUSH Flush read packet buffer.
1962 * BIOCPROMISC Put interface into promiscuous mode.
1963 * BIOCGDLT Get link layer type.
1964 * BIOCGETIF Get interface name.
1965 * BIOCSETIF Set interface.
1966 * BIOCSRTIMEOUT Set read timeout.
1967 * BIOCGRTIMEOUT Get read timeout.
1968 * BIOCGSTATS Get packet stats.
1969 * BIOCIMMEDIATE Set immediate mode.
1970 * BIOCVERSION Get filter language version.
1971 * BIOCGHDRCMPLT Get "header already complete" flag
1972 * BIOCSHDRCMPLT Set "header already complete" flag
1973 * BIOCGSEESENT Get "see packets sent" flag
1974 * BIOCSSEESENT Set "see packets sent" flag
1975 * BIOCSETTC Set traffic class.
1976 * BIOCGETTC Get traffic class.
1977 * BIOCSEXTHDR Set "extended header" flag
1978 * BIOCSHEADDROP Drop head of the buffer if user is not reading
1979 * BIOCGHEADDROP Get "head-drop" flag
1980 */
1981 /* ARGSUSED */
1982 int
1983 bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags,
1984 struct proc *p)
1985 {
1986 struct bpf_d *d;
1987 int error = 0;
1988 u_int int_arg;
1989 struct ifreq ifr = {};
1990
1991 lck_mtx_lock(bpf_mlock);
1992
1993 d = bpf_dtab[minor(dev)];
1994 if (d == NULL || d == BPF_DEV_RESERVED ||
1995 (d->bd_flags & BPF_CLOSING) != 0) {
1996 lck_mtx_unlock(bpf_mlock);
1997 return ENXIO;
1998 }
1999
2000 bpf_acquire_d(d);
2001
2002 if (d->bd_state == BPF_WAITING) {
2003 bpf_stop_timer(d);
2004 }
2005 d->bd_state = BPF_IDLE;
2006
2007 #if DEVELOPMENT || DEBUG
2008 if (bpf_debug > 0) {
2009 log_bpf_ioctl_str(d, cmd);
2010 }
2011 #endif /* DEVELOPMENT || DEBUG */
2012
2013 switch (cmd) {
2014 default:
2015 error = EINVAL;
2016 break;
2017
2018 /*
2019 * Check for read packet available.
2020 */
2021 case FIONREAD: /* int */
2022 {
2023 int n;
2024
2025 n = d->bd_slen;
2026 if (d->bd_hbuf && d->bd_hbuf_read) {
2027 n += d->bd_hlen;
2028 }
2029
2030 bcopy(&n, addr, sizeof(n));
2031 break;
2032 }
2033
2034 case SIOCGIFADDR: /* struct ifreq */
2035 {
2036 struct ifnet *ifp;
2037
2038 if (d->bd_bif == 0) {
2039 error = EINVAL;
2040 } else {
2041 ifp = d->bd_bif->bif_ifp;
2042 error = ifnet_ioctl(ifp, 0, cmd, addr);
2043 }
2044 break;
2045 }
2046
2047 /*
2048 * Get buffer len [for read()].
2049 */
2050 case BIOCGBLEN: /* u_int */
2051 bcopy(&d->bd_bufsize, addr, sizeof(u_int));
2052 break;
2053
2054 /*
2055 * Set buffer length.
2056 */
2057 case BIOCSBLEN: { /* u_int */
2058 u_int size;
2059
2060 if (d->bd_bif != 0 || (d->bd_flags & BPF_DETACHING)) {
2061 /*
2062 * Interface already attached, unable to change buffers
2063 */
2064 error = EINVAL;
2065 break;
2066 }
2067 bcopy(addr, &size, sizeof(size));
2068
2069 if (size > BPF_BUFSIZE_CAP) {
2070 d->bd_bufsize = BPF_BUFSIZE_CAP;
2071
2072 os_log_info(OS_LOG_DEFAULT,
2073 "bpf%d BIOCSBLEN capped to %u from %u",
2074 minor(dev), d->bd_bufsize, size);
2075 } else if (size < BPF_MINBUFSIZE) {
2076 d->bd_bufsize = BPF_MINBUFSIZE;
2077
2078 os_log_info(OS_LOG_DEFAULT,
2079 "bpf%d BIOCSBLEN bumped to %u from %u",
2080 minor(dev), d->bd_bufsize, size);
2081 } else {
2082 d->bd_bufsize = size;
2083
2084 os_log_info(OS_LOG_DEFAULT,
2085 "bpf%d BIOCSBLEN %u",
2086 minor(dev), d->bd_bufsize);
2087 }
2088
2089 /* It's a read/write ioctl */
2090 bcopy(&d->bd_bufsize, addr, sizeof(u_int));
2091 break;
2092 }
2093 /*
2094 * Set link layer read filter.
2095 */
2096 case BIOCSETF32:
2097 case BIOCSETFNR32: { /* struct bpf_program32 */
2098 struct bpf_program32 prg32;
2099
2100 bcopy(addr, &prg32, sizeof(prg32));
2101 error = bpf_setf(d, prg32.bf_len,
2102 CAST_USER_ADDR_T(prg32.bf_insns), cmd);
2103 break;
2104 }
2105
2106 case BIOCSETF64:
2107 case BIOCSETFNR64: { /* struct bpf_program64 */
2108 struct bpf_program64 prg64;
2109
2110 bcopy(addr, &prg64, sizeof(prg64));
2111 error = bpf_setf(d, prg64.bf_len, CAST_USER_ADDR_T(prg64.bf_insns), cmd);
2112 break;
2113 }
2114
2115 /*
2116 * Flush read packet buffer.
2117 */
2118 case BIOCFLUSH:
2119 while (d->bd_hbuf_read) {
2120 msleep((caddr_t)d, bpf_mlock, PRINET, "BIOCFLUSH",
2121 NULL);
2122 }
2123 if ((d->bd_flags & BPF_CLOSING) != 0) {
2124 error = ENXIO;
2125 break;
2126 }
2127 reset_d(d);
2128 break;
2129
2130 /*
2131 * Put interface into promiscuous mode.
2132 */
2133 case BIOCPROMISC:
2134 if (d->bd_bif == 0) {
2135 /*
2136 * No interface attached yet.
2137 */
2138 error = EINVAL;
2139 break;
2140 }
2141 if (d->bd_promisc == 0) {
2142 lck_mtx_unlock(bpf_mlock);
2143 error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
2144 lck_mtx_lock(bpf_mlock);
2145 if (error == 0) {
2146 d->bd_promisc = 1;
2147 }
2148 }
2149 break;
2150
2151 /*
2152 * Get device parameters.
2153 */
2154 case BIOCGDLT: /* u_int */
2155 if (d->bd_bif == 0) {
2156 error = EINVAL;
2157 } else {
2158 bcopy(&d->bd_bif->bif_dlt, addr, sizeof(u_int));
2159 }
2160 break;
2161
2162 /*
2163 * Get a list of supported data link types.
2164 */
2165 case BIOCGDLTLIST: /* struct bpf_dltlist */
2166 if (d->bd_bif == NULL) {
2167 error = EINVAL;
2168 } else {
2169 error = bpf_getdltlist(d, addr, p);
2170 }
2171 break;
2172
2173 /*
2174 * Set data link type.
2175 */
2176 case BIOCSDLT: /* u_int */
2177 if (d->bd_bif == NULL) {
2178 error = EINVAL;
2179 } else {
2180 u_int dlt;
2181
2182 bcopy(addr, &dlt, sizeof(dlt));
2183
2184 if (dlt == DLT_PKTAP &&
2185 !(d->bd_flags & BPF_WANT_PKTAP)) {
2186 dlt = DLT_RAW;
2187 }
2188 error = bpf_setdlt(d, dlt);
2189 }
2190 break;
2191
2192 /*
2193 * Get interface name.
2194 */
2195 case BIOCGETIF: /* struct ifreq */
2196 if (d->bd_bif == 0) {
2197 error = EINVAL;
2198 } else {
2199 struct ifnet *const ifp = d->bd_bif->bif_ifp;
2200
2201 snprintf(((struct ifreq *)(void *)addr)->ifr_name,
2202 sizeof(ifr.ifr_name), "%s", if_name(ifp));
2203 }
2204 break;
2205
2206 /*
2207 * Set interface.
2208 */
2209 case BIOCSETIF: { /* struct ifreq */
2210 ifnet_t ifp;
2211
2212 bcopy(addr, &ifr, sizeof(ifr));
2213 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
2214 ifp = ifunit(ifr.ifr_name);
2215 if (ifp == NULL) {
2216 error = ENXIO;
2217 } else {
2218 error = bpf_setif(d, ifp, true, false, false);
2219 }
2220 break;
2221 }
2222
2223 /*
2224 * Set read timeout.
2225 */
2226 case BIOCSRTIMEOUT32: { /* struct user32_timeval */
2227 struct user32_timeval _tv;
2228 struct timeval tv;
2229
2230 bcopy(addr, &_tv, sizeof(_tv));
2231 tv.tv_sec = _tv.tv_sec;
2232 tv.tv_usec = _tv.tv_usec;
2233
2234 /*
2235 * Subtract 1 tick from tvtohz() since this isn't
2236 * a one-shot timer.
2237 */
2238 if ((error = itimerfix(&tv)) == 0) {
2239 d->bd_rtout = tvtohz(&tv) - 1;
2240 }
2241 break;
2242 }
2243
2244 case BIOCSRTIMEOUT64: { /* struct user64_timeval */
2245 struct user64_timeval _tv;
2246 struct timeval tv;
2247
2248 bcopy(addr, &_tv, sizeof(_tv));
2249 tv.tv_sec = (__darwin_time_t)_tv.tv_sec;
2250 tv.tv_usec = _tv.tv_usec;
2251
2252 /*
2253 * Subtract 1 tick from tvtohz() since this isn't
2254 * a one-shot timer.
2255 */
2256 if ((error = itimerfix(&tv)) == 0) {
2257 d->bd_rtout = tvtohz(&tv) - 1;
2258 }
2259 break;
2260 }
2261
2262 /*
2263 * Get read timeout.
2264 */
2265 case BIOCGRTIMEOUT32: { /* struct user32_timeval */
2266 struct user32_timeval tv;
2267
2268 bzero(&tv, sizeof(tv));
2269 tv.tv_sec = d->bd_rtout / hz;
2270 tv.tv_usec = (d->bd_rtout % hz) * tick;
2271 bcopy(&tv, addr, sizeof(tv));
2272 break;
2273 }
2274
2275 case BIOCGRTIMEOUT64: { /* struct user64_timeval */
2276 struct user64_timeval tv;
2277
2278 bzero(&tv, sizeof(tv));
2279 tv.tv_sec = d->bd_rtout / hz;
2280 tv.tv_usec = (d->bd_rtout % hz) * tick;
2281 bcopy(&tv, addr, sizeof(tv));
2282 break;
2283 }
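
	/*
	 * Worked example of the tick conversions above (assuming a
	 * hypothetical hz = 100 and tick = 10000 microseconds): the get
	 * path maps bd_rtout = 150 ticks back to 150 / 100 = 1 s plus
	 * (150 % 100) * 10000 = 500000 us, i.e. 1.5 seconds. The set path
	 * stores tvtohz() minus one tick, so the exact count stored for a
	 * given timeval depends on the running kernel's hz.
	 */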

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS: {		/* struct bpf_stat */
		struct bpf_stat bs;

		bzero(&bs, sizeof(bs));
		bs.bs_recv = (u_int)d->bd_rcount;
		bs.bs_drop = (u_int)d->bd_dcount;
		bcopy(&bs, addr, sizeof(bs));
		break;
	}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:		/* u_int */
		d->bd_immediate = *(u_char *)(void *)addr;
		break;

	case BIOCVERSION: {		/* struct bpf_version */
		struct bpf_version bv;

		bzero(&bv, sizeof(bv));
		bv.bv_major = BPF_MAJOR_VERSION;
		bv.bv_minor = BPF_MINOR_VERSION;
		bcopy(&bv, addr, sizeof(bv));
		break;
	}

	/*
	 * Get "header already complete" flag
	 */
	case BIOCGHDRCMPLT:		/* u_int */
		bcopy(&d->bd_hdrcmplt, addr, sizeof(u_int));
		break;

	/*
	 * Set "header already complete" flag
	 */
	case BIOCSHDRCMPLT:		/* u_int */
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg == 0 && (d->bd_flags & BPF_BATCH_WRITE)) {
			os_log(OS_LOG_DEFAULT,
			    "bpf%u cannot set BIOCSHDRCMPLT when BIOCSBATCHWRITE is set",
			    d->bd_dev_minor);
			error = EINVAL;
			break;
		}
		d->bd_hdrcmplt = int_arg ? 1 : 0;
		break;

	/*
	 * Get "see sent packets" flag
	 */
	case BIOCGSEESENT: {		/* u_int */
		int_arg = 0;

		if (d->bd_direction & BPF_D_OUT) {
			int_arg = 1;
		}
		bcopy(&int_arg, addr, sizeof(u_int));
		break;
	}
	/*
	 * Set "see sent packets" flag
	 */
	case BIOCSSEESENT: {		/* u_int */
		bcopy(addr, &int_arg, sizeof(u_int));

		if (int_arg == 0) {
			d->bd_direction = BPF_D_IN;
		} else {
			d->bd_direction = BPF_D_INOUT;
		}
		break;
	}
	/*
	 * Get direction of tapped packets that can be seen for reading
	 */
	case BIOCGDIRECTION: {		/* u_int */
		int_arg = d->bd_direction;

		bcopy(&int_arg, addr, sizeof(u_int));
		break;
	}
	/*
	 * Set direction of tapped packets that can be seen for reading
	 */
	case BIOCSDIRECTION: {		/* u_int */
		bcopy(addr, &int_arg, sizeof(u_int));

		switch (int_arg) {
		case BPF_D_NONE:
		case BPF_D_IN:
		case BPF_D_OUT:
		case BPF_D_INOUT:
			d->bd_direction = int_arg;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	}
	/*
	 * Set traffic service class
	 */
	case BIOCSETTC: {		/* int */
		int tc;

		bcopy(addr, &tc, sizeof(int));
		if (tc != 0 && (d->bd_flags & BPF_BATCH_WRITE)) {
			os_log(OS_LOG_DEFAULT,
			    "bpf%u cannot set BIOCSETTC when BIOCSBATCHWRITE is set",
			    d->bd_dev_minor);
			error = EINVAL;
			break;
		}
		error = bpf_set_traffic_class(d, tc);
		break;
	}

	/*
	 * Get traffic service class
	 */
	case BIOCGETTC:			/* int */
		bcopy(&d->bd_traffic_class, addr, sizeof(int));
		break;

	case FIONBIO:			/* Non-blocking I/O; int */
		break;

	case FIOASYNC:			/* Send signal on receive packets; int */
		bcopy(addr, &d->bd_async, sizeof(int));
		break;

	case BIOCSRSIG: {		/* Set receive signal; u_int */
		u_int sig;

		bcopy(addr, &sig, sizeof(u_int));

		if (sig >= NSIG) {
			error = EINVAL;
		} else {
			d->bd_sig = sig;
		}
		break;
	}
	case BIOCGRSIG:			/* u_int */
		bcopy(&d->bd_sig, addr, sizeof(u_int));
		break;

	case BIOCSEXTHDR:		/* u_int */
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg) {
			d->bd_flags |= BPF_EXTENDED_HDR;
		} else {
			d->bd_flags &= ~BPF_EXTENDED_HDR;
		}
		break;

	case BIOCGIFATTACHCOUNT: {	/* struct ifreq */
		ifnet_t ifp;
		struct bpf_if *bp;

		bcopy(addr, &ifr, sizeof(ifr));
		ifr.ifr_name[IFNAMSIZ - 1] = '\0';
		ifp = ifunit(ifr.ifr_name);
		if (ifp == NULL) {
			error = ENXIO;
			break;
		}
		ifr.ifr_intval = 0;
		for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
			struct bpf_d *bpf_d;

			if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) {
				continue;
			}
			for (bpf_d = bp->bif_dlist; bpf_d;
			    bpf_d = bpf_d->bd_next) {
				ifr.ifr_intval += 1;
			}
		}
		bcopy(&ifr, addr, sizeof(ifr));
		break;
	}
	case BIOCGWANTPKTAP:		/* u_int */
		int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;

	case BIOCSWANTPKTAP:		/* u_int */
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg) {
			d->bd_flags |= BPF_WANT_PKTAP;
		} else {
			d->bd_flags &= ~BPF_WANT_PKTAP;
		}
		break;

	case BIOCSHEADDROP:
		bcopy(addr, &int_arg, sizeof(int_arg));
		d->bd_headdrop = int_arg ? 1 : 0;
		break;

	case BIOCGHEADDROP:
		bcopy(&d->bd_headdrop, addr, sizeof(int));
		break;

	case BIOCSTRUNCATE:
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg) {
			d->bd_flags |= BPF_TRUNCATE;
		} else {
			d->bd_flags &= ~BPF_TRUNCATE;
		}
		break;

	case BIOCGETUUID:
		bcopy(&d->bd_uuid, addr, sizeof(uuid_t));
		break;

	case BIOCSETUP: {
		struct bpf_setup_args bsa;
		ifnet_t ifp;

		bcopy(addr, &bsa, sizeof(struct bpf_setup_args));
		bsa.bsa_ifname[IFNAMSIZ - 1] = 0;
		ifp = ifunit(bsa.bsa_ifname);
		if (ifp == NULL) {
			error = ENXIO;
			os_log_error(OS_LOG_DEFAULT,
			    "%s: ifnet not found for %s error %d",
			    __func__, bsa.bsa_ifname, error);
			break;
		}

		error = bpf_setup(d, bsa.bsa_uuid, ifp);
		break;
	}
	case BIOCSPKTHDRV2:
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg != 0) {
			d->bd_flags |= BPF_PKTHDRV2;
		} else {
			d->bd_flags &= ~BPF_PKTHDRV2;
		}
		break;

	case BIOCGPKTHDRV2:
		int_arg = d->bd_flags & BPF_PKTHDRV2 ? 1 : 0;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;

	case BIOCGHDRCOMP:
		int_arg = d->bd_flags & BPF_COMP_REQ ? 1 : 0;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;
	case BIOCSHDRCOMP:
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg != 0 && int_arg != 1) {
			/*
			 * Take the common error path so the descriptor
			 * reference and bpf_mlock acquired above are released.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_bif != 0 || (d->bd_flags & BPF_DETACHING)) {
			/*
			 * Interface already attached, the header compression
			 * setting can no longer be changed
			 */
			error = EINVAL;
			break;
		}
		if (int_arg != 0) {
			d->bd_flags |= BPF_COMP_REQ;
			if (bpf_hdr_comp_enable != 0) {
				d->bd_flags |= BPF_COMP_ENABLED;
			}
		} else {
			d->bd_flags &= ~(BPF_COMP_REQ | BPF_COMP_ENABLED);
		}
		break;
	case BIOCGHDRCOMPON:
		int_arg = d->bd_flags & BPF_COMP_ENABLED ? 1 : 0;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;

	case BIOCGHDRCOMPSTATS: {
		struct bpf_comp_stats bcs = {};

		bcs = d->bd_bcs;

		bcopy(&bcs, addr, sizeof(bcs));
		break;
	}
	case BIOCSWRITEMAX:
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg > BPF_WRITE_MAX) {
			/* log the rejected value, not the current setting */
			os_log(OS_LOG_DEFAULT, "bpf%u bd_write_size_max %u too big",
			    d->bd_dev_minor, int_arg);
			error = EINVAL;
			break;
		}
		d->bd_write_size_max = int_arg;
		break;

	case BIOCGWRITEMAX:
		int_arg = d->bd_write_size_max;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;

	case BIOCGBATCHWRITE:		/* int */
		int_arg = d->bd_flags & BPF_BATCH_WRITE ? 1 : 0;
		bcopy(&int_arg, addr, sizeof(int_arg));
		break;

	case BIOCSBATCHWRITE:		/* int */
		bcopy(addr, &int_arg, sizeof(int_arg));
		if (int_arg != 0) {
			if (d->bd_hdrcmplt == 0) {
				os_log(OS_LOG_DEFAULT,
				    "bpf%u cannot set BIOCSBATCHWRITE when BIOCSHDRCMPLT is not set",
				    d->bd_dev_minor);
				error = EINVAL;
				break;
			}
			if (d->bd_traffic_class != 0) {
				os_log(OS_LOG_DEFAULT,
				    "bpf%u cannot set BIOCSBATCHWRITE when BIOCSETTC is set",
				    d->bd_dev_minor);
				error = EINVAL;
				break;
			}
			d->bd_flags |= BPF_BATCH_WRITE;
		} else {
			d->bd_flags &= ~BPF_BATCH_WRITE;
		}
		break;
	}
	bpf_release_d(d);
	lck_mtx_unlock(bpf_mlock);

	return error;
}

/*
 * Set d's packet filter program to the one described by bf_insns/bf_len.
 * If this file already has a filter, free it and replace it.
 * Returns EINVAL for bogus requests.
 */
static int
bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns,
    u_long cmd)
{
	struct bpf_insn *fcode, *old;
	u_int flen, size;

	while (d->bd_hbuf_read) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setf", NULL);
	}

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		return ENXIO;
	}

	old = d->bd_filter;
	if (bf_insns == USER_ADDR_NULL) {
		if (bf_len != 0) {
			return EINVAL;
		}
		d->bd_filter = NULL;
		reset_d(d);
		if (old != 0) {
			kfree_data_addr(old);
		}
		return 0;
	}
	flen = bf_len;
	if (flen > BPF_MAXINSNS) {
		return EINVAL;
	}

	size = flen * sizeof(struct bpf_insn);
	fcode = (struct bpf_insn *)kalloc_data(size, Z_WAITOK | Z_ZERO);
	if (fcode == NULL) {
		return ENOMEM;
	}
	if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
	    bpf_validate(fcode, (int)flen)) {
		d->bd_filter = fcode;

		if (cmd == BIOCSETF32 || cmd == BIOCSETF64) {
			reset_d(d);
		}

		if (old != 0) {
			kfree_data_addr(old);
		}

		return 0;
	}
	kfree_data(fcode, size);
	return EINVAL;
}
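
/*
 * Illustrative user-space sketch: installing a minimal filter program
 * through the BIOCSETF path handled above. A single BPF_RET statement
 * accepts every packet and caps the snapshot at 256 bytes; validation
 * happens in bpf_validate() before the program is installed, and the
 * BIOCSETF variants (unlike BIOCSETFNR) also flush buffers via reset_d().
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET + BPF_K, 256),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 */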

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface specified by theywant.
 * Return an errno or 0.
 */
static int
bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset,
    bool has_hbuf_read_write, bool has_bufs_allocated)
{
	struct bpf_if *bp;
	int error;

	while (!has_hbuf_read_write && (d->bd_hbuf_read || d->bd_hbuf_write)) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setif", NULL);
	}

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		return ENXIO;
	}

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
		struct ifnet *ifp = bp->bif_ifp;

		if (ifp == 0 || ifp != theywant) {
			continue;
		}
		/*
		 * Do not use DLT_PKTAP, unless requested explicitly
		 */
		if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
			continue;
		}
		/*
		 * Skip the coprocessor interface
		 */
		if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) {
			continue;
		}
		/*
		 * We found the requested interface.
		 * Allocate the packet buffers.
		 */
		if (has_bufs_allocated == false) {
			error = bpf_allocbufs(d);
			if (error != 0) {
				return error;
			}
		}
		/*
		 * Detach if attached to something else.
		 */
		if (bp != d->bd_bif) {
			if (d->bd_bif != NULL) {
				if (bpf_detachd(d) != 0) {
					return ENXIO;
				}
			}
			if (bpf_attachd(d, bp) != 0) {
				return ENXIO;
			}
		}
		if (do_reset) {
			reset_d(d);
		}
		os_log(OS_LOG_DEFAULT, "bpf%u attached to %s",
		    d->bd_dev_minor, if_name(theywant));
		return 0;
	}
	/* Not found. */
	return ENXIO;
}

/*
 * Get a list of the available data link types of the interface.
 */
static int
bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p)
{
	u_int n;
	int error;
	struct ifnet *ifp;
	struct bpf_if *bp;
	user_addr_t dlist;
	struct bpf_dltlist bfl;

	bcopy(addr, &bfl, sizeof(bfl));
	if (proc_is64bit(p)) {
		dlist = (user_addr_t)bfl.bfl_u.bflu_pad;
	} else {
		dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list);
	}

	ifp = d->bd_bif->bif_ifp;
	n = 0;
	error = 0;

	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			continue;
		}
		/*
		 * Do not use DLT_PKTAP, unless requested explicitly
		 */
		if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
			continue;
		}
		if (dlist != USER_ADDR_NULL) {
			if (n >= bfl.bfl_len) {
				return ENOMEM;
			}
			error = copyout(&bp->bif_dlt, dlist,
			    sizeof(bp->bif_dlt));
			if (error != 0) {
				break;
			}
			dlist += sizeof(bp->bif_dlt);
		}
		n++;
	}
	bfl.bfl_len = n;
	bcopy(&bfl, addr, sizeof(bfl));

	return error;
}
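
/*
 * Illustrative user-space sketch: because the list pointer may be NULL
 * (in which case only the count is returned in bfl_len), the usual
 * pattern is two calls -- one to size the list, one to fetch it.
 *
 *	struct bpf_dltlist bfl = {};
 *	ioctl(fd, BIOCGDLTLIST, &bfl);           // bfl_len set to the count
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);           // now fills the array
 */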

/*
 * Set the data link type of a BPF instance.
 */
static int
bpf_setdlt(struct bpf_d *d, uint32_t dlt)
{
	int error, opromisc;
	struct ifnet *ifp;
	struct bpf_if *bp;

	if (d->bd_bif->bif_dlt == dlt) {
		return 0;
	}

	while (d->bd_hbuf_read) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setdlt", NULL);
	}

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		return ENXIO;
	}

	ifp = d->bd_bif->bif_ifp;
	for (bp = bpf_iflist; bp; bp = bp->bif_next) {
		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) {
			/*
			 * Do not use DLT_PKTAP, unless requested explicitly
			 */
			if (bp->bif_dlt == DLT_PKTAP &&
			    !(d->bd_flags & BPF_WANT_PKTAP)) {
				continue;
			}
			break;
		}
	}
	if (bp != NULL) {
		opromisc = d->bd_promisc;
		if (bpf_detachd(d) != 0) {
			return ENXIO;
		}
		error = bpf_attachd(d, bp);
		if (error != 0) {
			os_log_error(OS_LOG_DEFAULT,
			    "bpf_setdlt: bpf%d bpf_attachd %s error %d",
			    d->bd_dev_minor, if_name(bp->bif_ifp),
			    error);
			return error;
		}
		reset_d(d);
		if (opromisc) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(bp->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error != 0) {
				os_log_error(OS_LOG_DEFAULT,
				    "bpf_setdlt: bpf%d ifpromisc %s error %d",
				    d->bd_dev_minor, if_name(bp->bif_ifp), error);
			} else {
				d->bd_promisc = 1;
			}
		}
	}
	return bp == NULL ? EINVAL : 0;
}

static int
bpf_set_traffic_class(struct bpf_d *d, int tc)
{
	int error = 0;

	if (!SO_VALID_TC(tc)) {
		error = EINVAL;
	} else {
		d->bd_traffic_class = tc;
	}

	return error;
}

static void
bpf_set_packet_service_class(struct mbuf *m, int tc)
{
	if (!(m->m_flags & M_PKTHDR)) {
		return;
	}

	VERIFY(SO_VALID_TC(tc));
	(void) m_set_service_class(m, so_tc2msc(tc));
}

/*
 * Support for select()
 *
 * Return true iff the specific operation will not block indefinitely.
 * Otherwise, return false but make a note that a selwakeup() must be done.
 */
int
bpfselect(dev_t dev, int which, void *wql, struct proc *p)
{
	struct bpf_d *d;
	int ret = 0;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == NULL || d == BPF_DEV_RESERVED ||
	    (d->bd_flags & BPF_CLOSING) != 0) {
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	bpf_acquire_d(d);

	if (d->bd_bif == NULL) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	while (d->bd_hbuf_read) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpfselect", NULL);
	}

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	switch (which) {
	case FREAD:
		if (d->bd_hlen != 0 ||
		    ((d->bd_immediate ||
		    d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) {
			ret = 1; /* read has data to return */
		} else {
			/*
			 * Read has no data to return.
			 * Make the select wait, and start a timer if
			 * necessary.
			 */
			selrecord(p, &d->bd_sel, wql);
			bpf_start_timer(d);
		}
		break;

	case FWRITE:
		/* can't determine whether a write would block */
		ret = 1;
		break;
	}

	bpf_release_d(d);
	lck_mtx_unlock(bpf_mlock);

	return ret;
}
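
/*
 * Illustrative user-space sketch: waiting for captured data with
 * select(). A read timeout set via BIOCSRTIMEOUT bounds the wait
 * through the timer started above.
 *
 *	fd_set rfds;
 *	FD_ZERO(&rfds);
 *	FD_SET(fd, &rfds);
 *	if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0) {
 *		ssize_t n = read(fd, buf, blen);   // returns the hold buffer
 *	}
 */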

/*
 * Support for kevent() system call. Register EVFILT_READ filters and
 * reject all others.
 */
int bpfkqfilter(dev_t dev, struct knote *kn);
static void filt_bpfdetach(struct knote *);
static int filt_bpfread(struct knote *, long);
static int filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev);
static int filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev);

SECURITY_READ_ONLY_EARLY(struct filterops) bpfread_filtops = {
	.f_isfd = 1,
	.f_detach = filt_bpfdetach,
	.f_event = filt_bpfread,
	.f_touch = filt_bpftouch,
	.f_process = filt_bpfprocess,
};
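
/*
 * Illustrative user-space sketch: registering the EVFILT_READ filter
 * implemented below. A NOTE_LOWAT threshold passed in the kevent maps
 * to the knote low watermark that filt_bpfread_common() compares
 * against in immediate mode.
 *
 *	int kq = kqueue();
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);
 */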

static int
filt_bpfread_common(struct knote *kn, struct kevent_qos_s *kev, struct bpf_d *d)
{
	int ready = 0;
	int64_t data = 0;

	if (d->bd_immediate) {
		/*
		 * If there's data in the hold buffer, it's the
		 * amount of data a read will return.
		 *
		 * If there's no data in the hold buffer, but
		 * there's data in the store buffer, a read will
		 * immediately rotate the store buffer to the
		 * hold buffer, so the amount of data in the store
		 * buffer is the amount of data a read will
		 * return.
		 *
		 * If there's no data in either buffer, we're not
		 * ready to read.
		 */
		data = (d->bd_hlen == 0 || d->bd_hbuf_read ?
		    d->bd_slen : d->bd_hlen);
		int64_t lowwat = knote_low_watermark(kn);
		if (lowwat > d->bd_bufsize) {
			lowwat = d->bd_bufsize;
		}
		ready = (data >= lowwat);
	} else {
		/*
		 * If there's data in the hold buffer, it's the
		 * amount of data a read will return.
		 *
		 * If there's no data in the hold buffer, but
		 * there's data in the store buffer, if the
		 * timer has expired a read will immediately
		 * rotate the store buffer to the hold buffer,
		 * so the amount of data in the store buffer is
		 * the amount of data a read will return.
		 *
		 * If there's no data in either buffer, or there's
		 * no data in the hold buffer and the timer hasn't
		 * expired, we're not ready to read.
		 */
		data = ((d->bd_hlen == 0 || d->bd_hbuf_read) &&
		    d->bd_state == BPF_TIMED_OUT ? d->bd_slen : d->bd_hlen);
		ready = (data > 0);
	}
	if (!ready) {
		bpf_start_timer(d);
	} else if (kev) {
		knote_fill_kevent(kn, kev, data);
	}

	return ready;
}

int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;
	int res;

	/*
	 * Is this device a bpf?
	 */
	if (major(dev) != CDEV_MAJOR || kn->kn_filter != EVFILT_READ) {
		knote_set_error(kn, EINVAL);
		return 0;
	}

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];

	if (d == NULL || d == BPF_DEV_RESERVED ||
	    (d->bd_flags & BPF_CLOSING) != 0 ||
	    d->bd_bif == NULL) {
		lck_mtx_unlock(bpf_mlock);
		knote_set_error(kn, ENXIO);
		return 0;
	}

	kn->kn_filtid = EVFILTID_BPFREAD;
	knote_kn_hook_set_raw(kn, d);
	KNOTE_ATTACH(&d->bd_sel.si_note, kn);
	d->bd_flags |= BPF_KNOTE;

	/* capture the current state */
	res = filt_bpfread_common(kn, NULL, d);

	lck_mtx_unlock(bpf_mlock);

	return res;
}

static void
filt_bpfdetach(struct knote *kn)
{
	struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);

	lck_mtx_lock(bpf_mlock);
	if (d->bd_flags & BPF_KNOTE) {
		KNOTE_DETACH(&d->bd_sel.si_note, kn);
		d->bd_flags &= ~BPF_KNOTE;
	}
	lck_mtx_unlock(bpf_mlock);
}

static int
filt_bpfread(struct knote *kn, long hint)
{
#pragma unused(hint)
	struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);

	return filt_bpfread_common(kn, NULL, d);
}

static int
filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev)
{
	struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
	int res;

	lck_mtx_lock(bpf_mlock);

	/* save off the lowat threshold and flag */
	kn->kn_sdata = kev->data;
	kn->kn_sfflags = kev->fflags;

	/* output data will be re-generated here */
	res = filt_bpfread_common(kn, NULL, d);

	lck_mtx_unlock(bpf_mlock);

	return res;
}

static int
filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev)
{
	struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
	int res;

	lck_mtx_lock(bpf_mlock);
	res = filt_bpfread_common(kn, kev, d);
	lck_mtx_unlock(bpf_mlock);

	return res;
}

/*
 * Copy data from an mbuf chain into a buffer. This code is derived
 * from m_copydata in kern/uipc_mbuf.c.
 */
static void
bpf_mcopy(struct mbuf *m, void *dst_arg, size_t len, size_t offset)
{
	u_int count;
	u_char *dst;

	dst = dst_arg;

	while (offset >= m->m_len) {
		offset -= m->m_len;
		m = m->m_next;
		if (m == NULL) {
			panic("bpf_mcopy");
		}
	}

	while (len > 0) {
		if (m == NULL) {
			panic("bpf_mcopy");
		}
		count = MIN(m->m_len - (u_int)offset, (u_int)len);
		bcopy((u_char *)mbuf_data(m) + offset, dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
		offset = 0;
	}
}

static inline void
bpf_tap_imp(
	ifnet_t ifp,
	u_int32_t dlt,
	struct bpf_packet *bpf_pkt,
	int outbound)
{
	struct bpf_d *d;
	u_int slen;
	struct bpf_if *bp;

	/*
	 * It's possible that we get here after the bpf descriptor has been
	 * detached from the interface; in such a case we simply return.
	 * Lock ordering is important since we can be called asynchronously
	 * (from IOKit) to process an inbound packet; when that happens
	 * we would have been holding its "gateLock" and will be acquiring
	 * "bpf_mlock" upon entering this routine. Due to that, we release
	 * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
	 * acquire "gateLock" in IOKit), in order to avoid a deadlock
	 * when an ifnet_set_promiscuous request simultaneously collides with
	 * an inbound packet being passed into the tap callback.
	 */
	lck_mtx_lock(bpf_mlock);
	if (ifp->if_bpf == NULL) {
		lck_mtx_unlock(bpf_mlock);
		return;
	}
	for (bp = ifp->if_bpf; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			/* wrong interface */
			bp = NULL;
			break;
		}
		if (dlt == 0 || bp->bif_dlt == dlt) {
			/* tapping default DLT or DLT matches */
			break;
		}
	}
	if (bp == NULL) {
		goto done;
	}
	for (d = bp->bif_dlist; d != NULL; d = d->bd_next) {
		struct bpf_packet *bpf_pkt_saved = bpf_pkt;
		struct bpf_packet bpf_pkt_tmp = {};
		struct pktap_header_buffer bpfp_header_tmp = {};

		if (outbound && (d->bd_direction & BPF_D_OUT) == 0) {
			continue;
		}
		if (!outbound && (d->bd_direction & BPF_D_IN) == 0) {
			continue;
		}

		++d->bd_rcount;
		slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt,
		    (u_int)bpf_pkt->bpfp_total_length, 0);

		if (slen != 0) {
			if (bp->bif_ifp->if_type == IFT_PKTAP &&
			    bp->bif_dlt == DLT_PKTAP) {
				if (d->bd_flags & BPF_TRUNCATE) {
					slen = min(slen, get_pkt_trunc_len(bpf_pkt));
				}
				/*
				 * Need to copy the bpf_pkt because the conversion
				 * to v2 pktap header modifies the content of the
				 * bpfp_header
				 */
				if ((d->bd_flags & BPF_PKTHDRV2) &&
				    bpf_pkt->bpfp_header_length <= sizeof(bpfp_header_tmp)) {
					bpf_pkt_tmp = *bpf_pkt;

					bpf_pkt = &bpf_pkt_tmp;

					memcpy(&bpfp_header_tmp, bpf_pkt->bpfp_header,
					    bpf_pkt->bpfp_header_length);

					bpf_pkt->bpfp_header = &bpfp_header_tmp;

					convert_to_pktap_header_to_v2(bpf_pkt,
					    !!(d->bd_flags & BPF_TRUNCATE));
				}
			}
			++d->bd_fcount;
			catchpacket(d, bpf_pkt, slen, outbound);
		}
		bpf_pkt = bpf_pkt_saved;
	}

done:
	lck_mtx_unlock(bpf_mlock);
}

static inline void
bpf_tap_mbuf(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void *hdr,
	size_t hlen,
	int outbound)
{
	struct bpf_packet bpf_pkt;
	struct mbuf *m0;

	if (ifp->if_bpf == NULL) {
		/* quickly check without taking lock */
		return;
	}
	bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
	bpf_pkt.bpfp_mbuf = m;
	bpf_pkt.bpfp_total_length = 0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next) {
		bpf_pkt.bpfp_total_length += m0->m_len;
	}
	bpf_pkt.bpfp_header = hdr;
	if (hdr != NULL) {
		bpf_pkt.bpfp_total_length += hlen;
		bpf_pkt.bpfp_header_length = hlen;
	} else {
		bpf_pkt.bpfp_header_length = 0;
	}
	bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
}

void
bpf_tap_out(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void *hdr,
	size_t hlen)
{
	bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1);
}

void
bpf_tap_in(
	ifnet_t ifp,
	u_int32_t dlt,
	mbuf_t m,
	void *hdr,
	size_t hlen)
{
	bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0);
}
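
/*
 * Illustrative driver-side sketch (hypothetical caller): an Ethernet
 * driver taps frames around its hand-off points, e.g.
 *
 *	bpf_tap_in(ifp, DLT_EN10MB, m, NULL, 0);    // before ifnet input
 *	bpf_tap_out(ifp, DLT_EN10MB, m, NULL, 0);   // after building the frame
 *
 * Passing dlt == 0 taps the default DLT, as bpf_tap_callback() does below.
 */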

/* Callback registered with Ethernet driver. */
static int
bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
{
	bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL);

	return 0;
}

#if SKYWALK
#include <skywalk/os_skywalk_private.h>

static void
bpf_pktcopy(kern_packet_t pkt, void *dst_arg, size_t len, size_t offset)
{
	kern_buflet_t buflet = NULL;
	size_t count;
	u_char *dst;

	dst = dst_arg;
	while (len > 0) {
		uint8_t *addr;
		u_int32_t buflet_length;

		buflet = kern_packet_get_next_buflet(pkt, buflet);
		VERIFY(buflet != NULL);
		addr = kern_buflet_get_data_address(buflet);
		VERIFY(addr != NULL);
		addr += kern_buflet_get_data_offset(buflet);
		buflet_length = kern_buflet_get_data_length(buflet);
		if (offset >= buflet_length) {
			offset -= buflet_length;
			continue;
		}
		count = MIN(buflet_length - offset, len);
		bcopy((void *)(addr + offset), (void *)dst, count);
		dst += count;
		len -= count;
		offset = 0;
	}
}

static inline void
bpf_tap_packet(
	ifnet_t ifp,
	u_int32_t dlt,
	kern_packet_t pkt,
	void *hdr,
	size_t hlen,
	int outbound)
{
	struct bpf_packet bpf_pkt;
	struct mbuf *m;

	if (ifp->if_bpf == NULL) {
		/* quickly check without taking lock */
		return;
	}
	m = kern_packet_get_mbuf(pkt);
	if (m != NULL) {
		bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
		bpf_pkt.bpfp_mbuf = m;
		bpf_pkt.bpfp_total_length = m_length(m);
	} else {
		bpf_pkt.bpfp_type = BPF_PACKET_TYPE_PKT;
		bpf_pkt.bpfp_pkt = pkt;
		bpf_pkt.bpfp_total_length = kern_packet_get_data_length(pkt);
	}
	bpf_pkt.bpfp_header = hdr;
	bpf_pkt.bpfp_header_length = hlen;
	if (hlen != 0) {
		bpf_pkt.bpfp_total_length += hlen;
	}
	bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
}

void
bpf_tap_packet_out(
	ifnet_t ifp,
	u_int32_t dlt,
	kern_packet_t pkt,
	void *hdr,
	size_t hlen)
{
	bpf_tap_packet(ifp, dlt, pkt, hdr, hlen, 1);
}

void
bpf_tap_packet_in(
	ifnet_t ifp,
	u_int32_t dlt,
	kern_packet_t pkt,
	void *hdr,
	size_t hlen)
{
	bpf_tap_packet(ifp, dlt, pkt, hdr, hlen, 0);
}

#endif /* SKYWALK */

static errno_t
bpf_copydata(struct bpf_packet *pkt, size_t off, size_t len, void *out_data)
{
	errno_t err = 0;

	if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) {
		err = mbuf_copydata(pkt->bpfp_mbuf, off, len, out_data);
#if SKYWALK
	} else if (pkt->bpfp_type == BPF_PACKET_TYPE_PKT) {
		err = kern_packet_copy_bytes(pkt->bpfp_pkt, off, len, out_data);
#endif /* SKYWALK */
	} else {
		err = EINVAL;
	}

	return err;
}

static void
copy_bpf_packet_offset(struct bpf_packet *pkt, void *dst, size_t len, size_t offset)
{
	/* copy the optional header */
	if (offset < pkt->bpfp_header_length) {
		size_t count = MIN(len, pkt->bpfp_header_length - offset);
		caddr_t src = (caddr_t)pkt->bpfp_header;

		bcopy(src + offset, dst, count);
		len -= count;
		dst = (void *)((uintptr_t)dst + count);
		offset = 0;
	} else {
		offset -= pkt->bpfp_header_length;
	}

	if (len == 0) {
		/* nothing past the header */
		return;
	}
	/* copy the packet */
	switch (pkt->bpfp_type) {
	case BPF_PACKET_TYPE_MBUF:
		bpf_mcopy(pkt->bpfp_mbuf, dst, len, offset);
		break;
#if SKYWALK
	case BPF_PACKET_TYPE_PKT:
		bpf_pktcopy(pkt->bpfp_pkt, dst, len, offset);
		break;
#endif /* SKYWALK */
	default:
		break;
	}
}

static void
copy_bpf_packet(struct bpf_packet *pkt, void *dst, size_t len)
{
	copy_bpf_packet_offset(pkt, dst, len, 0);
}

static uint32_t
get_esp_trunc_len(__unused struct bpf_packet *pkt, __unused uint32_t off,
    const uint32_t remaining_caplen)
{
	/*
	 * For some reason tcpdump expects to have one byte beyond the ESP header
	 */
	uint32_t trunc_len = ESP_HDR_SIZE + 1;

	if (trunc_len > remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_isakmp_trunc_len(__unused struct bpf_packet *pkt, __unused uint32_t off,
    const uint32_t remaining_caplen)
{
	/*
	 * Include the payload generic header
	 */
	uint32_t trunc_len = ISAKMP_HDR_SIZE;

	if (trunc_len > remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint32_t off,
    const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t trunc_len = 0;
	char payload[remaining_caplen];

	err = bpf_copydata(pkt, off, remaining_caplen, payload);
	if (err != 0) {
		return remaining_caplen;
	}
	/*
	 * There are three cases:
	 * - IKE: the payload starts with a 4-byte header of zeros before
	 *   the ISAKMP header
	 * - keep alive: 1 byte payload
	 * - otherwise it's ESP
	 */
	if (remaining_caplen >= 4 &&
	    payload[0] == 0 && payload[1] == 0 &&
	    payload[2] == 0 && payload[3] == 0) {
		trunc_len = 4 + get_isakmp_trunc_len(pkt, off + 4, remaining_caplen - 4);
	} else if (remaining_caplen == 1) {
		trunc_len = 1;
	} else {
		trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);
	}

	if (trunc_len > remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_udp_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t trunc_len = sizeof(struct udphdr); /* By default no UDP payload */

	if (trunc_len >= remaining_caplen) {
		return remaining_caplen;
	}

	struct udphdr udphdr;
	err = bpf_copydata(pkt, off, sizeof(struct udphdr), &udphdr);
	if (err != 0) {
		return remaining_caplen;
	}

	u_short sport, dport;

	sport = EXTRACT_SHORT(&udphdr.uh_sport);
	dport = EXTRACT_SHORT(&udphdr.uh_dport);

	if (dport == PORT_DNS || sport == PORT_DNS) {
		/*
		 * Full UDP payload for DNS
		 */
		trunc_len = remaining_caplen;
	} else if ((sport == PORT_BOOTPS && dport == PORT_BOOTPC) ||
	    (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) {
		/*
		 * Full UDP payload for BOOTP and DHCP
		 */
		trunc_len = remaining_caplen;
	} else if (dport == PORT_ISAKMP && sport == PORT_ISAKMP) {
		/*
		 * Return the ISAKMP header
		 */
		trunc_len += get_isakmp_trunc_len(pkt, off + sizeof(struct udphdr),
		    remaining_caplen - sizeof(struct udphdr));
	} else if (dport == PORT_ISAKMP_NATT && sport == PORT_ISAKMP_NATT) {
		trunc_len += get_isakmp_natt_trunc_len(pkt, off + sizeof(struct udphdr),
		    remaining_caplen - sizeof(struct udphdr));
	}
	if (trunc_len >= remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_tcp_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t trunc_len = sizeof(struct tcphdr); /* By default no TCP payload */

	if (trunc_len >= remaining_caplen) {
		return remaining_caplen;
	}

	struct tcphdr tcphdr;
	err = bpf_copydata(pkt, off, sizeof(struct tcphdr), &tcphdr);
	if (err != 0) {
		return remaining_caplen;
	}

	u_short sport, dport;
	sport = EXTRACT_SHORT(&tcphdr.th_sport);
	dport = EXTRACT_SHORT(&tcphdr.th_dport);

	if (dport == PORT_DNS || sport == PORT_DNS) {
		/*
		 * Full TCP payload for DNS
		 */
		trunc_len = remaining_caplen;
	} else {
		trunc_len = (uint16_t)(tcphdr.th_off << 2);
	}
	if (trunc_len >= remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_proto_trunc_len(uint8_t proto, struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	uint32_t trunc_len;

	switch (proto) {
	case IPPROTO_ICMP: {
		/*
		 * Full ICMP payload
		 */
		trunc_len = remaining_caplen;
		break;
	}
	case IPPROTO_ICMPV6: {
		/*
		 * Full ICMPv6 payload
		 */
		trunc_len = remaining_caplen;
		break;
	}
	case IPPROTO_IGMP: {
		/*
		 * Full IGMP payload
		 */
		trunc_len = remaining_caplen;
		break;
	}
	case IPPROTO_UDP: {
		trunc_len = get_udp_trunc_len(pkt, off, remaining_caplen);
		break;
	}
	case IPPROTO_TCP: {
		trunc_len = get_tcp_trunc_len(pkt, off, remaining_caplen);
		break;
	}
	case IPPROTO_ESP: {
		trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);
		break;
	}
	default: {
		/*
		 * By default we only include the IP header
		 */
		trunc_len = 0;
		break;
	}
	}
	if (trunc_len >= remaining_caplen) {
		return remaining_caplen;
	}

	return trunc_len;
}

static uint32_t
get_ip_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t iplen = sizeof(struct ip);

	if (iplen >= remaining_caplen) {
		return remaining_caplen;
	}

	struct ip iphdr;
	err = bpf_copydata(pkt, off, sizeof(struct ip), &iphdr);
	if (err != 0) {
		return remaining_caplen;
	}

	uint8_t proto = 0;

	iplen = (uint16_t)(iphdr.ip_hl << 2);
	if (iplen >= remaining_caplen) {
		return remaining_caplen;
	}

	proto = iphdr.ip_p;
	iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen);

	if (iplen >= remaining_caplen) {
		return remaining_caplen;
	}

	return iplen;
}

static uint32_t
get_ip6_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t iplen = sizeof(struct ip6_hdr);

	if (iplen >= remaining_caplen) {
		return remaining_caplen;
	}

	struct ip6_hdr ip6hdr;
	err = bpf_copydata(pkt, off, sizeof(struct ip6_hdr), &ip6hdr);
	if (err != 0) {
		return remaining_caplen;
	}

	uint8_t proto = 0;

	/*
	 * TBD: process the extension headers
	 */
	proto = ip6hdr.ip6_nxt;
	iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen);

	if (iplen >= remaining_caplen) {
		return remaining_caplen;
	}

	return iplen;
}

static uint32_t
get_ether_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
{
	int err = 0;
	uint32_t ethlen = sizeof(struct ether_header);

	if (ethlen >= remaining_caplen) {
		return remaining_caplen;
	}

	struct ether_header eh = {};
	err = bpf_copydata(pkt, off, sizeof(struct ether_header), &eh);
	if (err != 0) {
		return remaining_caplen;
	}

	u_short type = EXTRACT_SHORT(&eh.ether_type);
	/* Include full ARP */
	if (type == ETHERTYPE_ARP) {
		ethlen = remaining_caplen;
	} else if (type == ETHERTYPE_IP) {
		ethlen += get_ip_trunc_len(pkt, off + sizeof(struct ether_header),
		    remaining_caplen - ethlen);
	} else if (type == ETHERTYPE_IPV6) {
		ethlen += get_ip6_trunc_len(pkt, off + sizeof(struct ether_header),
		    remaining_caplen - ethlen);
	} else {
		ethlen = MIN(BPF_MIN_PKT_SIZE, remaining_caplen);
	}
	return ethlen;
}

static uint32_t
get_pkt_trunc_len(struct bpf_packet *pkt)
{
	struct pktap_header *pktap = (struct pktap_header *)(pkt->bpfp_header);
	uint32_t in_pkt_len = 0;
	uint32_t out_pkt_len = 0;
	uint32_t tlen = 0;
	uint32_t pre_adjust; // L2 header not in mbuf or kern_packet

	// bpfp_total_length must contain the BPF packet header
	assert3u(pkt->bpfp_total_length, >=, pkt->bpfp_header_length);

	// The BPF packet header must contain the pktap header
	assert3u(pkt->bpfp_header_length, >=, pktap->pth_length);

	// The pre frame length (L2 header) must be contained in the packet
	assert3u(pkt->bpfp_total_length, >=, pktap->pth_length + pktap->pth_frame_pre_length);

	/*
	 * pktap->pth_frame_pre_length is the L2 header length and accounts
	 * for both the L2 header in the packet payload and pre_adjust.
	 *
	 * pre_adjust represents an adjustment for a pseudo L2 header that is not
	 * part of the packet payload -- not in the mbuf or kern_packet -- and comes
	 * just after the pktap header.
	 *
	 * pktap->pth_length is the size of the pktap header (excludes pre_adjust)
	 *
	 * pkt->bpfp_header_length is (pktap->pth_length + pre_adjust)
	 */
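	/*
	 * Worked example with hypothetical numbers: if pth_length were 108
	 * and a 4-byte pseudo L2 header followed the pktap header, then
	 * bpfp_header_length would be 112 and pre_adjust 4; a
	 * pth_frame_pre_length of 18 would then leave 14 bytes of L2
	 * header inside the actual packet payload.
	 */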
	pre_adjust = (uint32_t)(pkt->bpfp_header_length - pktap->pth_length);

	if (pktap->pth_iftype == IFT_ETHER) {
		/*
		 * We need to parse the Ethernet header to find the network layer
		 * protocol
		 */
		in_pkt_len = (uint32_t)(pkt->bpfp_total_length - pktap->pth_length - pre_adjust);

		out_pkt_len = get_ether_trunc_len(pkt, 0, in_pkt_len);

		tlen = pktap->pth_length + pre_adjust + out_pkt_len;
	} else {
		/*
		 * For other interface types, we only know how to parse IPv4 and IPv6.
		 *
		 * To get to the beginning of the IPv4 or IPv6 packet, we need to skip
		 * over the L2 header that is the actual packet payload (mbuf or kern_packet)
		 */
		uint32_t off; // offset past the L2 header in the actual packet payload

		off = pktap->pth_frame_pre_length - pre_adjust;

		in_pkt_len = (uint32_t)(pkt->bpfp_total_length - pktap->pth_length - pktap->pth_frame_pre_length);

		if (pktap->pth_protocol_family == AF_INET) {
			out_pkt_len = get_ip_trunc_len(pkt, off, in_pkt_len);
		} else if (pktap->pth_protocol_family == AF_INET6) {
			out_pkt_len = get_ip6_trunc_len(pkt, off, in_pkt_len);
		} else {
			out_pkt_len = MIN(BPF_MIN_PKT_SIZE, in_pkt_len);
		}
		tlen = pktap->pth_length + pktap->pth_frame_pre_length + out_pkt_len;
	}

	// Verify we do not overflow the buffer
	if (__improbable(tlen > pkt->bpfp_total_length)) {
		bool do_panic = bpf_debug != 0 ? true : false;

#if DEBUG
		do_panic = true;
#endif /* DEBUG */
		if (do_panic) {
			panic("%s:%d tlen %u > bpfp_total_length %lu bpfp_header_length %lu pth_frame_pre_length %u pre_adjust %u in_pkt_len %u out_pkt_len %u",
			    __func__, __LINE__,
			    tlen, pkt->bpfp_total_length, pkt->bpfp_header_length, pktap->pth_frame_pre_length, pre_adjust, in_pkt_len, out_pkt_len);
		} else {
			os_log(OS_LOG_DEFAULT,
			    "%s:%d tlen %u > bpfp_total_length %lu bpfp_header_length %lu pth_frame_pre_length %u pre_adjust %u in_pkt_len %u out_pkt_len %u",
			    __func__, __LINE__,
			    tlen, pkt->bpfp_total_length, pkt->bpfp_header_length, pktap->pth_frame_pre_length, pre_adjust, in_pkt_len, out_pkt_len);
		}
		bpf_trunc_overflow += 1;
		tlen = (uint32_t)pkt->bpfp_total_length;
	}

	return tlen;
}

static uint8_t
get_common_prefix_size(const void *a, const void *b, uint8_t max_bytes)
{
	uint8_t max_words = max_bytes >> 2;
	const uint32_t *x = (const uint32_t *)a;
	const uint32_t *y = (const uint32_t *)b;
	uint8_t i;

	for (i = 0; i < max_words; i++) {
		if (x[i] != y[i]) {
			break;
		}
	}
	return (uint8_t)(i << 2);
}
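
/*
 * Example: for two captured headers that first differ at byte 22, the
 * loop compares 32-bit words, so the reported common prefix is 20 bytes
 * (rounded down to a word boundary by the final i << 2).
 */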

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer, waking up any pending reads if the buffers are
 * rotated or immediate-mode delivery applies.
 */
static void
catchpacket(struct bpf_d *d, struct bpf_packet *pkt,
    u_int snaplen, int outbound)
{
	struct bpf_hdr *hp;
	struct bpf_hdr_ext *ehp;
	uint32_t totlen, curlen;
	uint32_t hdrlen, caplen;
	int do_wakeup = 0;
	u_char *payload;
	struct timeval tv;

	hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen :
	    (d->bd_flags & BPF_COMP_REQ) ? d->bd_bif->bif_comphdrlen :
	    d->bd_bif->bif_hdrlen;
	/*
	 * Figure out how many bytes to move. If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much. Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + MIN(snaplen, (int)pkt->bpfp_total_length);
	if (totlen > d->bd_bufsize) {
		totlen = d->bd_bufsize;
	}

	if (hdrlen > totlen) {
		return;
	}

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 *
		 * We cannot rotate buffers if a read is in progress
		 * so drop the packet
		 */
		if (d->bd_hbuf_read) {
			++d->bd_dcount;
			return;
		}

		if (d->bd_fbuf == NULL) {
			if (d->bd_headdrop == 0) {
				/*
				 * We haven't completed the previous read yet,
				 * so drop the packet.
				 */
				++d->bd_dcount;
				return;
			}
			/*
			 * Drop the hold buffer as it contains older packets
			 */
			d->bd_dcount += d->bd_hcnt;
			d->bd_fbuf = d->bd_hbuf;
			ROTATE_BUFFERS(d);
		} else {
			ROTATE_BUFFERS(d);
		}
		do_wakeup = 1;
		curlen = 0;
	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
		/*
		 * Immediate mode is set, or the read timeout has
		 * already expired during a select call. A packet
		 * arrived, so the reader should be woken up.
		 */
		do_wakeup = 1;
	}

	/*
	 * Append the bpf header.
	 */
	microtime(&tv);
	if (d->bd_flags & BPF_EXTENDED_HDR) {
		ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen);
		memset(ehp, 0, sizeof(*ehp));
		ehp->bh_tstamp.tv_sec = (int)tv.tv_sec;
		ehp->bh_tstamp.tv_usec = tv.tv_usec;

		ehp->bh_datalen = (bpf_u_int32)pkt->bpfp_total_length;
		ehp->bh_hdrlen = (u_short)hdrlen;
		caplen = ehp->bh_caplen = totlen - hdrlen;
		payload = (u_char *)ehp + hdrlen;

		if (outbound) {
			ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;
		} else {
			ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
		}

		if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) {
			struct mbuf *m = pkt->bpfp_mbuf;

			if (outbound) {
				/* only do lookups on non-raw INPCB */
				if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID |
				    PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)) ==
				    (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC) &&
				    m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) {
					ehp->bh_flowid = m->m_pkthdr.pkt_flowid;
					if (m->m_pkthdr.pkt_proto == IPPROTO_TCP) {
						ehp->bh_flags |= BPF_HDR_EXT_FLAGS_TCP;
					} else if (m->m_pkthdr.pkt_proto == IPPROTO_UDP) {
						ehp->bh_flags |= BPF_HDR_EXT_FLAGS_UDP;
					}
				}
				ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc);
				if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) {
					ehp->bh_unsent_bytes =
					    m->m_pkthdr.bufstatus_if;
					ehp->bh_unsent_snd =
					    m->m_pkthdr.bufstatus_sndbuf;
				}
			} else {
				if (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_WAKE_PKT;
				}
			}
#if SKYWALK
		} else {
			kern_packet_t kern_pkt = pkt->bpfp_pkt;
			packet_flowid_t flowid = 0;

			if (outbound) {
				/*
				 * Note: pp_init() asserts that kern_packet_svc_class_t is equivalent
				 * to mbuf_svc_class_t
				 */
				ehp->bh_svc = so_svc2tc((mbuf_svc_class_t)kern_packet_get_service_class(kern_pkt));
				if (kern_packet_get_transport_retransmit(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT;
				}
				if (kern_packet_get_transport_last_packet(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT;
				}
			} else {
				if (kern_packet_get_wake_flag(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_WAKE_PKT;
				}
			}
			ehp->bh_trace_tag = kern_packet_get_trace_tag(kern_pkt);
			if (kern_packet_get_flowid(kern_pkt, &flowid) == 0) {
				ehp->bh_flowid = flowid;
			}
#endif /* SKYWALK */
		}
	} else {
		hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen);
		memset(hp, 0, BPF_WORDALIGN(sizeof(*hp)));
		hp->bh_tstamp.tv_sec = (int)tv.tv_sec;
		hp->bh_tstamp.tv_usec = tv.tv_usec;
		hp->bh_datalen = (bpf_u_int32)pkt->bpfp_total_length;
		hp->bh_hdrlen = (u_short)hdrlen;
		caplen = hp->bh_caplen = totlen - hdrlen;
		payload = (u_char *)hp + hdrlen;
	}
	if (d->bd_flags & BPF_COMP_REQ) {
		uint8_t common_prefix_size = 0;
		uint8_t copy_len = MIN((uint8_t)caplen, BPF_HDR_COMP_LEN_MAX);

		copy_bpf_packet(pkt, d->bd_prev_fbuf, copy_len);

		if (d->bd_prev_slen != 0) {
			common_prefix_size = get_common_prefix_size(d->bd_prev_fbuf,
			    d->bd_prev_sbuf, MIN(copy_len, d->bd_prev_slen));
		}

		if (d->bd_flags & BPF_COMP_ENABLED) {
			assert3u(caplen, >=, common_prefix_size);
			copy_bpf_packet_offset(pkt, payload, caplen - common_prefix_size,
			    common_prefix_size);
			d->bd_slen = curlen + totlen - common_prefix_size;
		} else {
			copy_bpf_packet(pkt, payload, caplen);
			d->bd_slen = curlen + totlen;
		}

		/*
		 * Update the caplen only if compression is enabled -- the caller
		 * must pay attention to bpf_hdr_comp_enable
		 */
		if (d->bd_flags & BPF_EXTENDED_HDR) {
			ehp->bh_complen = common_prefix_size;
			if (d->bd_flags & BPF_COMP_ENABLED) {
				ehp->bh_caplen -= common_prefix_size;
			}
		} else {
			struct bpf_comp_hdr *hcp;

			hcp = (struct bpf_comp_hdr *)(void *)(d->bd_sbuf + curlen);
			hcp->bh_complen = common_prefix_size;
			if (d->bd_flags & BPF_COMP_ENABLED) {
				hcp->bh_caplen -= common_prefix_size;
			}
		}

		if (common_prefix_size > 0) {
			d->bd_bcs.bcs_total_compressed_prefix_size += common_prefix_size;
			if (common_prefix_size > d->bd_bcs.bcs_max_compressed_prefix_size) {
				d->bd_bcs.bcs_max_compressed_prefix_size = common_prefix_size;
			}
			d->bd_bcs.bcs_count_compressed_prefix += 1;
		} else {
			d->bd_bcs.bcs_count_no_common_prefix += 1;
		}

		/* The current compression buffer becomes the previous one */
		caddr_t tmp = d->bd_prev_sbuf;
		d->bd_prev_sbuf = d->bd_prev_fbuf;
		d->bd_prev_slen = copy_len;
		d->bd_prev_fbuf = tmp;
	} else {
		/*
		 * Copy the packet data into the store buffer and update its length.
		 */
		copy_bpf_packet(pkt, payload, caplen);
		d->bd_slen = curlen + totlen;
	}
	d->bd_scnt += 1;
	d->bd_bcs.bcs_total_hdr_size += pkt->bpfp_header_length;
	d->bd_bcs.bcs_total_size += caplen;

	if (do_wakeup) {
		bpf_wakeup(d);
	}
}
4136
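/*
 * Release every buffer owned by the descriptor: the store, hold and
 * free buffers, plus the two scratch buffers used by header
 * compression.
 */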
static void
bpf_freebufs(struct bpf_d *d)
{
	if (d->bd_sbuf != NULL) {
		kfree_data_addr(d->bd_sbuf);
	}
	if (d->bd_hbuf != NULL) {
		kfree_data_addr(d->bd_hbuf);
	}
	if (d->bd_fbuf != NULL) {
		kfree_data_addr(d->bd_fbuf);
	}

	if (d->bd_prev_sbuf != NULL) {
		kfree_data_addr(d->bd_prev_sbuf);
	}
	if (d->bd_prev_fbuf != NULL) {
		kfree_data_addr(d->bd_prev_fbuf);
	}
}

/*
 * Allocate the capture buffers of a descriptor and reset its buffer state.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	bpf_freebufs(d);

	d->bd_fbuf = (caddr_t) kalloc_data(d->bd_bufsize, Z_WAITOK | Z_ZERO);
	if (d->bd_fbuf == NULL) {
		goto nobufs;
	}

	d->bd_sbuf = (caddr_t) kalloc_data(d->bd_bufsize, Z_WAITOK | Z_ZERO);
	if (d->bd_sbuf == NULL) {
		goto nobufs;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_scnt = 0;
	d->bd_hcnt = 0;

	d->bd_prev_slen = 0;
	if (d->bd_flags & BPF_COMP_REQ) {
		d->bd_prev_sbuf = (caddr_t) kalloc_data(BPF_HDR_COMP_LEN_MAX, Z_WAITOK | Z_ZERO);
		if (d->bd_prev_sbuf == NULL) {
			goto nobufs;
		}
		d->bd_prev_fbuf = (caddr_t) kalloc_data(BPF_HDR_COMP_LEN_MAX, Z_WAITOK | Z_ZERO);
		if (d->bd_prev_fbuf == NULL) {
			goto nobufs;
		}
	}
	return 0;
nobufs:
	bpf_freebufs(d);
	return ENOMEM;
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it has not yet been marked
	 * free.
	 */
	if (d->bd_hbuf_read || d->bd_hbuf_write) {
		panic("bpf buffer freed during read/write");
	}

	bpf_freebufs(d);

	if (d->bd_filter) {
		kfree_data_addr(d->bd_filter);
	}
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet
 * supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpf_attach(ifp, dlt, hdrlen, NULL, NULL);
}

errno_t
bpf_attach(
	ifnet_t ifp,
	u_int32_t dlt,
	u_int32_t hdrlen,
	bpf_send_func send,
	bpf_tap_func tap)
{
	struct bpf_if *bp;
	struct bpf_if *bp_new;
	struct bpf_if *bp_before_first = NULL;
	struct bpf_if *bp_first = NULL;
	struct bpf_if *bp_last = NULL;
	boolean_t found;

	/*
	 * Z_NOFAIL will cause a panic if the allocation fails
	 */
	bp_new = kalloc_type(struct bpf_if, Z_WAITOK | Z_NOFAIL | Z_ZERO);

	lck_mtx_lock(bpf_mlock);

	/*
	 * Check if this interface/dlt is already attached. Remember the
	 * first and last attachment for this interface, as well as the
	 * element before the first attachment.
	 */
	found = FALSE;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			if (bp_first != NULL) {
				/* no more elements for this interface */
				break;
			}
			bp_before_first = bp;
		} else {
			if (bp->bif_dlt == dlt) {
				found = TRUE;
				break;
			}
			if (bp_first == NULL) {
				bp_first = bp;
			}
			bp_last = bp;
		}
	}
	if (found) {
		lck_mtx_unlock(bpf_mlock);
		os_log_error(OS_LOG_DEFAULT,
		    "bpfattach - %s with dlt %d is already attached",
		    if_name(ifp), dlt);
		kfree_type(struct bpf_if, bp_new);
		return EEXIST;
	}

	bp_new->bif_ifp = ifp;
	bp_new->bif_dlt = dlt;
	bp_new->bif_send = send;
	bp_new->bif_tap = tap;

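	/*
	 * Insert the new attachment, keeping all entries for an interface
	 * contiguous and, for Ethernet, keeping DLT_EN10MB at the head of
	 * the interface's run so that it is found first when a descriptor
	 * selects that interface.
	 */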
	if (bp_first == NULL) {
		/* No other entries for this ifp */
		bp_new->bif_next = bpf_iflist;
		bpf_iflist = bp_new;
	} else {
		if (ifnet_type(ifp) == IFT_ETHER && dlt == DLT_EN10MB) {
			/* Make this the first entry for this interface */
			if (bp_before_first != NULL) {
				/* point the previous to us */
				bp_before_first->bif_next = bp_new;
			} else {
				/* we're the new head */
				bpf_iflist = bp_new;
			}
			bp_new->bif_next = bp_first;
		} else {
			/* Add this after the last entry for this interface */
			bp_new->bif_next = bp_last->bif_next;
			bp_last->bif_next = bp_new;
		}
	}

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
	bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen +
	    sizeof(struct bpf_hdr_ext)) - hdrlen;
	bp_new->bif_comphdrlen = BPF_WORDALIGN(hdrlen +
	    sizeof(struct bpf_comp_hdr)) - hdrlen;
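	/*
	 * For example, with a 14-byte Ethernet header the pad is chosen so
	 * that hdrlen + bif_hdrlen is a multiple of BPF_ALIGNMENT, which in
	 * turn puts the first byte past the link-layer header -- the start
	 * of the network-layer header -- on a word boundary.
	 */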

	/* Take a reference on the interface */
	ifnet_reference(ifp);

	lck_mtx_unlock(bpf_mlock);

	return 0;
}

/*
 * Detach bpf from an interface.  This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL.  Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_prev, *bp_next;
	struct bpf_d *d;

	if (bpf_debug != 0) {
		os_log(OS_LOG_DEFAULT, "%s: %s", __func__, if_name(ifp));
	}

	lck_mtx_lock(bpf_mlock);

	/*
	 * Detach every device attached to this interface while holding
	 * the lock, to preserve the integrity of the interface list.
	 */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
		bp_next = bp->bif_next;

		if (ifp != bp->bif_ifp) {
			bp_prev = bp;
			continue;
		}
		/* Unlink from the interface list */
		if (bp_prev) {
			bp_prev->bif_next = bp->bif_next;
		} else {
			bpf_iflist = bp->bif_next;
		}

		/* Detach the devices attached to the interface */
		while ((d = bp->bif_dlist) != NULL) {
			/*
			 * Take an extra reference to prevent the device
			 * from being freed when bpf_detachd() releases
			 * the reference for the interface list
			 */
			bpf_acquire_d(d);

			/*
			 * Wait for active reads and writes to complete
			 */
			while (d->bd_hbuf_read || d->bd_hbuf_write) {
				msleep((caddr_t)d, bpf_mlock, PRINET, "bpfdetach", NULL);
			}

			bpf_detachd(d);
			bpf_wakeup(d);
			bpf_release_d(d);
		}
		ifnet_release(ifp);
	}

	lck_mtx_unlock(bpf_mlock);
}

void
bpf_init(__unused void *unused)
{
	int maj;

	/* bpf_comp_hdr is an overlay of bpf_hdr */
	_CASSERT(BPF_WORDALIGN(sizeof(struct bpf_hdr)) ==
	    BPF_WORDALIGN(sizeof(struct bpf_comp_hdr)));

	/* the compressed prefix length must fit in a byte */
	_CASSERT(BPF_HDR_COMP_LEN_MAX <= UCHAR_MAX);

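	/*
	 * The compiled-in default for header compression may be overridden
	 * at boot via the "bpf_hdr_comp" boot-arg.
	 */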
	(void) PE_parse_boot_argn("bpf_hdr_comp", &bpf_hdr_comp_enable,
	    sizeof(bpf_hdr_comp_enable));

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			bpf_devsw_installed = 0;
			os_log_error(OS_LOG_DEFAULT,
			    "bpf_init: failed to allocate a major number");
			return;
		}

		for (int i = 0; i < NBPFILTER; i++) {
			bpf_make_dev_t(maj);
		}
	}
}

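/*
 * Sysctl handler for the maximum buffer size a descriptor may request.
 * Requested values that are negative or above BPF_BUFSIZE_CAP are
 * clamped to the cap rather than rejected (this knob is typically
 * exposed as debug.bpf_maxbufsize; the registration lives elsewhere).
 */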
static int
sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = bpf_maxbufsize;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL) {
		return err;
	}

	if (i < 0 || i > BPF_BUFSIZE_CAP) {
		i = BPF_BUFSIZE_CAP;
	}

	bpf_maxbufsize = i;
	return err;
}

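/*
 * Sysctl handler reporting the compile-time buffer size cap.  It is
 * effectively read-only: a written value is parsed and then discarded.
 */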
static int
sysctl_bpf_bufsize_cap SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = BPF_BUFSIZE_CAP;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL) {
		return err;
	}

	return err;
}

/*
 * Fill filter statistics
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{
	LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);

	d->bd_structsize = sizeof(struct xbpf_d);
	d->bd_promisc = bd->bd_promisc != 0 ? 1 : 0;
	d->bd_immediate = bd->bd_immediate != 0 ? 1 : 0;
	d->bd_hdrcmplt = bd->bd_hdrcmplt != 0 ? 1 : 0;
	d->bd_async = bd->bd_async != 0 ? 1 : 0;
	d->bd_headdrop = bd->bd_headdrop != 0 ? 1 : 0;
	d->bd_direction = (uint8_t)bd->bd_direction;
	d->bh_compreq = bd->bd_flags & BPF_COMP_REQ ? 1 : 0;
	d->bh_compenabled = bd->bd_flags & BPF_COMP_ENABLED ? 1 : 0;
	d->bd_exthdr = bd->bd_flags & BPF_EXTENDED_HDR ? 1 : 0;
	d->bd_trunc = bd->bd_flags & BPF_TRUNCATE ? 1 : 0;
	d->bd_pkthdrv2 = bd->bd_flags & BPF_PKTHDRV2 ? 1 : 0;

	d->bd_dev_minor = (uint8_t)bd->bd_dev_minor;

	d->bd_sig = bd->bd_sig;

	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	if (bd->bd_bif != NULL && bd->bd_bif->bif_ifp != NULL) {
		strlcpy(d->bd_ifname,
		    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	}

	d->bd_comp_count = bd->bd_bcs.bcs_count_compressed_prefix;
	d->bd_comp_size = bd->bd_bcs.bcs_total_compressed_prefix_size;

	d->bd_scnt = bd->bd_scnt;
	d->bd_hcnt = bd->bd_hcnt;

	d->bd_read_count = bd->bd_bcs.bcs_total_read;
	d->bd_fsize = bd->bd_bcs.bcs_total_size;
}

/*
 * Handle `netstat -B' stats request
 */
static int
sysctl_bpf_stats SYSCTL_HANDLER_ARGS
{
	int error;
	struct xbpf_d *xbdbuf;
	unsigned int x_cnt;
	vm_size_t buf_size;

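	/*
	 * A NULL oldptr is a sizing probe: report how much space all
	 * current devices would need.
	 */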
	if (req->oldptr == USER_ADDR_NULL) {
		return SYSCTL_OUT(req, 0, nbpfilter * sizeof(struct xbpf_d));
	}
	if (nbpfilter == 0) {
		return SYSCTL_OUT(req, 0, 0);
	}
	buf_size = req->oldlen;
	if (buf_size > BPF_MAX_DEVICES * sizeof(struct xbpf_d)) {
		buf_size = BPF_MAX_DEVICES * sizeof(struct xbpf_d);
	}
	xbdbuf = kalloc_data(buf_size, Z_WAITOK | Z_ZERO);

	lck_mtx_lock(bpf_mlock);
	if (buf_size < (nbpfilter * sizeof(struct xbpf_d))) {
		lck_mtx_unlock(bpf_mlock);
		kfree_data(xbdbuf, buf_size);
		return ENOMEM;
	}
	x_cnt = 0;
	unsigned int i;

	for (i = 0; i < nbpfilter; i++) {
		struct bpf_d *bd = bpf_dtab[i];
		struct xbpf_d *xbd;

		if (bd == NULL || bd == BPF_DEV_RESERVED ||
		    (bd->bd_flags & BPF_CLOSING) != 0) {
			continue;
		}
		VERIFY(x_cnt < nbpfilter);

		xbd = &xbdbuf[x_cnt++];
		bpfstats_fill_xbpf(xbd, bd);
	}
	lck_mtx_unlock(bpf_mlock);

	error = SYSCTL_OUT(req, xbdbuf, x_cnt * sizeof(struct xbpf_d));
	kfree_data(xbdbuf, buf_size);
	return error;
}