/*
 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California. All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 *
 * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.5 2001/01/05 04:49:09 jdp Exp $
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
 * support for mandatory and extensible security protections. This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include "bpf.h"

#ifndef __GNUC__
#define inline
#else
#define inline __inline
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/conf.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/time.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/filio.h>
#include <sys/sockio.h>
#include <sys/ttycom.h>
#include <sys/filedesc.h>
#include <sys/uio_internal.h>
#include <sys/file_internal.h>
#include <sys/event.h>

#include <sys/poll.h>

#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/vnode.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/in_pcb.h>
#include <netinet/in_var.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/if_ether.h>
#include <netinet/isakmp.h>
#include <netinet6/esp.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <net/firewire.h>

#include <miscfs/devfs/devfs.h>
#include <net/dlil.h>
#include <net/pktap.h>

#include <net/sockaddr_utils.h>

#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/thread_call.h>
#include <libkern/section_keywords.h>

#include <os/log.h>

#include <IOKit/IOBSD.h>


extern int tvtohz(struct timeval *);
extern char *proc_name_address(void *p);

#define BPF_BUFSIZE 4096

#define PRINET 26	/* interruptible */

#define ISAKMP_HDR_SIZE (sizeof(struct isakmp) + sizeof(struct isakmp_gen))
#define ESP_HDR_SIZE sizeof(struct newesp)

#define BPF_WRITE_LEEWAY 18	/* space for link layer header */

#define BPF_WRITE_MAX 0x1000000	/* 16 MB arbitrary value */

typedef void (*pktcopyfunc_t)(const void *, void *, size_t);
/*
 * The default read buffer size is patchable.
 */
static unsigned int bpf_bufsize = BPF_BUFSIZE;
SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW | CTLFLAG_LOCKED,
    &bpf_bufsize, 0, "");

__private_extern__ unsigned int bpf_maxbufsize = BPF_MAXBUFSIZE;
static int sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_debug, OID_AUTO, bpf_maxbufsize, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &bpf_maxbufsize, 0,
    sysctl_bpf_maxbufsize, "I", "Default BPF max buffer size");

extern const int copysize_limit_panic;
#define BPF_BUFSIZE_CAP (copysize_limit_panic >> 1)
static int sysctl_bpf_bufsize_cap SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_debug, OID_AUTO, bpf_bufsize_cap, CTLTYPE_INT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0,
    sysctl_bpf_bufsize_cap, "I", "Upper limit on BPF max buffer size");

#define BPF_MAX_DEVICES 256
static unsigned int bpf_maxdevices = BPF_MAX_DEVICES;
SYSCTL_UINT(_debug, OID_AUTO, bpf_maxdevices, CTLFLAG_RD | CTLFLAG_LOCKED,
    &bpf_maxdevices, 0, "");

/*
 * bpf_wantpktap controls the default visibility of DLT_PKTAP.
 * On macOS it is off by default, so a process needs to use the ioctl
 * BPF_WANT_PKTAP explicitly to be able to use DLT_PKTAP.
 */
#if !XNU_TARGET_OS_OSX
static unsigned int bpf_wantpktap = 1;
#else /* XNU_TARGET_OS_OSX */
static unsigned int bpf_wantpktap = 0;
#endif /* XNU_TARGET_OS_OSX */
SYSCTL_UINT(_debug, OID_AUTO, bpf_wantpktap, CTLFLAG_RW | CTLFLAG_LOCKED,
    &bpf_wantpktap, 0, "");

static int bpf_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, bpf_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &bpf_debug, 0, "");

static unsigned long bpf_trunc_overflow = 0;
SYSCTL_ULONG(_debug, OID_AUTO, bpf_trunc_overflow, CTLFLAG_RD | CTLFLAG_LOCKED,
    &bpf_trunc_overflow, "");

static int bpf_hdr_comp_enable = 1;
SYSCTL_INT(_debug, OID_AUTO, bpf_hdr_comp_enable, CTLFLAG_RW | CTLFLAG_LOCKED,
    &bpf_hdr_comp_enable, 1, "");

static int sysctl_bpf_stats SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_debug, OID_AUTO, bpf_stats, CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0,
    sysctl_bpf_stats, "S", "BPF statistics");

/*
 * bpf_iflist is the list of interfaces; each corresponds to an ifnet
 * bpf_dtab holds pointers to the descriptors, indexed by minor device #
 */
static struct bpf_if *bpf_iflist;
/*
 * BSD now stores the bpf_d in the dev_t which is a struct
 * on their system. Our dev_t is an int, so we still store
 * the bpf_d in a separate table indexed by minor device #.
 *
 * The value stored in bpf_dtab[n] represents three states:
 *  NULL: device not opened
 *  BPF_DEV_RESERVED: device opening or closing
 *  other: device <n> opened with pointer to storage
 */
#define BPF_DEV_RESERVED ((struct bpf_d *)(uintptr_t)1)
static struct bpf_d **bpf_dtab = NULL;
static unsigned int bpf_dtab_size = 0;
static unsigned int nbpfilter = 0;
static unsigned bpf_bpfd_cnt = 0;

static LCK_GRP_DECLARE(bpf_mlock_grp, "bpf");
static LCK_MTX_DECLARE(bpf_mlock_data, &bpf_mlock_grp);
static lck_mtx_t *const bpf_mlock = &bpf_mlock_data;

static int bpf_allocbufs(struct bpf_d *);
static errno_t bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
static int bpf_detachd(struct bpf_d *d);
static void bpf_freed(struct bpf_d *);
static int bpf_setif(struct bpf_d *, ifnet_t ifp, bool, bool, bool);
static void bpf_timed_out(void *, void *);
static void bpf_wakeup(struct bpf_d *);
static uint32_t get_pkt_trunc_len(struct bpf_packet *);
static void catchpacket(struct bpf_d *, struct bpf_packet *, u_int, int);
static void reset_d(struct bpf_d *);
static int bpf_setf(struct bpf_d *, u_int, user_addr_t, u_long);
static int bpf_getdltlist(struct bpf_d *, caddr_t, struct proc *);
static int bpf_setdlt(struct bpf_d *, u_int);
static int bpf_set_traffic_class(struct bpf_d *, int);
static void bpf_set_packet_service_class(struct mbuf *, int);

static void bpf_acquire_d(struct bpf_d *);
static void bpf_release_d(struct bpf_d *);

static int bpf_devsw_installed;

void bpf_init(void *unused);
static int bpf_tap_callback(struct ifnet *ifp, struct mbuf *m);

/*
 * Darwin differs from BSD here, the following are static
 * on BSD and not static on Darwin.
 */
d_open_t bpfopen;
d_close_t bpfclose;
d_read_t bpfread;
d_write_t bpfwrite;
ioctl_fcn_t bpfioctl;
select_fcn_t bpfselect;

/* Darwin's cdevsw struct differs slightly from BSDs */
#define CDEV_MAJOR 23
static const struct cdevsw bpf_cdevsw = {
	.d_open = bpfopen,
	.d_close = bpfclose,
	.d_read = bpfread,
	.d_write = bpfwrite,
	.d_ioctl = bpfioctl,
	.d_stop = eno_stop,
	.d_reset = eno_reset,
	.d_ttys = NULL,
	.d_select = bpfselect,
	.d_mmap = eno_mmap,
	.d_strategy = eno_strat,
	.d_reserved_1 = eno_getc,
	.d_reserved_2 = eno_putc,
	.d_type = 0
};

#define SOCKADDR_HDR_LEN offsetof(struct sockaddr, sa_data)

static int
bpf_copy_uio_to_mbuf_packet(struct uio *auio, int bytes_to_copy, struct mbuf *top)
{
	int error = 0;

	for (struct mbuf *m = top; m != NULL; m = m->m_next) {
		int mlen;

		if (m->m_flags & M_EXT) {
			mlen = m->m_ext.ext_size - (int)M_LEADINGSPACE(m);
		} else if (m->m_flags & M_PKTHDR) {
			mlen = MHLEN - (int)M_LEADINGSPACE(m);
		} else {
			mlen = MLEN - (int)M_LEADINGSPACE(m);
		}
		int copy_len = imin((int)mlen, bytes_to_copy);

		error = uiomove(mtod(m, caddr_t), (int)copy_len, auio);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "bpf_copy_uio_to_mbuf_packet: len %d error %d",
			    copy_len, error);
			goto done;
		}
		m->m_len = copy_len;
		top->m_pkthdr.len += copy_len;

		if (bytes_to_copy > copy_len) {
			bytes_to_copy -= copy_len;
		} else {
			break;
		}
	}
done:
	return error;
}

static int
bpf_movein(struct uio *uio, int copy_len, struct bpf_d *d, struct mbuf **mp,
    struct sockaddr *sockp)
{
	struct mbuf *m = NULL;
	int error;
	int len;
	uint8_t sa_family;
	int hlen = 0;
	struct ifnet *ifp = d->bd_bif->bif_ifp;
	int linktype = (int)d->bd_bif->bif_dlt;

	switch (linktype) {
#if SLIP
	case DLT_SLIP:
		sa_family = AF_INET;
		hlen = 0;
		break;
#endif /* SLIP */

	case DLT_EN10MB:
		sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = sizeof(struct ether_header);
		break;

#if FDDI
	case DLT_FDDI:
#if defined(__FreeBSD__) || defined(__bsdi__)
		sa_family = AF_IMPLINK;
		hlen = 0;
#else
		sa_family = AF_UNSPEC;
		/* XXX 4(FORMAC)+6(dst)+6(src)+3(LLC)+5(SNAP) */
		hlen = 24;
#endif
		break;
#endif /* FDDI */

	case DLT_RAW:
	case DLT_NULL:
		sa_family = AF_UNSPEC;
		hlen = 0;
		break;

#ifdef __FreeBSD__
	case DLT_ATM_RFC1483:
		/*
		 * en atm driver requires 4-byte atm pseudo header.
		 * though it isn't standard, vpi:vci needs to be
		 * specified anyway.
		 */
		sa_family = AF_UNSPEC;
		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
		break;
#endif

	case DLT_PPP:
		sa_family = AF_UNSPEC;
		hlen = 4;	/* This should match PPP_HDRLEN */
		break;

	case DLT_APPLE_IP_OVER_IEEE1394:
		sa_family = AF_UNSPEC;
		hlen = sizeof(struct firewire_header);
		break;

	case DLT_IEEE802_11:	/* IEEE 802.11 wireless */
		sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	case DLT_IEEE802_11_RADIO:
		sa_family = AF_IEEE80211;
		hlen = 0;
		break;

	default:
		return EIO;
	}

	if (sockp) {
		/*
		 * Build a sockaddr based on the data link layer type.
		 * We do this at this level because the ethernet header
		 * is copied directly into the data field of the sockaddr.
		 * In the case of SLIP, there is no header and the packet
		 * is forwarded as is.
		 * Also, we are careful to leave room at the front of the mbuf
		 * for the link level header.
		 */
		if ((hlen + SOCKADDR_HDR_LEN) > sockp->sa_len) {
			return EIO;
		}
		sockp->sa_family = sa_family;
	} else {
		/*
		 * We're directly sending the packet data supplied by
		 * the user; we don't need to make room for the link
		 * header, and don't need the header length value any
		 * more, so set it to 0.
		 */
		hlen = 0;
	}

	len = (int)uio_resid(uio);
	if (len < copy_len) {
		os_log(OS_LOG_DEFAULT, "bpfwrite: len %d if %s less than copy_len %d",
		    (unsigned)len, ifp->if_xname, copy_len);
		return EMSGSIZE;
	}
	len = copy_len;
	if (len < hlen || (unsigned)len > BPF_WRITE_MAX) {
		os_log(OS_LOG_DEFAULT, "bpfwrite: bad len %d if %s",
		    (unsigned)len, ifp->if_xname);
		return EMSGSIZE;
	}
	if (d->bd_write_size_max != 0) {
		if ((len - hlen) > (d->bd_write_size_max + BPF_WRITE_LEEWAY)) {
			os_log(OS_LOG_DEFAULT, "bpfwrite: len %u - hlen %u too big if %s write_size_max %u",
			    (unsigned)len, (unsigned)hlen, ifp->if_xname, d->bd_write_size_max);
		}
	} else if ((len - hlen) > (ifp->if_mtu + BPF_WRITE_LEEWAY)) {
		os_log(OS_LOG_DEFAULT, "bpfwrite: len %u - hlen %u too big if %s mtu %u",
		    (unsigned)len, (unsigned)hlen, ifp->if_xname, ifp->if_mtu);
		return EMSGSIZE;
	}

	/* drop lock while allocating mbuf and copying data */
	lck_mtx_unlock(bpf_mlock);

	error = mbuf_allocpacket(MBUF_WAITOK, len, NULL, &m);
	if (error != 0) {
		os_log(OS_LOG_DEFAULT,
		    "bpfwrite mbuf_allocpacket len %d error %d", len, error);
		goto bad;
	}
	/*
	 * Make room for link header -- the packet length is 0 at this stage
	 */
	if (hlen != 0) {
		m->m_data += hlen;	/* leading space */
		error = uiomove((caddr_t)sockp->sa_data, hlen, uio);
		if (error) {
			os_log(OS_LOG_DEFAULT,
			    "bpfwrite uiomove hlen %d error %d", hlen, error);
			goto bad;
		}
		len -= hlen;
	}
	/*
	 * bpf_copy_uio_to_mbuf_packet() does set the length of each mbuf and adds it to
	 * the total packet length
	 */
	error = bpf_copy_uio_to_mbuf_packet(uio, len, m);
	if (error != 0) {
		os_log(OS_LOG_DEFAULT,
		    "bpfwrite bpf_copy_uio_to_mbuf_packet error %d", error);
		goto bad;
	}

	/* Check for multicast destination */
	switch (linktype) {
	case DLT_EN10MB: {
		struct ether_header *eh;

		eh = mtod(m, struct ether_header *);
		if (ETHER_IS_MULTICAST(eh->ether_dhost)) {
			if (_ether_cmp(etherbroadcastaddr,
			    eh->ether_dhost) == 0) {
				m->m_flags |= M_BCAST;
			} else {
				m->m_flags |= M_MCAST;
			}
		}
		break;
	}
	}
	*mp = m;

	lck_mtx_lock(bpf_mlock);
	return 0;
bad:
	if (m != NULL) {
		m_freem(m);
	}
	lck_mtx_lock(bpf_mlock);
	return error;
}

static int
bpf_movein_batch(struct uio *uio, struct bpf_d *d, struct mbuf **mp,
    struct sockaddr *sockp)
{
	int error = 0;
	user_ssize_t resid;
	int count = 0;
	struct mbuf *last = NULL;

	*mp = NULL;
	while ((resid = uio_resid(uio)) >= sizeof(struct bpf_hdr)) {
		struct bpf_hdr bpfhdr = {};
		int bpf_hdr_min_len = offsetof(struct bpf_hdr, bh_hdrlen) + sizeof(bpfhdr.bh_hdrlen);
		int padding_len;

		error = uiomove((caddr_t)&bpfhdr, bpf_hdr_min_len, uio);
		if (error != 0) {
			os_log(OS_LOG_DEFAULT, "bpf_movein_batch uiomove error %d", error);
			break;
		}
		/*
		 * Buffer validation:
		 * - ignore bh_tstamp
		 * - bh_hdrlen must fit
		 * - bh_caplen and bh_datalen must be equal
		 */
		if (bpfhdr.bh_hdrlen < bpf_hdr_min_len) {
			error = EINVAL;
			os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_hdrlen %u too small",
			    bpfhdr.bh_hdrlen);
			break;
		}
		if (bpfhdr.bh_caplen != bpfhdr.bh_datalen) {
			error = EINVAL;
			os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_caplen %u != bh_datalen %u",
			    bpfhdr.bh_caplen, bpfhdr.bh_datalen);
			break;
		}
		if (bpfhdr.bh_hdrlen > resid) {
			error = EINVAL;
			os_log(OS_LOG_DEFAULT, "bpf_movein_batch bh_hdrlen %u too large",
			    bpfhdr.bh_hdrlen);
			break;
		}

		/*
		 * Ignore additional bytes in the header
		 */
		padding_len = bpfhdr.bh_hdrlen - bpf_hdr_min_len;
		if (padding_len > 0) {
			uio_update(uio, padding_len);
		}

		/* skip empty packets */
		if (bpfhdr.bh_caplen > 0) {
			struct mbuf *m;

			/*
			 * For time being assume all packets have same destination
			 */
			error = bpf_movein(uio, bpfhdr.bh_caplen, d, &m, sockp);
			if (error != 0) {
				os_log(OS_LOG_DEFAULT, "bpf_movein_batch bpf_movein error %d",
				    error);
				break;
			}
			count += 1;

			if (last == NULL) {
				*mp = m;
			} else {
				last->m_nextpkt = m;
			}
			last = m;
		}

		/*
		 * Each BPF packet is padded for alignment
		 */
		padding_len = BPF_WORDALIGN(bpfhdr.bh_hdrlen + bpfhdr.bh_caplen) - (bpfhdr.bh_hdrlen + bpfhdr.bh_caplen);
		if (padding_len > 0) {
			uio_update(uio, padding_len);
		}
	}

	if (error != 0) {
		if (*mp != NULL) {
			m_freem_list(*mp);
			*mp = NULL;
		}
	}
	return error;
}
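
/*
 * Illustrative sketch (not part of the build): a batch write is a
 * sequence of BPF_WORDALIGN'ed records, each a struct bpf_hdr with
 * bh_caplen == bh_datalen immediately followed by the packet bytes;
 * bh_tstamp is ignored and any bytes between the minimal header and
 * bh_hdrlen are skipped as padding. A hypothetical userland writer
 * (pkt[], pkt_len[] and bpf_fd are invented names) could lay out two
 * packets like this, assuming the BIOCSBATCHWRITE mode is enabled:
 *
 *	char buf[2048], *p = buf;
 *	for (int i = 0; i < 2; i++) {
 *		struct bpf_hdr h = {};
 *		h.bh_hdrlen = sizeof(h);
 *		h.bh_caplen = h.bh_datalen = pkt_len[i];
 *		memcpy(p, &h, sizeof(h));
 *		memcpy(p + sizeof(h), pkt[i], pkt_len[i]);
 *		p += BPF_WORDALIGN(sizeof(h) + pkt_len[i]);
 *	}
 *	write(bpf_fd, buf, p - buf);
 */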

/*
 * The dynamic addition of a new device node must block all processes that
 * are opening the last device so that no process will get an unexpected
 * ENOENT
 */
static void
bpf_make_dev_t(int maj)
{
	static int bpf_growing = 0;
	unsigned int cur_size = nbpfilter, i;

	if (nbpfilter >= BPF_MAX_DEVICES) {
		return;
	}

	while (bpf_growing) {
		/* Wait until new device has been created */
		(void) tsleep((caddr_t)&bpf_growing, PZERO, "bpf_growing", 0);
	}
	if (nbpfilter > cur_size) {
		/* other thread grew it already */
		return;
	}
	bpf_growing = 1;

	/* need to grow bpf_dtab first */
	if (nbpfilter == bpf_dtab_size) {
		unsigned int new_dtab_size;
		struct bpf_d **new_dtab = NULL;

		new_dtab_size = bpf_dtab_size + NBPFILTER;
		new_dtab = krealloc_type(struct bpf_d *,
		    bpf_dtab_size, new_dtab_size, bpf_dtab, Z_WAITOK | Z_ZERO);
		if (new_dtab == 0) {
			os_log_error(OS_LOG_DEFAULT, "bpf_make_dev_t: malloc bpf_dtab failed");
			goto done;
		}
		bpf_dtab = new_dtab;
		bpf_dtab_size = new_dtab_size;
	}
	i = nbpfilter++;
	(void) devfs_make_node(makedev(maj, i),
	    DEVFS_CHAR, UID_ROOT, GID_WHEEL, 0600,
	    "bpf%d", i);
done:
	bpf_growing = 0;
	wakeup((caddr_t)&bpf_growing);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
static errno_t
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	int first = bp->bif_dlist == NULL;
	int error = 0;

	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */
	d->bd_bif = bp;
	d->bd_next = bp->bif_dlist;
	bp->bif_dlist = d;
	bpf_bpfd_cnt++;

	/*
	 * Take a reference on the device even if an error is returned
	 * because we keep the device in the interface's list of listeners
	 */
	bpf_acquire_d(d);

	if (first) {
		/* Find the default bpf entry for this ifp */
		if (bp->bif_ifp->if_bpf == NULL) {
			struct bpf_if *tmp, *primary = NULL;

			for (tmp = bpf_iflist; tmp; tmp = tmp->bif_next) {
				if (tmp->bif_ifp == bp->bif_ifp) {
					primary = tmp;
					break;
				}
			}
			bp->bif_ifp->if_bpf = primary;
		}
		/* Only call dlil_set_bpf_tap for primary dlt */
		if (bp->bif_ifp->if_bpf == bp) {
			dlil_set_bpf_tap(bp->bif_ifp, BPF_TAP_INPUT_OUTPUT,
			    bpf_tap_callback);
		}

		if (bp->bif_tap != NULL) {
			error = bp->bif_tap(bp->bif_ifp, bp->bif_dlt,
			    BPF_TAP_INPUT_OUTPUT);
		}
	}

	/*
	 * Reset the detach flags in case we previously detached an interface
	 */
	d->bd_flags &= ~(BPF_DETACHING | BPF_DETACHED);

	if (bp->bif_dlt == DLT_PKTAP) {
		d->bd_flags |= BPF_FINALIZE_PKTAP;
	} else {
		d->bd_flags &= ~BPF_FINALIZE_PKTAP;
	}
	return error;
}

/*
 * Detach a file from its interface.
 *
 * Return 1 if it was closed by some thread, 0 otherwise
 */
static int
bpf_detachd(struct bpf_d *d)
{
	struct bpf_d **p;
	struct bpf_if *bp;
	struct ifnet *ifp;
	uint32_t dlt;
	bpf_tap_func disable_tap;
	uint8_t bd_promisc;

	int bpf_closed = d->bd_flags & BPF_CLOSING;
	/*
	 * Some other thread already detached
	 */
	if ((d->bd_flags & (BPF_DETACHED | BPF_DETACHING)) != 0) {
		goto done;
	}
	/*
	 * This thread is doing the detach
	 */
	d->bd_flags |= BPF_DETACHING;

	ifp = d->bd_bif->bif_ifp;
	bp = d->bd_bif;

	/* Remove d from the interface's descriptor list. */
	p = &bp->bif_dlist;
	while (*p != d) {
		p = &(*p)->bd_next;
		if (*p == 0) {
			panic("bpf_detachd: descriptor not in list");
		}
	}
	*p = (*p)->bd_next;
	bpf_bpfd_cnt--;
	disable_tap = NULL;
	if (bp->bif_dlist == 0) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		/* Only call dlil_set_bpf_tap for primary dlt */
		if (bp->bif_ifp->if_bpf == bp) {
			dlil_set_bpf_tap(ifp, BPF_TAP_DISABLE, NULL);
		}

		disable_tap = bp->bif_tap;
		if (disable_tap) {
			dlt = bp->bif_dlt;
		}

		for (bp = bpf_iflist; bp; bp = bp->bif_next) {
			if (bp->bif_ifp == ifp && bp->bif_dlist != 0) {
				break;
			}
		}
		if (bp == NULL) {
			ifp->if_bpf = NULL;
		}
	}
	d->bd_bif = NULL;
	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	bd_promisc = d->bd_promisc;
	d->bd_promisc = 0;

	lck_mtx_unlock(bpf_mlock);
	if (bd_promisc) {
		if (ifnet_set_promiscuous(ifp, 0)) {
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 * Most likely the network interface is gone.
			 */
			os_log_error(OS_LOG_DEFAULT,
			    "%s: bpf%d ifnet_set_promiscuous %s failed",
			    __func__, d->bd_dev_minor, if_name(ifp));
		}
	}

	if (disable_tap) {
		disable_tap(ifp, dlt, BPF_TAP_DISABLE);
	}
	lck_mtx_lock(bpf_mlock);

	/*
	 * Wake up other threads that are waiting for this thread to finish
	 * detaching
	 */
	d->bd_flags &= ~BPF_DETACHING;
	d->bd_flags |= BPF_DETACHED;

	/* Refresh the local variable as d could have been modified */
	bpf_closed = d->bd_flags & BPF_CLOSING;

	os_log(OS_LOG_DEFAULT, "bpf%d%s detached from %s fcount %llu dcount %llu",
	    d->bd_dev_minor, bpf_closed ? " closed and" : "", if_name(ifp),
	    d->bd_fcount, d->bd_dcount);

	/*
	 * Note that we've kept the reference because we may have dropped
	 * the lock when turning off promiscuous mode
	 */
	bpf_release_d(d);
done:
	/*
	 * Let the caller know the bpf_d is closed
	 */
	if (bpf_closed) {
		return 1;
	} else {
		return 0;
	}
}

/*
 * Start asynchronous timer, if necessary.
 * Must be called with bpf_mlock held.
 */
static void
bpf_start_timer(struct bpf_d *d)
{
	uint64_t deadline;
	struct timeval tv;

	if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
		tv.tv_sec = d->bd_rtout / hz;
		tv.tv_usec = (d->bd_rtout % hz) * tick;

		clock_interval_to_deadline(
			(uint32_t)tv.tv_sec * USEC_PER_SEC + tv.tv_usec,
			NSEC_PER_USEC, &deadline);
		/*
		 * The state is BPF_IDLE, so the timer hasn't
		 * been started yet, and hasn't gone off yet;
		 * there is no thread call scheduled, so this
		 * won't change the schedule.
		 *
		 * XXX - what if, by the time it gets entered,
		 * the deadline has already passed?
		 */
		thread_call_enter_delayed(d->bd_thread_call, deadline);
		d->bd_state = BPF_WAITING;
	}
}
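
/*
 * Worked example (illustrative, assuming bd_rtout was set from the
 * user's timeval with tvtohz() in the BIOCSRTIMEOUT path): bd_rtout is
 * in scheduler ticks, and the math above inverts that conversion. With
 * hz == 100 and tick == 10000 us, bd_rtout == 250 yields
 * tv = { .tv_sec = 2, .tv_usec = 500000 }, i.e. a 2.5 s interval
 * handed to clock_interval_to_deadline() in microseconds.
 */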

/*
 * Cancel asynchronous timer.
 * Must be called with bpf_mlock held.
 */
static boolean_t
bpf_stop_timer(struct bpf_d *d)
{
	/*
	 * If the timer has already gone off, this does nothing.
	 * Our caller is expected to set d->bd_state to BPF_IDLE,
	 * with the bpf_mlock, after we are called. bpf_timed_out()
	 * also grabs bpf_mlock, so, if the timer has gone off and
	 * bpf_timed_out() hasn't finished, it's waiting for the
	 * lock; when this thread releases the lock, it will
	 * find the state is BPF_IDLE, and just release the
	 * lock and return.
	 */
	return thread_call_cancel(d->bd_thread_call);
}

void
bpf_acquire_d(struct bpf_d *d)
{
	void *lr_saved = __builtin_return_address(0);

	LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);

	d->bd_refcnt += 1;

	d->bd_ref_lr[d->bd_next_ref_lr] = lr_saved;
	d->bd_next_ref_lr = (d->bd_next_ref_lr + 1) % BPF_REF_HIST;
}

void
bpf_release_d(struct bpf_d *d)
{
	void *lr_saved = __builtin_return_address(0);

	LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);

	if (d->bd_refcnt <= 0) {
		panic("%s: %p refcnt <= 0", __func__, d);
	}

	d->bd_refcnt -= 1;

	d->bd_unref_lr[d->bd_next_unref_lr] = lr_saved;
	d->bd_next_unref_lr = (d->bd_next_unref_lr + 1) % BPF_REF_HIST;

	if (d->bd_refcnt == 0) {
		/* Assert the device is detached */
		if ((d->bd_flags & BPF_DETACHED) == 0) {
			panic("%s: %p BPF_DETACHED not set", __func__, d);
		}

		kfree_type(struct bpf_d, d);
	}
}

/*
 * Open ethernet device. Returns ENXIO for illegal minor device number,
 * EBUSY if file is open by another process.
 */
/* ARGSUSED */
int
bpfopen(dev_t dev, int flags, __unused int fmt,
    struct proc *p)
{
	struct bpf_d *d;

	lck_mtx_lock(bpf_mlock);
	if ((unsigned int) minor(dev) >= nbpfilter) {
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}
	/*
	 * New device nodes are created on demand when opening the last one.
	 * The programming model is for processes to loop on the minor starting
	 * at 0 as long as EBUSY is returned. The loop stops when either the
	 * open succeeds or an error other than EBUSY is returned. That means
	 * that bpf_make_dev_t() must block all processes that are opening the
	 * last node. If not all processes are blocked, they could unexpectedly
	 * get ENOENT and abort their opening loop.
	 */
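	/*
	 * A minimal sketch of that userland loop (illustrative only, not
	 * part of this file):
	 *
	 *	int fd = -1;
	 *	for (int i = 0; fd < 0; i++) {
	 *		char name[16];
	 *		snprintf(name, sizeof(name), "/dev/bpf%d", i);
	 *		fd = open(name, O_RDWR);
	 *		if (fd < 0 && errno != EBUSY)
	 *			break;	// failed for a reason other than EBUSY
	 *	}
	 */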
	if ((unsigned int) minor(dev) == (nbpfilter - 1)) {
		bpf_make_dev_t(major(dev));
	}

	/*
	 * Each minor can be opened by only one process. If the requested
	 * minor is in use, return EBUSY.
	 *
	 * Important: bpfopen() and bpfclose() have to check and set the status
	 * of a device in the same locking context otherwise the device may be
	 * leaked because the vnode use count will be unexpectedly greater
	 * than 1 when close() is called.
	 */
	if (bpf_dtab[minor(dev)] == NULL) {
		/* Reserve while opening */
		bpf_dtab[minor(dev)] = BPF_DEV_RESERVED;
	} else {
		lck_mtx_unlock(bpf_mlock);
		return EBUSY;
	}
	d = kalloc_type(struct bpf_d, Z_WAITOK | Z_ZERO);
	if (d == NULL) {
		/* this really is a catastrophic failure */
		os_log_error(OS_LOG_DEFAULT,
		    "bpfopen: bpf%d kalloc_type bpf_d failed", minor(dev));
		bpf_dtab[minor(dev)] = NULL;
		lck_mtx_unlock(bpf_mlock);
		return ENOMEM;
	}

	/* Mark "in use" and do most initialization. */
	bpf_acquire_d(d);
	d->bd_bufsize = bpf_bufsize;
	d->bd_sig = SIGIO;
	d->bd_direction = BPF_D_INOUT;
	d->bd_oflags = flags;
	d->bd_state = BPF_IDLE;
	d->bd_traffic_class = SO_TC_BE;
	d->bd_flags |= BPF_DETACHED;
	if (bpf_wantpktap) {
		d->bd_flags |= BPF_WANT_PKTAP;
	} else {
		d->bd_flags &= ~BPF_WANT_PKTAP;
	}

	d->bd_thread_call = thread_call_allocate(bpf_timed_out, d);
	if (d->bd_thread_call == NULL) {
		os_log_error(OS_LOG_DEFAULT, "bpfopen: bpf%d malloc thread call failed",
		    minor(dev));
		bpf_dtab[minor(dev)] = NULL;
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);

		return ENOMEM;
	}
	d->bd_opened_by = p;
	uuid_generate(d->bd_uuid);
	d->bd_pid = proc_pid(p);

	d->bd_dev_minor = minor(dev);
	bpf_dtab[minor(dev)] = d;	/* Mark opened */
	lck_mtx_unlock(bpf_mlock);

	if (bpf_debug) {
		os_log(OS_LOG_DEFAULT, "bpf%u opened by %s.%u",
		    d->bd_dev_minor, proc_name_address(p), d->bd_pid);
	}
	return 0;
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
/* ARGSUSED */
int
bpfclose(dev_t dev, __unused int flags, __unused int fmt,
    __unused struct proc *p)
{
	struct bpf_d *d;

	/* Take BPF lock to ensure no other thread is using the device */
	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == NULL || d == BPF_DEV_RESERVED) {
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	/*
	 * Other threads may call bpf_detachd() if we drop the bpf_mlock
	 */
	d->bd_flags |= BPF_CLOSING;

	if (bpf_debug != 0) {
		os_log(OS_LOG_DEFAULT, "%s: bpf%d",
		    __func__, d->bd_dev_minor);
	}

	bpf_dtab[minor(dev)] = BPF_DEV_RESERVED;	/* Reserve while closing */

	/*
	 * Deal with any in-progress timeouts.
	 */
	switch (d->bd_state) {
	case BPF_IDLE:
		/*
		 * Not waiting for a timeout, and no timeout happened.
		 */
		break;

	case BPF_WAITING:
		/*
		 * Waiting for a timeout.
		 * Cancel any timer that has yet to go off,
		 * and mark the state as "closing".
		 * Then drop the lock to allow any timers that
		 * *have* gone off to run to completion, and wait
		 * for them to finish.
		 */
		if (!bpf_stop_timer(d)) {
			/*
			 * There was no pending call, so the call must
			 * have been in progress. Wait for the call to
			 * complete; we have to drop the lock while
			 * waiting to let the in-progress call complete.
			 */
			d->bd_state = BPF_DRAINING;
			while (d->bd_state == BPF_DRAINING) {
				msleep((caddr_t)d, bpf_mlock, PRINET,
				    "bpfdraining", NULL);
			}
		}
		d->bd_state = BPF_IDLE;
		break;

	case BPF_TIMED_OUT:
		/*
		 * Timer went off, and the timeout routine finished.
		 */
		d->bd_state = BPF_IDLE;
		break;

	case BPF_DRAINING:
		/*
		 * Another thread is blocked on a close waiting for
		 * a timeout to finish.
		 * This "shouldn't happen", as the first thread to enter
		 * bpfclose() will set bpf_dtab[minor(dev)] to 1, and
		 * all subsequent threads should see that and fail with
		 * ENXIO.
		 */
		panic("Two threads blocked in a BPF close");
		break;
	}

	if (d->bd_bif) {
		bpf_detachd(d);
	}
	selthreadclear(&d->bd_sel);
	thread_call_free(d->bd_thread_call);

	while (d->bd_hbuf_read || d->bd_hbuf_write) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpfclose", NULL);
	}

	if (bpf_debug) {
		os_log(OS_LOG_DEFAULT,
		    "bpf%u closed by %s.%u dcount %llu fcount %llu ccount %llu",
		    d->bd_dev_minor, proc_name_address(p), d->bd_pid,
		    d->bd_dcount, d->bd_fcount, d->bd_bcs.bcs_count_compressed_prefix);
	}

	bpf_freed(d);

	/* Mark free in same context as bpfopen comes to check */
	bpf_dtab[minor(dev)] = NULL;	/* Mark closed */

	bpf_release_d(d);

	lck_mtx_unlock(bpf_mlock);

	return 0;
}

#define BPF_SLEEP bpf_sleep

static int
bpf_sleep(struct bpf_d *d, int pri, const char *wmesg, int timo)
{
	u_int64_t abstime = 0;

	if (timo != 0) {
		clock_interval_to_deadline(timo, NSEC_PER_SEC / hz, &abstime);
	}

	return msleep1((caddr_t)d, bpf_mlock, pri, wmesg, abstime);
}

static void
bpf_finalize_pktap(struct bpf_hdr *hp, struct pktap_header *pktaphdr)
{
	if (pktaphdr->pth_flags & PTH_FLAG_V2_HDR) {
		struct pktap_v2_hdr *pktap_v2_hdr;

		pktap_v2_hdr = (struct pktap_v2_hdr *)pktaphdr;

		if (pktap_v2_hdr->pth_flags & PTH_FLAG_DELAY_PKTAP) {
			pktap_v2_finalize_proc_info(pktap_v2_hdr);
		}
	} else {
		if (pktaphdr->pth_flags & PTH_FLAG_DELAY_PKTAP) {
			pktap_finalize_proc_info(pktaphdr);
		}

		if (pktaphdr->pth_flags & PTH_FLAG_TSTAMP) {
			hp->bh_tstamp.tv_sec = pktaphdr->pth_tstamp.tv_sec;
			hp->bh_tstamp.tv_usec = pktaphdr->pth_tstamp.tv_usec;
		}
	}
}

/*
 * Rotate the packet buffers in descriptor d. Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 *
 * Note: in head drop mode, the hold buffer can be dropped so the first packet
 * of the store buffer cannot be compressed as it otherwise would refer to
 * deleted data in a dropped hold buffer that the reader process does not
 * know about
 */
#define ROTATE_BUFFERS(d) do { \
	if (d->bd_hbuf_read) \
	        panic("rotating bpf buffers during read"); \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_hcnt = (d)->bd_scnt; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_scnt = 0; \
	(d)->bd_fbuf = NULL; \
	if ((d)->bd_headdrop != 0) \
	        (d)->bd_prev_slen = 0; \
} while(false)
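
/*
 * Descriptive note: the three buffers cycle through fixed roles.
 * Captured packets are appended to the store buffer (bd_sbuf); a
 * rotation promotes the store buffer to the hold slot (bd_hbuf) for the
 * reader and pulls the free buffer (bd_fbuf) in as the new store
 * buffer, leaving bd_fbuf NULL. The free slot is only refilled when
 * bpfread() finishes draining the hold buffer (bd_fbuf = bd_hbuf), so a
 * writer such as catchpacket() can only rotate while a free buffer is
 * available.
 */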

/*
 * bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	caddr_t hbuf;
	int timed_out, hbuf_len;
	int error;
	int flags;

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == NULL || d == BPF_DEV_RESERVED ||
	    (d->bd_flags & BPF_CLOSING) != 0) {
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	bpf_acquire_d(d);

	/*
	 * Restrict application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio_resid(uio) != d->bd_bufsize) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return EINVAL;
	}

	if (d->bd_state == BPF_WAITING) {
		bpf_stop_timer(d);
	}

	timed_out = (d->bd_state == BPF_TIMED_OUT);
	d->bd_state = BPF_IDLE;

	while (d->bd_hbuf_read) {
		msleep((caddr_t)d, bpf_mlock, PRINET, "bpfread", NULL);
	}

	if ((d->bd_flags & BPF_CLOSING) != 0) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}
	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == 0) {
		if ((d->bd_immediate || timed_out || (ioflag & IO_NDELAY)) &&
		    d->bd_slen != 0) {
			/*
			 * We're in immediate mode, or are reading
			 * in non-blocking mode, or a timer was
			 * started before the read (e.g., by select()
			 * or poll()) and has expired and a packet(s)
			 * either arrived since the previous
			 * read or arrived while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}

		/*
		 * No data is available, check to see if the bpf device
		 * is still pointed at a real interface. If not, return
		 * ENXIO so that the userland process knows to rebind
		 * it before using it again.
		 */
		if (d->bd_bif == NULL) {
			bpf_release_d(d);
			lck_mtx_unlock(bpf_mlock);
			return ENXIO;
		}
		if (ioflag & IO_NDELAY) {
			bpf_release_d(d);
			lck_mtx_unlock(bpf_mlock);
			return EWOULDBLOCK;
		}
		error = BPF_SLEEP(d, PRINET | PCATCH, "bpf", d->bd_rtout);
		/*
		 * Make sure device is still opened
		 */
		if ((d->bd_flags & BPF_CLOSING) != 0) {
			bpf_release_d(d);
			lck_mtx_unlock(bpf_mlock);
			return ENXIO;
		}

		while (d->bd_hbuf_read) {
			msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_read",
			    NULL);
		}

		if ((d->bd_flags & BPF_CLOSING) != 0) {
			bpf_release_d(d);
			lck_mtx_unlock(bpf_mlock);
			return ENXIO;
		}

		if (error == EINTR || error == ERESTART) {
			if (d->bd_hbuf != NULL) {
				/*
				 * Because we msleep, the hold buffer might
				 * be filled when we wake up. Avoid rotating
				 * in this case.
				 */
				break;
			}
			if (d->bd_slen != 0) {
				/*
				 * Sometimes we may be interrupted often and
				 * the sleep above will not timeout.
				 * Regardless, we should rotate the buffers
				 * if there's any new data pending and
				 * return it.
				 */
				ROTATE_BUFFERS(d);
				break;
			}
			bpf_release_d(d);
			lck_mtx_unlock(bpf_mlock);
			if (error == ERESTART) {
				os_log(OS_LOG_DEFAULT, "%s: bpf%d ERESTART to EINTR",
				    __func__, d->bd_dev_minor);
				error = EINTR;
			}
			return error;
		}
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing. If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf) {
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;
			}

			if (d->bd_slen == 0) {
				bpf_release_d(d);
				lck_mtx_unlock(bpf_mlock);
				return 0;
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */

	/*
	 * Set the hold buffer read. So we do not
	 * rotate the buffers until the hold buffer
	 * read is complete. Also to avoid issues resulting
	 * from page faults during disk sleep (<rdar://problem/13436396>).
	 */
	d->bd_hbuf_read = true;
	hbuf = d->bd_hbuf;
	hbuf_len = d->bd_hlen;
	flags = d->bd_flags;
	d->bd_bcs.bcs_total_read += d->bd_hcnt;
	lck_mtx_unlock(bpf_mlock);

	/*
	 * Before we move data to userland, we fill out the extended
	 * header fields.
	 */
	if (flags & BPF_EXTENDED_HDR) {
		char *p;

		p = hbuf;
		while (p < hbuf + hbuf_len) {
			struct bpf_hdr_ext *ehp;
			uint32_t flowid;
			struct so_procinfo soprocinfo;
			int found = 0;

			ehp = (struct bpf_hdr_ext *)(void *)p;
			if ((flowid = ehp->bh_flowid) != 0) {
				if (ehp->bh_flags & BPF_HDR_EXT_FLAGS_TCP) {
					ehp->bh_flags &= ~BPF_HDR_EXT_FLAGS_TCP;
					found = inp_findinpcb_procinfo(&tcbinfo,
					    flowid, &soprocinfo);
				} else if (ehp->bh_flags == BPF_HDR_EXT_FLAGS_UDP) {
					ehp->bh_flags &= ~BPF_HDR_EXT_FLAGS_UDP;
					found = inp_findinpcb_procinfo(&udbinfo,
					    flowid, &soprocinfo);
				}
				if (found == 1) {
					ehp->bh_pid = soprocinfo.spi_pid;
					strlcpy(&ehp->bh_comm[0], &soprocinfo.spi_proc_name[0], sizeof(ehp->bh_comm));
				}
				ehp->bh_flowid = 0;
			}

			if ((flags & BPF_FINALIZE_PKTAP) != 0 && ehp->bh_complen == 0) {
				struct pktap_header *pktaphdr;

				pktaphdr = (struct pktap_header *)(void *)
				    (p + BPF_WORDALIGN(ehp->bh_hdrlen));

				bpf_finalize_pktap((struct bpf_hdr *) ehp,
				    pktaphdr);
			}
			p += BPF_WORDALIGN(ehp->bh_hdrlen + ehp->bh_caplen);
		}
	} else if (flags & BPF_FINALIZE_PKTAP) {
		char *p;

		p = hbuf;

		while (p < hbuf + hbuf_len) {
			struct bpf_hdr *hp;
			struct pktap_header *pktaphdr;

			hp = (struct bpf_hdr *)(void *)p;

			/*
			 * Cannot finalize a compressed pktap header as we may not have
			 * all the fields present
			 */
			if (d->bd_flags & BPF_COMP_ENABLED) {
				struct bpf_comp_hdr *hcp;

				hcp = (struct bpf_comp_hdr *)(void *)p;

				if (hcp->bh_complen != 0) {
					p += BPF_WORDALIGN(hcp->bh_hdrlen + hcp->bh_caplen);
					continue;
				}
			}

			pktaphdr = (struct pktap_header *)(void *)
			    (p + BPF_WORDALIGN(hp->bh_hdrlen));

			bpf_finalize_pktap(hp, pktaphdr);

			p += BPF_WORDALIGN(hp->bh_hdrlen + hp->bh_caplen);
		}
	}

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	error = uiomove(hbuf, hbuf_len, uio);

	lck_mtx_lock(bpf_mlock);
	/*
	 * Make sure device is still opened
	 */
	if ((d->bd_flags & BPF_CLOSING) != 0) {
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	d->bd_hbuf_read = false;
	d->bd_fbuf = d->bd_hbuf;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	d->bd_hcnt = 0;
	wakeup((caddr_t)d);

	bpf_release_d(d);
	lck_mtx_unlock(bpf_mlock);
	return error;
}

/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
static void
bpf_wakeup(struct bpf_d *d)
{
	if (d->bd_state == BPF_WAITING) {
		bpf_stop_timer(d);
		d->bd_state = BPF_IDLE;
	}
	wakeup((caddr_t)d);
	if (d->bd_async && d->bd_sig && d->bd_sigio) {
		pgsigio(d->bd_sigio, d->bd_sig);
	}

	selwakeup(&d->bd_sel);
	if ((d->bd_flags & BPF_KNOTE)) {
		KNOTE(&d->bd_sel.si_note, 1);
	}
}

static void
bpf_timed_out(void *arg, __unused void *dummy)
{
	struct bpf_d *d = (struct bpf_d *)arg;

	lck_mtx_lock(bpf_mlock);
	if (d->bd_state == BPF_WAITING) {
		/*
		 * There's a select or kqueue waiting for this; if there's
		 * now stuff to read, wake it up.
		 */
		d->bd_state = BPF_TIMED_OUT;
		if (d->bd_slen != 0) {
			bpf_wakeup(d);
		}
	} else if (d->bd_state == BPF_DRAINING) {
		/*
		 * A close is waiting for this to finish.
		 * Mark it as finished, and wake the close up.
		 */
		d->bd_state = BPF_IDLE;
		bpf_wakeup(d);
	}
	lck_mtx_unlock(bpf_mlock);
}

/* keep in sync with bpf_movein above: */
#define MAX_DATALINK_HDR_LEN (sizeof(struct firewire_header))
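
/*
 * Descriptive note: dst_buf in bpfwrite() below doubles as scratch
 * space for bpf_movein(). Its sa_len is set to span the whole buffer,
 * bpf_movein() fills in sa_family for the DLT and copies the
 * caller-supplied link-layer header into sa_data, and dlil_output()
 * later consumes it as the destination pseudo-address. It is sized for
 * the largest link header handled by bpf_movein() (FireWire).
 */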

int
bpfwrite(dev_t dev, struct uio *uio, __unused int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m = NULL;
	int error = 0;
	char dst_buf[SOCKADDR_HDR_LEN + MAX_DATALINK_HDR_LEN];
	int bif_dlt;
	int bd_hdrcmplt;
	bpf_send_func bif_send;

	lck_mtx_lock(bpf_mlock);

	while (true) {
		d = bpf_dtab[minor(dev)];
		if (d == NULL || d == BPF_DEV_RESERVED ||
		    (d->bd_flags & BPF_CLOSING) != 0) {
			lck_mtx_unlock(bpf_mlock);
			return ENXIO;
		}

		if (d->bd_hbuf_write) {
			msleep((caddr_t)d, bpf_mlock, PRINET, "bpfwrite",
			    NULL);
		} else {
			break;
		}
	}
	d->bd_hbuf_write = true;

	bpf_acquire_d(d);

	++d->bd_wcount;

	if (d->bd_bif == NULL) {
		error = ENXIO;
		goto done;
	}

	ifp = d->bd_bif->bif_ifp;

	if (IFNET_IS_MANAGEMENT(ifp) &&
	    IOCurrentTaskHasEntitlement(MANAGEMENT_DATA_ENTITLEMENT) == false) {
		++d->bd_wdcount;
		bpf_release_d(d);
		lck_mtx_unlock(bpf_mlock);
		return ENETDOWN;
	}

	if ((ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto done;
	}
	int resid = (int)uio_resid(uio);
	if (resid <= 0) {
		error = resid == 0 ? 0 : EINVAL;
		os_log(OS_LOG_DEFAULT, "bpfwrite: resid %d error %d", resid, error);
		goto done;
	}
	SA(dst_buf)->sa_len = sizeof(dst_buf);

	/*
	 * getting variables onto stack before dropping the lock
	 */
	bif_dlt = (int)d->bd_bif->bif_dlt;
	bd_hdrcmplt = d->bd_hdrcmplt;
	bool batch_write = (d->bd_flags & BPF_BATCH_WRITE) ? true : false;

	if (batch_write) {
		error = bpf_movein_batch(uio, d, &m, bd_hdrcmplt ? NULL : SA(dst_buf));
		if (error != 0) {
			goto done;
		}
	} else {
		error = bpf_movein(uio, resid, d, &m, bd_hdrcmplt ? NULL : SA(dst_buf));
		if (error != 0) {
			goto done;
		}
		bpf_set_packet_service_class(m, d->bd_traffic_class);
	}

	/* verify the device is still open */
	if ((d->bd_flags & BPF_CLOSING) != 0) {
		error = ENXIO;
		goto done;
	}

	if (d->bd_bif == NULL || d->bd_bif->bif_ifp != ifp) {
		error = ENXIO;
		goto done;
	}

	bif_send = d->bd_bif->bif_send;

	lck_mtx_unlock(bpf_mlock);

	if (bd_hdrcmplt) {
		if (bif_send) {
			/*
			 * Send one packet at a time, the driver frees the mbuf
			 * but we need to take care of the leftover
			 */
			while (m != NULL && error == 0) {
				struct mbuf *next = m->m_nextpkt;

				m->m_nextpkt = NULL;
				error = bif_send(ifp, bif_dlt, m);
				m = next;
			}
		} else {
			error = dlil_output(ifp, 0, m, NULL, NULL, 1, NULL);
			/* Make sure we do not double free */
			m = NULL;
		}
	} else {
		error = dlil_output(ifp, PF_INET, m, NULL,
		    SA(dst_buf), 0, NULL);
		/* Make sure we do not double free */
		m = NULL;
	}

	lck_mtx_lock(bpf_mlock);
done:
	if (error != 0 && m != NULL) {
		++d->bd_wdcount;
	}
	if (m != NULL) {
		m_freem_list(m);
	}
	d->bd_hbuf_write = false;
	wakeup((caddr_t)d);
	bpf_release_d(d);
	lck_mtx_unlock(bpf_mlock);

	return error;
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
static void
reset_d(struct bpf_d *d)
{
	if (d->bd_hbuf_read) {
		panic("resetting buffers during read");
	}

	if (d->bd_hbuf) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_scnt = 0;
	d->bd_hcnt = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
	d->bd_fcount = 0;
	d->bd_wcount = 0;
	d->bd_wdcount = 0;

	d->bd_prev_slen = 0;
}

static struct bpf_d *
bpf_get_device_from_uuid(uuid_t uuid)
{
	unsigned int i;

	for (i = 0; i < nbpfilter; i++) {
		struct bpf_d *d = bpf_dtab[i];

		if (d == NULL || d == BPF_DEV_RESERVED ||
		    (d->bd_flags & BPF_CLOSING) != 0) {
			continue;
		}
		if (uuid_compare(uuid, d->bd_uuid) == 0) {
			return d;
		}
	}

	return NULL;
}

/*
 * The BIOCSETUP command "atomically" attaches to the interface and
 * copies the buffer from another interface. This minimizes the risk
 * of missing packets because this is done while holding
 * the BPF global lock
 */
static int
bpf_setup(struct bpf_d *d_to, uuid_t uuid_from, ifnet_t ifp)
{
	struct bpf_d *d_from;
	int error = 0;

	LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);

	/*
	 * Sanity checks
	 */
	d_from = bpf_get_device_from_uuid(uuid_from);
	if (d_from == NULL) {
		error = ENOENT;
		os_log_error(OS_LOG_DEFAULT,
		    "%s: uuids not found error %d",
		    __func__, error);
		return error;
	}
	if (d_from->bd_opened_by != d_to->bd_opened_by) {
		error = EACCES;
		os_log_error(OS_LOG_DEFAULT,
		    "%s: processes not matching error %d",
		    __func__, error);
		return error;
	}

	/*
	 * Prevent any read or write while copying
	 */
	while (d_to->bd_hbuf_read || d_to->bd_hbuf_write) {
		msleep((caddr_t)d_to, bpf_mlock, PRINET, __func__, NULL);
	}
	d_to->bd_hbuf_read = true;
	d_to->bd_hbuf_write = true;

	while (d_from->bd_hbuf_read || d_from->bd_hbuf_write) {
		msleep((caddr_t)d_from, bpf_mlock, PRINET, __func__, NULL);
	}
	d_from->bd_hbuf_read = true;
	d_from->bd_hbuf_write = true;

	/*
	 * Verify the devices have not been closed
	 */
	if (d_to->bd_flags & BPF_CLOSING) {
		error = ENXIO;
		os_log_error(OS_LOG_DEFAULT,
		    "%s: d_to is closing error %d",
		    __func__, error);
		goto done;
	}
	if (d_from->bd_flags & BPF_CLOSING) {
		error = ENXIO;
		os_log_error(OS_LOG_DEFAULT,
		    "%s: d_from is closing error %d",
		    __func__, error);
		goto done;
	}

	/*
	 * For now require the same buffer size
	 */
	if (d_from->bd_bufsize != d_to->bd_bufsize) {
		error = EINVAL;
		os_log_error(OS_LOG_DEFAULT,
		    "%s: bufsizes not matching error %d",
		    __func__, error);
		goto done;
	}

	/*
	 * Copy relevant options and flags
	 */
	d_to->bd_flags = d_from->bd_flags & (BPF_EXTENDED_HDR | BPF_WANT_PKTAP |
	    BPF_FINALIZE_PKTAP | BPF_TRUNCATE | BPF_PKTHDRV2 |
	    BPF_COMP_REQ | BPF_COMP_ENABLED);

	d_to->bd_headdrop = d_from->bd_headdrop;

	/*
	 * Allocate and copy the buffers
	 */
	error = bpf_allocbufs(d_to);
	if (error != 0) {
		goto done;
	}

	/*
	 * Make sure the buffers are setup as expected by bpf_setif()
	 */
	ASSERT(d_to->bd_hbuf == NULL);
	ASSERT(d_to->bd_sbuf != NULL);
	ASSERT(d_to->bd_fbuf != NULL);

	/*
	 * Copy the buffers and update the pointers and counts
	 */
	memcpy(d_to->bd_sbuf, d_from->bd_sbuf, d_from->bd_slen);
	d_to->bd_slen = d_from->bd_slen;
	d_to->bd_scnt = d_from->bd_scnt;

	if (d_from->bd_hbuf != NULL) {
		d_to->bd_hbuf = d_to->bd_fbuf;
		d_to->bd_fbuf = NULL;
		memcpy(d_to->bd_hbuf, d_from->bd_hbuf, d_from->bd_hlen);
	}
	d_to->bd_hlen = d_from->bd_hlen;
	d_to->bd_hcnt = d_from->bd_hcnt;

	if (d_to->bd_flags & BPF_COMP_REQ) {
		ASSERT(d_to->bd_prev_sbuf != NULL);
		ASSERT(d_to->bd_prev_fbuf != NULL);

		d_to->bd_prev_slen = d_from->bd_prev_slen;
		ASSERT(d_to->bd_prev_slen <= BPF_HDR_COMP_LEN_MAX);
		memcpy(d_to->bd_prev_sbuf, d_from->bd_prev_sbuf, BPF_HDR_COMP_LEN_MAX);
	}

	d_to->bd_bcs = d_from->bd_bcs;

	/*
	 * Attach to the interface:
	 * - don't reset the buffers
	 * - we already prevent reads and writes
	 * - the buffers are already allocated
	 */
	error = bpf_setif(d_to, ifp, false, true, true);
	if (error != 0) {
		os_log_error(OS_LOG_DEFAULT,
		    "%s: bpf_setif() failed error %d",
		    __func__, error);
		goto done;
	}
done:
	d_from->bd_hbuf_read = false;
	d_from->bd_hbuf_write = false;
	wakeup((caddr_t)d_from);

	d_to->bd_hbuf_read = false;
	d_to->bd_hbuf_write = false;
	wakeup((caddr_t)d_to);

	return error;
}

#if DEVELOPMENT || DEBUG
#define BPF_IOC_LIST \
	X(FIONREAD) \
	X(SIOCGIFADDR) \
	X(BIOCGBLEN) \
	X(BIOCSBLEN) \
	X(BIOCSETF32) \
	X(BIOCSETFNR32) \
	X(BIOCSETF64) \
	X(BIOCSETFNR64) \
	X(BIOCFLUSH) \
	X(BIOCPROMISC) \
	X(BIOCGDLT) \
	X(BIOCGDLTLIST) \
	X(BIOCSDLT) \
	X(BIOCGETIF) \
	X(BIOCSETIF) \
	X(BIOCSRTIMEOUT32) \
	X(BIOCSRTIMEOUT64) \
	X(BIOCGRTIMEOUT32) \
	X(BIOCGRTIMEOUT64) \
	X(BIOCGSTATS) \
	X(BIOCIMMEDIATE) \
	X(BIOCVERSION) \
	X(BIOCGHDRCMPLT) \
	X(BIOCSHDRCMPLT) \
	X(BIOCGSEESENT) \
	X(BIOCSSEESENT) \
	X(BIOCSETTC) \
	X(BIOCGETTC) \
	X(FIONBIO) \
	X(FIOASYNC) \
	X(BIOCSRSIG) \
	X(BIOCGRSIG) \
	X(BIOCSEXTHDR) \
	X(BIOCGIFATTACHCOUNT) \
	X(BIOCGWANTPKTAP) \
	X(BIOCSWANTPKTAP) \
	X(BIOCSHEADDROP) \
	X(BIOCGHEADDROP) \
	X(BIOCSTRUNCATE) \
	X(BIOCGETUUID) \
	X(BIOCSETUP) \
	X(BIOCSPKTHDRV2) \
	X(BIOCGHDRCOMP) \
	X(BIOCSHDRCOMP) \
	X(BIOCGHDRCOMPSTATS) \
	X(BIOCGHDRCOMPON) \
	X(BIOCGDIRECTION) \
	X(BIOCSDIRECTION) \
	X(BIOCSWRITEMAX) \
	X(BIOCGWRITEMAX) \
	X(BIOCGBATCHWRITE) \
	X(BIOCSBATCHWRITE)

static void
log_bpf_ioctl_str(struct bpf_d *d, u_long cmd)
{
	const char *p = NULL;
	char str[32];

#define X(x) case x: { p = #x ; printf("%s\n", p); break; }
	switch (cmd) {
		BPF_IOC_LIST
	}
#undef X
	if (p == NULL) {
		snprintf(str, sizeof(str), "0x%08x", (unsigned int)cmd);
		p = str;
	}
	os_log(OS_LOG_DEFAULT, "bpfioctl bpf%u %s",
	    d->bd_dev_minor, p);
}
#endif /* DEVELOPMENT || DEBUG */

/*
 * FIONREAD		Check for read packet available.
 * SIOCGIFADDR		Get interface address - convenient hook to driver.
 * BIOCGBLEN		Get buffer len [for read()].
 * BIOCSETF		Set ethernet read filter.
 * BIOCFLUSH		Flush read packet buffer.
 * BIOCPROMISC		Put interface into promiscuous mode.
 * BIOCGDLT		Get link layer type.
 * BIOCGETIF		Get interface name.
 * BIOCSETIF		Set interface.
 * BIOCSRTIMEOUT	Set read timeout.
 * BIOCGRTIMEOUT	Get read timeout.
 * BIOCGSTATS		Get packet stats.
 * BIOCIMMEDIATE	Set immediate mode.
 * BIOCVERSION		Get filter language version.
 * BIOCGHDRCMPLT	Get "header already complete" flag
 * BIOCSHDRCMPLT	Set "header already complete" flag
 * BIOCGSEESENT		Get "see packets sent" flag
 * BIOCSSEESENT		Set "see packets sent" flag
 * BIOCSETTC		Set traffic class.
 * BIOCGETTC		Get traffic class.
 * BIOCSEXTHDR		Set "extended header" flag
 * BIOCSHEADDROP	Drop head of the buffer if user is not reading
 * BIOCGHEADDROP	Get "head-drop" flag
 */
/* ARGSUSED */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, __unused int flags,
    struct proc *p)
{
	struct bpf_d *d;
	int error = 0;
	u_int int_arg;
	struct ifreq ifr = {};

	lck_mtx_lock(bpf_mlock);

	d = bpf_dtab[minor(dev)];
	if (d == NULL || d == BPF_DEV_RESERVED ||
	    (d->bd_flags & BPF_CLOSING) != 0) {
		lck_mtx_unlock(bpf_mlock);
		return ENXIO;
	}

	bpf_acquire_d(d);

	if (d->bd_state == BPF_WAITING) {
		bpf_stop_timer(d);
	}
	d->bd_state = BPF_IDLE;

#if DEVELOPMENT || DEBUG
	if (bpf_debug > 0) {
		log_bpf_ioctl_str(d, cmd);
	}
#endif /* DEVELOPMENT || DEBUG */

	switch (cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:			/* int */
	{
		int n;

		n = d->bd_slen;
		if (d->bd_hbuf && d->bd_hbuf_read) {
			n += d->bd_hlen;
		}

		bcopy(&n, addr, sizeof(n));
		break;
	}

	case SIOCGIFADDR:		/* struct ifreq */
	{
		struct ifnet *ifp;

		if (d->bd_bif == 0) {
			error = EINVAL;
		} else {
			ifp = d->bd_bif->bif_ifp;
			error = ifnet_ioctl(ifp, 0, cmd, addr);
		}
		break;
	}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:			/* u_int */
		bcopy(&d->bd_bufsize, addr, sizeof(u_int));
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN: {		/* u_int */
		u_int size;

		if (d->bd_bif != 0 || (d->bd_flags & BPF_DETACHING)) {
			/*
			 * Interface already attached, unable to change buffers
			 */
			error = EINVAL;
			break;
		}
		bcopy(addr, &size, sizeof(size));

		if (size > BPF_BUFSIZE_CAP) {
			d->bd_bufsize = BPF_BUFSIZE_CAP;

			os_log_info(OS_LOG_DEFAULT,
			    "bpf%d BIOCSBLEN capped to %u from %u",
			    minor(dev), d->bd_bufsize, size);
		} else if (size < BPF_MINBUFSIZE) {
			d->bd_bufsize = BPF_MINBUFSIZE;

			os_log_info(OS_LOG_DEFAULT,
			    "bpf%d BIOCSBLEN bumped to %u from %u",
			    minor(dev), d->bd_bufsize, size);
		} else {
			d->bd_bufsize = size;

			os_log_info(OS_LOG_DEFAULT,
			    "bpf%d BIOCSBLEN %u",
			    minor(dev), d->bd_bufsize);
		}

		/* It's a read/write ioctl */
		bcopy(&d->bd_bufsize, addr, sizeof(u_int));
		break;
	}
	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF32:
	case BIOCSETFNR32: {		/* struct bpf_program32 */
		struct bpf_program32 prg32;

		bcopy(addr, &prg32, sizeof(prg32));
		error = bpf_setf(d, prg32.bf_len,
		    CAST_USER_ADDR_T(prg32.bf_insns), cmd);
		break;
	}

	case BIOCSETF64:
	case BIOCSETFNR64: {		/* struct bpf_program64 */
		struct bpf_program64 prg64;

		bcopy(addr, &prg64, sizeof(prg64));
		error = bpf_setf(d, prg64.bf_len, CAST_USER_ADDR_T(prg64.bf_insns), cmd);
		break;
	}

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		while (d->bd_hbuf_read) {
			msleep((caddr_t)d, bpf_mlock, PRINET, "BIOCFLUSH",
			    NULL);
		}
		if ((d->bd_flags & BPF_CLOSING) != 0) {
			error = ENXIO;
			break;
		}
		reset_d(d);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == 0) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
			break;
		}
		if (d->bd_promisc == 0) {
			lck_mtx_unlock(bpf_mlock);
			error = ifnet_set_promiscuous(d->bd_bif->bif_ifp, 1);
			lck_mtx_lock(bpf_mlock);
			if (error == 0) {
				d->bd_promisc = 1;
			}
		}
		break;

	/*
	 * Get device parameters.
	 */
	case BIOCGDLT:			/* u_int */
		if (d->bd_bif == 0) {
			error = EINVAL;
		} else {
			bcopy(&d->bd_bif->bif_dlt, addr, sizeof(u_int));
2161 }
2162 break;
2163
2164 /*
2165 * Get a list of supported data link types.
2166 */
2167 case BIOCGDLTLIST: /* struct bpf_dltlist */
2168 if (d->bd_bif == NULL) {
2169 error = EINVAL;
2170 } else {
2171 error = bpf_getdltlist(d, addr, p);
2172 }
2173 break;
2174
2175 /*
2176 * Set data link type.
2177 */
2178 case BIOCSDLT: /* u_int */
2179 if (d->bd_bif == NULL) {
2180 error = EINVAL;
2181 } else {
2182 u_int dlt;
2183
2184 bcopy(addr, &dlt, sizeof(dlt));
2185
2186 if (dlt == DLT_PKTAP &&
2187 !(d->bd_flags & BPF_WANT_PKTAP)) {
2188 dlt = DLT_RAW;
2189 }
2190 error = bpf_setdlt(d, dlt);
2191 }
2192 break;
2193
2194 /*
2195 * Get interface name.
2196 */
2197 case BIOCGETIF: /* struct ifreq */
2198 if (d->bd_bif == 0) {
2199 error = EINVAL;
2200 } else {
2201 struct ifnet *const ifp = d->bd_bif->bif_ifp;
2202
2203 snprintf(((struct ifreq *)(void *)addr)->ifr_name,
2204 sizeof(ifr.ifr_name), "%s", if_name(ifp));
2205 }
2206 break;
2207
2208 /*
2209 * Set interface.
2210 */
2211 case BIOCSETIF: { /* struct ifreq */
2212 ifnet_t ifp;
2213
2214 bcopy(addr, &ifr, sizeof(ifr));
2215 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
2216 ifp = ifunit(ifr.ifr_name);
2217 if (ifp == NULL) {
2218 error = ENXIO;
2219 } else {
2220 error = bpf_setif(d, ifp, true, false, false);
2221 }
2222 break;
2223 }
2224
2225 /*
2226 * Set read timeout.
2227 */
2228 case BIOCSRTIMEOUT32: { /* struct user32_timeval */
2229 struct user32_timeval _tv;
2230 struct timeval tv;
2231
2232 bcopy(addr, &_tv, sizeof(_tv));
2233 tv.tv_sec = _tv.tv_sec;
2234 tv.tv_usec = _tv.tv_usec;
2235
2236 /*
2237 * Subtract 1 tick from tvtohz() since this isn't
2238 * a one-shot timer.
2239 */
2240 if ((error = itimerfix(&tv)) == 0) {
2241 d->bd_rtout = tvtohz(&tv) - 1;
2242 }
2243 break;
2244 }
2245
2246 case BIOCSRTIMEOUT64: { /* struct user64_timeval */
2247 struct user64_timeval _tv;
2248 struct timeval tv;
2249
2250 bcopy(addr, &_tv, sizeof(_tv));
2251 tv.tv_sec = (__darwin_time_t)_tv.tv_sec;
2252 tv.tv_usec = _tv.tv_usec;
2253
2254 /*
2255 * Subtract 1 tick from tvtohz() since this isn't
2256 * a one-shot timer.
2257 */
2258 if ((error = itimerfix(&tv)) == 0) {
2259 d->bd_rtout = tvtohz(&tv) - 1;
2260 }
2261 break;
2262 }
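/*
 * Worked example for the two cases above, assuming hz == 100 and
 * tick == 10000 (microseconds per tick): a requested timeout of 500 ms
 * gives tvtohz(&tv) == 51, since tvtohz() rounds up and counts the
 * current partial tick, so bd_rtout is stored as 50 ticks.
 * BIOCGRTIMEOUT below maps the 50 ticks back to 0.5 seconds.
 */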
2263
2264 /*
2265 * Get read timeout.
2266 */
2267 case BIOCGRTIMEOUT32: { /* struct user32_timeval */
2268 struct user32_timeval tv;
2269
2270 bzero(&tv, sizeof(tv));
2271 tv.tv_sec = d->bd_rtout / hz;
2272 tv.tv_usec = (d->bd_rtout % hz) * tick;
2273 bcopy(&tv, addr, sizeof(tv));
2274 break;
2275 }
2276
2277 case BIOCGRTIMEOUT64: { /* struct user64_timeval */
2278 struct user64_timeval tv;
2279
2280 bzero(&tv, sizeof(tv));
2281 tv.tv_sec = d->bd_rtout / hz;
2282 tv.tv_usec = (d->bd_rtout % hz) * tick;
2283 bcopy(&tv, addr, sizeof(tv));
2284 break;
2285 }
2286
2287 /*
2288 * Get packet stats.
2289 */
2290 case BIOCGSTATS: { /* struct bpf_stat */
2291 struct bpf_stat bs;
2292
2293 bzero(&bs, sizeof(bs));
2294 bs.bs_recv = (u_int)d->bd_rcount;
2295 bs.bs_drop = (u_int)d->bd_dcount;
2296 bcopy(&bs, addr, sizeof(bs));
2297 break;
2298 }
2299
2300 /*
2301 * Set immediate mode.
2302 */
2303 case BIOCIMMEDIATE: /* u_int */
2304 d->bd_immediate = *(u_char *)(void *)addr;
2305 break;
2306
2307 case BIOCVERSION: { /* struct bpf_version */
2308 struct bpf_version bv;
2309
2310 bzero(&bv, sizeof(bv));
2311 bv.bv_major = BPF_MAJOR_VERSION;
2312 bv.bv_minor = BPF_MINOR_VERSION;
2313 bcopy(&bv, addr, sizeof(bv));
2314 break;
2315 }
2316
2317 /*
2318 * Get "header already complete" flag
2319 */
2320 case BIOCGHDRCMPLT: /* u_int */
2321 bcopy(&d->bd_hdrcmplt, addr, sizeof(u_int));
2322 break;
2323
2324 /*
2325 * Set "header already complete" flag
2326 */
2327 case BIOCSHDRCMPLT: /* u_int */
2328 bcopy(addr, &int_arg, sizeof(int_arg));
2329 if (int_arg == 0 && (d->bd_flags & BPF_BATCH_WRITE)) {
2330 os_log(OS_LOG_DEFAULT,
2331 "bpf%u cannot set BIOCSHDRCMPLT when BIOCSBATCHWRITE is set",
2332 d->bd_dev_minor);
2333 error = EINVAL;
2334 break;
2335 }
2336 d->bd_hdrcmplt = int_arg ? 1 : 0;
2337 break;
2338
2339 /*
2340 * Get "see sent packets" flag
2341 */
2342 case BIOCGSEESENT: { /* u_int */
2343 int_arg = 0;
2344
2345 if (d->bd_direction & BPF_D_OUT) {
2346 int_arg = 1;
2347 }
2348 bcopy(&int_arg, addr, sizeof(u_int));
2349 break;
2350 }
2351 /*
2352 * Set "see sent packets" flag
2353 */
2354 case BIOCSSEESENT: { /* u_int */
2355 bcopy(addr, &int_arg, sizeof(u_int));
2356
2357 if (int_arg == 0) {
2358 d->bd_direction = BPF_D_IN;
2359 } else {
2360 d->bd_direction = BPF_D_INOUT;
2361 }
2362 break;
2363 }
2364 /*
2365 * Get direction of tapped packets that can be seen for reading
2366 */
2367 case BIOCGDIRECTION: { /* u_int */
2368 int_arg = d->bd_direction;
2369
2370 bcopy(&int_arg, addr, sizeof(u_int));
2371 break;
2372 }
2373 /*
2374 * Set direction of tapped packets that can be seen for reading
2375 */
2376 case BIOCSDIRECTION: { /* u_int */
2377 bcopy(addr, &int_arg, sizeof(u_int));
2378
2379 switch (int_arg) {
2380 case BPF_D_NONE:
2381 case BPF_D_IN:
2382 case BPF_D_OUT:
2383 case BPF_D_INOUT:
2384 d->bd_direction = int_arg;
2385 break;
2386 default:
2387 error = EINVAL;
2388 break;
2389 }
2390 break;
2391 }
2392 /*
2393 * Set traffic service class
2394 */
2395 case BIOCSETTC: { /* int */
2396 int tc;
2397
2398 bcopy(addr, &tc, sizeof(int));
2399 if (tc != 0 && (d->bd_flags & BPF_BATCH_WRITE)) {
2400 os_log(OS_LOG_DEFAULT,
2401 "bpf%u cannot set BIOCSETTC when BIOCSBATCHWRITE is set",
2402 d->bd_dev_minor);
2403 error = EINVAL;
2404 break;
2405 }
2406 error = bpf_set_traffic_class(d, tc);
2407 break;
2408 }
2409
2410 /*
2411 * Get traffic service class
2412 */
2413 case BIOCGETTC: /* int */
2414 bcopy(&d->bd_traffic_class, addr, sizeof(int));
2415 break;
2416
2417 case FIONBIO: /* Non-blocking I/O; int */
2418 break;
2419
2420 case FIOASYNC: /* Send signal on receive packets; int */
2421 bcopy(addr, &d->bd_async, sizeof(int));
2422 break;
2423
2424 case BIOCSRSIG: { /* Set receive signal; u_int */
2425 u_int sig;
2426
2427 bcopy(addr, &sig, sizeof(u_int));
2428
2429 if (sig >= NSIG) {
2430 error = EINVAL;
2431 } else {
2432 d->bd_sig = sig;
2433 }
2434 break;
2435 }
2436 case BIOCGRSIG: /* u_int */
2437 bcopy(&d->bd_sig, addr, sizeof(u_int));
2438 break;
2439
2440 case BIOCSEXTHDR: /* u_int */
2441 bcopy(addr, &int_arg, sizeof(int_arg));
2442 if (int_arg) {
2443 d->bd_flags |= BPF_EXTENDED_HDR;
2444 } else {
2445 d->bd_flags &= ~BPF_EXTENDED_HDR;
2446 }
2447 break;
2448
2449 case BIOCGIFATTACHCOUNT: { /* struct ifreq */
2450 ifnet_t ifp;
2451 struct bpf_if *bp;
2452
2453 bcopy(addr, &ifr, sizeof(ifr));
2454 ifr.ifr_name[IFNAMSIZ - 1] = '\0';
2455 ifp = ifunit(ifr.ifr_name);
2456 if (ifp == NULL) {
2457 error = ENXIO;
2458 break;
2459 }
2460 ifr.ifr_intval = 0;
2461 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
2462 struct bpf_d *bpf_d;
2463
2464 if (bp->bif_ifp == NULL || bp->bif_ifp != ifp) {
2465 continue;
2466 }
2467 for (bpf_d = bp->bif_dlist; bpf_d;
2468 bpf_d = bpf_d->bd_next) {
2469 ifr.ifr_intval += 1;
2470 }
2471 }
2472 bcopy(&ifr, addr, sizeof(ifr));
2473 break;
2474 }
2475 case BIOCGWANTPKTAP: /* u_int */
2476 int_arg = d->bd_flags & BPF_WANT_PKTAP ? 1 : 0;
2477 bcopy(&int_arg, addr, sizeof(int_arg));
2478 break;
2479
2480 case BIOCSWANTPKTAP: /* u_int */
2481 bcopy(addr, &int_arg, sizeof(int_arg));
2482 if (int_arg) {
2483 d->bd_flags |= BPF_WANT_PKTAP;
2484 } else {
2485 d->bd_flags &= ~BPF_WANT_PKTAP;
2486 }
2487 break;
2488
2489 case BIOCSHEADDROP:
2490 bcopy(addr, &int_arg, sizeof(int_arg));
2491 d->bd_headdrop = int_arg ? 1 : 0;
2492 break;
2493
2494 case BIOCGHEADDROP:
2495 bcopy(&d->bd_headdrop, addr, sizeof(int));
2496 break;
2497
2498 case BIOCSTRUNCATE:
2499 bcopy(addr, &int_arg, sizeof(int_arg));
2500 if (int_arg) {
2501 d->bd_flags |= BPF_TRUNCATE;
2502 } else {
2503 d->bd_flags &= ~BPF_TRUNCATE;
2504 }
2505 break;
2506
2507 case BIOCGETUUID:
2508 bcopy(&d->bd_uuid, addr, sizeof(uuid_t));
2509 break;
2510
2511 case BIOCSETUP: {
2512 struct bpf_setup_args bsa;
2513 ifnet_t ifp;
2514
2515 bcopy(addr, &bsa, sizeof(struct bpf_setup_args));
2516 bsa.bsa_ifname[IFNAMSIZ - 1] = 0;
2517 ifp = ifunit(bsa.bsa_ifname);
2518 if (ifp == NULL) {
2519 error = ENXIO;
2520 os_log_error(OS_LOG_DEFAULT,
2521 "%s: ifnet not found for %s error %d",
2522 __func__, bsa.bsa_ifname, error);
2523 break;
2524 }
2525
2526 error = bpf_setup(d, bsa.bsa_uuid, ifp);
2527 break;
2528 }
2529 case BIOCSPKTHDRV2:
2530 bcopy(addr, &int_arg, sizeof(int_arg));
2531 if (int_arg != 0) {
2532 d->bd_flags |= BPF_PKTHDRV2;
2533 } else {
2534 d->bd_flags &= ~BPF_PKTHDRV2;
2535 }
2536 break;
2537
2538 case BIOCGPKTHDRV2:
2539 int_arg = d->bd_flags & BPF_PKTHDRV2 ? 1 : 0;
2540 bcopy(&int_arg, addr, sizeof(int_arg));
2541 break;
2542
2543 case BIOCGHDRCOMP:
2544 int_arg = d->bd_flags & BPF_COMP_REQ ? 1 : 0;
2545 bcopy(&int_arg, addr, sizeof(int_arg));
2546 break;
2547
2548 case BIOCSHDRCOMP:
2549 bcopy(addr, &int_arg, sizeof(int_arg));
if (int_arg != 0 && int_arg != 1) {
/* use the common exit path so the lock and the reference are released */
error = EINVAL;
break;
}
2553 if (d->bd_bif != 0 || (d->bd_flags & BPF_DETACHING)) {
/*
* Interface already attached, unable to change the header
* compression setting
*/
2557 error = EINVAL;
2558 break;
2559 }
2560 if (int_arg != 0) {
2561 d->bd_flags |= BPF_COMP_REQ;
2562 if (bpf_hdr_comp_enable != 0) {
2563 d->bd_flags |= BPF_COMP_ENABLED;
2564 }
2565 } else {
2566 d->bd_flags &= ~(BPF_COMP_REQ | BPF_COMP_ENABLED);
2567 }
2568 break;
2569
2570 case BIOCGHDRCOMPON:
2571 int_arg = d->bd_flags & BPF_COMP_ENABLED ? 1 : 0;
2572 bcopy(&int_arg, addr, sizeof(int_arg));
2573 break;
2574
2575 case BIOCGHDRCOMPSTATS: {
2576 struct bpf_comp_stats bcs = {};
2577
2578 bcs = d->bd_bcs;
2579
2580 bcopy(&bcs, addr, sizeof(bcs));
2581 break;
2582 }
2583 case BIOCSWRITEMAX:
2584 bcopy(addr, &int_arg, sizeof(int_arg));
if (int_arg > BPF_WRITE_MAX) {
os_log(OS_LOG_DEFAULT, "bpf%u BIOCSWRITEMAX %u too big",
d->bd_dev_minor, int_arg);
error = EINVAL;
break;
}
2591 d->bd_write_size_max = int_arg;
2592 break;
2593
2594 case BIOCGWRITEMAX:
2595 int_arg = d->bd_write_size_max;
2596 bcopy(&int_arg, addr, sizeof(int_arg));
2597 break;
2598
2599 case BIOCGBATCHWRITE: /* int */
2600 int_arg = d->bd_flags & BPF_BATCH_WRITE ? 1 : 0;
2601 bcopy(&int_arg, addr, sizeof(int_arg));
2602 break;
2603
2604 case BIOCSBATCHWRITE: /* int */
2605 bcopy(addr, &int_arg, sizeof(int_arg));
2606 if (int_arg != 0) {
2607 if (d->bd_hdrcmplt == 0) {
2608 os_log(OS_LOG_DEFAULT,
2609 "bpf%u cannot set BIOCSBATCHWRITE when BIOCSHDRCMPLT is not set",
2610 d->bd_dev_minor);
2611 error = EINVAL;
2612 break;
2613 }
2614 if (d->bd_traffic_class != 0) {
2615 os_log(OS_LOG_DEFAULT,
2616 "bpf%u cannot set BIOCSBATCHWRITE when BIOCSETTC is set",
2617 d->bd_dev_minor);
2618 error = EINVAL;
2619 break;
2620 }
2621 d->bd_flags |= BPF_BATCH_WRITE;
2622 } else {
2623 d->bd_flags &= ~BPF_BATCH_WRITE;
2624 }
2625 break;
2626 }
2627
2628 bpf_release_d(d);
2629 lck_mtx_unlock(bpf_mlock);
2630
2631 return error;
2632 }
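/*
 * Note the interlock among the write-related ioctls above: batch write
 * requires "header complete" and no traffic class. A conforming caller
 * sets the modes in this order (sketch only):
 *
 *	u_int on = 1;
 *	ioctl(fd, BIOCSHDRCMPLT, &on);		// required first
 *	ioctl(fd, BIOCSBATCHWRITE, &on);	// now accepted
 *
 * Once batch mode is on, BIOCSHDRCMPLT 0 and a nonzero BIOCSETTC are
 * rejected with EINVAL.
 */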
2633
2634 /*
* Set d's packet filter program to the one described by bf_len and
* bf_insns. If this file already has a filter, free it and replace it.
* Returns EINVAL for bogus requests.
2637 */
2638 static int
bpf_setf(struct bpf_d *d, u_int bf_len, user_addr_t bf_insns,
u_long cmd)
2641 {
2642 struct bpf_insn *fcode, *old;
2643 u_int flen, size;
2644
2645 while (d->bd_hbuf_read) {
2646 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setf", NULL);
2647 }
2648
2649 if ((d->bd_flags & BPF_CLOSING) != 0) {
2650 return ENXIO;
2651 }
2652
2653 old = d->bd_filter;
2654 if (bf_insns == USER_ADDR_NULL) {
2655 if (bf_len != 0) {
2656 return EINVAL;
2657 }
2658 d->bd_filter = NULL;
2659 reset_d(d);
2660 if (old != 0) {
2661 kfree_data_addr(old);
2662 }
2663 return 0;
2664 }
2665 flen = bf_len;
2666 if (flen > BPF_MAXINSNS) {
2667 return EINVAL;
2668 }
2669
2670 size = flen * sizeof(struct bpf_insn);
2671 fcode = (struct bpf_insn *) kalloc_data(size, Z_WAITOK | Z_ZERO);
2672 if (fcode == NULL) {
2673 return ENOMEM;
2674 }
2675 if (copyin(bf_insns, (caddr_t)fcode, size) == 0 &&
2676 bpf_validate(fcode, (int)flen)) {
2677 d->bd_filter = fcode;
2678
2679 if (cmd == BIOCSETF32 || cmd == BIOCSETF64) {
2680 reset_d(d);
2681 }
2682
2683 if (old != 0) {
2684 kfree_data_addr(old);
2685 }
2686
2687 return 0;
2688 }
2689 kfree_data(fcode, size);
2690 return EINVAL;
2691 }
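/*
 * For illustration, the smallest useful program a caller can install is
 * a single "accept the whole packet" instruction; user space would set
 * it up along these lines (sketch only):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),	// accept, max snap length
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 *
 * BIOCSETF* flushes buffered packets via reset_d(); the FNR variants
 * install the filter without the flush.
 */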
2692
2693 /*
* Detach a file from its current interface (if attached at all) and
* attach to the interface indicated by theywant.
* Return an errno or 0.
2697 */
2698 static int
bpf_setif(struct bpf_d *d, ifnet_t theywant, bool do_reset, bool has_hbuf_read_write,
bool has_bufs_allocated)
2701 {
2702 struct bpf_if *bp;
2703 int error;
2704
2705 while (!has_hbuf_read_write && (d->bd_hbuf_read || d->bd_hbuf_write)) {
2706 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setif", NULL);
2707 }
2708
2709 if ((d->bd_flags & BPF_CLOSING) != 0) {
2710 return ENXIO;
2711 }
2712
2713 /*
2714 * Look through attached interfaces for the named one.
2715 */
2716 for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
2717 struct ifnet *ifp = bp->bif_ifp;
2718
2719 if (ifp == 0 || ifp != theywant) {
2720 continue;
2721 }
2722 /*
2723 * Do not use DLT_PKTAP, unless requested explicitly
2724 */
2725 if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
2726 continue;
2727 }
2728 /*
2729 * Skip the coprocessor interface
2730 */
2731 if (!intcoproc_unrestricted && IFNET_IS_INTCOPROC(ifp)) {
2732 continue;
2733 }
2734 /*
2735 * We found the requested interface.
2736 * Allocate the packet buffers.
2737 */
2738 if (has_bufs_allocated == false) {
2739 error = bpf_allocbufs(d);
2740 if (error != 0) {
2741 return error;
2742 }
2743 }
2744 /*
2745 * Detach if attached to something else.
2746 */
2747 if (bp != d->bd_bif) {
2748 if (d->bd_bif != NULL) {
2749 if (bpf_detachd(d) != 0) {
2750 return ENXIO;
2751 }
2752 }
2753 if (bpf_attachd(d, bp) != 0) {
2754 return ENXIO;
2755 }
2756 }
2757 if (do_reset) {
2758 reset_d(d);
2759 }
2760 os_log(OS_LOG_DEFAULT, "bpf%u attached to %s",
2761 d->bd_dev_minor, if_name(theywant));
2762 return 0;
2763 }
2764 /* Not found. */
2765 return ENXIO;
2766 }
2767
2768 /*
* Get the list of data link types available for the interface.
2770 */
2771 static int
bpf_getdltlist(struct bpf_d *d, caddr_t addr, struct proc *p)
2773 {
2774 u_int n;
2775 int error;
2776 struct ifnet *ifp;
2777 struct bpf_if *bp;
2778 user_addr_t dlist;
2779 struct bpf_dltlist bfl;
2780
2781 bcopy(addr, &bfl, sizeof(bfl));
2782 if (proc_is64bit(p)) {
2783 dlist = (user_addr_t)bfl.bfl_u.bflu_pad;
2784 } else {
2785 dlist = CAST_USER_ADDR_T(bfl.bfl_u.bflu_list);
2786 }
2787
2788 ifp = d->bd_bif->bif_ifp;
2789 n = 0;
2790 error = 0;
2791
2792 for (bp = bpf_iflist; bp; bp = bp->bif_next) {
2793 if (bp->bif_ifp != ifp) {
2794 continue;
2795 }
2796 /*
2797 * Do not use DLT_PKTAP, unless requested explicitly
2798 */
2799 if (bp->bif_dlt == DLT_PKTAP && !(d->bd_flags & BPF_WANT_PKTAP)) {
2800 continue;
2801 }
2802 if (dlist != USER_ADDR_NULL) {
2803 if (n >= bfl.bfl_len) {
2804 return ENOMEM;
2805 }
2806 error = copyout(&bp->bif_dlt, dlist,
2807 sizeof(bp->bif_dlt));
2808 if (error != 0) {
2809 break;
2810 }
2811 dlist += sizeof(bp->bif_dlt);
2812 }
2813 n++;
2814 }
2815 bfl.bfl_len = n;
2816 bcopy(&bfl, addr, sizeof(bfl));
2817
2818 return error;
2819 }
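/*
 * The copyout logic above supports the usual two-call pattern from user
 * space (sketch only): call once with bfl_list == NULL so only the count
 * is returned, then allocate and call again:
 *
 *	struct bpf_dltlist bfl = {};
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// sets bfl.bfl_len to the count
 *	bfl.bfl_list = calloc(bfl.bfl_len, sizeof(u_int));
 *	ioctl(fd, BIOCGDLTLIST, &bfl);		// fills the array
 */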
2820
2821 /*
2822 * Set the data link type of a BPF instance.
2823 */
2824 static int
bpf_setdlt(struct bpf_d *d, uint32_t dlt)
2826 {
2827 int error, opromisc;
2828 struct ifnet *ifp;
2829 struct bpf_if *bp;
2830
2831 if (d->bd_bif->bif_dlt == dlt) {
2832 return 0;
2833 }
2834
2835 while (d->bd_hbuf_read) {
2836 msleep((caddr_t)d, bpf_mlock, PRINET, "bpf_setdlt", NULL);
2837 }
2838
2839 if ((d->bd_flags & BPF_CLOSING) != 0) {
2840 return ENXIO;
2841 }
2842
2843 ifp = d->bd_bif->bif_ifp;
2844 for (bp = bpf_iflist; bp; bp = bp->bif_next) {
2845 if (bp->bif_ifp == ifp && bp->bif_dlt == dlt) {
2846 /*
2847 * Do not use DLT_PKTAP, unless requested explicitly
2848 */
2849 if (bp->bif_dlt == DLT_PKTAP &&
2850 !(d->bd_flags & BPF_WANT_PKTAP)) {
2851 continue;
2852 }
2853 break;
2854 }
2855 }
2856 if (bp != NULL) {
2857 opromisc = d->bd_promisc;
2858 if (bpf_detachd(d) != 0) {
2859 return ENXIO;
2860 }
2861 error = bpf_attachd(d, bp);
2862 if (error != 0) {
2863 os_log_error(OS_LOG_DEFAULT,
2864 "bpf_setdlt: bpf%d bpf_attachd %s error %d",
2865 d->bd_dev_minor, if_name(bp->bif_ifp),
2866 error);
2867 return error;
2868 }
2869 reset_d(d);
2870 if (opromisc) {
2871 lck_mtx_unlock(bpf_mlock);
2872 error = ifnet_set_promiscuous(bp->bif_ifp, 1);
2873 lck_mtx_lock(bpf_mlock);
2874 if (error != 0) {
2875 os_log_error(OS_LOG_DEFAULT,
2876 "bpf_setdlt: bpf%d ifpromisc %s error %d",
2877 d->bd_dev_minor, if_name(bp->bif_ifp), error);
2878 } else {
2879 d->bd_promisc = 1;
2880 }
2881 }
2882 }
2883 return bp == NULL ? EINVAL : 0;
2884 }
2885
2886 static int
bpf_set_traffic_class(struct bpf_d *d, int tc)
2888 {
2889 int error = 0;
2890
2891 if (!SO_VALID_TC(tc)) {
2892 error = EINVAL;
2893 } else {
2894 d->bd_traffic_class = tc;
2895 }
2896
2897 return error;
2898 }
2899
2900 static void
bpf_set_packet_service_class(struct mbuf *m, int tc)
2902 {
2903 if (!(m->m_flags & M_PKTHDR)) {
2904 return;
2905 }
2906
2907 VERIFY(SO_VALID_TC(tc));
2908 (void) m_set_service_class(m, so_tc2msc(tc));
2909 }
2910
2911 /*
2912 * Support for select()
2913 *
2914 * Return true iff the specific operation will not block indefinitely.
2915 * Otherwise, return false but make a note that a selwakeup() must be done.
2916 */
2917 int
bpfselect(dev_t dev, int which, void *wql, struct proc *p)
2919 {
2920 struct bpf_d *d;
2921 int ret = 0;
2922
2923 lck_mtx_lock(bpf_mlock);
2924
2925 d = bpf_dtab[minor(dev)];
2926 if (d == NULL || d == BPF_DEV_RESERVED ||
2927 (d->bd_flags & BPF_CLOSING) != 0) {
2928 lck_mtx_unlock(bpf_mlock);
2929 return ENXIO;
2930 }
2931
2932 bpf_acquire_d(d);
2933
2934 if (d->bd_bif == NULL) {
2935 bpf_release_d(d);
2936 lck_mtx_unlock(bpf_mlock);
2937 return ENXIO;
2938 }
2939
2940 while (d->bd_hbuf_read) {
2941 msleep((caddr_t)d, bpf_mlock, PRINET, "bpfselect", NULL);
2942 }
2943
2944 if ((d->bd_flags & BPF_CLOSING) != 0) {
2945 bpf_release_d(d);
2946 lck_mtx_unlock(bpf_mlock);
2947 return ENXIO;
2948 }
2949
2950 switch (which) {
2951 case FREAD:
2952 if (d->bd_hlen != 0 ||
2953 ((d->bd_immediate ||
2954 d->bd_state == BPF_TIMED_OUT) && d->bd_slen != 0)) {
2955 ret = 1; /* read has data to return */
2956 } else {
2957 /*
2958 * Read has no data to return.
2959 * Make the select wait, and start a timer if
2960 * necessary.
2961 */
2962 selrecord(p, &d->bd_sel, wql);
2963 bpf_start_timer(d);
2964 }
2965 break;
2966
2967 case FWRITE:
2968 /* can't determine whether a write would block */
2969 ret = 1;
2970 break;
2971 }
2972
2973 bpf_release_d(d);
2974 lck_mtx_unlock(bpf_mlock);
2975
2976 return ret;
2977 }
2978
2979 /*
2980 * Support for kevent() system call. Register EVFILT_READ filters and
2981 * reject all others.
2982 */
2983 int bpfkqfilter(dev_t dev, struct knote *kn);
2984 static void filt_bpfdetach(struct knote *);
2985 static int filt_bpfread(struct knote *, long);
2986 static int filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev);
2987 static int filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev);
2988
2989 SECURITY_READ_ONLY_EARLY(struct filterops) bpfread_filtops = {
2990 .f_isfd = 1,
2991 .f_detach = filt_bpfdetach,
2992 .f_event = filt_bpfread,
2993 .f_touch = filt_bpftouch,
2994 .f_process = filt_bpfprocess,
2995 };
2996
2997 static int
filt_bpfread_common(struct knote *kn, struct kevent_qos_s *kev, struct bpf_d *d)
2999 {
3000 int ready = 0;
3001 int64_t data = 0;
3002
3003 if (d->bd_immediate) {
3004 /*
3005 * If there's data in the hold buffer, it's the
3006 * amount of data a read will return.
3007 *
3008 * If there's no data in the hold buffer, but
3009 * there's data in the store buffer, a read will
3010 * immediately rotate the store buffer to the
* hold buffer, so the amount of data in the store
3012 * buffer is the amount of data a read will
3013 * return.
3014 *
3015 * If there's no data in either buffer, we're not
3016 * ready to read.
3017 */
3018 data = (d->bd_hlen == 0 || d->bd_hbuf_read ?
3019 d->bd_slen : d->bd_hlen);
3020 int64_t lowwat = knote_low_watermark(kn);
3021 if (lowwat > d->bd_bufsize) {
3022 lowwat = d->bd_bufsize;
3023 }
3024 ready = (data >= lowwat);
3025 } else {
3026 /*
3027 * If there's data in the hold buffer, it's the
3028 * amount of data a read will return.
3029 *
3030 * If there's no data in the hold buffer, but
3031 * there's data in the store buffer, if the
3032 * timer has expired a read will immediately
3033 * rotate the store buffer to the hold buffer,
3034 * so the amount of data in the store buffer is
3035 * the amount of data a read will return.
3036 *
3037 * If there's no data in either buffer, or there's
3038 * no data in the hold buffer and the timer hasn't
3039 * expired, we're not ready to read.
3040 */
3041 data = ((d->bd_hlen == 0 || d->bd_hbuf_read) &&
3042 d->bd_state == BPF_TIMED_OUT ? d->bd_slen : d->bd_hlen);
3043 ready = (data > 0);
3044 }
3045 if (!ready) {
3046 bpf_start_timer(d);
3047 } else if (kev) {
3048 knote_fill_kevent(kn, kev, data);
3049 }
3050
3051 return ready;
3052 }
3053
3054 int
bpfkqfilter(dev_t dev, struct knote *kn)
3056 {
3057 struct bpf_d *d;
3058 int res;
3059
3060 /*
3061 * Is this device a bpf?
3062 */
3063 if (major(dev) != CDEV_MAJOR || kn->kn_filter != EVFILT_READ) {
3064 knote_set_error(kn, EINVAL);
3065 return 0;
3066 }
3067
3068 lck_mtx_lock(bpf_mlock);
3069
3070 d = bpf_dtab[minor(dev)];
3071
3072 if (d == NULL || d == BPF_DEV_RESERVED ||
3073 (d->bd_flags & BPF_CLOSING) != 0 ||
3074 d->bd_bif == NULL) {
3075 lck_mtx_unlock(bpf_mlock);
3076 knote_set_error(kn, ENXIO);
3077 return 0;
3078 }
3079
3080 kn->kn_filtid = EVFILTID_BPFREAD;
3081 knote_kn_hook_set_raw(kn, d);
3082 KNOTE_ATTACH(&d->bd_sel.si_note, kn);
3083 d->bd_flags |= BPF_KNOTE;
3084
3085 /* capture the current state */
3086 res = filt_bpfread_common(kn, NULL, d);
3087
3088 lck_mtx_unlock(bpf_mlock);
3089
3090 return res;
3091 }
3092
3093 static void
filt_bpfdetach(struct knote *kn)
3095 {
3096 struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
3097
3098 lck_mtx_lock(bpf_mlock);
3099 if (d->bd_flags & BPF_KNOTE) {
3100 KNOTE_DETACH(&d->bd_sel.si_note, kn);
3101 d->bd_flags &= ~BPF_KNOTE;
3102 }
3103 lck_mtx_unlock(bpf_mlock);
3104 }
3105
3106 static int
filt_bpfread(struct knote *kn, long hint)
3108 {
3109 #pragma unused(hint)
3110 struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
3111
3112 return filt_bpfread_common(kn, NULL, d);
3113 }
3114
3115 static int
filt_bpftouch(struct knote *kn, struct kevent_qos_s *kev)
3117 {
3118 struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
3119 int res;
3120
3121 lck_mtx_lock(bpf_mlock);
3122
3123 /* save off the lowat threshold and flag */
3124 kn->kn_sdata = kev->data;
3125 kn->kn_sfflags = kev->fflags;
3126
3127 /* output data will be re-generated here */
3128 res = filt_bpfread_common(kn, NULL, d);
3129
3130 lck_mtx_unlock(bpf_mlock);
3131
3132 return res;
3133 }
3134
3135 static int
filt_bpfprocess(struct knote *kn, struct kevent_qos_s *kev)
3137 {
3138 struct bpf_d *d = (struct bpf_d *)knote_kn_hook_get_raw(kn);
3139 int res;
3140
3141 lck_mtx_lock(bpf_mlock);
3142 res = filt_bpfread_common(kn, kev, d);
3143 lck_mtx_unlock(bpf_mlock);
3144
3145 return res;
3146 }
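/*
 * A sketch of how a user-space client drives these filter routines with
 * kevent(); note that NOTE_LOWAT is honored by filt_bpfread_common()
 * only in immediate mode and is capped at bd_bufsize:
 *
 *	struct kevent kev;
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, NOTE_LOWAT, 1024, NULL);
 *	kevent(kq, &kev, 1, NULL, 0, NULL);	// fires once >= 1024 bytes buffered
 */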
3147
3148 /*
3149 * Copy data from an mbuf chain into a buffer. This code is derived
3150 * from m_copydata in kern/uipc_mbuf.c.
3151 */
3152 static void
bpf_mcopy(struct mbuf *m, void *dst_arg, size_t len, size_t offset)
3154 {
3155 u_int count;
3156 u_char *dst;
3157
3158 dst = dst_arg;
3159
3160 while (offset >= m->m_len) {
3161 offset -= m->m_len;
3162 m = m->m_next;
3163 if (m == NULL) {
3164 panic("bpf_mcopy");
3165 }
3166 continue;
3167 }
3168
3169 while (len > 0) {
3170 if (m == NULL) {
3171 panic("bpf_mcopy");
3172 }
3173 count = MIN(m->m_len - (u_int)offset, (u_int)len);
3174 bcopy((u_char *)mbuf_data(m) + offset, dst, count);
3175 m = m->m_next;
3176 dst += count;
3177 len -= count;
3178 offset = 0;
3179 }
3180 }
3181
3182 static inline void
bpf_tap_imp(
ifnet_t ifp,
u_int32_t dlt,
struct bpf_packet *bpf_pkt,
int outbound)
3188 {
3189 struct bpf_d *d;
3190 u_int slen;
3191 struct bpf_if *bp;
3192
3193 /*
3194 * It's possible that we get here after the bpf descriptor has been
3195 * detached from the interface; in such a case we simply return.
3196 * Lock ordering is important since we can be called asynchronously
3197 * (from IOKit) to process an inbound packet; when that happens
3198 * we would have been holding its "gateLock" and will be acquiring
3199 * "bpf_mlock" upon entering this routine. Due to that, we release
3200 * "bpf_mlock" prior to calling ifnet_set_promiscuous (which will
3201 * acquire "gateLock" in the IOKit), in order to avoid a deadlock
* when an ifnet_set_promiscuous request simultaneously collides with
3203 * an inbound packet being passed into the tap callback.
3204 */
3205 lck_mtx_lock(bpf_mlock);
3206 if (ifp->if_bpf == NULL) {
3207 lck_mtx_unlock(bpf_mlock);
3208 return;
3209 }
3210 for (bp = ifp->if_bpf; bp != NULL; bp = bp->bif_next) {
3211 if (bp->bif_ifp != ifp) {
3212 /* wrong interface */
3213 bp = NULL;
3214 break;
3215 }
3216 if (dlt == 0 || bp->bif_dlt == dlt) {
3217 /* tapping default DLT or DLT matches */
3218 break;
3219 }
3220 }
3221 if (bp == NULL) {
3222 goto done;
3223 }
3224 for (d = bp->bif_dlist; d != NULL; d = d->bd_next) {
3225 struct bpf_packet *bpf_pkt_saved = bpf_pkt;
3226 struct bpf_packet bpf_pkt_tmp = {};
3227 struct pktap_header_buffer bpfp_header_tmp = {};
3228
3229 if (outbound && (d->bd_direction & BPF_D_OUT) == 0) {
3230 continue;
3231 }
3232 if (!outbound && (d->bd_direction & BPF_D_IN) == 0) {
3233 continue;
3234 }
3235
3236 ++d->bd_rcount;
3237 slen = bpf_filter(d->bd_filter, (u_char *)bpf_pkt,
3238 (u_int)bpf_pkt->bpfp_total_length, 0);
3239
3240 if (slen != 0) {
3241 if (bp->bif_ifp->if_type == IFT_PKTAP &&
3242 bp->bif_dlt == DLT_PKTAP) {
3243 if (d->bd_flags & BPF_TRUNCATE) {
3244 slen = min(slen, get_pkt_trunc_len(bpf_pkt));
3245 }
3246 /*
3247 * Need to copy the bpf_pkt because the conversion
3248 * to v2 pktap header modifies the content of the
3249 * bpfp_header
3250 */
3251 if ((d->bd_flags & BPF_PKTHDRV2) &&
3252 bpf_pkt->bpfp_header_length <= sizeof(bpfp_header_tmp)) {
3253 bpf_pkt_tmp = *bpf_pkt;
3254
3255 bpf_pkt = &bpf_pkt_tmp;
3256
3257 memcpy(&bpfp_header_tmp, bpf_pkt->bpfp_header,
3258 bpf_pkt->bpfp_header_length);
3259
3260 bpf_pkt->bpfp_header = &bpfp_header_tmp;
3261
3262 convert_to_pktap_header_to_v2(bpf_pkt,
3263 !!(d->bd_flags & BPF_TRUNCATE));
3264 }
3265 }
3266 ++d->bd_fcount;
3267 catchpacket(d, bpf_pkt, slen, outbound);
3268 }
3269 bpf_pkt = bpf_pkt_saved;
3270 }
3271
3272 done:
3273 lck_mtx_unlock(bpf_mlock);
3274 }
3275
3276 static inline void
bpf_tap_mbuf(
ifnet_t ifp,
u_int32_t dlt,
mbuf_t m,
void *hdr,
size_t hlen,
int outbound)
3284 {
3285 struct bpf_packet bpf_pkt;
3286 struct mbuf *m0;
3287
3288 if (ifp->if_bpf == NULL) {
3289 /* quickly check without taking lock */
3290 return;
3291 }
3292 bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
3293 bpf_pkt.bpfp_mbuf = m;
3294 bpf_pkt.bpfp_total_length = 0;
3295 for (m0 = m; m0 != NULL; m0 = m0->m_next) {
3296 bpf_pkt.bpfp_total_length += m0->m_len;
3297 }
3298 bpf_pkt.bpfp_header = hdr;
3299 if (hdr != NULL) {
3300 bpf_pkt.bpfp_total_length += hlen;
3301 bpf_pkt.bpfp_header_length = hlen;
3302 } else {
3303 bpf_pkt.bpfp_header_length = 0;
3304 }
3305 bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
3306 }
3307
3308 void
bpf_tap_out(
ifnet_t ifp,
u_int32_t dlt,
mbuf_t m,
void *hdr,
size_t hlen)
3315 {
3316 bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 1);
3317 }
3318
3319 void
bpf_tap_in(
ifnet_t ifp,
u_int32_t dlt,
mbuf_t m,
void *hdr,
size_t hlen)
3326 {
3327 bpf_tap_mbuf(ifp, dlt, m, hdr, hlen, 0);
3328 }
3329
3330 /* Callback registered with Ethernet driver. */
3331 static int
bpf_tap_callback(struct ifnet *ifp, struct mbuf *m)
3333 {
3334 bpf_tap_mbuf(ifp, 0, m, NULL, 0, mbuf_pkthdr_rcvif(m) == NULL);
3335
3336 return 0;
3337 }
3338
3339 #if SKYWALK
3340 #include <skywalk/os_skywalk_private.h>
3341
3342 static void
bpf_pktcopy(kern_packet_t pkt, void *dst_arg, size_t len, size_t offset)
3344 {
3345 kern_buflet_t buflet = NULL;
3346 size_t count;
3347 u_char *dst;
3348
3349 dst = dst_arg;
3350 while (len > 0) {
3351 uint8_t *addr;
3352
3353 u_int32_t buflet_length;
3354
3355 buflet = kern_packet_get_next_buflet(pkt, buflet);
3356 VERIFY(buflet != NULL);
3357 addr = kern_buflet_get_data_address(buflet);
3358 VERIFY(addr != NULL);
3359 addr += kern_buflet_get_data_offset(buflet);
3360 buflet_length = kern_buflet_get_data_length(buflet);
3361 if (offset >= buflet_length) {
3362 offset -= buflet_length;
3363 continue;
3364 }
3365 count = MIN(buflet_length - offset, len);
3366 bcopy((void *)(addr + offset), (void *)dst, count);
3367 dst += count;
3368 len -= count;
3369 offset = 0;
3370 }
3371 }
3372
3373 static inline void
bpf_tap_packet(
ifnet_t ifp,
u_int32_t dlt,
kern_packet_t pkt,
void *hdr,
size_t hlen,
int outbound)
3381 {
3382 struct bpf_packet bpf_pkt;
3383 struct mbuf * m;
3384
3385 if (ifp->if_bpf == NULL) {
3386 /* quickly check without taking lock */
3387 return;
3388 }
3389 m = kern_packet_get_mbuf(pkt);
3390 if (m != NULL) {
3391 bpf_pkt.bpfp_type = BPF_PACKET_TYPE_MBUF;
3392 bpf_pkt.bpfp_mbuf = m;
3393 bpf_pkt.bpfp_total_length = m_length(m);
3394 } else {
3395 bpf_pkt.bpfp_type = BPF_PACKET_TYPE_PKT;
3396 bpf_pkt.bpfp_pkt = pkt;
3397 bpf_pkt.bpfp_total_length = kern_packet_get_data_length(pkt);
3398 }
3399 bpf_pkt.bpfp_header = hdr;
3400 bpf_pkt.bpfp_header_length = hlen;
3401 if (hlen != 0) {
3402 bpf_pkt.bpfp_total_length += hlen;
3403 }
3404 bpf_tap_imp(ifp, dlt, &bpf_pkt, outbound);
3405 }
3406
3407 void
bpf_tap_packet_out(
ifnet_t ifp,
u_int32_t dlt,
kern_packet_t pkt,
void *hdr,
size_t hlen)
3414 {
3415 bpf_tap_packet(ifp, dlt, pkt, hdr, hlen, 1);
3416 }
3417
3418 void
bpf_tap_packet_in(
ifnet_t ifp,
u_int32_t dlt,
kern_packet_t pkt,
void *hdr,
size_t hlen)
3425 {
3426 bpf_tap_packet(ifp, dlt, pkt, hdr, hlen, 0);
3427 }
3428
3429 #endif /* SKYWALK */
3430
3431 static errno_t
bpf_copydata(struct bpf_packet *pkt, size_t off, size_t len, void *out_data)
3433 {
3434 errno_t err = 0;
3435 if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) {
3436 err = mbuf_copydata(pkt->bpfp_mbuf, off, len, out_data);
3437 #if SKYWALK
3438 } else if (pkt->bpfp_type == BPF_PACKET_TYPE_PKT) {
3439 err = kern_packet_copy_bytes(pkt->bpfp_pkt, off, len, out_data);
3440 #endif /* SKYWALK */
3441 } else {
3442 err = EINVAL;
3443 }
3444
3445 return err;
3446 }
3447
3448 static void
copy_bpf_packet_offset(struct bpf_packet *pkt, void *dst, size_t len, size_t offset)
3450 {
3451 /* copy the optional header */
3452 if (offset < pkt->bpfp_header_length) {
3453 size_t count = MIN(len, pkt->bpfp_header_length - offset);
3454 caddr_t src = (caddr_t)pkt->bpfp_header;
3455 bcopy(src + offset, dst, count);
3456 len -= count;
3457 dst = (void *)((uintptr_t)dst + count);
3458 offset = 0;
3459 } else {
3460 offset -= pkt->bpfp_header_length;
3461 }
3462
3463 if (len == 0) {
3464 /* nothing past the header */
3465 return;
3466 }
3467 /* copy the packet */
3468 switch (pkt->bpfp_type) {
3469 case BPF_PACKET_TYPE_MBUF:
3470 bpf_mcopy(pkt->bpfp_mbuf, dst, len, offset);
3471 break;
3472 #if SKYWALK
3473 case BPF_PACKET_TYPE_PKT:
3474 bpf_pktcopy(pkt->bpfp_pkt, dst, len, offset);
3475 break;
3476 #endif /* SKYWALK */
3477 default:
3478 break;
3479 }
3480 }
3481
3482 static void
copy_bpf_packet(struct bpf_packet *pkt, void *dst, size_t len)
3484 {
3485 copy_bpf_packet_offset(pkt, dst, len, 0);
3486 }
3487
3488 static uint32_t
get_esp_trunc_len(__unused struct bpf_packet *pkt, __unused uint32_t off,
const uint32_t remaining_caplen)
3491 {
3492 /*
3493 * For some reason tcpdump expects to have one byte beyond the ESP header
3494 */
3495 uint32_t trunc_len = ESP_HDR_SIZE + 1;
3496
3497 if (trunc_len > remaining_caplen) {
3498 return remaining_caplen;
3499 }
3500
3501 return trunc_len;
3502 }
3503
3504 static uint32_t
get_isakmp_trunc_len(__unused struct bpf_packet *pkt, __unused uint32_t off,
const uint32_t remaining_caplen)
3507 {
3508 /*
3509 * Include the payload generic header
3510 */
3511 uint32_t trunc_len = ISAKMP_HDR_SIZE;
3512
3513 if (trunc_len > remaining_caplen) {
3514 return remaining_caplen;
3515 }
3516
3517 return trunc_len;
3518 }
3519
3520 static uint32_t
get_isakmp_natt_trunc_len(struct bpf_packet *pkt, uint32_t off,
const uint32_t remaining_caplen)
3523 {
3524 int err = 0;
3525 uint32_t trunc_len = 0;
3526 char payload[remaining_caplen];
3527
3528 err = bpf_copydata(pkt, off, remaining_caplen, payload);
3529 if (err != 0) {
3530 return remaining_caplen;
3531 }
3532 /*
* There are three cases:
* - IKE: the payload starts with a 4-byte header of zeros before the ISAKMP header
* - keepalive: 1-byte payload
* - otherwise it's ESP
3537 */
3538 if (remaining_caplen >= 4 &&
3539 payload[0] == 0 && payload[1] == 0 &&
3540 payload[2] == 0 && payload[3] == 0) {
3541 trunc_len = 4 + get_isakmp_trunc_len(pkt, off + 4, remaining_caplen - 4);
3542 } else if (remaining_caplen == 1) {
3543 trunc_len = 1;
3544 } else {
3545 trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);
3546 }
3547
3548 if (trunc_len > remaining_caplen) {
3549 return remaining_caplen;
3550 }
3551
3552 return trunc_len;
3553 }
3554
3555 static uint32_t
get_udp_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3557 {
3558 int err = 0;
3559 uint32_t trunc_len = sizeof(struct udphdr); /* By default no UDP payload */
3560
3561 if (trunc_len >= remaining_caplen) {
3562 return remaining_caplen;
3563 }
3564
3565 struct udphdr udphdr;
3566 err = bpf_copydata(pkt, off, sizeof(struct udphdr), &udphdr);
3567 if (err != 0) {
3568 return remaining_caplen;
3569 }
3570
3571 u_short sport, dport;
3572
3573 sport = EXTRACT_SHORT(&udphdr.uh_sport);
3574 dport = EXTRACT_SHORT(&udphdr.uh_dport);
3575
3576 if (dport == PORT_DNS || sport == PORT_DNS) {
3577 /*
3578 * Full UDP payload for DNS
3579 */
3580 trunc_len = remaining_caplen;
3581 } else if ((sport == PORT_BOOTPS && dport == PORT_BOOTPC) ||
3582 (sport == PORT_BOOTPC && dport == PORT_BOOTPS)) {
3583 /*
3584 * Full UDP payload for BOOTP and DHCP
3585 */
3586 trunc_len = remaining_caplen;
3587 } else if (dport == PORT_ISAKMP && sport == PORT_ISAKMP) {
3588 /*
3589 * Return the ISAKMP header
3590 */
3591 trunc_len += get_isakmp_trunc_len(pkt, off + sizeof(struct udphdr),
3592 remaining_caplen - sizeof(struct udphdr));
3593 } else if (dport == PORT_ISAKMP_NATT && sport == PORT_ISAKMP_NATT) {
3594 trunc_len += get_isakmp_natt_trunc_len(pkt, off + sizeof(struct udphdr),
3595 remaining_caplen - sizeof(struct udphdr));
3596 }
3597 if (trunc_len >= remaining_caplen) {
3598 return remaining_caplen;
3599 }
3600
3601 return trunc_len;
3602 }
3603
3604 static uint32_t
get_tcp_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3606 {
3607 int err = 0;
3608 uint32_t trunc_len = sizeof(struct tcphdr); /* By default no TCP payload */
3609 if (trunc_len >= remaining_caplen) {
3610 return remaining_caplen;
3611 }
3612
3613 struct tcphdr tcphdr;
3614 err = bpf_copydata(pkt, off, sizeof(struct tcphdr), &tcphdr);
3615 if (err != 0) {
3616 return remaining_caplen;
3617 }
3618
3619 u_short sport, dport;
3620 sport = EXTRACT_SHORT(&tcphdr.th_sport);
3621 dport = EXTRACT_SHORT(&tcphdr.th_dport);
3622
3623 if (dport == PORT_DNS || sport == PORT_DNS) {
3624 /*
3625 * Full TCP payload for DNS
3626 */
3627 trunc_len = remaining_caplen;
3628 } else {
3629 trunc_len = (uint16_t)(tcphdr.th_off << 2);
3630 }
3631 if (trunc_len >= remaining_caplen) {
3632 return remaining_caplen;
3633 }
3634
3635 return trunc_len;
3636 }
3637
3638 static uint32_t
get_proto_trunc_len(uint8_t proto, struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3640 {
3641 uint32_t trunc_len;
3642
3643 switch (proto) {
3644 case IPPROTO_ICMP: {
3645 /*
* Full ICMP payload
3647 */
3648 trunc_len = remaining_caplen;
3649 break;
3650 }
3651 case IPPROTO_ICMPV6: {
3652 /*
* Full ICMPv6 payload
3654 */
3655 trunc_len = remaining_caplen;
3656 break;
3657 }
3658 case IPPROTO_IGMP: {
3659 /*
3660 * Full IGMP payload
3661 */
3662 trunc_len = remaining_caplen;
3663 break;
3664 }
3665 case IPPROTO_UDP: {
3666 trunc_len = get_udp_trunc_len(pkt, off, remaining_caplen);
3667 break;
3668 }
3669 case IPPROTO_TCP: {
3670 trunc_len = get_tcp_trunc_len(pkt, off, remaining_caplen);
3671 break;
3672 }
3673 case IPPROTO_ESP: {
3674 trunc_len = get_esp_trunc_len(pkt, off, remaining_caplen);
3675 break;
3676 }
3677 default: {
3678 /*
3679 * By default we only include the IP header
3680 */
3681 trunc_len = 0;
3682 break;
3683 }
3684 }
3685 if (trunc_len >= remaining_caplen) {
3686 return remaining_caplen;
3687 }
3688
3689 return trunc_len;
3690 }
3691
3692 static uint32_t
get_ip_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3694 {
3695 int err = 0;
3696 uint32_t iplen = sizeof(struct ip);
3697 if (iplen >= remaining_caplen) {
3698 return remaining_caplen;
3699 }
3700
3701 struct ip iphdr;
3702 err = bpf_copydata(pkt, off, sizeof(struct ip), &iphdr);
3703 if (err != 0) {
3704 return remaining_caplen;
3705 }
3706
3707 uint8_t proto = 0;
3708
3709 iplen = (uint16_t)(iphdr.ip_hl << 2);
3710 if (iplen >= remaining_caplen) {
3711 return remaining_caplen;
3712 }
3713
3714 proto = iphdr.ip_p;
3715 iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen);
3716
3717 if (iplen >= remaining_caplen) {
3718 return remaining_caplen;
3719 }
3720
3721 return iplen;
3722 }
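/*
 * ip_hl counts 32-bit words, hence the "<< 2" above: a header with no
 * options has ip_hl == 5, i.e. 20 bytes, and the maximum ip_hl of 15
 * yields 60 bytes. get_tcp_trunc_len() applies the same shift to th_off.
 */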
3723
3724 static uint32_t
get_ip6_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3726 {
3727 int err = 0;
3728 uint32_t iplen = sizeof(struct ip6_hdr);
3729 if (iplen >= remaining_caplen) {
3730 return remaining_caplen;
3731 }
3732
3733 struct ip6_hdr ip6hdr;
3734 err = bpf_copydata(pkt, off, sizeof(struct ip6_hdr), &ip6hdr);
3735 if (err != 0) {
3736 return remaining_caplen;
3737 }
3738
3739 uint8_t proto = 0;
3740
3741 /*
3742 * TBD: process the extension headers
3743 */
3744 proto = ip6hdr.ip6_nxt;
3745 iplen += get_proto_trunc_len(proto, pkt, off + iplen, remaining_caplen - iplen);
3746
3747 if (iplen >= remaining_caplen) {
3748 return remaining_caplen;
3749 }
3750
3751 return iplen;
3752 }
3753
3754 static uint32_t
get_ether_trunc_len(struct bpf_packet *pkt, uint32_t off, const uint32_t remaining_caplen)
3756 {
3757 int err = 0;
3758 uint32_t ethlen = sizeof(struct ether_header);
3759 if (ethlen >= remaining_caplen) {
3760 return remaining_caplen;
3761 }
3762
3763 struct ether_header eh = {};
3764 err = bpf_copydata(pkt, off, sizeof(struct ether_header), &eh);
3765 if (err != 0) {
3766 return remaining_caplen;
3767 }
3768
3769 u_short type = EXTRACT_SHORT(&eh.ether_type);
3770 /* Include full ARP */
3771 if (type == ETHERTYPE_ARP) {
3772 ethlen = remaining_caplen;
3773 } else if (type == ETHERTYPE_IP) {
3774 ethlen += get_ip_trunc_len(pkt, off + sizeof(struct ether_header),
3775 remaining_caplen - ethlen);
3776 } else if (type == ETHERTYPE_IPV6) {
3777 ethlen += get_ip6_trunc_len(pkt, off + sizeof(struct ether_header),
3778 remaining_caplen - ethlen);
3779 } else {
3780 ethlen = MIN(BPF_MIN_PKT_SIZE, remaining_caplen);
3781 }
3782 return ethlen;
3783 }
3784
3785 static uint32_t
get_pkt_trunc_len(struct bpf_packet *pkt)
3787 {
3788 struct pktap_header *pktap = (struct pktap_header *) (pkt->bpfp_header);
3789 uint32_t in_pkt_len = 0;
3790 uint32_t out_pkt_len = 0;
3791 uint32_t tlen = 0;
3792 uint32_t pre_adjust; // L2 header not in mbuf or kern_packet
3793
3794 // bpfp_total_length must contain the BPF packet header
3795 assert3u(pkt->bpfp_total_length, >=, pkt->bpfp_header_length);
3796
3797 // The BPF packet header must contain the pktap header
3798 assert3u(pkt->bpfp_header_length, >=, pktap->pth_length);
3799
3800 // The pre frame length (L2 header) must be contained in the packet
3801 assert3u(pkt->bpfp_total_length, >=, pktap->pth_length + pktap->pth_frame_pre_length);
3802
3803 /*
3804 * pktap->pth_frame_pre_length is the L2 header length and accounts
3805 * for both L2 header in the packet payload and pre_adjust.
3806 *
3807 * pre_adjust represents an adjustment for a pseudo L2 header that is not
3808 * part of packet payload -- not in the mbuf or kern_packet -- and comes
3809 * just after the pktap header.
3810 *
* pktap->pth_length is the size of the pktap header (excluding pre_adjust)
3812 *
3813 * pkt->bpfp_header_length is (pktap->pth_length + pre_adjust)
3814 */
3815 pre_adjust = (uint32_t)(pkt->bpfp_header_length - pktap->pth_length);
3816
3817 if (pktap->pth_iftype == IFT_ETHER) {
3818 /*
3819 * We need to parse the Ethernet header to find the network layer
3820 * protocol
3821 */
3822 in_pkt_len = (uint32_t)(pkt->bpfp_total_length - pktap->pth_length - pre_adjust);
3823
3824 out_pkt_len = get_ether_trunc_len(pkt, 0, in_pkt_len);
3825
3826 tlen = pktap->pth_length + pre_adjust + out_pkt_len;
3827 } else {
3828 /*
3829 * For other interface types, we only know to parse IPv4 and IPv6.
3830 *
* To get to the beginning of the IPv4 or IPv6 packet, we need to skip
3832 * over the L2 header that is the actual packet payload (mbuf or kern_packet)
3833 */
3834 uint32_t off; // offset past the L2 header in the actual packet payload
3835
3836 off = pktap->pth_frame_pre_length - pre_adjust;
3837
3838 in_pkt_len = (uint32_t)(pkt->bpfp_total_length - pktap->pth_length - pktap->pth_frame_pre_length);
3839
3840 if (pktap->pth_protocol_family == AF_INET) {
3841 out_pkt_len = get_ip_trunc_len(pkt, off, in_pkt_len);
3842 } else if (pktap->pth_protocol_family == AF_INET6) {
3843 out_pkt_len = get_ip6_trunc_len(pkt, off, in_pkt_len);
3844 } else {
3845 out_pkt_len = MIN(BPF_MIN_PKT_SIZE, in_pkt_len);
3846 }
3847 tlen = pktap->pth_length + pktap->pth_frame_pre_length + out_pkt_len;
3848 }
3849
3850 // Verify we do not overflow the buffer
3851 if (__improbable(tlen > pkt->bpfp_total_length)) {
3852 bool do_panic = bpf_debug != 0 ? true : false;
3853
3854 #if DEBUG
3855 do_panic = true;
3856 #endif /* DEBUG */
3857 if (do_panic) {
3858 panic("%s:%d tlen %u > bpfp_total_length %lu bpfp_header_length %lu pth_frame_pre_length %u pre_adjust %u in_pkt_len %u out_pkt_len %u",
3859 __func__, __LINE__,
3860 tlen, pkt->bpfp_total_length, pkt->bpfp_header_length, pktap->pth_frame_pre_length, pre_adjust, in_pkt_len, out_pkt_len);
3861 } else {
3862 os_log(OS_LOG_DEFAULT,
3863 "%s:%d tlen %u > bpfp_total_length %lu bpfp_header_length %lu pth_frame_pre_length %u pre_adjust %u in_pkt_len %u out_pkt_len %u",
3864 __func__, __LINE__,
3865 tlen, pkt->bpfp_total_length, pkt->bpfp_header_length, pktap->pth_frame_pre_length, pre_adjust, in_pkt_len, out_pkt_len);
3866 }
3867 bpf_trunc_overflow += 1;
3868 tlen = (uint32_t)pkt->bpfp_total_length;
3869 }
3870
3871 return tlen;
3872 }
3873
3874 static uint8_t
get_common_prefix_size(const void *a, const void *b, uint8_t max_bytes)
3876 {
3877 uint8_t max_words = max_bytes >> 2;
3878 const uint32_t *x = (const uint32_t *)a;
3879 const uint32_t *y = (const uint32_t *)b;
3880 uint8_t i;
3881
3882 for (i = 0; i < max_words; i++) {
3883 if (x[i] != y[i]) {
3884 break;
3885 }
3886 }
3887 return (uint8_t)(i << 2);
3888 }
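/*
 * The comparison is word-at-a-time, so the result is always a multiple
 * of 4 and never exceeds max_bytes rounded down to a word boundary.
 * Example: two buffers that first differ at byte 9 report a common
 * prefix of 8, because the third word (bytes 8-11) already mismatches.
 */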
3889
/*
* Move the packet data from the interface (pkt) into the store buffer,
* rotating the buffers when the store buffer fills up and waking any
* waiting reader when a rotation happens or immediate mode requires it.
*/
3895 static void
catchpacket(struct bpf_d *d, struct bpf_packet *pkt,
u_int snaplen, int outbound)
3898 {
3899 struct bpf_hdr *hp;
3900 struct bpf_hdr_ext *ehp;
3901 uint32_t totlen, curlen;
3902 uint32_t hdrlen, caplen;
3903 int do_wakeup = 0;
3904 u_char *payload;
3905 struct timeval tv;
3906
3907 hdrlen = (d->bd_flags & BPF_EXTENDED_HDR) ? d->bd_bif->bif_exthdrlen :
3908 (d->bd_flags & BPF_COMP_REQ) ? d->bd_bif->bif_comphdrlen:
3909 d->bd_bif->bif_hdrlen;
3910 /*
3911 * Figure out how many bytes to move. If the packet is
3912 * greater or equal to the snapshot length, transfer that
3913 * much. Otherwise, transfer the whole packet (unless
3914 * we hit the buffer size limit).
3915 */
3916 totlen = hdrlen + MIN(snaplen, (int)pkt->bpfp_total_length);
3917 if (totlen > d->bd_bufsize) {
3918 totlen = d->bd_bufsize;
3919 }
3920
3921 if (hdrlen > totlen) {
3922 return;
3923 }
3924
3925 /*
3926 * Round up the end of the previous packet to the next longword.
3927 */
3928 curlen = BPF_WORDALIGN(d->bd_slen);
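/*
 * e.g. with the default 4-byte BPF_ALIGNMENT, BPF_WORDALIGN(13) == 16,
 * so each record starts on a longword boundary.
 */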
3929 if (curlen + totlen > d->bd_bufsize) {
3930 /*
3931 * This packet will overflow the storage buffer.
3932 * Rotate the buffers if we can, then wakeup any
3933 * pending reads.
3934 *
3935 * We cannot rotate buffers if a read is in progress
3936 * so drop the packet
3937 */
3938 if (d->bd_hbuf_read) {
3939 ++d->bd_dcount;
3940 return;
3941 }
3942
3943 if (d->bd_fbuf == NULL) {
3944 if (d->bd_headdrop == 0) {
3945 /*
3946 * We haven't completed the previous read yet,
3947 * so drop the packet.
3948 */
3949 ++d->bd_dcount;
3950 return;
3951 }
3952 /*
3953 * Drop the hold buffer as it contains older packets
3954 */
3955 d->bd_dcount += d->bd_hcnt;
3956 d->bd_fbuf = d->bd_hbuf;
3957 ROTATE_BUFFERS(d);
3958 } else {
3959 ROTATE_BUFFERS(d);
3960 }
3961 do_wakeup = 1;
3962 curlen = 0;
3963 } else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
3964 /*
3965 * Immediate mode is set, or the read timeout has
3966 * already expired during a select call. A packet
3967 * arrived, so the reader should be woken up.
3968 */
3969 do_wakeup = 1;
3970 }
3971
3972 /*
3973 * Append the bpf header.
3974 */
3975 microtime(&tv);
3976 if (d->bd_flags & BPF_EXTENDED_HDR) {
3977 ehp = (struct bpf_hdr_ext *)(void *)(d->bd_sbuf + curlen);
3978 memset(ehp, 0, sizeof(*ehp));
3979 ehp->bh_tstamp.tv_sec = (int)tv.tv_sec;
3980 ehp->bh_tstamp.tv_usec = tv.tv_usec;
3981
3982 ehp->bh_datalen = (bpf_u_int32)pkt->bpfp_total_length;
3983 ehp->bh_hdrlen = (u_short)hdrlen;
3984 caplen = ehp->bh_caplen = totlen - hdrlen;
3985 payload = (u_char *)ehp + hdrlen;
3986
3987 if (outbound) {
3988 ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_OUT;
3989 } else {
3990 ehp->bh_flags |= BPF_HDR_EXT_FLAGS_DIR_IN;
3991 }
3992
3993 if (pkt->bpfp_type == BPF_PACKET_TYPE_MBUF) {
3994 struct mbuf *m = pkt->bpfp_mbuf;
3995
			if (outbound) {
				/* only do lookups on non-raw INPCB */
				if ((m->m_pkthdr.pkt_flags & (PKTF_FLOW_ID |
				    PKTF_FLOW_LOCALSRC | PKTF_FLOW_RAWSOCK)) ==
				    (PKTF_FLOW_ID | PKTF_FLOW_LOCALSRC) &&
				    m->m_pkthdr.pkt_flowsrc == FLOWSRC_INPCB) {
					ehp->bh_flowid = m->m_pkthdr.pkt_flowid;
					if (m->m_pkthdr.pkt_proto == IPPROTO_TCP) {
						ehp->bh_flags |= BPF_HDR_EXT_FLAGS_TCP;
					} else if (m->m_pkthdr.pkt_proto == IPPROTO_UDP) {
						ehp->bh_flags |= BPF_HDR_EXT_FLAGS_UDP;
					}
				}
				ehp->bh_svc = so_svc2tc(m->m_pkthdr.pkt_svc);
				if (m->m_pkthdr.pkt_flags & PKTF_TCP_REXMT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_START_SEQ) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_START_SEQ;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_LAST_PKT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT;
				}
				if (m->m_pkthdr.pkt_flags & PKTF_VALID_UNSENT_DATA) {
					ehp->bh_unsent_bytes =
					    m->m_pkthdr.bufstatus_if;
					ehp->bh_unsent_snd =
					    m->m_pkthdr.bufstatus_sndbuf;
				}
			} else {
				if (m->m_pkthdr.pkt_flags & PKTF_WAKE_PKT) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_WAKE_PKT;
				}
			}
#if SKYWALK
		} else {
			kern_packet_t kern_pkt = pkt->bpfp_pkt;
			packet_flowid_t flowid = 0;

			if (outbound) {
				/*
				 * Note: pp_init() asserts that
				 * kern_packet_svc_class_t is equivalent to
				 * mbuf_svc_class_t
				 */
				ehp->bh_svc = so_svc2tc((mbuf_svc_class_t)
				    kern_packet_get_service_class(kern_pkt));
				if (kern_packet_get_transport_retransmit(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_TCP_REXMT;
				}
				if (kern_packet_get_transport_last_packet(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_LAST_PKT;
				}
			} else {
				if (kern_packet_get_wake_flag(kern_pkt)) {
					ehp->bh_pktflags |= BPF_PKTFLAGS_WAKE_PKT;
				}
			}
			ehp->bh_trace_tag = kern_packet_get_trace_tag(kern_pkt);
			if (kern_packet_get_flowid(kern_pkt, &flowid) == 0) {
				ehp->bh_flowid = flowid;
			}
#endif /* SKYWALK */
		}
	} else {
		hp = (struct bpf_hdr *)(void *)(d->bd_sbuf + curlen);
		memset(hp, 0, BPF_WORDALIGN(sizeof(*hp)));
		hp->bh_tstamp.tv_sec = (int)tv.tv_sec;
		hp->bh_tstamp.tv_usec = tv.tv_usec;
		hp->bh_datalen = (bpf_u_int32)pkt->bpfp_total_length;
		hp->bh_hdrlen = (u_short)hdrlen;
		caplen = hp->bh_caplen = totlen - hdrlen;
		payload = (u_char *)hp + hdrlen;
	}
	if (d->bd_flags & BPF_COMP_REQ) {
		uint8_t common_prefix_size = 0;
		uint8_t copy_len = MIN((uint8_t)caplen, BPF_HDR_COMP_LEN_MAX);

		copy_bpf_packet(pkt, d->bd_prev_fbuf, copy_len);

		if (d->bd_prev_slen != 0) {
			common_prefix_size = get_common_prefix_size(d->bd_prev_fbuf,
			    d->bd_prev_sbuf, MIN(copy_len, d->bd_prev_slen));
		}

		if (d->bd_flags & BPF_COMP_ENABLED) {
			assert3u(caplen, >=, common_prefix_size);
			copy_bpf_packet_offset(pkt, payload, caplen - common_prefix_size,
			    common_prefix_size);
			d->bd_slen = curlen + totlen - common_prefix_size;
		} else {
			copy_bpf_packet(pkt, payload, caplen);
			d->bd_slen = curlen + totlen;
		}

		/*
		 * Update the caplen only if compression is enabled -- the caller
		 * must pay attention to bpf_hdr_comp_enable
		 */
		if (d->bd_flags & BPF_EXTENDED_HDR) {
			ehp->bh_complen = common_prefix_size;
			if (d->bd_flags & BPF_COMP_ENABLED) {
				ehp->bh_caplen -= common_prefix_size;
			}
		} else {
			struct bpf_comp_hdr *hcp;

			hcp = (struct bpf_comp_hdr *)(void *)(d->bd_sbuf + curlen);
			hcp->bh_complen = common_prefix_size;
			if (d->bd_flags & BPF_COMP_ENABLED) {
				hcp->bh_caplen -= common_prefix_size;
			}
		}

		if (common_prefix_size > 0) {
			d->bd_bcs.bcs_total_compressed_prefix_size += common_prefix_size;
			if (common_prefix_size > d->bd_bcs.bcs_max_compressed_prefix_size) {
				d->bd_bcs.bcs_max_compressed_prefix_size = common_prefix_size;
			}
			d->bd_bcs.bcs_count_compressed_prefix += 1;
		} else {
			d->bd_bcs.bcs_count_no_common_prefix += 1;
		}

		/* The current compression buffer becomes the previous one */
		caddr_t tmp = d->bd_prev_sbuf;
		d->bd_prev_sbuf = d->bd_prev_fbuf;
		d->bd_prev_slen = copy_len;
		d->bd_prev_fbuf = tmp;
	} else {
		/*
		 * Copy the packet data into the store buffer and update its length.
		 */
		copy_bpf_packet(pkt, payload, caplen);
		d->bd_slen = curlen + totlen;
	}
	d->bd_scnt += 1;
	d->bd_bcs.bcs_total_hdr_size += pkt->bpfp_header_length;
	d->bd_bcs.bcs_total_size += caplen;

	if (do_wakeup) {
		bpf_wakeup(d);
	}
}

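/*
 * Illustrative sketch (not compiled): how a reader of the store buffer can
 * undo the prefix compression performed above.  Only bh_hdrlen, bh_caplen
 * and bh_complen come from the record; the loop state (cp, prev, prev_len,
 * pktbuf) is hypothetical.  When compression is enabled, the first
 * bh_complen bytes of the capture are elided because they match the leading
 * bytes of the previous packet, so the reader resupplies them from its own
 * copy of that packet, then advances with the standard bpf record walk:
 *
 *	struct bpf_comp_hdr *hcp = (struct bpf_comp_hdr *)(void *)cp;
 *
 *	memcpy(pktbuf, prev, hcp->bh_complen);
 *	memcpy(pktbuf + hcp->bh_complen, cp + hcp->bh_hdrlen, hcp->bh_caplen);
 *
 *	// Save up to BPF_HDR_COMP_LEN_MAX leading bytes for the next record,
 *	// mirroring what the kernel keeps in bd_prev_sbuf.
 *	prev_len = MIN(hcp->bh_complen + hcp->bh_caplen, BPF_HDR_COMP_LEN_MAX);
 *	memcpy(prev, pktbuf, prev_len);
 *
 *	cp += BPF_WORDALIGN(hcp->bh_hdrlen + hcp->bh_caplen);
 */
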
static void
bpf_freebufs(struct bpf_d *d)
{
	if (d->bd_sbuf != NULL) {
		kfree_data_addr(d->bd_sbuf);
	}
	if (d->bd_hbuf != NULL) {
		kfree_data_addr(d->bd_hbuf);
	}
	if (d->bd_fbuf != NULL) {
		kfree_data_addr(d->bd_fbuf);
	}

	if (d->bd_prev_sbuf != NULL) {
		kfree_data_addr(d->bd_prev_sbuf);
	}
	if (d->bd_prev_fbuf != NULL) {
		kfree_data_addr(d->bd_prev_fbuf);
	}
}
/*
 * Initialize all nonzero fields of a descriptor.
 */
static int
bpf_allocbufs(struct bpf_d *d)
{
	bpf_freebufs(d);

	d->bd_fbuf = (caddr_t) kalloc_data(d->bd_bufsize, Z_WAITOK | Z_ZERO);
	if (d->bd_fbuf == NULL) {
		goto nobufs;
	}

	d->bd_sbuf = (caddr_t) kalloc_data(d->bd_bufsize, Z_WAITOK | Z_ZERO);
	if (d->bd_sbuf == NULL) {
		goto nobufs;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_scnt = 0;
	d->bd_hcnt = 0;

	d->bd_prev_slen = 0;
	if (d->bd_flags & BPF_COMP_REQ) {
		d->bd_prev_sbuf = (caddr_t) kalloc_data(BPF_HDR_COMP_LEN_MAX,
		    Z_WAITOK | Z_ZERO);
		if (d->bd_prev_sbuf == NULL) {
			goto nobufs;
		}
		d->bd_prev_fbuf = (caddr_t) kalloc_data(BPF_HDR_COMP_LEN_MAX,
		    Z_WAITOK | Z_ZERO);
		if (d->bd_prev_fbuf == NULL) {
			goto nobufs;
		}
	}
	return 0;
nobufs:
	bpf_freebufs(d);
	return ENOMEM;
}

/*
 * Free buffers currently in use by a descriptor.
 * Called on close.
 */
static void
bpf_freed(struct bpf_d *d)
{
	/*
	 * We don't need to lock out interrupts since this descriptor has
	 * been detached from its interface and it hasn't yet been marked
	 * free.
	 */
	if (d->bd_hbuf_read || d->bd_hbuf_write) {
		panic("bpf buffer freed during read/write");
	}

	bpf_freebufs(d);

	if (d->bd_filter) {
		kfree_data_addr(d->bd_filter);
	}
}

/*
 * Attach an interface to bpf.  dlt is the link layer type; hdrlen is the
 * fixed size of the link header (variable length headers not yet supported).
 */
void
bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	bpf_attach(ifp, dlt, hdrlen, NULL, NULL);
}

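/*
 * Typical call from an Ethernet driver's attach path (illustrative sketch;
 * actual call sites vary by driver):
 *
 *	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 */
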
errno_t
bpf_attach(
	ifnet_t ifp,
	u_int32_t dlt,
	u_int32_t hdrlen,
	bpf_send_func send,
	bpf_tap_func tap)
{
	struct bpf_if *bp;
	struct bpf_if *bp_new;
	struct bpf_if *bp_before_first = NULL;
	struct bpf_if *bp_first = NULL;
	struct bpf_if *bp_last = NULL;
	boolean_t found;

	/*
	 * Z_NOFAIL will cause a panic if the allocation fails
	 */
	bp_new = kalloc_type(struct bpf_if, Z_WAITOK | Z_NOFAIL | Z_ZERO);

	lck_mtx_lock(bpf_mlock);

	/*
	 * Check if this interface/dlt is already attached. Remember the
	 * first and last attachment for this interface, as well as the
	 * element before the first attachment.
	 */
	found = FALSE;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (bp->bif_ifp != ifp) {
			if (bp_first != NULL) {
				/* no more elements for this interface */
				break;
			}
			bp_before_first = bp;
		} else {
			if (bp->bif_dlt == dlt) {
				found = TRUE;
				break;
			}
			if (bp_first == NULL) {
				bp_first = bp;
			}
			bp_last = bp;
		}
	}
	if (found) {
		lck_mtx_unlock(bpf_mlock);
		os_log_error(OS_LOG_DEFAULT,
		    "bpfattach - %s with dlt %d is already attached",
		    if_name(ifp), dlt);
		kfree_type(struct bpf_if, bp_new);
		return EEXIST;
	}

	bp_new->bif_ifp = ifp;
	bp_new->bif_dlt = dlt;
	bp_new->bif_send = send;
	bp_new->bif_tap = tap;

	if (bp_first == NULL) {
		/* No other entries for this ifp */
		bp_new->bif_next = bpf_iflist;
		bpf_iflist = bp_new;
	} else {
		if (ifnet_type(ifp) == IFT_ETHER && dlt == DLT_EN10MB) {
			/* Make this the first entry for this interface */
			if (bp_before_first != NULL) {
				/* point the previous to us */
				bp_before_first->bif_next = bp_new;
			} else {
				/* we're the new head */
				bpf_iflist = bp_new;
			}
			bp_new->bif_next = bp_first;
		} else {
			/* Add this after the last entry for this interface */
			bp_new->bif_next = bp_last->bif_next;
			bp_last->bif_next = bp_new;
		}
	}

	/*
	 * Compute the length of the bpf header. This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp_new->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
	bp_new->bif_exthdrlen = BPF_WORDALIGN(hdrlen +
	    sizeof(struct bpf_hdr_ext)) - hdrlen;
	bp_new->bif_comphdrlen = BPF_WORDALIGN(hdrlen +
	    sizeof(struct bpf_comp_hdr)) - hdrlen;
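	/*
	 * Worked example for the classic header on Ethernet: with
	 * hdrlen = 14 and the historical 18-byte bpf_hdr,
	 * BPF_WORDALIGN(14 + 18) = 32, so bif_hdrlen = 32 - 14 = 18.
	 * Each record then carries 18 bytes of bpf header followed by the
	 * 14-byte link header, placing the network layer header at offset
	 * 32, a longword boundary.
	 */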

	/* Take a reference on the interface */
	ifnet_reference(ifp);

	lck_mtx_unlock(bpf_mlock);

	return 0;
}

/*
 * Detach bpf from an interface. This involves detaching each descriptor
 * associated with the interface, and leaving bd_bif NULL. Notify each
 * descriptor as it's detached so that any sleepers wake up and get
 * ENXIO.
 */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *bp_prev, *bp_next;
	struct bpf_d *d;

	if (bpf_debug != 0) {
		os_log(OS_LOG_DEFAULT, "%s: %s", __func__, if_name(ifp));
	}

	lck_mtx_lock(bpf_mlock);

	/*
	 * Build the list of devices attached to that interface
	 * that we need to free while keeping the lock to maintain
	 * the integrity of the interface list
	 */
	bp_prev = NULL;
	for (bp = bpf_iflist; bp != NULL; bp = bp_next) {
		bp_next = bp->bif_next;

		if (ifp != bp->bif_ifp) {
			bp_prev = bp;
			continue;
		}
		/* Unlink from the interface list */
		if (bp_prev) {
			bp_prev->bif_next = bp->bif_next;
		} else {
			bpf_iflist = bp->bif_next;
		}

		/* Detach the devices attached to the interface */
		while ((d = bp->bif_dlist) != NULL) {
			/*
			 * Take an extra reference to prevent the device
			 * from being freed when bpf_detachd() releases
			 * the reference for the interface list
			 */
			bpf_acquire_d(d);

			/*
			 * Wait for active reads and writes to complete
			 */
			while (d->bd_hbuf_read || d->bd_hbuf_write) {
				msleep((caddr_t)d, bpf_mlock, PRINET, "bpfdetach", NULL);
			}

			bpf_detachd(d);
			bpf_wakeup(d);
			bpf_release_d(d);
		}
		ifnet_release(ifp);
	}

	lck_mtx_unlock(bpf_mlock);
}

void
bpf_init(__unused void *unused)
{
	int maj;

	/* bpf_comp_hdr is an overlay of bpf_hdr */
	_CASSERT(BPF_WORDALIGN(sizeof(struct bpf_hdr)) ==
	    BPF_WORDALIGN(sizeof(struct bpf_comp_hdr)));

	/* the compression length must fit in a byte */
	_CASSERT(BPF_HDR_COMP_LEN_MAX <= UCHAR_MAX);

	(void) PE_parse_boot_argn("bpf_hdr_comp", &bpf_hdr_comp_enable,
	    sizeof(bpf_hdr_comp_enable));
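
	/*
	 * The default can therefore be overridden from the boot arguments,
	 * e.g. (illustrative):
	 *
	 *	nvram boot-args="bpf_hdr_comp=0"
	 */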

	if (bpf_devsw_installed == 0) {
		bpf_devsw_installed = 1;
		maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
		if (maj == -1) {
			bpf_devsw_installed = 0;
			os_log_error(OS_LOG_DEFAULT,
			    "bpf_init: failed to allocate a major number");
			return;
		}

		for (int i = 0; i < NBPFILTER; i++) {
			bpf_make_dev_t(maj);
		}
	}
}

static int
sysctl_bpf_maxbufsize SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = bpf_maxbufsize;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL) {
		return err;
	}

	if (i < 0 || i > BPF_BUFSIZE_CAP) {
		i = BPF_BUFSIZE_CAP;
	}

	bpf_maxbufsize = i;
	return err;
}
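
/*
 * The handler above backs a writable sysctl.  Assuming the knob is exposed
 * as debug.bpf_maxbufsize (the SYSCTL_PROC declaration is not visible in
 * this excerpt), it can be inspected and raised from user space:
 *
 *	sysctl debug.bpf_maxbufsize
 *	sysctl -w debug.bpf_maxbufsize=4194304
 */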

static int
sysctl_bpf_bufsize_cap SYSCTL_HANDLER_ARGS
{
#pragma unused(arg1, arg2)
	int i, err;

	i = BPF_BUFSIZE_CAP;

	err = sysctl_handle_int(oidp, &i, 0, req);
	if (err != 0 || req->newptr == USER_ADDR_NULL) {
		return err;
	}

	return err;
}

/*
 * Fill filter statistics
 */
static void
bpfstats_fill_xbpf(struct xbpf_d *d, struct bpf_d *bd)
{
	LCK_MTX_ASSERT(bpf_mlock, LCK_MTX_ASSERT_OWNED);

	d->bd_structsize = sizeof(struct xbpf_d);
	d->bd_promisc = bd->bd_promisc != 0 ? 1 : 0;
	d->bd_immediate = bd->bd_immediate != 0 ? 1 : 0;
	d->bd_hdrcmplt = bd->bd_hdrcmplt != 0 ? 1 : 0;
	d->bd_async = bd->bd_async != 0 ? 1 : 0;
	d->bd_headdrop = bd->bd_headdrop != 0 ? 1 : 0;
	d->bd_direction = (uint8_t)bd->bd_direction;
	d->bh_compreq = bd->bd_flags & BPF_COMP_REQ ? 1 : 0;
	d->bh_compenabled = bd->bd_flags & BPF_COMP_ENABLED ? 1 : 0;
	d->bd_exthdr = bd->bd_flags & BPF_EXTENDED_HDR ? 1 : 0;
	d->bd_trunc = bd->bd_flags & BPF_TRUNCATE ? 1 : 0;
	d->bd_pkthdrv2 = bd->bd_flags & BPF_PKTHDRV2 ? 1 : 0;

	d->bd_dev_minor = (uint8_t)bd->bd_dev_minor;

	d->bd_sig = bd->bd_sig;

	d->bd_rcount = bd->bd_rcount;
	d->bd_dcount = bd->bd_dcount;
	d->bd_fcount = bd->bd_fcount;
	d->bd_wcount = bd->bd_wcount;
	d->bd_wdcount = bd->bd_wdcount;
	d->bd_slen = bd->bd_slen;
	d->bd_hlen = bd->bd_hlen;
	d->bd_bufsize = bd->bd_bufsize;
	d->bd_pid = bd->bd_pid;
	if (bd->bd_bif != NULL && bd->bd_bif->bif_ifp != NULL) {
		strlcpy(d->bd_ifname,
		    bd->bd_bif->bif_ifp->if_xname, IFNAMSIZ);
	}

	d->bd_comp_count = bd->bd_bcs.bcs_count_compressed_prefix;
	d->bd_comp_size = bd->bd_bcs.bcs_total_compressed_prefix_size;

	d->bd_scnt = bd->bd_scnt;
	d->bd_hcnt = bd->bd_hcnt;

	d->bd_read_count = bd->bd_bcs.bcs_total_read;
	d->bd_fsize = bd->bd_bcs.bcs_total_size;
}

/*
 * Handle `netstat -B' stats request
 */
static int
sysctl_bpf_stats SYSCTL_HANDLER_ARGS
{
	int error;
	struct xbpf_d *xbdbuf;
	unsigned int x_cnt;
	vm_size_t buf_size;

	if (req->oldptr == USER_ADDR_NULL) {
		return SYSCTL_OUT(req, 0, nbpfilter * sizeof(struct xbpf_d));
	}
	if (nbpfilter == 0) {
		return SYSCTL_OUT(req, 0, 0);
	}
	buf_size = req->oldlen;
	if (buf_size > BPF_MAX_DEVICES * sizeof(struct xbpf_d)) {
		buf_size = BPF_MAX_DEVICES * sizeof(struct xbpf_d);
	}
	xbdbuf = kalloc_data(buf_size, Z_WAITOK | Z_ZERO);

	lck_mtx_lock(bpf_mlock);
	if (buf_size < (nbpfilter * sizeof(struct xbpf_d))) {
		lck_mtx_unlock(bpf_mlock);
		kfree_data(xbdbuf, buf_size);
		return ENOMEM;
	}
	x_cnt = 0;
	unsigned int i;

	for (i = 0; i < nbpfilter; i++) {
		struct bpf_d *bd = bpf_dtab[i];
		struct xbpf_d *xbd;

		if (bd == NULL || bd == BPF_DEV_RESERVED ||
		    (bd->bd_flags & BPF_CLOSING) != 0) {
			continue;
		}
		VERIFY(x_cnt < nbpfilter);

		xbd = &xbdbuf[x_cnt++];
		bpfstats_fill_xbpf(xbd, bd);
	}
	lck_mtx_unlock(bpf_mlock);

	error = SYSCTL_OUT(req, xbdbuf, x_cnt * sizeof(struct xbpf_d));
	kfree_data(xbdbuf, buf_size);
	return error;
}

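/*
 * Illustrative user space consumer of the handler above, following the
 * two-pass pattern it implements (a NULL oldptr probe to size the buffer,
 * then the actual fetch).  The MIB name "net.bpf.stats" is an assumption
 * based on the `netstat -B' comment; check the SYSCTL_PROC declaration for
 * the real name.
 *
 *	size_t len = 0;
 *	if (sysctlbyname("net.bpf.stats", NULL, &len, NULL, 0) == -1)
 *		err(1, "sysctlbyname");
 *	struct xbpf_d *buf = calloc(1, len);
 *	if (buf == NULL)
 *		err(1, "calloc");
 *	if (sysctlbyname("net.bpf.stats", buf, &len, NULL, 0) == -1)
 *		err(1, "sysctlbyname");
 *	for (size_t n = 0; n < len / sizeof(*buf); n++)
 *		printf("%s: recv %llu drop %llu\n", buf[n].bd_ifname,
 *		    (unsigned long long)buf[n].bd_rcount,
 *		    (unsigned long long)buf[n].bd_dcount);
 *	free(buf);
 */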