1 /*
2 * Copyright (c) 2000-2022 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1989, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * From: @(#)if.h 8.1 (Berkeley) 6/10/93
61 * $FreeBSD: src/sys/net/if_var.h,v 1.18.2.7 2001/07/24 19:10:18 brooks Exp $
62 */
63
64 #ifndef _NET_IF_VAR_PRIVATE_H_
65 #define _NET_IF_VAR_PRIVATE_H_
66
67 #ifndef DRIVERKIT
68 #ifndef DRIVERKIT_PRIVATE
69 #include <net/if_var_status.h>
70 #endif
71 #include <net/route.h>
72 #include <stdint.h>
73 #include <sys/types.h>
74 #ifdef KERNEL_PRIVATE
75 #include <kern/locks.h>
76 #endif /* KERNEL_PRIVATE */
77
/*
 * Per-traffic-class packet and byte counters for an interface.
 * The ifi_i* fields count input (received) traffic and the ifi_o* fields
 * count output (sent) traffic; BE/BK/VI/VO correspond to the TC_BE/TC_BK/
 * TC_VI/TC_VO service classes, and the *pv* fields count privileged-class
 * traffic.
 */
struct if_traffic_class {
	u_int64_t ifi_ibepackets;       /* TC_BE packets received on interface */
	u_int64_t ifi_ibebytes;         /* TC_BE bytes received on interface */
	u_int64_t ifi_obepackets;       /* TC_BE packets sent on interface */
	u_int64_t ifi_obebytes;         /* TC_BE bytes sent on interface */
	u_int64_t ifi_ibkpackets;       /* TC_BK packets received on interface */
	u_int64_t ifi_ibkbytes;         /* TC_BK bytes received on interface */
	u_int64_t ifi_obkpackets;       /* TC_BK packets sent on interface */
	u_int64_t ifi_obkbytes;         /* TC_BK bytes sent on interface */
	u_int64_t ifi_ivipackets;       /* TC_VI packets received on interface */
	u_int64_t ifi_ivibytes;         /* TC_VI bytes received on interface */
	u_int64_t ifi_ovipackets;       /* TC_VI packets sent on interface */
	u_int64_t ifi_ovibytes;         /* TC_VI bytes sent on interface */
	u_int64_t ifi_ivopackets;       /* TC_VO packets received on interface */
	u_int64_t ifi_ivobytes;         /* TC_VO bytes received on interface */
	u_int64_t ifi_ovopackets;       /* TC_VO packets sent on interface */
	u_int64_t ifi_ovobytes;         /* TC_VO bytes sent on interface */
	u_int64_t ifi_ipvpackets;       /* TC priv packets received on interface */
	u_int64_t ifi_ipvbytes;         /* TC priv bytes received on interface */
	u_int64_t ifi_opvpackets;       /* TC priv packets sent on interface */
	u_int64_t ifi_opvbytes;         /* TC priv bytes sent on interface */
};
100
/*
 * Statistics that extend the basic if_data counters; the reserved
 * array leaves room for future counters without changing the size
 * of the structure.
 */
struct if_data_extended {
	u_int64_t ifi_alignerrs;        /* unaligned (32-bit) input pkts */
	u_int64_t ifi_dt_bytes;         /* Data threshold counter */
	u_int64_t ifi_fpackets;         /* forwarded packets on interface */
	u_int64_t ifi_fbytes;           /* forwarded bytes on interface */
	u_int64_t reserved[12];         /* for future */
};
108
/*
 * Per-interface counters of packets dropped or flagged by the TCP and
 * UDP input paths; each field counts occurrences of the condition named
 * by its suffix.
 * NOTE(review): individual counter semantics below are inferred from the
 * field names — confirm against the TCP/UDP input call sites.
 */
struct if_packet_stats {
	/* TCP */
	u_int64_t ifi_tcp_badformat;            /* malformed segments */
	u_int64_t ifi_tcp_unspecv6;             /* unspecified IPv6 address */
	u_int64_t ifi_tcp_synfin;               /* SYN and FIN set together */
	u_int64_t ifi_tcp_badformatipsec;       /* malformed, IPsec in use */
	u_int64_t ifi_tcp_noconnnolist;         /* no connection, no listener */
	u_int64_t ifi_tcp_noconnlist;           /* no connection, listener present */
	u_int64_t ifi_tcp_listbadsyn;           /* bad SYN to a listener */
	u_int64_t ifi_tcp_icmp6unreach;         /* ICMPv6 unreachable */
	u_int64_t ifi_tcp_deprecate6;           /* deprecated IPv6 address */
	u_int64_t ifi_tcp_rstinsynrcv;          /* RST while in SYN_RCVD */
	u_int64_t ifi_tcp_ooopacket;            /* out-of-order packet */
	u_int64_t ifi_tcp_dospacket;            /* suspected DoS packet */
	u_int64_t ifi_tcp_cleanup;              /* dropped during cleanup */
	u_int64_t ifi_tcp_synwindow;            /* SYN outside window */
	u_int64_t reserved[6];                  /* for future use */
	/* UDP */
	u_int64_t ifi_udp_port_unreach;         /* destination port unreachable */
	u_int64_t ifi_udp_faithprefix;          /* FAITH prefix match */
	u_int64_t ifi_udp_port0;                /* destination port 0 */
	u_int64_t ifi_udp_badlength;            /* bad datagram length */
	u_int64_t ifi_udp_badchksum;            /* bad checksum */
	u_int64_t ifi_udp_badmcast;             /* bad multicast datagram */
	u_int64_t ifi_udp_cleanup;              /* dropped during cleanup */
	u_int64_t ifi_udp_badipsec;             /* IPsec check failure */
	u_int64_t _reserved[4];                 /* for future use */
};
137
/*
 * Variable-length, user-settable description attached to an interface;
 * ifd_desc points to a caller-managed buffer of at most ifd_maxlen bytes.
 */
struct if_description {
	u_int32_t ifd_maxlen;   /* must be IF_DESCSIZE */
	u_int32_t ifd_len;      /* actual ifd_desc length */
	u_int8_t *ifd_desc;     /* ptr to desc buffer */
};
143
/* Effective and maximum theoretical bandwidth of a link direction. */
struct if_bandwidths {
	uint64_t eff_bw;        /* effective bandwidth */
	uint64_t max_bw;        /* maximum theoretical bandwidth */
};
148
/* Effective and maximum theoretical latency of a link direction. */
struct if_latencies {
	u_int64_t eff_lt;       /* effective latency */
	u_int64_t max_lt;       /* maximum theoretical latency */
};
153
/* Packet-scheduler model selector for NetEm (see struct if_netem_params). */
typedef enum {
	IF_NETEM_MODEL_NULL = 0,        /* no model selected / disabled */
	IF_NETEM_MODEL_NLC = 1,
} if_netem_model_t;
158
/* Fixed-point scale for the probability fields below (5 decimal digits). */
#define IF_NETEM_PARAMS_PSCALE 100000
/*
 * Parameters of the NetEm network emulation scheduler: bandwidth limit,
 * latency/jitter, probabilistic corruption/duplication/reordering, and a
 * 4-state Markov loss model.
 */
struct if_netem_params {
	/* packet scheduler model */
	if_netem_model_t ifnetem_model;

	/* bandwidth limit */
	uint64_t ifnetem_bandwidth_bps;

	/* latency (normal distribution with jitter as stdev) */
	uint32_t ifnetem_latency_ms;
	uint32_t ifnetem_jitter_ms;

	/*
	 * NetEm probabilistic model parameters have a scaling factor of
	 * 100,000 (IF_NETEM_PARAMS_PSCALE) for 5 digits of precision. For
	 * instance, probability 12.345% is expressed as the uint32_t
	 * fixed-point value 12345 in the ifnetem_*_p variables below.
	 */
	/* random packet corruption */
	uint32_t ifnetem_corruption_p;

	/* random packet duplication */
	uint32_t ifnetem_duplication_p;

	/* 4 state Markov loss model */
	uint32_t ifnetem_loss_p_gr_gl;  /* P( gap_loss | gap_rx ) */
	uint32_t ifnetem_loss_p_gr_bl;  /* P( burst_loss | gap_rx ) */
	uint32_t ifnetem_loss_p_bl_br;  /* P( burst_rx | burst_loss ) */
	uint32_t ifnetem_loss_p_bl_gr;  /* P( gap_rx | burst_loss ) */
	uint32_t ifnetem_loss_p_br_bl;  /* P( burst_loss | burst_rx ) */

	uint32_t ifnetem_loss_recovery_ms;      /* time to recover from loss */

	/* random packet reordering */
	uint32_t ifnetem_reordering_p;  /* reorder probability */

	/*
	 * The NetEm output scheduler is, by default, woken up upon input
	 * events as well as on a timer interval, to avoid excessive delay.
	 * If ifnetem_output_ival_ms is set to a non-zero value, it overrides
	 * the default output interval and also disables output scheduler
	 * wakeup upon input events.
	 */
	uint32_t ifnetem_output_ival_ms;        /* output interval */
};
203
/*
 * Opportunistic receive-polling statistics: mode-change request counters,
 * and wakeup/packet/byte averages with their low/high watermarks.
 */
struct if_rxpoll_stats {
	u_int32_t ifi_poll_off_req;     /* total # of POLL_OFF reqs */
	u_int32_t ifi_poll_off_err;     /* total # of POLL_OFF errors */
	u_int32_t ifi_poll_on_req;      /* total # of POLL_ON reqs */
	u_int32_t ifi_poll_on_err;      /* total # of POLL_ON errors */

	u_int32_t ifi_poll_wakeups_avg;         /* avg # of wakeup reqs */
	u_int32_t ifi_poll_wakeups_lowat;       /* wakeups low watermark */
	u_int32_t ifi_poll_wakeups_hiwat;       /* wakeups high watermark */

	u_int64_t ifi_poll_packets;             /* total # of polled packets */
	u_int32_t ifi_poll_packets_avg;         /* average polled packets */
	u_int32_t ifi_poll_packets_min;         /* smallest polled packets */
	u_int32_t ifi_poll_packets_max;         /* largest polled packets */
	u_int32_t ifi_poll_packets_lowat;       /* packets low watermark */
	u_int32_t ifi_poll_packets_hiwat;       /* packets high watermark */

	u_int64_t ifi_poll_bytes;               /* total # of polled bytes */
	u_int32_t ifi_poll_bytes_avg;           /* average polled bytes */
	u_int32_t ifi_poll_bytes_min;           /* smallest polled bytes */
	u_int32_t ifi_poll_bytes_max;           /* largest polled bytes */
	u_int32_t ifi_poll_bytes_lowat;         /* bytes low watermark */
	u_int32_t ifi_poll_bytes_hiwat;         /* bytes high watermark */

	u_int32_t ifi_poll_packets_limit;       /* max packets per poll call */
	u_int64_t ifi_poll_interval_time;       /* poll interval (nsec) */
};
231
/*
 * Receive interrupt-mitigation statistics and the currently selected
 * mitigation configuration (packet/byte watermarks and delay interval).
 */
struct if_netif_stats {
	u_int64_t ifn_rx_mit_interval;  /* rx mitigation ival (nsec) */
	u_int32_t ifn_rx_mit_mode;      /* 0: static, 1: dynamic */
	u_int32_t ifn_rx_mit_packets_avg;       /* average # of packets */
	u_int32_t ifn_rx_mit_packets_min;       /* smallest # of packets */
	u_int32_t ifn_rx_mit_packets_max;       /* largest # of packets */
	u_int32_t ifn_rx_mit_bytes_avg;         /* average # of bytes */
	u_int32_t ifn_rx_mit_bytes_min;         /* smallest # of bytes */
	u_int32_t ifn_rx_mit_bytes_max;         /* largest # of bytes */
	u_int32_t ifn_rx_mit_cfg_idx;           /* current config selector */
	u_int32_t ifn_rx_mit_cfg_packets_lowat; /* pkts low watermark */
	u_int32_t ifn_rx_mit_cfg_packets_hiwat; /* pkts high watermark */
	u_int32_t ifn_rx_mit_cfg_bytes_lowat;   /* bytes low watermark */
	u_int32_t ifn_rx_mit_cfg_bytes_hiwat;   /* bytes high watermark */
	u_int32_t ifn_rx_mit_cfg_interval;      /* delay interval (nsec) */
};
248
/*
 * TCP performance statistics, gathered separately for connections that
 * negotiated ECN and those that did not (see struct if_tcp_ecn_stat).
 * NOTE(review): field semantics inferred from names — confirm against
 * the TCP stack's update sites.
 */
struct if_tcp_ecn_perf_stat {
	u_int64_t total_txpkts;         /* total transmitted packets */
	u_int64_t total_rxmitpkts;      /* total retransmitted packets */
	u_int64_t total_rxpkts;         /* total received packets */
	u_int64_t total_oopkts;         /* total out-of-order packets */
	u_int64_t total_reorderpkts;    /* total reordered packets */
	u_int64_t rtt_avg;              /* average round-trip time */
	u_int64_t rtt_var;              /* round-trip time variance */
	u_int64_t sack_episodes;        /* SACK recovery episodes */
	u_int64_t rxmit_drop;           /* drops after retransmission */
	u_int64_t rst_drop;             /* drops due to RST */
	u_int64_t oo_percent;           /* out-of-order packet rate */
	u_int64_t reorder_percent;      /* reordered packet rate */
	u_int64_t rxmit_percent;        /* retransmission rate */
};
264
/*
 * Per-interface ECN (Explicit Congestion Notification) counters:
 * negotiation attempts/successes, CE/ECE receipt, fallback reasons,
 * plus performance stats split by whether ECN was negotiated.
 * NOTE(review): individual counter semantics inferred from names —
 * confirm against the TCP ECN accounting code.
 */
struct if_tcp_ecn_stat {
	u_int64_t timestamp;            /* time the snapshot was taken */
	u_int64_t ecn_client_setup;     /* ECN requested as client */
	u_int64_t ecn_server_setup;     /* ECN requested as server */
	u_int64_t ecn_client_success;   /* client negotiation succeeded */
	u_int64_t ecn_server_success;   /* server negotiation succeeded */
	u_int64_t ecn_peer_nosupport;   /* peer did not support ECN */
	u_int64_t ecn_syn_lost;
	u_int64_t ecn_synack_lost;
	u_int64_t ecn_recv_ce;          /* CE marks received */
	u_int64_t ecn_recv_ece;         /* ECE flags received */
	u_int64_t ecn_conn_recv_ce;
	u_int64_t ecn_conn_recv_ece;
	u_int64_t ecn_conn_plnoce;
	u_int64_t ecn_conn_plce;
	u_int64_t ecn_conn_noplce;
	u_int64_t ecn_fallback_synloss; /* fallback: SYN loss */
	u_int64_t ecn_fallback_reorder; /* fallback: reordering */
	u_int64_t ecn_fallback_ce;      /* fallback: CE marks */
	u_int64_t ecn_off_conn;         /* connections without ECN */
	u_int64_t ecn_total_conn;       /* total connections */
	u_int64_t ecn_fallback_droprst;
	u_int64_t ecn_fallback_droprxmt;
	u_int64_t ecn_fallback_synrst;
	struct if_tcp_ecn_perf_stat ecn_on;     /* perf stats, ECN on */
	struct if_tcp_ecn_perf_stat ecn_off;    /* perf stats, ECN off */
};
292
/*
 * Link performance measurements used for "Low Internet" detection.
 * NOTE(review): "dl"/"ul" presumably stand for downlink/uplink — confirm
 * against the code that populates this structure.
 */
struct if_lim_perf_stat {
	u_int64_t lim_dl_max_bandwidth; /* bits per second */
	u_int64_t lim_ul_max_bandwidth; /* bits per second */
	u_int64_t lim_total_txpkts;     /* Total transmit packets, count */
	u_int64_t lim_total_rxpkts;     /* Total receive packets, count */
	u_int64_t lim_total_retxpkts;   /* Total retransmit packets */
	u_int64_t lim_packet_loss_percent;      /* Packet loss rate */
	u_int64_t lim_total_oopkts;     /* Total out-of-order packets */
	u_int64_t lim_packet_ooo_percent;       /* Out-of-order packet rate */
	u_int64_t lim_rtt_variance;     /* RTT variance, milliseconds */
	u_int64_t lim_rtt_average;      /* RTT average, milliseconds */
	u_int64_t lim_rtt_min;          /* RTT minimum, milliseconds */
	u_int64_t lim_conn_timeouts;    /* connection timeouts */
	u_int64_t lim_conn_attempts;    /* connection attempts */
	u_int64_t lim_conn_timeout_percent;     /* Rate of connection timeouts */
	u_int64_t lim_bk_txpkts;        /* Transmit packets with BK service class, that use delay based algorithms */
	u_int64_t lim_dl_detected:1,    /* Low internet */
	    lim_ul_detected:1;
};
312
#define IF_VAR_H_HAS_IFNET_STATS_PER_FLOW 1
/*
 * Per-flow TCP statistics reported against an interface. The trailing
 * bit-fields record one-bit flow attributes/events: address family,
 * locality, resets/timeouts, and the various ECN fallback reasons.
 * NOTE(review): field semantics inferred from names — confirm against
 * the code that fills this structure in.
 */
struct ifnet_stats_per_flow {
	u_int64_t bk_txpackets;         /* BK service-class tx packets */
	u_int64_t txpackets;            /* transmitted packets */
	u_int64_t rxpackets;            /* received packets */
	u_int32_t txretransmitbytes;    /* retransmitted bytes */
	u_int32_t rxoutoforderbytes;    /* out-of-order bytes received */
	u_int32_t rxmitpkts;            /* retransmitted packets */
	u_int32_t rcvoopack;            /* out-of-order packets received */
	u_int32_t pawsdrop;             /* PAWS drops */
	u_int32_t sack_recovery_episodes;
	u_int32_t reordered_pkts;
	u_int32_t dsack_sent;
	u_int32_t dsack_recvd;
	u_int32_t srtt;                 /* smoothed RTT */
	u_int32_t rttupdated;           /* # of RTT samples */
	u_int32_t rttvar;               /* RTT variance */
	u_int32_t rttmin;               /* minimum RTT */
	u_int32_t bw_sndbw_max;         /* max send bandwidth */
	u_int32_t bw_rcvbw_max;         /* max receive bandwidth */
	u_int32_t ecn_recv_ece;         /* ECE flags received */
	u_int32_t ecn_recv_ce;          /* CE marks received */
	u_int32_t ecn_flags;
	u_int16_t ipv4:1,               /* flow is IPv4 (vs IPv6) */
	    local:1,                    /* flow is local */
	    connreset:1,                /* connection was reset */
	    conntimeout:1,              /* connection timed out */
	    rxmit_drop:1,               /* dropped after retransmissions */
	    ecn_fallback_synloss:1,     /* ECN fallback: SYN loss */
	    ecn_fallback_droprst:1,
	    ecn_fallback_droprxmt:1,
	    ecn_fallback_ce:1,          /* ECN fallback: CE marks */
	    ecn_fallback_reorder:1;     /* ECN fallback: reordering */
};
347
/*
 * Interface state exchanged with user space: cellular RRC state, link
 * quality metric, and link availability. valid_bitmask selects which of
 * the fields are meaningful for a given get/set operation.
 */
struct if_interface_state {
	/*
	 * The bitmask tells which of the fields
	 * to consider:
	 * - When setting, to control which fields
	 *   are being modified;
	 * - When getting, it tells which fields are set.
	 */
	u_int8_t valid_bitmask;
#define IF_INTERFACE_STATE_RRC_STATE_VALID              0x1
#define IF_INTERFACE_STATE_LQM_STATE_VALID              0x2
#define IF_INTERFACE_STATE_INTERFACE_AVAILABILITY_VALID 0x4

	/*
	 * Valid only for cellular interface
	 */
	u_int8_t rrc_state;
#define IF_INTERFACE_STATE_RRC_STATE_IDLE       0x0
#define IF_INTERFACE_STATE_RRC_STATE_CONNECTED  0x1

	/*
	 * Values normalized to the edge of the following values
	 * that are defined on <net/if.h>:
	 *  IFNET_LQM_THRESH_BAD
	 *  IFNET_LQM_THRESH_POOR
	 *  IFNET_LQM_THRESH_GOOD
	 */
	int8_t lqm_state;

	/*
	 * Indicate if the underlying link is currently
	 * available
	 */
	u_int8_t interface_availability;
#define IF_INTERFACE_STATE_INTERFACE_AVAILABLE          0x0
#define IF_INTERFACE_STATE_INTERFACE_UNAVAILABLE        0x1
};
385
/*
 * Histogram of packet chain lengths: counts of chains containing one,
 * two, three, four, and five-or-more packets. 64-bit aligned so the
 * counters can be updated atomically.
 * NOTE(review): alignment rationale inferred — confirm at update sites.
 */
struct chain_len_stats {
	uint64_t cls_one;               /* chains of length 1 */
	uint64_t cls_two;               /* chains of length 2 */
	uint64_t cls_three;             /* chains of length 3 */
	uint64_t cls_four;              /* chains of length 4 */
	uint64_t cls_five_or_more;      /* chains of length >= 5 */
} __attribute__((__aligned__(sizeof(uint64_t))));
393
394 #ifdef BSD_KERNEL_PRIVATE
395 #define IFNETS_MAX 64
396
397 /*
398 * Internal storage of if_data. This is bound to change. Various places in the
399 * stack will translate this data structure in to the externally visible
400 * if_data structure above. Note that during interface attach time, the
401 * embedded if_data structure in ifnet is cleared, with the exception of
402 * some non-statistics related fields.
403 */
struct if_data_internal {
	/*
	 * NOTE(review): this layout is translated into the externally
	 * visible if_data structures (see the comment above) — keep any
	 * field additions in sync with that translation code.
	 */
	/* generic interface information */
	u_char ifi_type;                /* ethernet, tokenring, etc */
	u_char ifi_typelen;             /* Length of frame type id */
	u_char ifi_physical;            /* e.g., AUI, Thinnet, 10base-T, etc */
	u_char ifi_addrlen;             /* media address length */
	u_char ifi_hdrlen;              /* media header length */
	u_char ifi_recvquota;           /* polling quota for receive intrs */
	u_char ifi_xmitquota;           /* polling quota for xmit intrs */
	u_char ifi_unused1;             /* for future use */
	u_int32_t ifi_mtu;              /* maximum transmission unit */
	u_int32_t ifi_metric;           /* routing metric (external only) */
	u_int32_t ifi_baudrate;         /* linespeed */
	/* volatile statistics */
	u_int64_t ifi_ipackets;         /* packets received on interface */
	u_int64_t ifi_ierrors;          /* input errors on interface */
	u_int64_t ifi_opackets;         /* packets sent on interface */
	u_int64_t ifi_oerrors;          /* output errors on interface */
	u_int64_t ifi_collisions;       /* collisions on csma interfaces */
	u_int64_t ifi_ibytes;           /* total number of octets received */
	u_int64_t ifi_obytes;           /* total number of octets sent */
	u_int64_t ifi_imcasts;          /* packets received via multicast */
	u_int64_t ifi_omcasts;          /* packets sent via multicast */
	u_int64_t ifi_iqdrops;          /* dropped on input, this interface */
	u_int64_t ifi_noproto;          /* destined for unsupported protocol */
	u_int32_t ifi_recvtiming;       /* usec spent receiving when timing */
	u_int32_t ifi_xmittiming;       /* usec spent xmitting when timing */
	u_int64_t ifi_alignerrs;        /* unaligned (32-bit) input pkts */
	u_int64_t ifi_dt_bytes;         /* Data threshold counter */
	u_int64_t ifi_fpackets;         /* forwarded packets on interface */
	u_int64_t ifi_fbytes;           /* forwarded bytes on interface */
	struct timeval ifi_lastchange;  /* time of last administrative change */
	struct timeval ifi_lastupdown;  /* time of last up/down event */
	u_int32_t ifi_hwassist;         /* HW offload capabilities */
	u_int32_t ifi_tso_v4_mtu;       /* TCP Segment Offload IPv4 maximum segment size */
	u_int32_t ifi_tso_v6_mtu;       /* TCP Segment Offload IPv6 maximum segment size */
};
441 #endif /* BSD_KERNEL_PRIVATE */
442
/*
 * Accessor macros mapping the traditional BSD if_* field names onto the
 * corresponding fields of the ifnet's embedded if_data structure.
 */
#define if_mtu          if_data.ifi_mtu
#define if_type         if_data.ifi_type
#define if_typelen      if_data.ifi_typelen
#define if_physical     if_data.ifi_physical
#define if_addrlen      if_data.ifi_addrlen
#define if_hdrlen       if_data.ifi_hdrlen
#define if_metric       if_data.ifi_metric
#define if_baudrate     if_data.ifi_baudrate
#define if_hwassist     if_data.ifi_hwassist
#define if_ipackets     if_data.ifi_ipackets
#define if_ierrors      if_data.ifi_ierrors
#define if_opackets     if_data.ifi_opackets
#define if_oerrors      if_data.ifi_oerrors
#define if_collisions   if_data.ifi_collisions
#define if_ibytes       if_data.ifi_ibytes
#define if_obytes       if_data.ifi_obytes
#define if_imcasts      if_data.ifi_imcasts
#define if_omcasts      if_data.ifi_omcasts
#define if_iqdrops      if_data.ifi_iqdrops
#define if_noproto      if_data.ifi_noproto
#define if_lastchange   if_data.ifi_lastchange
#define if_recvquota    if_data.ifi_recvquota
#define if_xmitquota    if_data.ifi_xmitquota
#ifdef BSD_KERNEL_PRIVATE
/* kernel-private accessors for fields of struct if_data_internal */
#define if_tso_v4_mtu   if_data.ifi_tso_v4_mtu
#define if_tso_v6_mtu   if_data.ifi_tso_v6_mtu
#define if_alignerrs    if_data.ifi_alignerrs
#define if_dt_bytes     if_data.ifi_dt_bytes
#define if_fpackets     if_data.ifi_fpackets
#define if_fbytes       if_data.ifi_fbytes
#define if_lastupdown   if_data.ifi_lastupdown
474
/*
 * Forward structure declarations for function prototypes [sic].
 * These types are defined elsewhere in the networking stack; only
 * pointers to them appear in this header.
 */
struct proc;
struct rtentry;
struct socket;
struct ifnet_filter;
struct mbuf;
struct ifaddr;
struct tqdummy;
struct proto_hash_entry;
struct dlil_threading_info;
struct tcpstat_local;
struct udpstat_local;
#if PF
struct pfi_kif;
#endif /* PF */
#if SKYWALK
struct nexus_netif_adapter;
#endif /* SKYWALK */

/* we use TAILQs so that the order of instantiation is preserved in the list */
TAILQ_HEAD(ifnethead, ifnet);
TAILQ_HEAD(ifaddrhead, ifaddr);
LIST_HEAD(ifmultihead, ifmultiaddr);
TAILQ_HEAD(tailq_head, tqdummy);
TAILQ_HEAD(ifnet_filter_head, ifnet_filter);
TAILQ_HEAD(ddesc_head_name, dlil_demux_desc);

extern bool intcoproc_unrestricted;
#endif /* BSD_KERNEL_PRIVATE */
506
/*
 * All of the following IF_HWASSIST_* flags are defined in kpi_interface.h as
 * IFNET_* flags. These are redefined here as constants to avoid failures to
 * build user level programs that cannot include kpi_interface.h. It is
 * important to keep this in sync with the definitions in kpi_interface.h.
 * The corresponding constant for each definition is mentioned in the comment.
 *
 * Bottom 16 bits reserved for hardware checksum
 */
#define IF_HWASSIST_CSUM_IP             0x0001  /* will csum IP, IFNET_CSUM_IP */
#define IF_HWASSIST_CSUM_TCP            0x0002  /* will csum TCP, IFNET_CSUM_TCP */
#define IF_HWASSIST_CSUM_UDP            0x0004  /* will csum UDP, IFNET_CSUM_UDP */
#define IF_HWASSIST_CSUM_IP_FRAGS       0x0008  /* will csum IP fragments, IFNET_CSUM_FRAGMENT */
#define IF_HWASSIST_CSUM_FRAGMENT       0x0010  /* will do IP fragmentation, IFNET_IP_FRAGMENT */
#define IF_HWASSIST_CSUM_TCPIPV6        0x0020  /* will csum TCPv6, IFNET_CSUM_TCPIPV6 */
#define IF_HWASSIST_CSUM_UDPIPV6        0x0040  /* will csum UDPv6, IFNET_CSUM_UDPIPV6 */
#define IF_HWASSIST_CSUM_FRAGMENT_IPV6  0x0080  /* will do IPv6 fragmentation, IFNET_IPV6_FRAGMENT */
#define IF_HWASSIST_CSUM_PARTIAL        0x1000  /* simple Sum16 computation, IFNET_CSUM_PARTIAL */
#define IF_HWASSIST_CSUM_ZERO_INVERT    0x2000  /* capable of inverting csum of 0 to -0 (0xffff) */
#define IF_HWASSIST_CSUM_MASK           0xffff
#define IF_HWASSIST_CSUM_FLAGS(hwassist)        ((hwassist) & IF_HWASSIST_CSUM_MASK)

/* VLAN support */
#define IF_HWASSIST_VLAN_TAGGING        0x00010000      /* supports VLAN tagging, IFNET_VLAN_TAGGING */
#define IF_HWASSIST_VLAN_MTU            0x00020000      /* supports VLAN MTU-sized packet (for software VLAN), IFNET_VLAN_MTU */

/* TCP Segment Offloading support */

#define IF_HWASSIST_TSO_V4              0x00200000      /* will do TCP Segment offload for IPv4, IFNET_TSO_IPV4 */
#define IF_HWASSIST_TSO_V6              0x00400000      /* will do TCP Segment offload for IPv6, IFNET_TSO_IPV6 */

#define IFXNAMSIZ       (IFNAMSIZ + 8)  /* external name (name + unit) */
#define IFNET_NETWORK_ID_LEN    32
540
541 #ifdef BSD_KERNEL_PRIVATE
542 /*
543 * ifnet is private to BSD portion of kernel
544 */
545 #include <sys/mcache.h>
546 #include <sys/tree.h>
547 #include <netinet/in.h>
548 #include <net/if_dl.h>
549 #include <net/classq/if_classq.h>
550 #include <net/if_types.h>
551 #include <net/route.h>
552
553 RB_HEAD(ll_reach_tree, if_llreach); /* define struct ll_reach_tree */
554
555 #if SKYWALK
/*
 * Operations vector a Skywalk netif nexus adapter exposes to the ifnet
 * layer. NOTE(review): parameter meanings are not visible in this
 * header; the one-line descriptions below are inferred from the
 * callback names — verify against the nexus implementation.
 */
struct nexus_ifnet_ops {
	/* finalize adapter setup for the attached interface */
	void (*ni_finalize)(struct nexus_netif_adapter *, struct ifnet *);
	/* reclaim cached resources for the interface */
	void (*ni_reap)(struct nexus_netif_adapter *, struct ifnet *,
	    uint32_t, boolean_t);
	/* dequeue a packet chain from the adapter */
	errno_t (*ni_dequeue)(struct nexus_netif_adapter *, uint32_t,
	    uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *, uint32_t *,
	    uint32_t *, boolean_t, errno_t);
	/* query queued packet/byte counts */
	errno_t (*ni_get_len)(struct nexus_netif_adapter *, uint32_t,
	    uint32_t *, uint32_t *, errno_t);
	/* notification that the adapter is being detached */
	void (*ni_detach_notify)(struct nexus_netif_adapter *);
};
/*
 * Identity of the compat netif nexus attached to an interface.
 * NOTE(review): roles of the three UUIDs inferred from names — confirm.
 */
typedef struct {
	uuid_t if_nif_provider;         /* nexus provider UUID */
	uuid_t if_nif_instance;         /* nexus instance UUID */
	uuid_t if_nif_attach;           /* nexus attachment UUID */
} if_nexus_netif, *if_nexus_netif_t;
572
/*
 * Identity of the flowswitch nexus attached to an interface, plus a
 * generation count for its IP address configuration.
 */
typedef struct {
	uuid_t if_fsw_provider;         /* flowswitch provider UUID */
	uuid_t if_fsw_instance;         /* flowswitch instance UUID */
	uuid_t if_fsw_device;           /* flowswitch device UUID */
	uint32_t if_fsw_ipaddr_gencnt;  /* IP address generation count */
} if_nexus_flowswitch, *if_nexus_flowswitch_t;
579 #endif /* SKYWALK */
580
/* DLIL entry points used to hand packet chains into/out of the stack. */
typedef errno_t (*dlil_input_func)(ifnet_t ifp, mbuf_t m_head,
    mbuf_t m_tail, const struct ifnet_stat_increment_param *s,
    boolean_t poll, struct thread *tp);
typedef errno_t (*dlil_output_func)(ifnet_t interface, mbuf_t data);

/* IPv6 router mode; values are defined elsewhere. */
typedef u_int8_t ipv6_router_mode_t;

/* External interface name (name + unit), e.g. "en0". */
#define if_name(ifp)    ifp->if_xname
589 /*
590 * Structure defining a network interface.
591 *
592 * (Would like to call this struct ``if'', but C isn't PL/1.)
593 */
594 struct ifnet {
595 /*
596 * Lock (RW or mutex) to protect this data structure (static storage.)
597 */
598 decl_lck_rw_data(, if_lock);
599 void *if_softc; /* pointer to driver state */
600 const char *if_name; /* name, e.g. ``en'' or ``lo'' */
601 const char *if_xname; /* external name (name + unit) */
602 struct if_description if_desc; /* extended description */
603 TAILQ_ENTRY(ifnet) if_link; /* all struct ifnets are chained */
604 TAILQ_ENTRY(ifnet) if_detaching_link; /* list of detaching ifnets */
605 TAILQ_ENTRY(ifnet) if_ordered_link; /* list of ordered ifnets */
606
607 decl_lck_mtx_data(, if_ref_lock);
608 u_int32_t if_refflags; /* see IFRF flags below */
609 u_int32_t if_refio; /* number of io ops to the underlying driver */
610 u_int32_t if_threads_pending; /* Threads created but waiting for first run */
611 u_int32_t if_datamov; /* number of threads moving data */
612 u_int32_t if_drainers; /* number of draining threads */
613 u_int32_t if_suspend; /* number of suspend requests */
614
615 #define if_list if_link
616 struct ifaddrhead if_addrhead; /* linked list of addresses per if */
617 #define if_addrlist if_addrhead
618 struct ifaddr *if_lladdr; /* link address (first/permanent) */
619
620 u_int32_t if_qosmarking_mode; /* generation to use with NECP clients */
621
622 int if_pcount; /* number of promiscuous listeners */
623 struct bpf_if *if_bpf; /* packet filter structure */
624 u_short if_index; /* numeric abbreviation for this if */
625 short if_unit; /* sub-unit for lower level driver */
626 short if_timer; /* time 'til if_watchdog called */
627 short if_flags; /* up/down, broadcast, etc. */
628 u_int32_t if_eflags; /* see <net/if.h> */
629 u_int32_t if_xflags; /* see <net/if.h> */
630
631 int if_capabilities; /* interface features & capabilities */
632 int if_capenable; /* enabled features & capabilities */
633
634 void *if_linkmib; /* link-type-specific MIB data */
635 uint32_t if_linkmiblen; /* length of above data */
636
637 struct if_data_internal if_data __attribute__((aligned(8)));
638
639 ifnet_family_t if_family; /* value assigned by Apple */
640 ifnet_subfamily_t if_subfamily; /* value assigned by Apple */
641 uintptr_t if_family_cookie;
642 volatile dlil_input_func if_input_dlil;
643 volatile dlil_output_func if_output_dlil;
644 volatile ifnet_start_func if_start;
645 ifnet_output_func if_output;
646 ifnet_pre_enqueue_func if_pre_enqueue;
647 ifnet_ctl_func if_output_ctl;
648 ifnet_input_poll_func if_input_poll;
649 ifnet_ctl_func if_input_ctl;
650 ifnet_ioctl_func if_ioctl;
651 ifnet_set_bpf_tap if_set_bpf_tap;
652 ifnet_detached_func if_free;
653 ifnet_demux_func if_demux;
654 ifnet_event_func if_event;
655 ifnet_framer_func if_framer_legacy;
656 ifnet_framer_extended_func if_framer;
657 ifnet_add_proto_func if_add_proto;
658 ifnet_del_proto_func if_del_proto;
659 ifnet_check_multi if_check_multi;
660 struct proto_hash_entry *if_proto_hash;
661 ifnet_detached_func if_detach;
662
663 u_int32_t if_flowhash; /* interface flow control ID */
664
665 decl_lck_mtx_data(, if_start_lock);
666 u_int32_t if_start_flags; /* see IFSF flags below */
667 u_int32_t if_start_req;
668 u_int8_t if_start_embryonic;
669 u_int8_t if_start_active; /* output is active */
670 u_int16_t if_start_delayed;
671 u_int16_t if_start_delay_qlen;
672 u_int16_t if_start_delay_idle;
673 u_int64_t if_start_delay_swin;
674 u_int32_t if_start_delay_cnt;
675 u_int32_t if_start_delay_timeout; /* nanoseconds */
676 struct timespec if_start_cycle; /* restart interval */
677 struct thread *if_start_thread;
678
679 struct ifclassq *if_snd; /* transmit queue */
680 u_int32_t if_output_sched_model; /* tx sched model */
681
682 struct if_bandwidths if_output_bw;
683 struct if_bandwidths if_input_bw;
684
685 struct if_latencies if_output_lt;
686 struct if_latencies if_input_lt;
687
688 decl_lck_mtx_data(, if_flt_lock);
689 u_int32_t if_flt_busy;
690 u_int32_t if_flt_waiters;
691 struct ifnet_filter_head if_flt_head;
692 uint32_t if_flt_non_os_count;
693 uint32_t if_flt_no_tso_count;
694
695 struct ifmultihead if_multiaddrs; /* multicast addresses */
696 u_int32_t if_updatemcasts; /* mcast addrs need updating */
697 int if_amcount; /* # of all-multicast reqs */
698 decl_lck_mtx_data(, if_addrconfig_lock); /* for serializing addr config */
699 struct in_multi *if_allhostsinm; /* store all-hosts inm for this ifp */
700
701 /*
702 * Opportunistic polling parameters.
703 */
704 decl_lck_mtx_data(, if_poll_lock);
705 struct if_poll_params {
706 u_int16_t poll_req;
707 u_int16_t poll_update; /* link update */
708 u_int32_t poll_flags;
709 #define IF_POLLF_READY 0x1 /* poll thread is ready */
710 #define IF_POLLF_RUNNING 0x2 /* poll thread is running/active */
711 #define IF_POLLF_TERMINATING 0x4 /* poll thread is terminating */
712 #define IF_POLLF_EMBRYONIC 0x8000 /* poll thread is being setup */
713 struct timespec poll_cycle; /* poll interval */
714 struct thread *poll_thread;
715
716 ifnet_model_t poll_mode; /* current mode */
717 struct pktcntr poll_tstats; /* incremental polling statistics */
718 struct if_rxpoll_stats poll_pstats; /* polling statistics */
719 struct pktcntr poll_sstats; /* packets and bytes per sampling */
720 struct timespec poll_mode_holdtime; /* mode holdtime in nsec */
721 struct timespec poll_mode_lasttime; /* last mode change time in nsec */
722 struct timespec poll_sample_holdtime; /* sampling holdtime in nsec */
723 struct timespec poll_sample_lasttime; /* last sampling time in nsec */
724 struct timespec poll_dbg_lasttime; /* last debug message time in nsec */
725 } rxpoll_params;
726 #define if_poll_req rxpoll_params.poll_req
727 #define if_poll_update rxpoll_params.poll_update
728 #define if_poll_flags rxpoll_params.poll_flags
729 #define if_poll_cycle rxpoll_params.poll_cycle
730 #define if_poll_thread rxpoll_params.poll_thread
731 #define if_poll_mode rxpoll_params.poll_mode
732 #define if_poll_tstats rxpoll_params.poll_tstats
733 #define if_poll_sstats rxpoll_params.poll_sstats
734 #define if_poll_pstats rxpoll_params.poll_pstats
735
736 #define if_poll_mode_holdtime rxpoll_params.poll_mode_holdtime
737 #define if_poll_mode_lasttime rxpoll_params.poll_mode_lasttime
738 #define if_poll_sample_holdtime rxpoll_params.poll_sample_holdtime
739 #define if_poll_sample_lasttime rxpoll_params.poll_sample_lasttime
740 #define if_poll_dbg_lasttime rxpoll_params.poll_dbg_lasttime
741
742 #define if_rxpoll_offreq rxpoll_params.poll_pstats.ifi_poll_off_req
743 #define if_rxpoll_offerr rxpoll_params.poll_pstats.ifi_poll_off_err
744 #define if_rxpoll_onreq rxpoll_params.poll_pstats.ifi_poll_on_req
745 #define if_rxpoll_onerr rxpoll_params.poll_pstats.ifi_poll_on_err
746 #define if_rxpoll_wavg rxpoll_params.poll_pstats.ifi_poll_wakeups_avg
747 #define if_rxpoll_wlowat rxpoll_params.poll_pstats.ifi_poll_wakeups_lowat
748 #define if_rxpoll_whiwat rxpoll_params.poll_pstats.ifi_poll_wakeups_hiwat
749 #define if_rxpoll_pavg rxpoll_params.poll_pstats.ifi_poll_packets_avg
750 #define if_rxpoll_pmin rxpoll_params.poll_pstats.ifi_poll_packets_min
751 #define if_rxpoll_pmax rxpoll_params.poll_pstats.ifi_poll_packets_max
752 #define if_rxpoll_plowat rxpoll_params.poll_pstats.ifi_poll_packets_lowat
753 #define if_rxpoll_phiwat rxpoll_params.poll_pstats.ifi_poll_packets_hiwat
754 #define if_rxpoll_bavg rxpoll_params.poll_pstats.ifi_poll_bytes_avg
755 #define if_rxpoll_bmin rxpoll_params.poll_pstats.ifi_poll_bytes_min
756 #define if_rxpoll_bmax rxpoll_params.poll_pstats.ifi_poll_bytes_max
757 #define if_rxpoll_blowat rxpoll_params.poll_pstats.ifi_poll_bytes_lowat
758 #define if_rxpoll_bhiwat rxpoll_params.poll_pstats.ifi_poll_bytes_hiwat
759 #define if_rxpoll_plim rxpoll_params.poll_pstats.ifi_poll_packets_limit
760 #define if_rxpoll_ival rxpoll_params.poll_pstats.ifi_poll_interval_time
761
762 struct dlil_threading_info *if_inp;
763
764 /* allocated once along with dlil_ifnet and is never freed */
765 thread_call_t if_dt_tcall;
766
767 struct {
768 u_int32_t length;
769 union {
770 u_char buffer[8];
771 u_char *ptr;
772 } u;
773 } if_broadcast;
774
775 #if PF
776 struct pfi_kif *if_pf_kif;
777 #endif /* PF */
778 #if SKYWALK
779 struct nexus_ifnet_ops *if_na_ops;
780 struct nexus_netif_adapter *if_na;
781
782 /* compat netif attachment */
783 if_nexus_netif if_nx_netif;
784
785 /* flowswitch attachment */
786 if_nexus_flowswitch if_nx_flowswitch;
787
788 /* headroom space to be reserved in tx packets */
789 uint16_t if_tx_headroom;
790
791 /* trailer space to be reserved in tx packets */
792 uint16_t if_tx_trailer;
793
794 /*
795 * mitigation interval in microseconds for the rx interrupt mitigation
796 * logic while operating in the high throughput mode.
797 */
798 uint32_t if_rx_mit_ival;
799
800 ifnet_start_func if_save_start;
801 ifnet_output_func if_save_output;
802
803 /*
804 * Number of threads waiting for the start callback to be finished;
805 * access is protected by if_start_lock; also serves as wait channel.
806 */
807 uint32_t if_start_waiters;
808 #endif /* SKYWALK */
809
810 decl_lck_mtx_data(, if_cached_route_lock);
811 u_int32_t if_fwd_cacheok;
812 struct route if_fwd_route; /* cached forwarding route */
813 struct route if_src_route; /* cached ipv4 source route */
814 struct route_in6 if_src_route6; /* cached ipv6 source route */
815
816 decl_lck_rw_data(, if_llreach_lock);
817 struct ll_reach_tree if_ll_srcs; /* source link-layer tree */
818
819 void *if_bridge; /* bridge glue */
820
821 u_int32_t if_idle_flags; /* idle flags */
822 u_int32_t if_idle_new_flags; /* temporary idle flags */
823 u_int32_t if_idle_new_flags_mask; /* temporary mask */
824 u_int32_t if_route_refcnt; /* idle: route ref count */
825 u_int32_t if_rt_sendts; /* last of a real time packet */
826
827 struct if_traffic_class if_tc __attribute__((aligned(8)));
828 #if INET
829 struct igmp_ifinfo *if_igi; /* for IGMPv3 */
830 #endif /* INET */
831 struct mld_ifinfo *if_mli; /* for MLDv2 */
832
833 struct tcpstat_local *if_tcp_stat; /* TCP specific stats */
834 struct udpstat_local *if_udp_stat; /* UDP specific stats */
835
836 struct {
837 int32_t level; /* cached logging level */
838 u_int32_t flags; /* cached logging flags */
839 int32_t category; /* cached category */
840 int32_t subcategory; /* cached subcategory */
841 } if_log;
842
843 struct {
844 struct ifnet *ifp; /* delegated ifp */
845 u_int32_t type; /* delegated i/f type */
846 u_int32_t family; /* delegated i/f family */
847 u_int32_t subfamily; /* delegated i/f sub-family */
848 uint32_t expensive:1, /* delegated i/f expensive? */
849 constrained:1; /* delegated i/f constrained? */
850 } if_delegated;
851
852 uuid_t *if_agentids; /* network agents attached to interface */
853 u_int32_t if_agentcount;
854
855 volatile uint32_t if_low_power_gencnt;
856
857 u_int32_t if_generation; /* generation to use with NECP clients */
858 u_int32_t if_fg_sendts; /* last send on a fg socket in seconds */
859
860 u_int64_t if_data_threshold;
861
862 /* Total bytes in send socket buffer */
863 int64_t if_sndbyte_total __attribute__ ((aligned(8)));
864 /* Total unsent bytes in send socket buffer */
865 int64_t if_sndbyte_unsent __attribute__ ((aligned(8)));
866 /* count of times, when there was data to send when sleep is impending */
867 uint32_t if_unsent_data_cnt;
868
869 #if INET
870 decl_lck_rw_data(, if_inetdata_lock);
871 struct in_ifextra *if_inetdata;
872 #endif /* INET */
873 decl_lck_mtx_data(, if_inet6_ioctl_lock);
874 boolean_t if_inet6_ioctl_busy;
875 decl_lck_rw_data(, if_inet6data_lock);
876 struct in6_ifextra *if_inet6data;
877 decl_lck_rw_data(, if_link_status_lock);
878 struct if_link_status *if_link_status;
879 struct if_interface_state if_interface_state;
880 struct if_tcp_ecn_stat *if_ipv4_stat;
881 struct if_tcp_ecn_stat *if_ipv6_stat;
882
883 #if SKYWALK
884 /* Keeps track of local ports bound to this interface
885 * Protected by the global lock in skywalk/netns/netns.c */
886 SLIST_HEAD(, ns_token) if_netns_tokens;
887 #endif /* SKYWALK */
888 struct if_lim_perf_stat if_lim_stat;
889
890 uint32_t if_tcp_kao_max;
891 uint32_t if_tcp_kao_cnt;
892
893 #if SKYWALK
894 struct netem *if_input_netem;
895 #endif /* SKYWALK */
896 struct netem *if_output_netem;
897
898 ipv6_router_mode_t if_ipv6_router_mode; /* see <netinet6/in6_var.h> */
899
900 u_int8_t if_estimated_up_bucket;
901 u_int8_t if_estimated_down_bucket;
902 u_int8_t if_radio_type;
903 u_int8_t if_radio_channel;
904
905 uint8_t network_id[IFNET_NETWORK_ID_LEN];
906 uint8_t network_id_len;
907 uint32_t if_traffic_rule_count;
908 uint32_t if_traffic_rule_genid;
909 };
910
911 /* Interface event handling declarations */
912 extern struct eventhandler_lists_ctxt ifnet_evhdlr_ctxt;
913
/*
 * Event codes delivered to registered ifnet_event handlers when the
 * state of an interface changes.
 */
typedef enum {
	INTF_EVENT_CODE_CREATED,
	INTF_EVENT_CODE_REMOVED,
	INTF_EVENT_CODE_STATUS_UPDATE,
	INTF_EVENT_CODE_IPADDR_ATTACHED,
	INTF_EVENT_CODE_IPADDR_DETACHED,
	INTF_EVENT_CODE_LLADDR_UPDATE,
	INTF_EVENT_CODE_MTU_CHANGED,
	INTF_EVENT_CODE_LOW_POWER_UPDATE,
} intf_event_code_t;

/*
 * Handler signature for interface events; the sockaddr presumably
 * carries the address for the *IPADDR* events — verify against callers.
 */
typedef void (*ifnet_event_fn)(struct eventhandler_entry_arg, struct ifnet *, struct sockaddr *, intf_event_code_t);
EVENTHANDLER_DECLARE(ifnet_event, ifnet_event_fn);
927
/*
 * Atomically bump the per-interface TCP stat `_s' iff the ifnet has a
 * TCP stats block (if_tcp_stat) attached.  The do/while (0) wrapper
 * carries no trailing semicolon: the caller supplies its own, so the
 * macro is safe inside an unbraced if/else arm (the original trailing
 * `;' produced an empty statement that broke such uses).
 */
#define IF_TCP_STATINC(_ifp, _s) do { \
	if ((_ifp)->if_tcp_stat != NULL) \
	        atomic_add_64(&(_ifp)->if_tcp_stat->_s, 1); \
} while (0)
932
/*
 * Atomically bump the per-interface UDP stat `_s' iff the ifnet has a
 * UDP stats block (if_udp_stat) attached.  As with IF_TCP_STATINC, the
 * macro ends without a semicolon so it expands safely inside unbraced
 * if/else arms; callers terminate it themselves.
 */
#define IF_UDP_STATINC(_ifp, _s) do { \
	if ((_ifp)->if_udp_stat != NULL) \
	        atomic_add_64(&(_ifp)->if_udp_stat->_s, 1); \
} while (0)
937
938 /*
939 * Valid values for if_refflags
940 */
#define IFRF_EMBRYONIC  0x1     /* ifnet is allocated; awaiting attach */
#define IFRF_ATTACHED   0x2     /* ifnet attach is completely done */
#define IFRF_DETACHING  0x4     /* detach has been requested */
#define IFRF_READY      0x8     /* data path is ready */

/* refflag bits consulted when deciding whether an ifnet is attached */
#define IFRF_ATTACH_MASK \
	(IFRF_EMBRYONIC|IFRF_ATTACHED|IFRF_DETACHING)

/* attached, and neither embryonic nor mid-detach */
#define IF_FULLY_ATTACHED(_ifp) \
	(((_ifp)->if_refflags & IFRF_ATTACH_MASK) == IFRF_ATTACHED)

/* fully attached with the data path marked ready */
#define IF_FULLY_ATTACHED_AND_READY(_ifp) \
	(IF_FULLY_ATTACHED(_ifp) && ((_ifp)->if_refflags & IFRF_READY))
954 /*
955 * Valid values for if_start_flags
956 */
957 #define IFSF_FLOW_CONTROLLED 0x1 /* flow controlled */
958 #define IFSF_TERMINATING 0x2 /* terminating */
959
960 /*
961 * Structure describing a `cloning' interface.
962 */
struct if_clone {
	LIST_ENTRY(if_clone) ifc_list;  /* on list of cloners */
	decl_lck_mtx_data(, ifc_mutex); /* To serialize clone create/delete */
	u_int32_t ifc_minifs;           /* minimum number of interfaces */
	u_int32_t ifc_maxunit;          /* maximum unit number */
	unsigned char *ifc_units;       /* bitmap to handle units */
	u_int32_t ifc_bmlen;            /* bitmap length */
	u_int32_t ifc_zone_max_elem;    /* Max elements for this zone type */
	u_int32_t ifc_softc_size;       /* size of softc for the device */
	struct zone *ifc_zone;          /* if_clone allocation zone */
	/* create a clone instance: (cloner, unit, params) */
	int (*ifc_create)(struct if_clone *, u_int32_t, void *);
	/* tear down a previously created clone instance */
	int (*ifc_destroy)(struct ifnet *);
	uint8_t ifc_namelen;            /* length of name */
	char ifc_name[IFNAMSIZ + 1];    /* name of device, e.g. `vlan' */
};
978
/*
 * Static initializer for a struct if_clone.  The list linkage, unit
 * bitmap and backing zone are deliberately left NULL/0 here —
 * presumably populated when the cloner is attached (see
 * if_clone_attach); verify against the implementation.
 */
#define IF_CLONE_INITIALIZER(name, create, destroy, minifs, maxunit, zone_max_elem, softc_size) { \
	.ifc_list = { NULL, NULL }, \
	.ifc_mutex = {}, \
	.ifc_name = name, \
	.ifc_namelen = (sizeof (name) - 1), \
	.ifc_minifs = minifs, \
	.ifc_maxunit = maxunit, \
	.ifc_units = NULL, \
	.ifc_bmlen = 0, \
	.ifc_zone_max_elem = zone_max_elem, \
	.ifc_softc_size = softc_size, \
	.ifc_zone = NULL, \
	.ifc_create = create, \
	.ifc_destroy = destroy \
}
994
995 /*
996 * Macros to manipulate ifqueue. Users of these macros are responsible
997 * for serialization, by holding whatever lock is appropriate for the
998 * corresponding structure that is referring the ifqueue.
999 */
#define IF_QFULL(ifq)   ((ifq)->ifq_len >= (ifq)->ifq_maxlen) /* at/over capacity? */
#define IF_DROP(ifq)    ((ifq)->ifq_drops++)                  /* account one drop */
1002
/*
 * Append packet (m) to the tail of (ifq); caller holds whatever lock
 * serializes the queue.  Every use of the `m' argument is parenthesized
 * (the original left three uses bare, which mis-expands for cast or
 * comma expressions passed as the packet argument).
 */
#define IF_ENQUEUE(ifq, m) do { \
	(m)->m_nextpkt = NULL; \
	if ((ifq)->ifq_tail == NULL) \
	        (ifq)->ifq_head = (m); \
	else \
	        ((struct mbuf*)(ifq)->ifq_tail)->m_nextpkt = (m); \
	(ifq)->ifq_tail = (m); \
	(ifq)->ifq_len++; \
} while (0)
1012
/*
 * Insert packet (m) at the head of (ifq); caller holds whatever lock
 * serializes the queue.  When the queue was empty, (m) becomes the
 * tail as well.
 */
#define IF_PREPEND(ifq, m) do { \
	(m)->m_nextpkt = (mbuf_ref_t)(ifq)->ifq_head; \
	(ifq)->ifq_head = (m); \
	if ((ifq)->ifq_tail == NULL) \
	        (ifq)->ifq_tail = (m); \
	(ifq)->ifq_len++; \
} while (0)
1020
/*
 * Remove the head packet of (ifq) into (m); (m) is set to NULL when the
 * queue is empty.  Caller holds whatever lock serializes the queue.
 * The emptiness test now parenthesizes the macro argument — the
 * original `if (m != NULL)' mis-expands when a non-primary expression
 * is passed for the packet argument.
 */
#define IF_DEQUEUE(ifq, m) do { \
	(m) = (mbuf_ref_t)(ifq)->ifq_head; \
	if ((m) != NULL) { \
	        if (((ifq)->ifq_head = (m)->m_nextpkt) == NULL) \
	                (ifq)->ifq_tail = NULL; \
	        (m)->m_nextpkt = NULL; \
	        (ifq)->ifq_len--; \
	} \
} while (0)
1030
/*
 * IF_REMQUEUE: unlink packet (m) from an arbitrary position in (ifq).
 * _p walks from the head to find (m)'s predecessor and stays NULL when
 * (m) is itself the head; _n remembers (m)'s successor.  The VERIFYs
 * check that (m) was actually on the queue and that the head/tail and
 * length invariants still hold after the unlink.
 */
#define IF_REMQUEUE(ifq, m) do { \
	mbuf_ref_t _p = (mbuf_ref_t)(ifq)->ifq_head; \
	mbuf_ref_t _n = (m)->m_nextpkt; \
	if ((m) == _p) \
	        _p = NULL; \
	while (_p != NULL) { \
	        if (_p->m_nextpkt == (m)) \
	                break; \
	        _p = _p->m_nextpkt; \
	} \
	VERIFY(_p != NULL || ((m) == (ifq)->ifq_head)); \
	if ((m) == (ifq)->ifq_head) \
	        (ifq)->ifq_head = _n; \
	if ((m) == (ifq)->ifq_tail) \
	        (ifq)->ifq_tail = _p; \
	VERIFY((ifq)->ifq_tail != NULL || (ifq)->ifq_head == NULL); \
	VERIFY((ifq)->ifq_len != 0); \
	--(ifq)->ifq_len; \
	if (_p != NULL) \
	        _p->m_nextpkt = _n; \
	(m)->m_nextpkt = NULL; \
} while (0)
1053
/*
 * Free every packet queued on (ifq).  Packets are dequeued one at a
 * time so the head/tail/length bookkeeping stays consistent throughout.
 */
#define IF_DRAIN(ifq) do { \
	struct mbuf *_m; \
	IF_DEQUEUE(ifq, _m); \
	while (_m != NULL) { \
	        m_freem(_m); \
	        IF_DEQUEUE(ifq, _m); \
	} \
} while (0)
1063
1064 /*
1065 * The ifaddr structure contains information about one address
1066 * of an interface. They are maintained by the different address families,
1067 * are allocated and attached when an address is set, and are linked
1068 * together so all addresses for an interface can be located.
1069 */
struct ifaddr {
	decl_lck_mtx_data(, ifa_lock);  /* lock for ifaddr */
	uint32_t ifa_refcnt;            /* ref count, use IFA_{ADD,REM}REF */
	uint32_t ifa_debug;             /* debug flags (IFD_*, below) */
	struct sockaddr *ifa_addr;      /* address of interface */
	struct sockaddr *ifa_dstaddr;   /* other end of p-to-p link */
#define ifa_broadaddr   ifa_dstaddr     /* broadcast address interface */
	struct sockaddr *ifa_netmask;   /* used to determine subnet */
	struct ifnet *ifa_ifp;          /* back-pointer to interface */
	TAILQ_ENTRY(ifaddr) ifa_link;   /* queue macro glue */
	void (*ifa_rtrequest)           /* check or clean routes (+ or -)'d */
	(int, struct rtentry *, struct sockaddr *);
	uint32_t ifa_flags;             /* mostly rt_flags for cloning (IFA_*, below) */
	int32_t ifa_metric;             /* cost of going out this interface */
	void (*ifa_free)(struct ifaddr *); /* callback fn for freeing */
	void (*ifa_trace)               /* callback fn for tracing refs */
	(struct ifaddr *, int);
	void (*ifa_attached)(struct ifaddr *); /* callback fn for attaching */
	void (*ifa_detached)(struct ifaddr *); /* callback fn for detaching */
	void *ifa_del_wc;       /* Wait channel to avoid address deletion races */
	int ifa_del_waiters;    /* Threads in wait to delete the address */
};
1092
1093 /*
1094 * Valid values for ifa_flags
1095 */
1096 #define IFA_ROUTE RTF_UP /* route installed (0x1) */
1097 #define IFA_CLONING RTF_CLONING /* (0x100) */
1098
1099 /*
1100 * Valid values for ifa_debug
1101 */
1102 #define IFD_ATTACHED 0x1 /* attached to list */
1103 #define IFD_ALLOC 0x2 /* dynamically allocated */
1104 #define IFD_DEBUG 0x4 /* has debugging info */
1105 #define IFD_LINK 0x8 /* link address */
1106 #define IFD_TRASHED 0x10 /* in trash list */
1107 #define IFD_DETACHING 0x20 /* detach is in progress */
1108 #define IFD_NOTREADY 0x40 /* embryonic; not yet ready */
1109
1110 #define IFA_LOCK_ASSERT_HELD(_ifa) \
1111 LCK_MTX_ASSERT(&(_ifa)->ifa_lock, LCK_MTX_ASSERT_OWNED)
1112
1113 #define IFA_LOCK_ASSERT_NOTHELD(_ifa) \
1114 LCK_MTX_ASSERT(&(_ifa)->ifa_lock, LCK_MTX_ASSERT_NOTOWNED)
1115
1116 #define IFA_LOCK(_ifa) \
1117 lck_mtx_lock(&(_ifa)->ifa_lock)
1118
1119 #define IFA_LOCK_SPIN(_ifa) \
1120 lck_mtx_lock_spin(&(_ifa)->ifa_lock)
1121
1122 #define IFA_CONVERT_LOCK(_ifa) do { \
1123 IFA_LOCK_ASSERT_HELD(_ifa); \
1124 lck_mtx_convert_spin(&(_ifa)->ifa_lock); \
1125 } while (0)
1126
1127 #define IFA_UNLOCK(_ifa) \
1128 lck_mtx_unlock(&(_ifa)->ifa_lock)
1129
1130 #define IFA_ADDREF(_ifa) \
1131 ifa_addref(_ifa, 0)
1132
1133 #define IFA_ADDREF_LOCKED(_ifa) \
1134 ifa_addref(_ifa, 1)
1135
1136 #define IFA_REMREF(_ifa) do { \
1137 (void) ifa_remref(_ifa, 0); \
1138 } while (0)
1139
1140 #define IFA_REMREF_LOCKED(_ifa) \
1141 ifa_remref(_ifa, 1)
1142
1143 /*
1144 * Multicast address structure. This is analogous to the ifaddr
1145 * structure except that it keeps track of multicast addresses.
1146 * Also, the request count here is a count of requests for this
1147 * address, not a count of pointers to this structure; anonymous
1148 * membership(s) holds one outstanding request count.
1149 */
struct ifmultiaddr {
	decl_lck_mtx_data(, ifma_lock);         /* lock for ifmultiaddr */
	u_int32_t ifma_refcount;                /* reference count */
	u_int32_t ifma_anoncnt;                 /* # of anonymous requests */
	u_int32_t ifma_reqcnt;                  /* total requests for this address */
	u_int32_t ifma_debug;                   /* see ifa_debug flags */
	u_int32_t ifma_flags;                   /* IFMAF_* flags, see below */
	LIST_ENTRY(ifmultiaddr) ifma_link;      /* queue macro glue */
	struct sockaddr *ifma_addr;             /* address this membership is for */
	struct ifmultiaddr *ifma_ll;            /* link-layer translation, if any */
	struct ifnet *ifma_ifp;                 /* back-pointer to interface */
	void *ifma_protospec;                   /* protocol-specific state, if any */
	void (*ifma_trace)                      /* callback fn for tracing refs */
	(struct ifmultiaddr *, int);
};
1165
1166 /*
1167 * Values for ifma_flags
1168 */
1169 #define IFMAF_ANONYMOUS 0x1 /* has anonymous request ref(s) held */
1170
1171 #define IFMA_LOCK_ASSERT_HELD(_ifma) \
1172 LCK_MTX_ASSERT(&(_ifma)->ifma_lock, LCK_MTX_ASSERT_OWNED)
1173
1174 #define IFMA_LOCK_ASSERT_NOTHELD(_ifma) \
1175 LCK_MTX_ASSERT(&(_ifma)->ifma_lock, LCK_MTX_ASSERT_NOTOWNED)
1176
1177 #define IFMA_LOCK(_ifma) \
1178 lck_mtx_lock(&(_ifma)->ifma_lock)
1179
1180 #define IFMA_LOCK_SPIN(_ifma) \
1181 lck_mtx_lock_spin(&(_ifma)->ifma_lock)
1182
1183 #define IFMA_CONVERT_LOCK(_ifma) do { \
1184 IFMA_LOCK_ASSERT_HELD(_ifma); \
1185 lck_mtx_convert_spin(&(_ifma)->ifma_lock); \
1186 } while (0)
1187
1188 #define IFMA_UNLOCK(_ifma) \
1189 lck_mtx_unlock(&(_ifma)->ifma_lock)
1190
1191 #define IFMA_ADDREF(_ifma) \
1192 ifma_addref(_ifma, 0)
1193
1194 #define IFMA_ADDREF_LOCKED(_ifma) \
1195 ifma_addref(_ifma, 1)
1196
1197 #define IFMA_REMREF(_ifma) \
1198 ifma_remref(_ifma)
1199
1200 /*
1201 * Indicate whether or not the immediate interface, or the interface delegated
1202 * by it, is a cellular interface (IFT_CELLULAR). Delegated interface type is
1203 * set/cleared along with the delegated ifp; we cache the type for performance
1204 * to avoid dereferencing delegated ifp each time.
1205 *
1206 * Note that this is meant to be used only for accounting and policy purposes;
1207 * certain places need to explicitly know the immediate interface type, and
1208 * this macro should not be used there.
1209 *
1210 * The test is done against IFT_CELLULAR instead of IFNET_FAMILY_CELLULAR to
1211 * handle certain cases where the family isn't set to the latter.
1212 *
1213 * This macro also handles the case of IFNET_FAMILY_ETHERNET with
1214 * IFNET_SUBFAMILY_SIMCELL which is used to simulate a cellular interface
1215 * for testing purposes. The underlying interface is Ethernet but we treat
1216 * it as cellular for accounting and policy purposes.
1217 */
1218 #define IFNET_IS_CELLULAR(_ifp) \
1219 ((_ifp)->if_type == IFT_CELLULAR || \
1220 (_ifp)->if_delegated.type == IFT_CELLULAR || \
1221 (((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1222 (_ifp)->if_subfamily == IFNET_SUBFAMILY_SIMCELL)) || \
1223 ((_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET && \
1224 (_ifp)->if_delegated.subfamily == IFNET_SUBFAMILY_SIMCELL))
1225
1226 /*
1227 * Indicate whether or not the immediate interface, or the interface delegated
1228 * by it, is an ETHERNET interface.
1229 */
1230 #define IFNET_IS_ETHERNET(_ifp) \
1231 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \
1232 (_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET)
1233 /*
1234 * Indicate whether or not the immediate interface, or the interface delegated
1235 * by it, is a Wi-Fi interface (IFNET_SUBFAMILY_WIFI). Delegated interface
1236 * subfamily is set/cleared along with the delegated ifp; we cache the subfamily
1237 * for performance to avoid dereferencing delegated ifp each time.
1238 *
1239 * Note that this is meant to be used only for accounting and policy purposes;
1240 * certain places need to explicitly know the immediate interface type, and
1241 * this macro should not be used there.
1242 *
1243 * The test is done against IFNET_SUBFAMILY_WIFI as the family may be set to
1244 * IFNET_FAMILY_ETHERNET (as well as type to IFT_ETHER) which is too generic.
1245 */
1246 #define IFNET_IS_WIFI(_ifp) \
1247 (((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1248 (_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI) || \
1249 ((_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET && \
1250 (_ifp)->if_delegated.subfamily == IFNET_SUBFAMILY_WIFI))
1251
1252 /*
1253 * Indicate whether or not the immediate interface, or the interface delegated
1254 * by it, is a Wired interface (several families). Delegated interface
1255 * family is set/cleared along with the delegated ifp; we cache the family
1256 * for performance to avoid dereferencing delegated ifp each time.
1257 *
1258 * Note that this is meant to be used only for accounting and policy purposes;
1259 * certain places need to explicitly know the immediate interface type, and
1260 * this macro should not be used there.
1261 */
1262 #define IFNET_IS_WIRED(_ifp) \
1263 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET || \
1264 (_ifp)->if_delegated.family == IFNET_FAMILY_ETHERNET || \
1265 (_ifp)->if_family == IFNET_FAMILY_FIREWIRE || \
1266 (_ifp)->if_delegated.family == IFNET_FAMILY_FIREWIRE)
1267
1268 /*
1269 * Indicate whether or not the immediate WiFi interface is on an infrastructure
1270 * network
1271 */
1272 #define IFNET_IS_WIFI_INFRA(_ifp) \
1273 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1274 (_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI && \
1275 !((_ifp)->if_eflags & IFEF_AWDL) && \
1276 !((_ifp)->if_xflags & IFXF_LOW_LATENCY))
1277
1278 /*
1279 * Indicate whether or not the immediate interface is a companion link
1280 * interface.
1281 */
1282 #define IFNET_IS_COMPANION_LINK(_ifp) \
1283 ((_ifp)->if_family == IFNET_FAMILY_IPSEC && \
1284 ((_ifp)->if_subfamily == IFNET_SUBFAMILY_BLUETOOTH || \
1285 (_ifp)->if_subfamily == IFNET_SUBFAMILY_WIFI || \
1286 (_ifp)->if_subfamily == IFNET_SUBFAMILY_QUICKRELAY || \
1287 (_ifp)->if_subfamily == IFNET_SUBFAMILY_DEFAULT))
1288
1289 /*
1290 * Indicate whether or not the immediate interface, or the interface delegated
1291 * by it, is marked as expensive. The delegated interface is set/cleared
1292 * along with the delegated ifp; we cache the flag for performance to avoid
1293 * dereferencing delegated ifp each time.
1294 *
1295 * Note that this is meant to be used only for policy purposes.
1296 */
1297 #define IFNET_IS_EXPENSIVE(_ifp) \
1298 ((_ifp)->if_eflags & IFEF_EXPENSIVE || \
1299 (_ifp)->if_delegated.expensive)
1300
/*
 * True only when the global low-power restriction is enabled AND either
 * the interface itself or its delegated interface carries
 * IFXF_LOW_POWER.  The two flag tests are grouped in parentheses so
 * the if_low_power_restricted gate applies to both; without them the
 * delegated-interface test bypassed the gate, because && binds more
 * tightly than ||.
 */
#define IFNET_IS_LOW_POWER(_ifp) \
	(if_low_power_restricted != 0 && \
	(((_ifp)->if_xflags & IFXF_LOW_POWER) || \
	((_ifp)->if_delegated.ifp != NULL && \
	((_ifp)->if_delegated.ifp->if_xflags & IFXF_LOW_POWER))))
1306
1307 #define IFNET_IS_CONSTRAINED(_ifp) \
1308 ((_ifp)->if_xflags & IFXF_CONSTRAINED || \
1309 (_ifp)->if_delegated.constrained)
1310
1311 /*
1312 * We don't support AWDL interface delegation.
1313 */
1314 #define IFNET_IS_AWDL_RESTRICTED(_ifp) \
1315 (((_ifp)->if_eflags & (IFEF_AWDL|IFEF_AWDL_RESTRICTED)) == \
1316 (IFEF_AWDL|IFEF_AWDL_RESTRICTED))
1317
1318 #define IFNET_IS_INTCOPROC(_ifp) \
1319 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1320 (_ifp)->if_subfamily == IFNET_SUBFAMILY_INTCOPROC)
1321
1322 #define IFNET_IS_VMNET(_ifp) \
1323 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1324 (_ifp)->if_subfamily == IFNET_SUBFAMILY_VMNET)
1325 /*
1326 * Indicate whether or not the immediate interface is IP over Thunderbolt.
1327 */
1328 #define IFNET_IS_THUNDERBOLT_IP(_ifp) \
1329 ((_ifp)->if_family == IFNET_FAMILY_ETHERNET && \
1330 (_ifp)->if_subfamily == IFNET_SUBFAMILY_THUNDERBOLT)
1331
1332 extern int if_index;
1333 extern struct ifnethead ifnet_head;
1334 extern struct ifnethead ifnet_ordered_head;
1335 extern struct ifnet **__counted_by(if_index) ifindex2ifnet;
1336 extern u_int32_t if_sndq_maxlen;
1337 extern u_int32_t if_rcvq_maxlen;
1338 extern struct ifaddr **ifnet_addrs;
1339 extern lck_attr_t ifa_mtx_attr;
1340 extern lck_grp_t ifa_mtx_grp;
1341 extern lck_grp_t ifnet_lock_group;
1342 extern lck_attr_t ifnet_lock_attr;
1343 extern ifnet_t lo_ifp;
1344 extern uint32_t net_wake_pkt_debug;
1345
1346 extern int if_addmulti(struct ifnet *, const struct sockaddr *,
1347 struct ifmultiaddr **);
1348 extern int if_addmulti_anon(struct ifnet *, const struct sockaddr *,
1349 struct ifmultiaddr **);
1350 extern int if_allmulti(struct ifnet *, int);
1351 extern int if_delmulti(struct ifnet *, const struct sockaddr *);
1352 extern int if_delmulti_ifma(struct ifmultiaddr *);
1353 extern int if_delmulti_anon(struct ifnet *, const struct sockaddr *);
1354 extern void if_down(struct ifnet *);
1355 extern int if_down_all(void);
1356 extern void if_up(struct ifnet *);
1357 __private_extern__ void if_updown(struct ifnet *ifp, int up);
1358 extern int ifioctl(struct socket *, u_long, caddr_t, struct proc *);
1359 extern int ifioctllocked(struct socket *, u_long, caddr_t, struct proc *);
1360 extern struct ifnet *ifunit(const char *);
1361 extern struct ifnet *ifunit_ref(const char *);
1362 extern int ifunit_extract(const char *src, char *dst, size_t dstlen, int *unit);
1363 extern struct ifnet *if_withname(struct sockaddr *);
1364 extern void if_qflush(struct ifnet *, struct ifclassq *, bool);
1365 extern void if_qflush_snd(struct ifnet *, bool);
1366 extern void if_qflush_sc(struct ifnet *, mbuf_svc_class_t, u_int32_t,
1367 u_int32_t *, u_int32_t *, int);
1368
1369 extern struct if_clone *if_clone_lookup(const char *, u_int32_t *);
1370 extern int if_clone_attach(struct if_clone *);
1371 extern void if_clone_detach(struct if_clone *);
1372 extern void *if_clone_softc_allocate(const struct if_clone *);
1373 extern void if_clone_softc_deallocate(const struct if_clone *, void *);
1374 extern u_int32_t if_functional_type(struct ifnet *, bool);
1375
1376 extern errno_t if_mcasts_update(struct ifnet *);
1377
1378 typedef enum {
1379 IFNET_LCK_ASSERT_EXCLUSIVE, /* RW: held as writer */
1380 IFNET_LCK_ASSERT_SHARED, /* RW: held as reader */
1381 IFNET_LCK_ASSERT_OWNED, /* RW: writer/reader, MTX: held */
1382 IFNET_LCK_ASSERT_NOTOWNED /* not held */
1383 } ifnet_lock_assert_t;
1384
/* Link-layer address bytes of the interface, via its if_lladdr ifaddr. */
#define IF_LLADDR(_ifp) \
	(LLADDR(SDL(((_ifp)->if_lladdr)->ifa_addr)))

/* True when _ind_ is a valid interface index: 1 .. if_index inclusive. */
#define IF_INDEX_IN_RANGE(_ind_) ((_ind_) > 0 && \
	(unsigned int)(_ind_) <= (unsigned int)if_index)
1390
1391 __private_extern__ void ifnet_lock_assert(struct ifnet *, ifnet_lock_assert_t);
1392 __private_extern__ void ifnet_lock_shared(struct ifnet *ifp);
1393 __private_extern__ void ifnet_lock_exclusive(struct ifnet *ifp);
1394 __private_extern__ void ifnet_lock_done(struct ifnet *ifp);
1395
1396 #if INET
1397 __private_extern__ void if_inetdata_lock_shared(struct ifnet *ifp);
1398 __private_extern__ void if_inetdata_lock_exclusive(struct ifnet *ifp);
1399 __private_extern__ void if_inetdata_lock_done(struct ifnet *ifp);
1400 #endif
1401
1402 __private_extern__ void if_inet6data_lock_shared(struct ifnet *ifp);
1403 __private_extern__ void if_inet6data_lock_exclusive(struct ifnet *ifp);
1404 __private_extern__ void if_inet6data_lock_done(struct ifnet *ifp);
1405
1406 __private_extern__ void ifnet_head_lock_shared(void);
1407 __private_extern__ void ifnet_head_lock_exclusive(void);
1408 __private_extern__ void ifnet_head_done(void);
1409 __private_extern__ void ifnet_head_assert_exclusive(void);
1410
1411 __private_extern__ errno_t ifnet_set_idle_flags_locked(ifnet_t, u_int32_t,
1412 u_int32_t);
1413 __private_extern__ int ifnet_is_attached(struct ifnet *, int refio);
1414 __private_extern__ void ifnet_incr_pending_thread_count(struct ifnet *);
1415 __private_extern__ void ifnet_decr_pending_thread_count(struct ifnet *);
1416 __private_extern__ void ifnet_incr_iorefcnt(struct ifnet *);
1417 __private_extern__ void ifnet_decr_iorefcnt(struct ifnet *);
1418 __private_extern__ boolean_t ifnet_datamov_begin(struct ifnet *);
1419 __private_extern__ void ifnet_datamov_end(struct ifnet *);
1420 __private_extern__ void ifnet_datamov_suspend(struct ifnet *);
1421 __private_extern__ boolean_t ifnet_datamov_suspend_if_needed(struct ifnet *);
1422 __private_extern__ void ifnet_datamov_drain(struct ifnet *);
1423 __private_extern__ void ifnet_datamov_suspend_and_drain(struct ifnet *);
1424 __private_extern__ void ifnet_datamov_resume(struct ifnet *);
1425 __private_extern__ void ifnet_set_start_cycle(struct ifnet *,
1426 struct timespec *);
1427 __private_extern__ void ifnet_set_poll_cycle(struct ifnet *,
1428 struct timespec *);
1429
1430 __private_extern__ void if_attach_ifa(struct ifnet *, struct ifaddr *);
1431 __private_extern__ void if_attach_link_ifa(struct ifnet *, struct ifaddr *);
1432 __private_extern__ void if_detach_ifa(struct ifnet *, struct ifaddr *);
1433 __private_extern__ void if_detach_link_ifa(struct ifnet *, struct ifaddr *);
1434
1435 __private_extern__ void dlil_if_lock(void);
1436 __private_extern__ void dlil_if_unlock(void);
1437 __private_extern__ void dlil_if_lock_assert(void);
1438
1439 extern struct ifaddr *ifa_ifwithaddr(const struct sockaddr *);
1440 extern struct ifaddr *ifa_ifwithaddr_locked(const struct sockaddr *);
1441 extern struct ifaddr *ifa_ifwithaddr_scoped(const struct sockaddr *,
1442 unsigned int);
1443 extern struct ifaddr *ifa_ifwithaddr_scoped_locked(const struct sockaddr *,
1444 unsigned int);
1445 extern struct ifaddr *ifa_ifwithdstaddr(const struct sockaddr *);
1446 extern struct ifaddr *ifa_ifwithnet(const struct sockaddr *);
1447 extern struct ifaddr *ifa_ifwithnet_scoped(const struct sockaddr *,
1448 unsigned int);
1449 extern struct ifaddr *ifa_ifwithroute(int, const struct sockaddr *,
1450 const struct sockaddr *);
1451 extern struct ifaddr *ifa_ifwithroute_locked(int, const struct sockaddr *,
1452 const struct sockaddr *);
1453 extern struct ifaddr *ifa_ifwithroute_scoped_locked(int,
1454 const struct sockaddr *, const struct sockaddr *, unsigned int);
1455 extern struct ifaddr *ifaof_ifpforaddr_select(const struct sockaddr *, struct ifnet *);
1456 extern struct ifaddr *ifaof_ifpforaddr(const struct sockaddr *, struct ifnet *);
1457 __private_extern__ struct ifaddr *ifa_ifpgetprimary(struct ifnet *, int);
1458 extern void ifa_addref(struct ifaddr *, int);
1459 extern struct ifaddr *ifa_remref(struct ifaddr *, int);
1460 extern void ifa_lock_init(struct ifaddr *);
1461 extern void ifa_lock_destroy(struct ifaddr *);
1462 extern void ifma_addref(struct ifmultiaddr *, int);
1463 extern void ifma_remref(struct ifmultiaddr *);
1464
1465 extern void ifa_init(void);
1466
1467 __private_extern__ struct in_ifaddr *ifa_foraddr(unsigned int);
1468 __private_extern__ struct in_ifaddr *ifa_foraddr_scoped(unsigned int,
1469 unsigned int);
1470
1471 struct ifreq;
1472 extern errno_t ifnet_getset_opportunistic(struct ifnet *, u_long,
1473 struct ifreq *, struct proc *);
1474 extern int ifnet_get_throttle(struct ifnet *, u_int32_t *);
1475 extern int ifnet_set_throttle(struct ifnet *, u_int32_t);
1476 extern errno_t ifnet_getset_log(struct ifnet *, u_long,
1477 struct ifreq *, struct proc *);
1478 extern int ifnet_set_log(struct ifnet *, int32_t, uint32_t, int32_t, int32_t);
1479 extern int ifnet_get_log(struct ifnet *, int32_t *, uint32_t *, int32_t *,
1480 int32_t *);
1481 extern int ifnet_notify_address(struct ifnet *, int);
1482 extern void ifnet_notify_data_threshold(struct ifnet *);
1483
1484 #define IF_AFDATA_RLOCK if_afdata_rlock
1485 #define IF_AFDATA_RUNLOCK if_afdata_unlock
1486 #define IF_AFDATA_WLOCK if_afdata_wlock
1487 #define IF_AFDATA_WUNLOCK if_afdata_unlock
1488 #define IF_AFDATA_WLOCK_ASSERT if_afdata_wlock_assert
1489 #define IF_AFDATA_LOCK_ASSERT if_afdata_lock_assert
1490 #define IF_AFDATA_UNLOCK_ASSERT if_afdata_unlock_assert
1491
1492 static inline void
if_afdata_rlock(struct ifnet * ifp,int af)1493 if_afdata_rlock(struct ifnet *ifp, int af)
1494 {
1495 switch (af) {
1496 #if INET
1497 case AF_INET:
1498 lck_rw_lock_shared(&ifp->if_inetdata_lock);
1499 break;
1500 #endif
1501 case AF_INET6:
1502 lck_rw_lock_shared(&ifp->if_inet6data_lock);
1503 break;
1504 default:
1505 VERIFY(0);
1506 /* NOTREACHED */
1507 }
1508 return;
1509 }
1510
1511 static inline void
if_afdata_runlock(struct ifnet * ifp,int af)1512 if_afdata_runlock(struct ifnet *ifp, int af)
1513 {
1514 switch (af) {
1515 #if INET
1516 case AF_INET:
1517 lck_rw_done(&ifp->if_inetdata_lock);
1518 break;
1519 #endif
1520 case AF_INET6:
1521 lck_rw_done(&ifp->if_inet6data_lock);
1522 break;
1523 default:
1524 VERIFY(0);
1525 /* NOTREACHED */
1526 }
1527 return;
1528 }
1529
1530 static inline void
if_afdata_wlock(struct ifnet * ifp,int af)1531 if_afdata_wlock(struct ifnet *ifp, int af)
1532 {
1533 switch (af) {
1534 #if INET
1535 case AF_INET:
1536 lck_rw_lock_exclusive(&ifp->if_inetdata_lock);
1537 break;
1538 #endif
1539 case AF_INET6:
1540 lck_rw_lock_exclusive(&ifp->if_inet6data_lock);
1541 break;
1542 default:
1543 VERIFY(0);
1544 /* NOTREACHED */
1545 }
1546 return;
1547 }
1548
1549 static inline void
if_afdata_unlock(struct ifnet * ifp,int af)1550 if_afdata_unlock(struct ifnet *ifp, int af)
1551 {
1552 switch (af) {
1553 #if INET
1554 case AF_INET:
1555 lck_rw_done(&ifp->if_inetdata_lock);
1556 break;
1557 #endif
1558 case AF_INET6:
1559 lck_rw_done(&ifp->if_inet6data_lock);
1560 break;
1561 default:
1562 VERIFY(0);
1563 /* NOTREACHED */
1564 }
1565 return;
1566 }
1567
1568 static inline void
if_afdata_wlock_assert(struct ifnet * ifp,int af)1569 if_afdata_wlock_assert(struct ifnet *ifp, int af)
1570 {
1571 #if !MACH_ASSERT
1572 #pragma unused(ifp)
1573 #endif
1574 switch (af) {
1575 #if INET
1576 case AF_INET:
1577 LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_EXCLUSIVE);
1578 break;
1579 #endif
1580 case AF_INET6:
1581 LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_EXCLUSIVE);
1582 break;
1583 default:
1584 VERIFY(0);
1585 /* NOTREACHED */
1586 }
1587 return;
1588 }
1589
1590 static inline void
if_afdata_unlock_assert(struct ifnet * ifp,int af)1591 if_afdata_unlock_assert(struct ifnet *ifp, int af)
1592 {
1593 #if !MACH_ASSERT
1594 #pragma unused(ifp)
1595 #endif
1596 switch (af) {
1597 #if INET
1598 case AF_INET:
1599 LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_NOTHELD);
1600 break;
1601 #endif
1602 case AF_INET6:
1603 LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_NOTHELD);
1604 break;
1605 default:
1606 VERIFY(0);
1607 /* NOTREACHED */
1608 }
1609 return;
1610 }
1611
1612 static inline void
if_afdata_lock_assert(struct ifnet * ifp,int af)1613 if_afdata_lock_assert(struct ifnet *ifp, int af)
1614 {
1615 #if !MACH_ASSERT
1616 #pragma unused(ifp)
1617 #endif
1618 switch (af) {
1619 #if INET
1620 case AF_INET:
1621 LCK_RW_ASSERT(&ifp->if_inetdata_lock, LCK_RW_ASSERT_HELD);
1622 break;
1623 #endif
1624 case AF_INET6:
1625 LCK_RW_ASSERT(&ifp->if_inet6data_lock, LCK_RW_ASSERT_HELD);
1626 break;
1627 default:
1628 VERIFY(0);
1629 /* NOTREACHED */
1630 }
1631 return;
1632 }
1633
struct in6_addr;
/*
 * IPv6 ifaddr lookups; the _scoped variant additionally takes a scope
 * (interface index) to constrain the search.
 */
__private_extern__ struct in6_ifaddr *ifa_foraddr6(struct in6_addr *);
__private_extern__ struct in6_ifaddr *ifa_foraddr6_scoped(struct in6_addr *,
    unsigned int);

/*
 * Copy-out helpers: convert the kernel-internal statistics record
 * (struct if_data_internal, taken as const input) into the various
 * exported layouts for the given `ifp'.
 */
__private_extern__ void if_data_internal_to_if_data(struct ifnet *ifp,
    const struct if_data_internal *if_data_int, struct if_data *if_data);
__private_extern__ void if_data_internal_to_if_data64(struct ifnet *ifp,
    const struct if_data_internal *if_data_int, struct if_data64 *if_data64);
__private_extern__ void if_copy_traffic_class(struct ifnet *ifp,
    struct if_traffic_class *if_tc);
__private_extern__ void if_copy_data_extended(struct ifnet *ifp,
    struct if_data_extended *if_de);
__private_extern__ void if_copy_packet_stats(struct ifnet *ifp,
    struct if_packet_stats *if_ps);
__private_extern__ void if_copy_rxpoll_stats(struct ifnet *ifp,
    struct if_rxpoll_stats *if_rs);
__private_extern__ void if_copy_netif_stats(struct ifnet *ifp,
    struct if_netif_stats *if_ns);

/*
 * Per-ifnet route lookups returning an rtentry for the destination
 * address; presumably served from an ifnet-level cache (per the name) —
 * confirm against the implementation.
 */
__private_extern__ struct rtentry *ifnet_cached_rtlookup_inet(struct ifnet *,
    struct in_addr);
__private_extern__ struct rtentry *ifnet_cached_rtlookup_inet6(struct ifnet *,
    struct in6_addr *);
1658
/*
 * Protocol list enumeration; if_free_protolist() releases the array
 * obtained via if_get_protolist().
 */
__private_extern__ u_int32_t if_get_protolist(struct ifnet * ifp,
    u_int32_t *protolist, u_int32_t count);
__private_extern__ void if_free_protolist(u_int32_t *list);

/* Interface state (quality/availability) accessors and updaters. */
__private_extern__ errno_t if_state_update(struct ifnet *,
    struct if_interface_state *);
__private_extern__ void if_get_state(struct ifnet *,
    struct if_interface_state *);
__private_extern__ errno_t if_probe_connectivity(struct ifnet *ifp,
    u_int32_t conn_probe);
__private_extern__ void if_lqm_update(struct ifnet *, int32_t, int);
__private_extern__ void ifnet_update_sndq(struct ifclassq *, cqev_t);
__private_extern__ void ifnet_update_rcv(struct ifnet *, cqev_t);

__private_extern__ void ifnet_flowadv(uint32_t);

/* Link bandwidth/latency configuration and queries. */
__private_extern__ errno_t ifnet_set_input_bandwidths(struct ifnet *,
    struct if_bandwidths *);
__private_extern__ errno_t ifnet_set_output_bandwidths(struct ifnet *,
    struct if_bandwidths *, boolean_t);
__private_extern__ u_int64_t ifnet_output_linkrate(struct ifnet *);
__private_extern__ u_int64_t ifnet_input_linkrate(struct ifnet *);

__private_extern__ errno_t ifnet_set_input_latencies(struct ifnet *,
    struct if_latencies *);
__private_extern__ errno_t ifnet_set_output_latencies(struct ifnet *,
    struct if_latencies *, boolean_t);

__private_extern__ void ifnet_clear_netagent(uuid_t);

/* Network signature (per-AF) set/get. */
__private_extern__ int ifnet_set_netsignature(struct ifnet *, uint8_t,
    uint8_t, uint16_t, uint8_t *);
__private_extern__ int ifnet_get_netsignature(struct ifnet *, uint8_t,
    uint8_t *, uint16_t *, uint8_t *);

/* NAT64 prefix set/get. */
struct ipv6_prefix;
__private_extern__ int ifnet_set_nat64prefix(struct ifnet *,
    struct ipv6_prefix *);
__private_extern__ int ifnet_get_nat64prefix(struct ifnet *,
    struct ipv6_prefix *);

/* Required exclusive ifnet_head lock */
__private_extern__ void ifnet_remove_from_ordered_list(struct ifnet *);

/* Interface generation counter (bumped on configuration changes). */
__private_extern__ void ifnet_increment_generation(struct ifnet *);
__private_extern__ u_int32_t ifnet_get_generation(struct ifnet *);

/* Adding and deleting netagents will take ifnet lock */
__private_extern__ int if_add_netagent(struct ifnet *, uuid_t);
__private_extern__ int if_add_netagent_locked(struct ifnet *, uuid_t);
__private_extern__ int if_delete_netagent(struct ifnet *, uuid_t);
__private_extern__ boolean_t if_check_netagent(struct ifnet *, uuid_t);
1711 #if SKYWALK
1712 extern unsigned int if_enable_fsw_ip_netagent;
1713 static inline boolean_t
if_is_fsw_ip_netagent_enabled(void)1714 if_is_fsw_ip_netagent_enabled(void)
1715 {
1716 return if_enable_fsw_ip_netagent != 0;
1717 }
1718 extern unsigned int if_enable_fsw_transport_netagent;
1719 static inline boolean_t
if_is_fsw_transport_netagent_enabled(void)1720 if_is_fsw_transport_netagent_enabled(void)
1721 {
1722 return if_enable_fsw_transport_netagent != 0;
1723 }
1724 static inline boolean_t
if_is_fsw_netagent_enabled(void)1725 if_is_fsw_netagent_enabled(void)
1726 {
1727 return if_is_fsw_transport_netagent_enabled() ||
1728 if_is_fsw_ip_netagent_enabled();
1729 }
1730 #endif /* SKYWALK */
1731
/* QoS marking mode, packet preamble, and per-flow stats helpers. */
extern int if_set_qosmarking_mode(struct ifnet *, u_int32_t);
__private_extern__ uint32_t ifnet_mbuf_packetpreamblelen(struct ifnet *);
__private_extern__ void intf_event_enqueue_nwk_wq_entry(struct ifnet *ifp,
    struct sockaddr *addrp, uint32_t intf_event_code);
__private_extern__ void ifnet_update_stats_per_flow(struct ifnet_stats_per_flow *,
    struct ifnet *);
__private_extern__ int if_get_tcp_kao_max(struct ifnet *);
#if XNU_TARGET_OS_OSX
__private_extern__ errno_t ifnet_framer_stub(struct ifnet *, struct mbuf **,
    const struct sockaddr *, const char *, const char *, u_int32_t *,
    u_int32_t *);
#endif /* XNU_TARGET_OS_OSX */
/* mbuf transmit-enqueue entry points (single mbuf, chain, and netem). */
__private_extern__ void ifnet_enqueue_multi_setup(struct ifnet *, uint16_t,
    uint16_t);
__private_extern__ errno_t ifnet_enqueue_mbuf(struct ifnet *, struct mbuf *,
    boolean_t, boolean_t *);
__private_extern__ errno_t ifnet_enqueue_mbuf_chain(struct ifnet *,
    struct mbuf *, struct mbuf *, uint32_t, uint32_t, boolean_t, boolean_t *);
__private_extern__ int ifnet_enqueue_netem(void *handle, pktsched_pkt_t *pkts,
    uint32_t n_pkts);
1751 uint32_t n_pkts);
#if SKYWALK
/* Skywalk __kern_packet transmit-enqueue and handler override entry points. */
struct __kern_packet;
extern errno_t ifnet_enqueue_pkt(struct ifnet *,
    struct __kern_packet *, boolean_t, boolean_t *);
extern errno_t ifnet_enqueue_ifcq_pkt(struct ifnet *, struct ifclassq *,
    struct __kern_packet *, boolean_t, boolean_t *);
extern errno_t ifnet_enqueue_pkt_chain(struct ifnet *, struct __kern_packet *,
    struct __kern_packet *, uint32_t, uint32_t, boolean_t, boolean_t *);
extern errno_t ifnet_enqueue_ifcq_pkt_chain(struct ifnet *, struct ifclassq *,
    struct __kern_packet *, struct __kern_packet *, uint32_t, uint32_t, boolean_t,
    boolean_t *);
extern errno_t ifnet_set_output_handler(struct ifnet *, ifnet_output_func);
extern void ifnet_reset_output_handler(struct ifnet *);
extern errno_t ifnet_set_start_handler(struct ifnet *, ifnet_start_func);
extern void ifnet_reset_start_handler(struct ifnet *);

/*
 * Atomically bump the flowswitch IP-address generation count.
 * NOTE(review): the expansion already ends in `;', so call sites written
 * with their own trailing semicolon produce an extra empty statement —
 * harmless in a braced body, but verify before use in an unbraced
 * if/else arm.
 */
#define SK_NXS_MS_IF_ADDR_GENCNT_INC(ifp) \
	atomic_add_32(&(ifp)->if_nx_flowswitch.if_fsw_ipaddr_gencnt, 1);
#endif /* SKYWALK */

/* Low-power mode knobs and interface flag (eflags/xflags) mutators. */
extern int if_low_power_verbose;
extern int if_low_power_restricted;
extern void if_low_power_evhdlr_init(void);
extern int if_set_low_power(struct ifnet *, bool);
extern u_int32_t if_set_eflags(ifnet_t, u_int32_t);
extern void if_clear_eflags(ifnet_t, u_int32_t);
extern u_int32_t if_set_xflags(ifnet_t, u_int32_t);
extern void if_clear_xflags(ifnet_t, u_int32_t);
extern boolean_t sa_equal(const struct sockaddr *, const struct sockaddr *);
/* Traffic-rule generation id/count maintenance. */
extern void ifnet_update_traffic_rule_genid(struct ifnet *);
extern boolean_t ifnet_sync_traffic_rule_genid(struct ifnet *, uint32_t *);
extern void ifnet_update_traffic_rule_count(struct ifnet *, uint32_t);
1784
1785 #endif /* BSD_KERNEL_PRIVATE */
1786 #endif /* DRIVERKIT */
1787
1788 #endif /* !_NET_IF_VAR_PRIVATE_H_ */
1789