xref: /xnu-12377.41.6/bsd/skywalk/nexus/netif/nx_netif.h (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2015-2025 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_NEXUS_NETIF_H_
30 #define _SKYWALK_NEXUS_NETIF_H_
31 
32 #include <skywalk/os_skywalk_private.h>
33 #include <skywalk/nexus/nexus_pktq.h>
34 
35 #if CONFIG_NEXUS_NETIF
36 
37 #define NEXUS_PROVIDER_NET_IF           "com.apple.nexus.netif"
38 
39 #define NX_NETIF_MAXPORTS       128
40 #define NX_NETIF_EVENT_RING_NUM      1     /* number of event rings */
41 #define NX_NETIF_EVENT_RING_SIZE     32    /* default event ring size */
42 
/*
 * State for one attached interface packet filter.  Filters live on the
 * owning nx_netif's nif_filter_list; see nx_netif_filter_add()/
 * nx_netif_filter_remove() declared below.
 */
struct netif_filter {
	STAILQ_ENTRY(netif_filter) nf_link;     /* nif_filter_list linkage */
	nexus_port_t            nf_port;        /* nexus port bound to this filter */
	uint32_t                nf_refcnt;      /* outstanding references */
	void                    *nf_cb_arg;     /* opaque argument for nf_cb_func */
	errno_t                 (*nf_cb_func)(void *,
	    struct __kern_packet *, uint32_t);  /* per-packet callback */
};
STAILQ_HEAD(netif_filter_head, netif_filter);
52 
/*
 * Flow match descriptor used by the netif virtual-port demux
 * (see nx_netif_flow_match()/nx_netif_flow_classify() below).
 */
struct netif_flow_desc {
	uint16_t        fd_ethertype;   /* ethertype to match */
	struct in6_addr fd_laddr;       /* local IPv6 address */
	struct in6_addr fd_raddr;       /* remote IPv6 address */
};
58 
/*
 * Per-port information for a netif nexus port: the generic port info
 * header followed by the port's flow descriptor.
 */
struct netif_port_info {
	struct nx_port_info_header npi_hdr;     /* common port info header */
	struct netif_flow_desc  npi_fd;         /* flow descriptor for this port */
};
63 
/*
 * A flow entry for a netif virtual port; linked on both the per-netif
 * nif_flow_list (nf_link) and a flow table (nf_table_link).
 */
struct netif_flow {
	SLIST_ENTRY(netif_flow) nf_link;        /* nif_flow_list linkage */
	SLIST_ENTRY(netif_flow) nf_table_link;  /* flow table linkage */
	nexus_port_t            nf_port;        /* owning nexus port */
	uint32_t                nf_refcnt;      /* see nx_netif_flow_release() */
	struct netif_flow_desc  nf_desc;        /* match criteria */
	void                    *nf_cb_arg;     /* opaque argument for nf_cb_func */
	errno_t                 (*nf_cb_func)(void *,
	    void *, uint32_t);                  /* packet delivery callback */
};
74 
/* Flow table flavors */
typedef enum {
	FT_TYPE_ETHERTYPE,      /* demux on fd_ethertype */
	FT_TYPE_IPV6_ULA        /* demux on IPv6 ULA addresses */
} netif_flowtable_type_t;
79 
/*
 * A flow table instance: the ops vector plus the implementation's
 * private state.
 */
struct netif_flowtable {
	struct netif_flowtable_ops      *ft_ops;        /* method table */
	void                            *ft_internal;   /* implementation state */
};
84 
/*
 * Flow table method suite; a concrete table implementation supplies
 * these operations via struct netif_flowtable_ops below.
 */
typedef int netif_flow_lookup_t(struct netif_flowtable *,
    struct __kern_packet *, uint32_t, struct netif_flow **);
typedef boolean_t netif_flow_match_t(struct netif_flow_desc *,
    struct netif_flow_desc *);
typedef int netif_flow_info_t(struct __kern_packet *,
    struct netif_flow_desc *, uint32_t);
typedef int netif_flow_insert_t(struct netif_flowtable *,
    struct netif_flow *);
typedef void netif_flow_remove_t(struct netif_flowtable *,
    struct netif_flow *);
typedef struct netif_flowtable *netif_flow_table_alloc_t(
	struct netif_flowtable_ops *);
typedef void netif_flow_table_free_t(struct netif_flowtable *);

/* Virtual method table bundling the flow table operations above */
struct netif_flowtable_ops {
	netif_flow_lookup_t           *nfo_lookup;      /* packet -> flow lookup */
	netif_flow_match_t            *nfo_match;       /* compare two descriptors */
	netif_flow_info_t             *nfo_info;        /* descriptor from packet */
	netif_flow_insert_t           *nfo_insert;      /* add flow to table */
	netif_flow_remove_t           *nfo_remove;      /* remove flow from table */
	netif_flow_table_alloc_t      *nfo_table_alloc; /* allocate a table */
	netif_flow_table_free_t       *nfo_table_free;  /* free a table */
};
108 
109 SLIST_HEAD(netif_flow_head, netif_flow);
110 
/*
 * A single driver RX or TX queue belonging to a queue set
 * (see struct netif_qset below); cache-line aligned.
 */
struct netif_queue {
	decl_lck_mtx_data(, nq_lock);
	struct netif_qset               *nq_qset; /* backpointer to parent netif qset */
	struct pktq                     nq_pktq;  /* packet queue */
	struct netif_qstats             nq_stats; /* per-queue statistics */
	uint64_t                        nq_accumulated_bytes;
	uint64_t                        nq_accumulated_pkts;
	uint64_t                        nq_accumulate_start; /* in seconds */
	void                            *nq_ctx;  /* opaque queue context */
	kern_packet_svc_class_t         nq_svc; /* service class of TX queue */
	uint16_t                        nq_flags; /* NETIF_QUEUE_* (see below) */
} __sk_aligned(64);
123 
/* values for nq_flags */
#define NETIF_QUEUE_EXT_INITED   0x0001 /* nxnpi_queue_init() succeeded */
#define NETIF_QUEUE_IS_RX        0x0002 /* RX queue, else TX */

/*
 * Accessors for the trailing nqs_driver_queues[] array of a queue set:
 * RX queues occupy the first nqs_num_rx_queues slots, TX queues follow.
 */
#define _NETIF_QSET_QUEUE(_p, _n)    \
    (struct netif_queue *)(void *)((uint8_t *)((_p)->nqs_driver_queues) + \
    ((_n) * sizeof(struct netif_queue)))
#define NETIF_QSET_RX_QUEUE(_p, _n)    _NETIF_QSET_QUEUE(_p, _n)
#define NETIF_QSET_TX_QUEUE(_p, _n)    \
    _NETIF_QSET_QUEUE(_p, (_p)->nqs_num_rx_queues + (_n))

/*
 * Queue set id: internal logical link id in bits 16..31, queue set
 * index in bits 0..15; the top 32 bits are unused for now.
 */
#define NETIF_QSET_ID_ENCODE(llink_id_internal, qset_idx) \
    ((((llink_id_internal) << 16) | (qset_idx)) & 0xffffffff)
138 
/*
 * A set of driver queues belonging to a logical link; queue sets are
 * kept on the parent netif_llink's nll_qset_list.
 */
struct netif_qset {
	struct netif_llink         *nqs_llink; /* backpointer to parent logical link */
	struct ifclassq            *nqs_ifcq;  /* interface class queue */
	SLIST_ENTRY(netif_qset)    nqs_list;   /* nll_qset_list linkage */
	void                       *nqs_ctx; /* context provided by driver */
	uint64_t                   nqs_id;  /* queue set identifier */
	uint8_t                    nqs_idx; /* queue set index */
	uint16_t                   nqs_flags; /* NETIF_QSET_FLAG_* (see below) */
	uint8_t                    nqs_num_rx_queues;
	uint8_t                    nqs_num_tx_queues;
	uint8_t                    nqs_num_queues; /* total count of nqs_driver_queues[] */
	/*
	 * nqs_driver_queues will be organized as:
	 * nqs_driver_queues[0..nqs_num_rx_queues-1] will hold RX queues.
	 * nqs_driver_queues[nqs_num_rx_queues..nqs_num_queues-1] will hold
	 * TX queues.
	 */
	struct netif_queue         nqs_driver_queues[__counted_by(nqs_num_queues)]
	__attribute__((aligned(sizeof(uint64_t))));
};
158 
159 /* values for nqs_flags */
160 #define NETIF_QSET_FLAG_DEFAULT     0x0001 /* default queue set of the logical link */
161 #define NETIF_QSET_FLAG_AQM         0x0002 /* provides AQM */
162 #define NETIF_QSET_FLAG_LOW_LATENCY 0x0004 /* provides low latency service */
163 #define NETIF_QSET_FLAG_EXT_INITED  0x0008 /* nxnpi_qset_init() succeeded */
164 
165 #define NETIF_DEFAULT_QSET(_qs)    ((_qs)->nqs_flags & NETIF_QSET_FLAG_DEFAULT)
166 
/*
 * A logical link exposed by the driver, carrying one or more queue
 * sets.  Logical links are kept on nx_netif's nif_llink_list and are
 * reference counted via nll_refcnt.
 */
struct netif_llink {
	struct nx_netif             *nll_nif; /* backpointer to parent netif instance */
	STAILQ_ENTRY(netif_llink)   nll_link; /* nif_llink_list linkage */
	SLIST_HEAD(, netif_qset)    nll_qset_list; /* queue sets of this link */
	struct netif_qset           *nll_default_qset; /* default queue set */
	struct ifclassq             *nll_ifcq; /* interface class queue */
	struct os_refcnt            nll_refcnt; /* see nx_netif_llink_retain/release */
#define NETIF_LLINK_ID_DEFAULT    0
	kern_nexus_netif_llink_id_t nll_link_id; /* driver-visible link id */
	uint16_t                    nll_link_id_internal; /* used in NETIF_QSET_ID_ENCODE() */
	uint16_t                    nll_qset_cnt; /* number of queue sets */
	uint8_t                     nll_state; /* NETIF_LLINK_STATE_* (see below) */
	uint8_t                     nll_flags; /* NETIF_LLINK_FLAG_* (see below) */
	void                        *nll_ctx; /* context provided by driver */
};
STAILQ_HEAD(netif_llink_head, netif_llink);

/* values for nll_flags */
#define NETIF_LLINK_FLAG_DEFAULT    0x1 /* default logical link */

/* values for nll_state */
#define NETIF_LLINK_STATE_INIT         0x1 /* Initialized and ready for use */
#define NETIF_LLINK_STATE_DESTROYED    0x2 /* not available for use */

#define NETIF_DEFAULT_LLINK(_ll)  ((_ll)->nll_flags & NETIF_LLINK_FLAG_DEFAULT)
192 
SLIST_HEAD(netif_agent_flow_head, netif_agent_flow);
/*
 * A flow created through the netif netagent; see
 * nx_netif_netagent_flow_add()/nx_netif_netagent_flow_del() below.
 */
struct netif_agent_flow {
	SLIST_ENTRY(netif_agent_flow) naf_link; /* nif_agent_flow_list linkage */
	uuid_t                  naf_flow_uuid;  /* flow identifier */
	uuid_t                  naf_bind_key;   /* bind key (cf. struct nxbind) */
	nexus_port_t            naf_nx_port;    /* nexus port used by this flow */
	uint32_t                naf_flags;
	pid_t                   naf_pid;        /* owning process */
	union sockaddr_in_4_6   naf_daddr;      /* destination address */
	union sockaddr_in_4_6   naf_saddr;      /* source address */
};
204 
205 #define NIFNA(_na)       (__container_of((_na), struct nexus_netif_adapter, nifna_up))
206 
207 /*
208  * Values for nif_flags
209  * Used for describing the internal state of the nx_netif structure
210  */
211 #define NETIF_FLAG_LOW_LATENCY          0x00000001
212 #define NETIF_FLAG_COMPAT               0x00000002
213 #define NETIF_FLAG_LLINK_INITIALIZED    0x00000004
214 #define NETIF_FLAG_CHANGE_PENDING       0x00000008
215 
216 #define NETIF_IS_LOW_LATENCY(n) \
217     (((n)->nif_flags & NETIF_FLAG_LOW_LATENCY) != 0)
218 #define NETIF_IS_COMPAT(n) \
219     (((n)->nif_flags & NETIF_FLAG_COMPAT) != 0)
220 #define NETIF_LLINK_ENABLED(n) \
221     (((n)->nif_flags & NETIF_FLAG_LLINK_INITIALIZED) != 0)
222 #define NETIF_DEFAULT_DROP_ENABLED(n) \
223     (nx_netif_filter_default_drop != 0 && \
224     (((n)->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0))
225 
226 /* nif_agent_flags */
227 #define NETIF_AGENT_FLAG_REGISTERED     0x00000001
228 #define NETIF_AGENT_FLAG_ADDED          0x00000002
229 
230 /* nif_filter_flags */
231 #define NETIF_FILTER_FLAG_INITIALIZED   0x00000001
232 #define NETIF_FILTER_FLAG_ENABLED       0x00000002
233 
234 /* nif_flow_flags */
235 #define NETIF_FLOW_FLAG_INITIALIZED     0x00000001
236 #define NETIF_FLOW_FLAG_ENABLED         0x00000002
237 
238 /* nif_llink_flags */
239 #define NETIF_LLINK_FLAG_INITIALIZED    0x00000001
240 
/* Used by netif_hwna_set_mode() */
typedef enum {
	NETIF_MODE_NONE,
	NETIF_MODE_FSW, /* hardware NA paired with a flowswitch */
	NETIF_MODE_LLW  /* low-latency mode; see nx_netif_llw_detach_notify() */
} netif_mode_t;

/* nif capabilities */
#define NETIF_CAPAB_INTERFACE_ADVISORY 0x00000001 /* interface advisory config */
#define NETIF_CAPAB_QSET_EXTENSIONS    0x00000002 /* qset extension callbacks */
#define NETIF_CAPAB_RX_FLOW_STEERING   0x00000004 /* RX flow steering config */

/* qset extension capability: steering-info notify callback + context */
struct netif_qset_extensions {
	kern_nexus_capab_qsext_notify_steering_info_fn_t qe_notify_steering_info;
	void *qe_prov_ctx;      /* provider context passed to the callback */
};

/* RX flow steering capability: config callback + context */
struct netif_rx_flow_steering {
	kern_nexus_capab_rx_flow_steering_config_fn_t config_fn;
	void *prov_ctx;         /* provider context passed to config_fn */
};
262 
263 /*
264  * nx_netif is a descriptor for a netif nexus instance.
265  */
266 struct nx_netif {
267 	decl_lck_rw_data(, nif_lock);
268 	struct kern_nexus       *nif_nx;
269 
270 	struct nxbind           *nif_dev_nxb;
271 	struct nxbind           *nif_host_nxb;
272 	uuid_t                  nif_uuid;       /* attachment UUID */
273 	struct netif_stats      nif_stats;
274 	uint32_t                nif_flags;
275 	struct os_refcnt        nif_refcnt;
276 
277 	decl_lck_mtx_data(, nif_agent_lock);
278 	struct netif_agent_flow_head nif_agent_flow_list;
279 	uint32_t                nif_agent_flow_cnt;
280 	uint32_t                nif_agent_flags;
281 	netagent_session_t      nif_agent_session;
282 	uuid_t                  nif_agent_uuid;
283 
284 	uint32_t                nif_hwassist;
285 	uint32_t                nif_capabilities;
286 	uint32_t                nif_capenable;
287 	uint64_t                nif_input_rate; /* device input rate limit */
288 
289 	struct ifnet            *nif_ifp;
290 	struct nx_flowswitch    *nif_fsw;       /* attached flowswitch nexus */
291 	struct sk_nexusadv      *nif_fsw_nxadv; /* flowswitch nexus advisory */
292 	struct netif_nexus_advisory *nif_netif_nxadv; /* netif nexus advisory */
293 
294 	/* packet-mbuf copy routines */
295 	pkt_copy_from_mbuf_t    *nif_pkt_copy_from_mbuf;
296 	pkt_copy_to_mbuf_t      *nif_pkt_copy_to_mbuf;
297 	pkt_copy_from_pkt_t     *nif_pkt_copy_from_pkt;
298 
299 	/* packet filtering */
300 	decl_lck_mtx_data(, nif_filter_lock);
301 	uint32_t                nif_filter_flags;
302 	uint32_t                nif_filter_vp_cnt;
303 	uint32_t                nif_filter_cnt;
304 	struct kern_pbufpool    *nif_filter_pp;
305 	struct netif_filter_head nif_filter_list;
306 	union {
307 		struct nx_mbq   nif_tx_processed_mbq[MBUF_TC_MAX];
308 		struct nx_pktq  nif_tx_processed_pktq[KPKT_TC_MAX];
309 	};
310 
311 	/* virtual port */
312 	decl_lck_mtx_data(, nif_flow_lock);
313 	uint32_t                nif_vp_cnt;
314 	uint32_t                nif_flow_flags;
315 	uint32_t                nif_flow_cnt;
316 	struct netif_flow_head  nif_flow_list;
317 	struct netif_flowtable  *nif_flow_table;
318 	struct kern_channel     *nif_hw_ch;
319 	uint32_t                nif_hw_ch_refcnt;
320 
321 	/* logical link */
322 	decl_lck_rw_data(, nif_llink_lock);
323 	struct kern_nexus_netif_llink_init *nif_default_llink_params;
324 	struct netif_llink         *nif_default_llink;
325 	STAILQ_HEAD(, netif_llink) nif_llink_list;
326 	uint16_t                   nif_llink_cnt;
327 
328 	/* capability configuration callback function and context */
329 	uint32_t                nif_extended_capabilities;
330 	kern_nexus_capab_interface_advisory_config_fn_t nif_intf_adv_config;
331 	void *nif_intf_adv_prov_ctx;
332 
333 	struct netif_qset_extensions nif_qset_extensions;
334 
335 	struct netif_rx_flow_steering nif_rx_flow_steering;
336 #if (DEVELOPMENT || DEBUG)
337 	struct skoid            nif_skoid;
338 #endif /* !DEVELOPMENT && !DEBUG */
339 };
340 
341 #define NX_NETIF_PRIVATE(_nx) ((struct nx_netif *)(_nx)->nx_arg)
342 
343 #define NETIF_RWINIT(_nif)                \
344 	lck_rw_init(&(_nif)->nif_lock, &nexus_lock_group, &nexus_lock_attr)
345 #define NETIF_WLOCK(_nif)                 \
346 	lck_rw_lock_exclusive(&(_nif)->nif_lock)
347 #define NETIF_WUNLOCK(_nif)               \
348 	lck_rw_unlock_exclusive(&(_nif)->nif_lock)
349 #define NETIF_WLOCKTORLOCK(_nif)          \
350 	lck_rw_lock_exclusive_to_shared(&(_nif)->nif_lock)
351 #define NETIF_RLOCK(_nif)                 \
352 	lck_rw_lock_shared(&(_nif)->nif_lock)
353 #define NETIF_RLOCKTOWLOCK(_nif)          \
354 	lck_rw_lock_shared_to_exclusive(&(_nif)->nif_lock)
355 #define NETIF_RTRYLOCK(_nif)              \
356 	lck_rw_try_lock(&(_nif)->nif_lock, LCK_RW_TYPE_SHARED)
357 #define NETIF_RUNLOCK(_nif)               \
358 	lck_rw_unlock_shared(&(_nif)->nif_lock)
359 #define NETIF_UNLOCK(_nif)                \
360 	lck_rw_done(&(_nif)->nif_lock)
361 #define NETIF_RWDESTROY(_nif)             \
362 	lck_rw_destroy(&(_nif)->nif_lock, &nexus_lock_group)
363 #define NETIF_WLOCK_ASSERT_HELD(_nif)     \
364 	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_EXCLUSIVE)
365 #define NETIF_RLOCK_ASSERT_HELD(_nif)     \
366 	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_SHARED)
367 #define NETIF_LOCK_ASSERT_HELD(_nif)      \
368 	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_HELD)
369 
370 SYSCTL_DECL(_kern_skywalk_netif);
371 
372 /*
373  * Macros to determine if an interface is skywalk capable or skywalk enabled.
374  * See the magic field in struct nexus_adapter.
375  */
376 #define SKYWALK_CAPABLE(ifp)                                            \
377 	(NA(ifp) != NULL && (ifnet_capabilities_supported(ifp) & IFCAP_SKYWALK))
378 
379 #define SKYWALK_SET_CAPABLE(ifp) do {                                   \
380 	ifnet_lock_exclusive(ifp);                                      \
381 	(ifp)->if_capabilities |= IFCAP_SKYWALK;                        \
382 	(ifp)->if_capenable |= IFCAP_SKYWALK;                           \
383 	ifnet_lock_done(ifp);                                           \
384 } while (0)
385 
386 #define SKYWALK_CLEAR_CAPABLE(ifp) do {                                 \
387 	ifnet_lock_exclusive(ifp);                                      \
388 	(ifp)->if_capabilities &= ~IFCAP_SKYWALK;                       \
389 	(ifp)->if_capenable &= ~IFCAP_SKYWALK;                          \
390 	ifnet_lock_done(ifp);                                           \
391 } while (0)
392 
393 #define SKYWALK_NATIVE(ifp)                                             \
394 	((ifp)->if_eflags & IFEF_SKYWALK_NATIVE)
395 
/*
 * Mitigation operating modes; see the adaptive-mitigation fields in
 * struct nx_netif_mit below.
 */
typedef enum {
	MIT_MODE_SIMPLE,                /* no stats, no delay (NETIF_MITF_SIMPLE) */
	MIT_MODE_ADVANCED_STATIC,       /* fixed config selector */
	MIT_MODE_ADVANCED_DYNAMIC,      /* config selector adapts to load */
} mit_mode_t;

/*
 * Mitigation support.  One row of the per-interface mitigation
 * configuration table (mit_tbl[] in struct nx_netif_mit).
 */
struct mit_cfg_tbl {
	uint32_t cfg_plowat;            /* packets low watermark */
	uint32_t cfg_phiwat;            /* packets high watermark */
	uint32_t cfg_blowat;            /* bytes low watermark */
	uint32_t cfg_bhiwat;            /* bytes high watermark */
	uint32_t cfg_ival;              /* delay interval (in microsecond) */
};
412 
#define NETIF_MIT_CFG_TBL_MAX_CFG       5 /* rows in mit_tbl[] */

/*
 * Per-ring "interrupt" mitigation state; see the mitigation API
 * declarations below (nx_netif_mit_init() etc.).
 */
struct nx_netif_mit {
	decl_lck_spin_data(, mit_lock);
	volatile struct __kern_channel_ring *mit_ckr;  /* kring backpointer */
	uint32_t        mit_flags;      /* NETIF_MITF_* (see below) */
	uint32_t        mit_requests;
	uint32_t        mit_interval;

	/*
	 * Adaptive mitigation.
	 */
	uint32_t        mit_cfg_idx_max;        /* highest config selector */
	uint32_t        mit_cfg_idx;            /* current config selector */
	const struct mit_cfg_tbl *mit_cfg;      /* current config mapping */
	mit_mode_t      mit_mode;               /* current mode */
	uint32_t        mit_packets_avg;        /* average # of packets */
	uint32_t        mit_packets_min;        /* smallest # of packets */
	uint32_t        mit_packets_max;        /* largest # of packets */
	uint32_t        mit_bytes_avg;          /* average # of bytes */
	uint32_t        mit_bytes_min;          /* smallest # of bytes */
	uint32_t        mit_bytes_max;          /* largest # of bytes */

	struct pktcntr  mit_sstats;             /* pkts & bytes per sampling */
	struct timespec mit_mode_holdtime;      /* mode holdtime in nsec */
	struct timespec mit_mode_lasttime;      /* last mode change time nsec */
	struct timespec mit_sample_time;        /* sampling holdtime in nsec */
	struct timespec mit_sample_lasttime;    /* last sampling time in nsec */
	struct timespec mit_start_time;         /* time of start work in nsec */

	struct thread   *mit_thread;            /* mitigation worker thread */
	char            mit_name[MAXTHREADNAMESIZE]; /* worker thread name */

	const struct ifnet      *mit_netif_ifp;
	/* interface-specific mitigation table */
	struct mit_cfg_tbl mit_tbl[NETIF_MIT_CFG_TBL_MAX_CFG];

#if (DEVELOPMENT || DEBUG)
	struct skoid    mit_skoid;
#endif /* DEVELOPMENT || DEBUG */
};
454 
455 #define NETIF_MITF_INITIALIZED  0x00000001      /* has been initialized */
456 #define NETIF_MITF_SAMPLING     0x00000002      /* busy sampling stats */
457 #define NETIF_MITF_SIMPLE       0x00000004      /* no stats, no delay */
458 #define NETIF_MITF_READY        0x10000000      /* thread is ready */
459 #define NETIF_MITF_RUNNING      0x20000000      /* thread is running */
460 #define NETIF_MITF_TERMINATING  0x40000000      /* thread is terminating */
461 #define NETIF_MITF_TERMINATED   0x80000000      /* thread is terminated */
462 
463 #define MIT_SPIN_LOCK(_mit)                     \
464 	lck_spin_lock(&(_mit)->mit_lock)
465 #define MIT_SPIN_LOCK_ASSERT_HELD(_mit)         \
466 	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_OWNED)
467 #define MIT_SPIN_LOCK_ASSERT_NOTHELD(_mit)      \
468 	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_NOTOWNED)
469 #define MIT_SPIN_UNLOCK(_mit)                   \
470 	lck_spin_unlock(&(_mit)->mit_lock)
471 
472 struct nexus_netif_adapter {
473 	/*
474 	 * This is an overlay structure on nexus_adapter;
475 	 * make sure it contains 'up' as the first member.
476 	 */
477 	struct nexus_adapter      nifna_up;
478 	struct nx_netif           *nifna_netif;
479 
480 	struct nx_netif_mit       *__counted_by(nifna_tx_mit_count) nifna_tx_mit;
481 	struct nx_netif_mit       *__counted_by(nifna_rx_mit_count) nifna_rx_mit;
482 
483 	/*
484 	 * XXX For filter or vpna only
485 	 */
486 	union {
487 		struct netif_filter     *nifna_filter;
488 		struct netif_flow       *nifna_flow;
489 	};
490 	uint16_t                  nifna_gencnt;
491 	uint32_t                  nifna_tx_mit_count;
492 	uint32_t                  nifna_rx_mit_count;
493 };
494 
495 extern kern_allocation_name_t skmem_tag_netif_filter;
496 extern kern_allocation_name_t skmem_tag_netif_flow;
497 extern kern_allocation_name_t skmem_tag_netif_agent_flow;
498 extern kern_allocation_name_t skmem_tag_netif_llink;
499 extern kern_allocation_name_t skmem_tag_netif_qset;
500 
501 __BEGIN_DECLS
502 extern struct nxdom nx_netif_dom_s;
503 extern struct kern_nexus_domain_provider nx_netif_prov_s;
504 
505 extern struct nx_netif *nx_netif_alloc(zalloc_flags_t);
506 extern void nx_netif_free(struct nx_netif *);
507 extern void nx_netif_retain(struct nx_netif *);
508 extern void nx_netif_release(struct nx_netif *);
509 
510 extern int nx_netif_dev_krings_create(struct nexus_adapter *,
511     struct kern_channel *);
512 extern void nx_netif_dev_krings_delete(struct nexus_adapter *,
513     struct kern_channel *, boolean_t);
514 extern int nx_netif_na_find(struct kern_nexus *, struct kern_channel *,
515     struct chreq *, struct nxbind *, struct proc *, struct nexus_adapter **,
516     boolean_t create);
517 extern int nx_netif_na_special(struct nexus_adapter *,
518     struct kern_channel *, struct chreq *, nxspec_cmd_t);
519 extern int nx_netif_na_special_common(struct nexus_adapter *,
520     struct kern_channel *, struct chreq *, nxspec_cmd_t);
521 extern int nx_netif_common_intr(struct __kern_channel_ring *, struct proc *,
522     uint32_t, uint32_t *);
523 
524 extern int nx_netif_prov_init(struct kern_nexus_domain_provider *);
525 extern int nx_netif_prov_params(struct kern_nexus_domain_provider *,
526     const uint32_t, const struct nxprov_params *, struct nxprov_params *,
527     struct skmem_region_params[SKMEM_REGIONS], uint32_t);
528 extern int nx_netif_prov_mem_new(struct kern_nexus_domain_provider *,
529     struct kern_nexus *, struct nexus_adapter *);
530 extern void nx_netif_prov_fini(struct kern_nexus_domain_provider *);
531 extern int nx_netif_prov_config(struct kern_nexus_domain_provider *,
532     struct kern_nexus *, struct nx_cfg_req *, int, struct proc *,
533     kauth_cred_t);
534 extern int nx_netif_prov_nx_ctor(struct kern_nexus *);
535 extern void nx_netif_prov_nx_dtor(struct kern_nexus *);
536 extern int nx_netif_prov_nx_mem_info(struct kern_nexus *,
537     struct kern_pbufpool **, struct kern_pbufpool **);
538 extern size_t nx_netif_prov_nx_mib_get(struct kern_nexus *nx,
539     struct nexus_mib_filter *, void *__sized_by(len), size_t len, struct proc *);
540 extern int nx_netif_prov_nx_stop(struct kern_nexus *);
541 
542 extern void nx_netif_reap(struct nexus_netif_adapter *, struct ifnet *,
543     uint32_t, boolean_t);
544 
545 extern void nx_netif_copy_stats(struct nexus_netif_adapter *,
546     struct if_netif_stats *);
547 extern struct nexus_netif_adapter * na_netif_alloc(zalloc_flags_t);
548 extern void na_netif_free(struct nexus_adapter *);
549 extern void na_netif_finalize(struct nexus_netif_adapter *, struct ifnet *);
550 extern void nx_netif_llw_detach_notify(void *);
551 extern void nx_netif_config_interface_advisory(struct kern_nexus *, bool);
552 
553 /*
554  * netif netagent API
555  */
556 extern void nx_netif_agent_init(struct nx_netif *);
557 extern void nx_netif_agent_fini(struct nx_netif *);
558 extern int nx_netif_netagent_flow_add(struct nx_netif *, struct nx_flow_req *);
559 extern int nx_netif_netagent_flow_del(struct nx_netif *, struct nx_flow_req *);
560 
561 /*
562  * "Interrupt" mitigation API. This is used by the netif adapter to reduce
563  * the number of "interrupt" requests/wakeup to clients on incoming packets.
564  */
565 extern void nx_netif_mit_init(struct nx_netif *, const struct ifnet *,
566     struct nx_netif_mit *, struct __kern_channel_ring *, boolean_t);
567 extern void nx_netif_mit_cleanup(struct nx_netif_mit *);
568 extern int nx_netif_mit_tx_intr(struct __kern_channel_ring *, struct proc *,
569     uint32_t, uint32_t *);
570 extern int nx_netif_mit_rx_intr(struct __kern_channel_ring *, struct proc *,
571     uint32_t, uint32_t *);
572 
573 /*
574  * Interface filter API
575  */
576 #define NETIF_FILTER_RX         0x0001
577 #define NETIF_FILTER_TX         0x0002
578 #define NETIF_FILTER_SOURCE     0x0004
579 #define NETIF_FILTER_INJECT     0x0008
580 extern errno_t nx_netif_filter_inject(struct nexus_netif_adapter *,
581     struct netif_filter *, struct __kern_packet *, uint32_t);
582 extern errno_t nx_netif_filter_add(struct nx_netif *, nexus_port_t, void *,
583     errno_t (*)(void *, struct __kern_packet *, uint32_t),
584     struct netif_filter **);
585 extern errno_t nx_netif_filter_remove(struct nx_netif *, struct netif_filter *);
586 extern void nx_netif_filter_init(struct nx_netif *);
587 extern void nx_netif_filter_fini(struct nx_netif *);
588 extern void nx_netif_filter_enable(struct nx_netif *);
589 extern void nx_netif_filter_disable(struct nx_netif *);
590 
591 /*
592  * These callbacks are invoked when a packet chain has traversed the full
593  * filter chain.
594  */
595 extern errno_t nx_netif_filter_rx_cb(struct nexus_netif_adapter *,
596     struct __kern_packet *, uint32_t);
597 extern errno_t nx_netif_filter_tx_cb(struct nexus_netif_adapter *,
598     struct __kern_packet *, uint32_t);
599 
600 /*
601  * These are called by nx_netif_filter_tx_cb() to feed filtered packets
602  * back to driver.
603  */
604 extern errno_t
605     nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *,
606     mbuf_svc_class_t, struct mbuf *);
607 extern errno_t
608     nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *,
609     kern_packet_svc_class_t, struct __kern_packet *);
610 
611 /*
612  * Called by nx_netif_na_find() to create a filter nexus adapter.
613  */
614 extern int netif_filter_na_create(struct kern_nexus *, struct chreq *,
615     struct nexus_adapter **);
616 
617 /*
618  * Callbacks from ifnet
619  */
620 extern errno_t nx_netif_native_tx_dequeue(struct nexus_netif_adapter *,
621     uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
622     uint32_t *, uint32_t *, boolean_t, errno_t);
623 extern errno_t nx_netif_native_tx_get_len(struct nexus_netif_adapter *,
624     uint32_t, uint32_t *, uint32_t *, errno_t);
625 extern errno_t nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *,
626     uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
627     uint32_t *, uint32_t *, boolean_t, errno_t);
628 extern errno_t nx_netif_compat_tx_get_len(struct nexus_netif_adapter *,
629     uint32_t, uint32_t *, uint32_t *, errno_t);
630 
631 /*
632  * doorbell dequeue tunable
633  */
634 extern uint32_t nx_netif_doorbell_max_dequeue;
635 
636 /*
637  * Default drop tunable
638  */
639 extern uint32_t nx_netif_filter_default_drop;
640 
641 /*
642  * Flow API
643  */
644 #define NETIF_FLOW_SOURCE       0x0001
645 #define NETIF_FLOW_INJECT       0x0002
646 #define NETIF_FLOW_OUTBOUND     0x0004 /* Assumes inbound if flag is missing */
647 
648 extern errno_t nx_netif_demux(struct nexus_netif_adapter *,
649     struct __kern_packet *, struct __kern_packet **, struct nexus_pkt_stats *,
650     uint32_t);
651 extern errno_t nx_netif_flow_add(struct nx_netif *, nexus_port_t,
652     struct netif_flow_desc *, void *, errno_t (*)(void *, void *, uint32_t),
653     struct netif_flow **);
654 extern errno_t nx_netif_flow_remove(struct nx_netif *, struct netif_flow *);
655 extern void nx_netif_flow_init(struct nx_netif *);
656 extern void nx_netif_flow_fini(struct nx_netif *);
657 extern void nx_netif_flow_enable(struct nx_netif *);
658 extern void nx_netif_flow_disable(struct nx_netif *);
659 extern void nx_netif_snoop(struct nx_netif *, struct __kern_packet *,
660     boolean_t);
661 extern boolean_t nx_netif_validate_macaddr(struct nx_netif *,
662     struct __kern_packet *, uint32_t);
663 extern boolean_t nx_netif_flow_match(struct nx_netif *, struct __kern_packet *,
664     struct netif_flow *, uint32_t);
665 extern struct netif_flow * nx_netif_flow_classify(struct nx_netif *,
666     struct __kern_packet *, uint32_t);
667 extern void nx_netif_flow_release(struct nx_netif *, struct netif_flow *);
668 extern int netif_vp_na_create(struct kern_nexus *, struct chreq *,
669     struct nexus_adapter **);
670 extern errno_t netif_vp_na_channel_event(struct nx_netif *, uint32_t,
671     struct __kern_channel_event *, uint16_t);
672 
673 /*
674  * Disable all checks on inbound/outbound packets on VP adapters
675  */
676 extern uint32_t nx_netif_vp_accept_all;
677 
678 /*
679  * Utility functions
680  */
681 extern struct __kern_packet *nx_netif_alloc_packet(struct kern_pbufpool *,
682     uint32_t, kern_packet_t *);
683 extern void nx_netif_free_packet(struct __kern_packet *);
684 extern void nx_netif_free_packet_chain(struct __kern_packet *, int *);
685 extern void netif_ifp_inc_traffic_class_out_pkt(struct ifnet *, uint32_t,
686     uint32_t, uint32_t);
687 
688 #define NETIF_CONVERT_RX        0x0001
689 #define NETIF_CONVERT_TX        0x0002
690 
691 extern struct __kern_packet *
692     nx_netif_mbuf_to_filter_pkt_chain(struct nexus_netif_adapter *,
693     struct mbuf *, uint32_t);
694 extern struct mbuf *
695     nx_netif_filter_pkt_to_mbuf_chain(struct nexus_netif_adapter *,
696     struct __kern_packet *, uint32_t);
697 
698 extern struct __kern_packet *
699     nx_netif_pkt_to_filter_pkt(struct nexus_netif_adapter *,
700     struct __kern_packet *, uint32_t);
701 extern struct __kern_packet *
702     nx_netif_pkt_to_filter_pkt_chain(struct nexus_netif_adapter *,
703     struct __kern_packet *, uint32_t);
704 extern struct __kern_packet *
705     nx_netif_filter_pkt_to_pkt_chain(struct nexus_netif_adapter *,
706     struct __kern_packet *, uint32_t);
707 
708 extern struct mbuf *
709     nx_netif_pkt_to_mbuf(struct nexus_netif_adapter *,
710     struct __kern_packet *, uint32_t);
711 extern struct __kern_packet *
712     nx_netif_pkt_to_pkt(struct nexus_netif_adapter *,
713     struct __kern_packet *, uint32_t);
714 
715 extern void nx_netif_mbuf_chain_info(struct mbuf *,
716     struct mbuf **, uint32_t *, uint32_t *);
717 extern void nx_netif_pkt_chain_info(struct __kern_packet *,
718     struct __kern_packet **, uint32_t *, uint32_t *);
719 extern int nx_netif_get_max_mtu(ifnet_t, uint32_t *);
720 
721 extern void nx_netif_mit_config(struct nexus_netif_adapter *,
722     boolean_t *, boolean_t *, boolean_t *, boolean_t *);
723 
724 extern void nx_netif_vp_region_params_adjust(struct nexus_adapter *,
725     struct skmem_region_params *);
726 
727 extern void nx_netif_pktap_output(ifnet_t, int, struct __kern_packet *);
728 
729 extern int netif_rx_notify(struct __kern_channel_ring *,
730     struct proc *p, uint32_t);
731 extern int netif_llw_rx_notify(struct __kern_channel_ring *,
732     struct proc *p, uint32_t);
733 extern void netif_receive(struct nexus_netif_adapter *,
734     struct __kern_packet *, struct nexus_pkt_stats *);
735 
736 #define NETIF_XMIT_FLAG_CHANNEL  0x0001
737 #define NETIF_XMIT_FLAG_HOST     0x0002
738 #define NETIF_XMIT_FLAG_REDIRECT 0x0004
739 #define NETIF_XMIT_FLAG_PACING   0x0008
740 extern void netif_transmit(struct ifnet *, uint32_t);
741 extern int netif_ring_tx_refill(const kern_channel_ring_t,
742     uint32_t, uint32_t, boolean_t, boolean_t *, boolean_t);
743 extern void netif_hwna_set_mode(struct nexus_adapter *, netif_mode_t,
744     void (*)(struct nexus_adapter *, struct __kern_packet *,
745     struct nexus_pkt_stats *));
746 extern void netif_hwna_clear_mode(struct nexus_adapter *);
747 
748 /*
749  * rxpoll functions
750  */
751 extern errno_t netif_rxpoll_set_params(struct ifnet *,
752     struct ifnet_poll_params *, boolean_t locked);
753 extern void netif_rxpoll_compat_thread_func(void *, wait_result_t);
754 
755 /*
756  * GSO functions
757  */
758 extern int netif_gso_dispatch(struct ifnet *ifp, struct mbuf *m);
759 extern void netif_gso_init(void);
760 extern void netif_gso_fini(void);
761 
762 /*
763  * Logical link functions
764  */
765 extern void nx_netif_llink_retain(struct netif_llink *);
766 extern void nx_netif_llink_release(struct netif_llink **);
767 extern void nx_netif_qset_retain(struct netif_qset *);
768 extern void nx_netif_qset_release(struct netif_qset **);
769 extern void nx_netif_llink_init(struct nx_netif *);
770 extern void nx_netif_llink_fini(struct nx_netif *);
771 extern struct netif_qset * nx_netif_find_qset(struct nx_netif *, uint64_t);
772 extern struct netif_qset * nx_netif_find_qset_with_pkt(struct ifnet *,
773     struct __kern_packet *);
774 extern struct netif_qset * nx_netif_get_default_qset_noref(struct nx_netif *);
775 extern int netif_qset_enqueue(struct netif_qset *, bool chain,
776     struct __kern_packet *, struct __kern_packet *, uint32_t, uint32_t,
777     uint32_t *, uint32_t *);
778 extern int nx_netif_default_llink_config(struct nx_netif *,
779     struct kern_nexus_netif_llink_init *);
780 extern void nx_netif_llink_config_free(struct nx_netif *);
781 extern int nx_netif_llink_ext_init_default_queues(struct kern_nexus *);
782 extern void nx_netif_llink_ext_fini_default_queues(struct kern_nexus *);
783 extern int nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *,
784     bool);
785 extern int nx_netif_llink_add(struct nx_netif *,
786     struct kern_nexus_netif_llink_init *, struct netif_llink **);
787 extern int nx_netif_llink_remove(struct nx_netif *,
788     kern_nexus_netif_llink_id_t);
789 extern int nx_netif_notify_steering_info(struct nx_netif *,
790     struct netif_qset *, struct ifnet_traffic_descriptor_common *, bool);
791 
792 /*
793  * Rx flow steering functions
794  */
795 extern int nx_netif_configure_rx_flow_steering(struct kern_nexus *, uint32_t,
796     struct ifnet_traffic_descriptor_common *, rx_flow_steering_action_t);
797 __END_DECLS
798 #endif /* CONFIG_NEXUS_NETIF */
799 #include <skywalk/nexus/netif/nx_netif_compat.h>
800 #include <skywalk/nexus/netif/nx_netif_host.h>
801 #endif /* _SKYWALK_NEXUS_NETIF_H_ */
802