xref: /xnu-11215.41.3/bsd/skywalk/nexus/netif/nx_netif.h (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_NEXUS_NETIF_H_
30 #define _SKYWALK_NEXUS_NETIF_H_
31 
32 #include <skywalk/os_skywalk_private.h>
33 #include <skywalk/nexus/nexus_pktq.h>
34 
35 #if CONFIG_NEXUS_NETIF
36 
37 #define NEXUS_PROVIDER_NET_IF           "com.apple.nexus.netif"
38 
39 #define NX_NETIF_MAXPORTS       128
40 #define NX_NETIF_EVENT_RING_NUM      1     /* number of event rings */
41 #define NX_NETIF_EVENT_RING_SIZE     32    /* default event ring size */
42 
/*
 * An interface filter attached to a netif nexus port.  Packets are
 * delivered to nf_cb_func with nf_cb_arg; see nx_netif_filter_add()/
 * nx_netif_filter_remove().
 */
struct netif_filter {
	STAILQ_ENTRY(netif_filter) nf_link;     /* linkage on nif_filter_list */
	nexus_port_t            nf_port;        /* nexus port of this filter */
	uint32_t                nf_refcnt;      /* reference count */
	void                    *nf_cb_arg;     /* opaque argument to nf_cb_func */
	errno_t                 (*nf_cb_func)(void *,
	    struct __kern_packet *, uint32_t);  /* packet delivery callback */
};
STAILQ_HEAD(netif_filter_head, netif_filter);
52 
/*
 * Flow classification descriptor; which fields are consulted depends
 * on the flow table type (see netif_flowtable_type_t).
 */
struct netif_flow_desc {
	uint16_t        fd_ethertype;   /* EtherType to match */
	struct in6_addr fd_laddr;       /* local IPv6 address */
	struct in6_addr fd_raddr;       /* remote IPv6 address */
};
58 
/*
 * Per-port information for a netif nexus port: the generic port info
 * header followed by the port's flow descriptor.
 */
struct netif_port_info {
	struct nx_port_info_header npi_hdr;     /* common port info header */
	struct netif_flow_desc  npi_fd;         /* flow descriptor of the port */
};
63 
/*
 * A flow registered on a netif virtual port.  Each flow sits on both
 * the per-netif list (nif_flow_list) and a flow table bucket; matched
 * packets are handed to nf_cb_func with nf_cb_arg.  See
 * nx_netif_flow_add()/nx_netif_flow_remove().
 */
struct netif_flow {
	SLIST_ENTRY(netif_flow) nf_link;        /* linkage on nif_flow_list */
	SLIST_ENTRY(netif_flow) nf_table_link;  /* linkage within the flow table */
	nexus_port_t            nf_port;        /* nexus port of this flow */
	uint32_t                nf_refcnt;      /* reference count */
	struct netif_flow_desc  nf_desc;        /* classification descriptor */
	void                    *nf_cb_arg;     /* opaque argument to nf_cb_func */
	errno_t                 (*nf_cb_func)(void *,
	    void *, uint32_t);                  /* packet delivery callback */
};
74 
/* Flow table classification schemes. */
typedef enum {
	FT_TYPE_ETHERTYPE,      /* classify by EtherType */
	FT_TYPE_IPV6_ULA        /* classify by IPv6 ULA addresses */
} netif_flowtable_type_t;
79 
/*
 * A flow table instance: the ops vector that implements it plus an
 * opaque implementation-private state pointer.
 */
struct netif_flowtable {
	struct netif_flowtable_ops      *ft_ops;        /* implementation ops */
	void                            *ft_internal;   /* implementation state */
};
84 
/*
 * Flow table operation signatures (see struct netif_flowtable_ops).
 */
/* find the flow matching a packet; returns it via the last argument */
typedef int netif_flow_lookup_t(struct netif_flowtable *,
    struct __kern_packet *, uint32_t, struct netif_flow **);
/* compare two flow descriptors for a match */
typedef boolean_t netif_flow_match_t(struct netif_flow_desc *,
    struct netif_flow_desc *);
/* extract a flow descriptor from a packet */
typedef int netif_flow_info_t(struct __kern_packet *,
    struct netif_flow_desc *, uint32_t);
/* insert a flow into the table */
typedef int netif_flow_insert_t(struct netif_flowtable *,
    struct netif_flow *);
/* remove a flow from the table */
typedef void netif_flow_remove_t(struct netif_flowtable *,
    struct netif_flow *);
/* allocate a table bound to the given ops vector */
typedef struct netif_flowtable *netif_flow_table_alloc_t(
	struct netif_flowtable_ops *);
/* free a table allocated by netif_flow_table_alloc_t */
typedef void netif_flow_table_free_t(struct netif_flowtable *);
98 
/*
 * Dispatch table binding a flow table implementation to its lookup,
 * match, insert/remove and alloc/free routines.
 */
struct netif_flowtable_ops {
	netif_flow_lookup_t           *nfo_lookup;
	netif_flow_match_t            *nfo_match;
	netif_flow_info_t             *nfo_info;
	netif_flow_insert_t           *nfo_insert;
	netif_flow_remove_t           *nfo_remove;
	netif_flow_table_alloc_t      *nfo_table_alloc;
	netif_flow_table_free_t       *nfo_table_free;
};
108 
109 SLIST_HEAD(netif_flow_head, netif_flow);
110 
/*
 * A single driver RX or TX queue belonging to a netif queue set
 * (see nqs_driver_queues in struct netif_qset).
 */
struct netif_queue {
	decl_lck_mtx_data(, nq_lock);           /* protects this queue */
	struct netif_qset               *nq_qset; /* backpointer to parent netif qset */
	struct pktq                     nq_pktq;  /* staged packets */
	struct netif_qstats             nq_stats; /* per-queue statistics */
	uint64_t                        nq_accumulated_bytes;
	uint64_t                        nq_accumulated_pkts;
	uint64_t                        nq_accumulate_start; /* in seconds */
	void                            *nq_ctx;  /* opaque queue context */
	kern_packet_svc_class_t         nq_svc; /* service class of TX queue */
	uint16_t                        nq_flags; /* NETIF_QUEUE_* flags below */
}__attribute__((aligned(sizeof(uint64_t))));
123 
124 /* values for nq_flags */
125 #define NETIF_QUEUE_EXT_INITED   0x0001 /* nxnpi_queue_init() succeeded */
126 #define NETIF_QUEUE_IS_RX        0x0002 /* RX queue, else TX */
127 
/*
 * Return the _n'th entry of a queue set's nqs_driver_queues array.
 * RX queues occupy indices [0, nqs_num_rx_queues); TX queues follow
 * immediately after (see struct netif_qset).
 */
#define _NETIF_QSET_QUEUE(_p, _n)    \
    (struct netif_queue *)(void *)((uint8_t *)((_p)->nqs_driver_queues) + \
    ((_n) * sizeof(struct netif_queue)))
/* _n'th RX queue of queue set _p */
#define NETIF_QSET_RX_QUEUE(_p, _n)    _NETIF_QSET_QUEUE(_p, _n)
/* _n'th TX queue of queue set _p (TX queues follow the RX queues) */
#define NETIF_QSET_TX_QUEUE(_p, _n)    \
    _NETIF_QSET_QUEUE(_p, (_p)->nqs_num_rx_queues + (_n))
134 
/*
 * Encode a 64-bit queue set identifier (nqs_id) from the logical
 * link's internal ID (bits 16..31) and the queue set index
 * (bits 0..15).  The top 32 bits are unused for now.
 *
 * The uint64_t cast performs the shift in an unsigned 64-bit type;
 * without it, a uint16_t llink_id_internal is promoted to signed int
 * and `<< 16` is undefined behavior once the value reaches 0x8000
 * (the shift lands in the sign bit).  The result is unchanged for
 * all previously-defined inputs.
 */
#define NETIF_QSET_ID_ENCODE(llink_id_internal, qset_idx) \
    ((((uint64_t)(llink_id_internal) << 16) | (qset_idx)) & 0xffffffff)
138 
/*
 * A set of driver queues belonging to a logical link.  Queue sets are
 * identified by nqs_id (see NETIF_QSET_ID_ENCODE) and looked up via
 * nx_netif_find_qset().
 */
struct netif_qset {
	struct netif_llink         *nqs_llink; /* backpointer to parent logical link */
	struct ifclassq            *nqs_ifcq;  /* associated AQM classq */
	SLIST_ENTRY(netif_qset)    nqs_list;   /* linkage on nll_qset_list */
	void                       *nqs_ctx; /* context provided by driver */
	uint64_t                   nqs_id;  /* queue set identifier */
	uint8_t                    nqs_idx; /* queue set index */
	uint16_t                   nqs_flags; /* NETIF_QSET_FLAG_* below */
	uint8_t                    nqs_num_rx_queues;
	uint8_t                    nqs_num_tx_queues;
	uint8_t                    nqs_num_queues; /* rx + tx queue count */
	/*
	 * nqs_driver_queues is organized as:
	 * nqs_driver_queues[0 .. nqs_num_rx_queues-1] hold the RX queues.
	 * nqs_driver_queues[nqs_num_rx_queues ..
	 *     nqs_num_rx_queues+nqs_num_tx_queues-1] hold the TX queues.
	 */
	struct netif_queue         nqs_driver_queues[__counted_by(nqs_num_queues)]
	__attribute__((aligned(sizeof(uint64_t))));
};
158 
159 /* values for nqs_flags */
160 #define NETIF_QSET_FLAG_DEFAULT     0x0001 /* default queue set of the logical link */
161 #define NETIF_QSET_FLAG_AQM         0x0002 /* provides AQM */
162 #define NETIF_QSET_FLAG_LOW_LATENCY 0x0004 /* provides low latency service */
163 #define NETIF_QSET_FLAG_EXT_INITED  0x0008 /* nxnpi_qset_init() succeeded */
164 
165 #define NETIF_DEFAULT_QSET(_qs)    ((_qs)->nqs_flags & NETIF_QSET_FLAG_DEFAULT)
166 
/*
 * A logical link of a netif instance.  Each llink owns a list of
 * queue sets, one of which is the default (nll_default_qset).
 * Reference-counted via nx_netif_llink_retain()/release().
 */
struct netif_llink {
	struct nx_netif             *nll_nif; /* backpointer to parent netif instance */
	STAILQ_ENTRY(netif_llink)   nll_link; /* linkage on nif_llink_list */
	SLIST_HEAD(, netif_qset)    nll_qset_list; /* queue sets of this llink */
	struct netif_qset           *nll_default_qset; /* default queue set */
	struct ifclassq             *nll_ifcq; /* associated AQM classq */
	struct os_refcnt            nll_refcnt; /* see nx_netif_llink_retain */
#define NETIF_LLINK_ID_DEFAULT    0
	kern_nexus_netif_llink_id_t nll_link_id; /* driver-visible link ID */
	uint16_t                    nll_link_id_internal; /* see NETIF_QSET_ID_ENCODE */
	uint16_t                    nll_qset_cnt; /* # of queue sets on the list */
	uint8_t                     nll_state; /* NETIF_LLINK_STATE_* below */
	uint8_t                     nll_flags; /* NETIF_LLINK_FLAG_* below */
	void                        *nll_ctx; /* context provided by driver */
};
STAILQ_HEAD(netif_llink_head, netif_llink);
183 
/* values for nll_flags */
#define NETIF_LLINK_FLAG_DEFAULT    0x1 /* default logical link */

/* values for nll_state */
#define NETIF_LLINK_STATE_INIT         0x1 /* initialized and ready for use */
#define NETIF_LLINK_STATE_DESTROYED    0x2 /* not available for use */

/* true if _ll is the interface's default logical link */
#define NETIF_DEFAULT_LLINK(_ll)  ((_ll)->nll_flags & NETIF_LLINK_FLAG_DEFAULT)
192 
SLIST_HEAD(netif_agent_flow_head, netif_agent_flow);
/*
 * A flow created through the netif netagent (see
 * nx_netif_netagent_flow_add()/flow_del()), tracked on
 * nif_agent_flow_list.
 */
struct netif_agent_flow {
	SLIST_ENTRY(netif_agent_flow) naf_link; /* linkage on nif_agent_flow_list */
	uuid_t                  naf_flow_uuid;  /* flow identifier */
	uuid_t                  naf_bind_key;   /* key used to bind the port */
	nexus_port_t            naf_nx_port;    /* nexus port of the flow */
	uint16_t                naf_flags;
	pid_t                   naf_pid;        /* owning process */
	union sockaddr_in_4_6   naf_daddr;      /* destination address */
	union sockaddr_in_4_6   naf_saddr;      /* source address */
};
204 
/* convert a nexus_adapter pointer to its enclosing nexus_netif_adapter */
#define NIFNA(_na)       (__container_of((_na), struct nexus_netif_adapter, nifna_up))
206 
207 /* nif_flags */
208 /*
209  * This is named differently from the flow classification rule
210  * (IPV6 ULA) because this gives us the flexibility of using
211  * different types of classification in the future.
212  */
213 #define NETIF_FLAG_LOW_LATENCY          0x00000001
214 #define NETIF_FLAG_COMPAT               0x00000002
215 #define NETIF_FLAG_LLINK_INITIALIZED    0x00000004
216 #define NETIF_IS_LOW_LATENCY(n) \
217     (((n)->nif_flags & NETIF_FLAG_LOW_LATENCY) != 0)
218 #define NETIF_IS_COMPAT(n) \
219     (((n)->nif_flags & NETIF_FLAG_COMPAT) != 0)
220 #define NETIF_LLINK_ENABLED(n) \
221     (((n)->nif_flags & NETIF_FLAG_LLINK_INITIALIZED) != 0)
222 #define NETIF_DEFAULT_DROP_ENABLED(n) \
223     (nx_netif_filter_default_drop != 0 && \
224     (((n)->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0))
225 
226 /* nif_agent_flags */
227 #define NETIF_AGENT_FLAG_REGISTERED     0x00000001
228 #define NETIF_AGENT_FLAG_ADDED          0x00000002
229 
230 /* nif_filter_flags */
231 #define NETIF_FILTER_FLAG_INITIALIZED   0x00000001
232 #define NETIF_FILTER_FLAG_ENABLED       0x00000002
233 
234 /* nif_flow_flags */
235 #define NETIF_FLOW_FLAG_INITIALIZED     0x00000001
236 #define NETIF_FLOW_FLAG_ENABLED         0x00000002
237 
238 /* nif_llink_flags */
239 #define NETIF_LLINK_FLAG_INITIALIZED    0x00000001
240 
241 /* Used by netif_hwna_set_mode() */
typedef enum {
	NETIF_MODE_NONE,        /* no special mode set */
	NETIF_MODE_FSW,         /* attached to a flowswitch */
	NETIF_MODE_LLW          /* low-latency (llw) mode */
} netif_mode_t;
247 
248 /* nif capabilities */
249 #define NETIF_CAPAB_INTERFACE_ADVISORY 0x00000001
250 #define NETIF_CAPAB_QSET_EXTENSIONS    0x00000002
251 
/*
 * State for the NETIF_CAPAB_QSET_EXTENSIONS capability: the provider's
 * steering-info notification callback and its context (see
 * nx_netif_notify_steering_info()).
 */
struct netif_qset_extensions {
	kern_nexus_capab_qsext_notify_steering_info_fn_t qe_notify_steering_info;
	void *qe_prov_ctx;      /* provider context passed to the callback */
};
256 
257 /*
258  * nx_netif is a descriptor for a netif nexus instance.
259  */
struct nx_netif {
	decl_lck_rw_data(, nif_lock);           /* instance rw lock (NETIF_*LOCK) */
	struct kern_nexus       *nif_nx;        /* backpointer to owning nexus */

	struct nxbind           *nif_dev_nxb;   /* binding for the dev port */
	struct nxbind           *nif_host_nxb;  /* binding for the host port */
	uuid_t                  nif_uuid;       /* attachment UUID */
	struct netif_stats      nif_stats;      /* netif statistics */
	uint32_t                nif_flags;      /* NETIF_FLAG_* */
	struct os_refcnt        nif_refcnt;     /* nx_netif_retain/release */

	/* netagent state; see nx_netif_agent_init()/fini() */
	decl_lck_mtx_data(, nif_agent_lock);
	struct netif_agent_flow_head nif_agent_flow_list; /* agent flows */
	uint32_t                nif_agent_flow_cnt; /* # entries on the list */
	uint32_t                nif_agent_flags;    /* NETIF_AGENT_FLAG_* */
	netagent_session_t      nif_agent_session;
	uuid_t                  nif_agent_uuid;

	uint32_t                nif_hwassist;
	uint32_t                nif_capabilities;
	uint32_t                nif_capenable;
	uint64_t                nif_input_rate; /* device input rate limit */

	struct ifnet            *nif_ifp;       /* attached interface */
	struct nx_flowswitch    *nif_fsw;       /* attached flowswitch nexus */
	struct sk_nexusadv      *nif_fsw_nxadv; /* flowswitch nexus advisory */
	struct netif_nexus_advisory *nif_netif_nxadv; /* netif nexus advisory */

	/* packet-mbuf copy routines */
	pkt_copy_from_mbuf_t    *nif_pkt_copy_from_mbuf;
	pkt_copy_to_mbuf_t      *nif_pkt_copy_to_mbuf;
	pkt_copy_from_pkt_t     *nif_pkt_copy_from_pkt;

	/* packet filtering */
	decl_lck_mtx_data(, nif_filter_lock);   /* protects filter state below */
	uint32_t                nif_filter_flags;   /* NETIF_FILTER_FLAG_* */
	uint32_t                nif_filter_vp_cnt;
	uint32_t                nif_filter_cnt;     /* # entries on filter list */
	struct kern_pbufpool    *nif_filter_pp;     /* pool for filter packets */
	struct netif_filter_head nif_filter_list;   /* registered filters */
	/*
	 * Filter-processed TX packets staged per service class; see
	 * nx_netif_filter_tx_processed_{mbuf,pkt}_enqueue().
	 */
	union {
		struct nx_mbq   nif_tx_processed_mbq[MBUF_TC_MAX];
		struct nx_pktq  nif_tx_processed_pktq[KPKT_TC_MAX];
	};

	/* virtual port */
	decl_lck_mtx_data(, nif_flow_lock);     /* protects flow state below */
	uint32_t                nif_vp_cnt;     /* # of virtual port adapters */
	uint32_t                nif_flow_flags; /* NETIF_FLOW_FLAG_* */
	uint32_t                nif_flow_cnt;   /* # entries on flow list */
	struct netif_flow_head  nif_flow_list;  /* registered flows */
	struct netif_flowtable  *nif_flow_table; /* flow classification table */
	struct kern_channel     *nif_hw_ch;
	uint32_t                nif_hw_ch_refcnt;

	/* logical link */
	decl_lck_rw_data(, nif_llink_lock);     /* protects llink state below */
	struct kern_nexus_netif_llink_init *nif_default_llink_params;
	struct netif_llink         *nif_default_llink; /* default logical link */
	STAILQ_HEAD(, netif_llink) nif_llink_list;     /* all logical links */
	uint16_t                   nif_llink_cnt;      /* # entries on the list */

	/* capability configuration callback function and context */
	uint32_t                nif_extended_capabilities; /* NETIF_CAPAB_* */
	kern_nexus_capab_interface_advisory_config_fn_t nif_intf_adv_config;
	void *nif_intf_adv_prov_ctx;

	struct netif_qset_extensions nif_qset_extensions;
#if (DEVELOPMENT || DEBUG)
	struct skoid            nif_skoid;      /* sysctl oid for this instance */
#endif /* DEVELOPMENT || DEBUG */
};
332 
/* return the nx_netif instance stored in a nexus' nx_arg */
#define NX_NETIF_PRIVATE(_nx) ((struct nx_netif *)(_nx)->nx_arg)

/*
 * Wrappers around the per-instance nif_lock reader/writer lock.
 */
#define NETIF_RWINIT(_nif)                \
	lck_rw_init(&(_nif)->nif_lock, &nexus_lock_group, &nexus_lock_attr)
#define NETIF_WLOCK(_nif)                 \
	lck_rw_lock_exclusive(&(_nif)->nif_lock)
#define NETIF_WUNLOCK(_nif)               \
	lck_rw_unlock_exclusive(&(_nif)->nif_lock)
#define NETIF_WLOCKTORLOCK(_nif)          \
	lck_rw_lock_exclusive_to_shared(&(_nif)->nif_lock)
#define NETIF_RLOCK(_nif)                 \
	lck_rw_lock_shared(&(_nif)->nif_lock)
#define NETIF_RLOCKTOWLOCK(_nif)          \
	lck_rw_lock_shared_to_exclusive(&(_nif)->nif_lock)
#define NETIF_RTRYLOCK(_nif)              \
	lck_rw_try_lock(&(_nif)->nif_lock, LCK_RW_TYPE_SHARED)
#define NETIF_RUNLOCK(_nif)               \
	lck_rw_unlock_shared(&(_nif)->nif_lock)
#define NETIF_UNLOCK(_nif)                \
	lck_rw_done(&(_nif)->nif_lock)
#define NETIF_RWDESTROY(_nif)             \
	lck_rw_destroy(&(_nif)->nif_lock, &nexus_lock_group)
#define NETIF_WLOCK_ASSERT_HELD(_nif)     \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define NETIF_RLOCK_ASSERT_HELD(_nif)     \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_SHARED)
#define NETIF_LOCK_ASSERT_HELD(_nif)      \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_HELD)
361 
362 SYSCTL_DECL(_kern_skywalk_netif);
363 
364 /*
365  * Macros to determine if an interface is skywalk capable or skywalk enabled.
366  * See the magic field in struct nexus_adapter.
367  */
/* true if ifp has a nexus adapter and advertises IFCAP_SKYWALK */
#define SKYWALK_CAPABLE(ifp)                                            \
	(NA(ifp) != NULL && (ifnet_capabilities_supported(ifp) & IFCAP_SKYWALK))

/* mark ifp as skywalk capable and enabled (takes the ifnet lock) */
#define SKYWALK_SET_CAPABLE(ifp) do {                                   \
	ifnet_lock_exclusive(ifp);                                      \
	(ifp)->if_capabilities |= IFCAP_SKYWALK;                        \
	(ifp)->if_capenable |= IFCAP_SKYWALK;                           \
	ifnet_lock_done(ifp);                                           \
} while (0)

/* clear skywalk capability on ifp (takes the ifnet lock) */
#define SKYWALK_CLEAR_CAPABLE(ifp) do {                                 \
	ifnet_lock_exclusive(ifp);                                      \
	(ifp)->if_capabilities &= ~IFCAP_SKYWALK;                       \
	(ifp)->if_capenable &= ~IFCAP_SKYWALK;                          \
	ifnet_lock_done(ifp);                                           \
} while (0)

/* true if ifp is a native skywalk interface */
#define SKYWALK_NATIVE(ifp)                                             \
	((ifp)->if_eflags & IFEF_SKYWALK_NATIVE)
387 
/* interrupt mitigation operating modes (see struct nx_netif_mit) */
typedef enum {
	MIT_MODE_SIMPLE,                /* no stats, no delay */
	MIT_MODE_ADVANCED_STATIC,       /* fixed mitigation configuration */
	MIT_MODE_ADVANCED_DYNAMIC,      /* configuration adjusted at runtime */
} mit_mode_t;
393 
394 /*
395  * Mitigation support.
396  */
/*
 * One mitigation configuration entry; see mit_tbl[] and mit_cfg in
 * struct nx_netif_mit.
 */
struct mit_cfg_tbl {
	uint32_t cfg_plowat;            /* packets low watermark */
	uint32_t cfg_phiwat;            /* packets high watermark */
	uint32_t cfg_blowat;            /* bytes low watermark */
	uint32_t cfg_bhiwat;            /* bytes high watermark */
	uint32_t cfg_ival;              /* delay interval (in microsecond) */
};
404 
405 #define NETIF_MIT_CFG_TBL_MAX_CFG       5
406 
/*
 * Per-ring "interrupt" mitigation state; see nx_netif_mit_init()/
 * nx_netif_mit_cleanup() and the tx/rx intr entry points.
 */
struct nx_netif_mit {
	decl_lck_spin_data(, mit_lock);         /* protects this structure */
	volatile struct __kern_channel_ring *mit_ckr;  /* kring backpointer */
	uint32_t        mit_flags;              /* NETIF_MITF_* below */
	uint32_t        mit_requests;           /* pending mitigation requests */
	uint32_t        mit_interval;           /* current delay interval */

	/*
	 * Adaptive mitigation.
	 */
	uint32_t        mit_cfg_idx_max;        /* highest config selector */
	uint32_t        mit_cfg_idx;            /* current config selector */
	const struct mit_cfg_tbl *mit_cfg;      /* current config mapping */
	mit_mode_t      mit_mode;               /* current mode */
	uint32_t        mit_packets_avg;        /* average # of packets */
	uint32_t        mit_packets_min;        /* smallest # of packets */
	uint32_t        mit_packets_max;        /* largest # of packets */
	uint32_t        mit_bytes_avg;          /* average # of bytes */
	uint32_t        mit_bytes_min;          /* smallest # of bytes */
	uint32_t        mit_bytes_max;          /* largest # of bytes */

	struct pktcntr  mit_sstats;             /* pkts & bytes per sampling */
	struct timespec mit_mode_holdtime;      /* mode holdtime in nsec */
	struct timespec mit_mode_lasttime;      /* last mode change time nsec */
	struct timespec mit_sample_time;        /* sampling holdtime in nsec */
	struct timespec mit_sample_lasttime;    /* last sampling time in nsec */
	struct timespec mit_start_time;         /* time of start work in nsec */

	struct thread   *mit_thread;            /* mitigation worker thread */
	char            mit_name[MAXTHREADNAMESIZE]; /* worker thread name */

	const struct ifnet      *mit_netif_ifp; /* interface being mitigated */
	/* interface-specific mitigation table */
	struct mit_cfg_tbl mit_tbl[NETIF_MIT_CFG_TBL_MAX_CFG];

#if (DEVELOPMENT || DEBUG)
	struct skoid    mit_skoid;              /* sysctl oid for this instance */
#endif /* DEVELOPMENT || DEBUG */
};
446 
447 #define NETIF_MITF_INITIALIZED  0x00000001      /* has been initialized */
448 #define NETIF_MITF_SAMPLING     0x00000002      /* busy sampling stats */
449 #define NETIF_MITF_SIMPLE       0x00000004      /* no stats, no delay */
450 #define NETIF_MITF_READY        0x10000000      /* thread is ready */
451 #define NETIF_MITF_RUNNING      0x20000000      /* thread is running */
452 #define NETIF_MITF_TERMINATING  0x40000000      /* thread is terminating */
453 #define NETIF_MITF_TERMINATED   0x80000000      /* thread is terminated */
454 
/*
 * Wrappers around the per-instance mit_lock spinlock.
 */
#define MIT_SPIN_LOCK(_mit)                     \
	lck_spin_lock(&(_mit)->mit_lock)
#define MIT_SPIN_LOCK_ASSERT_HELD(_mit)         \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_OWNED)
#define MIT_SPIN_LOCK_ASSERT_NOTHELD(_mit)      \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_NOTOWNED)
#define MIT_SPIN_UNLOCK(_mit)                   \
	lck_spin_unlock(&(_mit)->mit_lock)
463 
/*
 * Netif-specific nexus adapter; recover it from a nexus_adapter
 * pointer with NIFNA().
 */
struct nexus_netif_adapter {
	/*
	 * This is an overlay structure on nexus_adapter;
	 * make sure it contains 'up' as the first member.
	 */
	struct nexus_adapter      nifna_up;     /* base adapter (must be first) */
	struct nx_netif           *nifna_netif; /* owning netif instance */

	/* per-ring mitigation state arrays (see nx_netif_mit_init) */
	struct nx_netif_mit       *__counted_by(nifna_tx_mit_count) nifna_tx_mit;
	struct nx_netif_mit       *__counted_by(nifna_rx_mit_count) nifna_rx_mit;

	/*
	 * XXX For filter or vpna only
	 */
	union {
		struct netif_filter     *nifna_filter;  /* filter adapters */
		struct netif_flow       *nifna_flow;    /* vp adapters */
	};
	uint16_t                  nifna_gencnt;
	uint32_t                  nifna_tx_mit_count; /* # entries in nifna_tx_mit */
	uint32_t                  nifna_rx_mit_count; /* # entries in nifna_rx_mit */
};
486 
487 extern kern_allocation_name_t skmem_tag_netif_filter;
488 extern kern_allocation_name_t skmem_tag_netif_flow;
489 extern kern_allocation_name_t skmem_tag_netif_agent_flow;
490 extern kern_allocation_name_t skmem_tag_netif_llink;
491 extern kern_allocation_name_t skmem_tag_netif_qset;
492 
493 __BEGIN_DECLS
494 extern struct nxdom nx_netif_dom_s;
495 extern struct kern_nexus_domain_provider nx_netif_prov_s;
496 
497 extern struct nx_netif *nx_netif_alloc(zalloc_flags_t);
498 extern void nx_netif_free(struct nx_netif *);
499 extern void nx_netif_retain(struct nx_netif *);
500 extern void nx_netif_release(struct nx_netif *);
501 
502 extern int nx_netif_dev_krings_create(struct nexus_adapter *,
503     struct kern_channel *);
504 extern void nx_netif_dev_krings_delete(struct nexus_adapter *,
505     struct kern_channel *, boolean_t);
506 extern int nx_netif_na_find(struct kern_nexus *, struct kern_channel *,
507     struct chreq *, struct nxbind *, struct proc *, struct nexus_adapter **,
508     boolean_t create);
509 extern int nx_netif_na_special(struct nexus_adapter *,
510     struct kern_channel *, struct chreq *, nxspec_cmd_t);
511 extern int nx_netif_na_special_common(struct nexus_adapter *,
512     struct kern_channel *, struct chreq *, nxspec_cmd_t);
513 extern int nx_netif_common_intr(struct __kern_channel_ring *, struct proc *,
514     uint32_t, uint32_t *);
515 
516 extern int nx_netif_prov_init(struct kern_nexus_domain_provider *);
517 extern int nx_netif_prov_params(struct kern_nexus_domain_provider *,
518     const uint32_t, const struct nxprov_params *, struct nxprov_params *,
519     struct skmem_region_params[SKMEM_REGIONS], uint32_t);
520 extern int nx_netif_prov_mem_new(struct kern_nexus_domain_provider *,
521     struct kern_nexus *, struct nexus_adapter *);
522 extern void nx_netif_prov_fini(struct kern_nexus_domain_provider *);
523 extern int nx_netif_prov_config(struct kern_nexus_domain_provider *,
524     struct kern_nexus *, struct nx_cfg_req *, int, struct proc *,
525     kauth_cred_t);
526 extern int nx_netif_prov_nx_ctor(struct kern_nexus *);
527 extern void nx_netif_prov_nx_dtor(struct kern_nexus *);
528 extern int nx_netif_prov_nx_mem_info(struct kern_nexus *,
529     struct kern_pbufpool **, struct kern_pbufpool **);
530 extern size_t nx_netif_prov_nx_mib_get(struct kern_nexus *nx,
531     struct nexus_mib_filter *, void *__sized_by(len), size_t len, struct proc *);
532 extern int nx_netif_prov_nx_stop(struct kern_nexus *);
533 
534 extern void nx_netif_reap(struct nexus_netif_adapter *, struct ifnet *,
535     uint32_t, boolean_t);
536 
537 extern void nx_netif_copy_stats(struct nexus_netif_adapter *,
538     struct if_netif_stats *);
539 extern struct nexus_netif_adapter * na_netif_alloc(zalloc_flags_t);
540 extern void na_netif_free(struct nexus_adapter *);
541 extern void na_netif_finalize(struct nexus_netif_adapter *, struct ifnet *);
542 extern void nx_netif_llw_detach_notify(void *);
543 extern void nx_netif_config_interface_advisory(struct kern_nexus *, bool);
544 
545 /*
546  * netif netagent API
547  */
548 extern void nx_netif_agent_init(struct nx_netif *);
549 extern void nx_netif_agent_fini(struct nx_netif *);
550 extern int nx_netif_netagent_flow_add(struct nx_netif *, struct nx_flow_req *);
551 extern int nx_netif_netagent_flow_del(struct nx_netif *, struct nx_flow_req *);
552 
553 /*
554  * "Interrupt" mitigation API. This is used by the netif adapter to reduce
555  * the number of "interrupt" requests/wakeup to clients on incoming packets.
556  */
557 extern void nx_netif_mit_init(struct nx_netif *, const struct ifnet *,
558     struct nx_netif_mit *, struct __kern_channel_ring *, boolean_t);
559 extern void nx_netif_mit_cleanup(struct nx_netif_mit *);
560 extern int nx_netif_mit_tx_intr(struct __kern_channel_ring *, struct proc *,
561     uint32_t, uint32_t *);
562 extern int nx_netif_mit_rx_intr(struct __kern_channel_ring *, struct proc *,
563     uint32_t, uint32_t *);
564 
565 /*
566  * Interface filter API
567  */
568 #define NETIF_FILTER_RX         0x0001
569 #define NETIF_FILTER_TX         0x0002
570 #define NETIF_FILTER_SOURCE     0x0004
571 #define NETIF_FILTER_INJECT     0x0008
572 extern errno_t nx_netif_filter_inject(struct nexus_netif_adapter *,
573     struct netif_filter *, struct __kern_packet *, uint32_t);
574 extern errno_t nx_netif_filter_add(struct nx_netif *, nexus_port_t, void *,
575     errno_t (*)(void *, struct __kern_packet *, uint32_t),
576     struct netif_filter **);
577 extern errno_t nx_netif_filter_remove(struct nx_netif *, struct netif_filter *);
578 extern void nx_netif_filter_init(struct nx_netif *);
579 extern void nx_netif_filter_fini(struct nx_netif *);
580 extern void nx_netif_filter_enable(struct nx_netif *);
581 extern void nx_netif_filter_disable(struct nx_netif *);
582 
583 /*
584  * These callbacks are invoked when a packet chain has traversed the full
585  * filter chain.
586  */
587 extern errno_t nx_netif_filter_rx_cb(struct nexus_netif_adapter *,
588     struct __kern_packet *, uint32_t);
589 extern errno_t nx_netif_filter_tx_cb(struct nexus_netif_adapter *,
590     struct __kern_packet *, uint32_t);
591 
592 /*
593  * These are called by nx_netif_filter_tx_cb() to feed filtered packets
594  * back to driver.
595  */
596 extern errno_t
597     nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *,
598     mbuf_svc_class_t, struct mbuf *);
599 extern errno_t
600     nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *,
601     kern_packet_svc_class_t, struct __kern_packet *);
602 
603 /*
604  * Called by nx_netif_na_find() to create a filter nexus adapter.
605  */
606 extern int netif_filter_na_create(struct kern_nexus *, struct chreq *,
607     struct nexus_adapter **);
608 
609 /*
610  * Callbacks from ifnet
611  */
612 extern errno_t nx_netif_native_tx_dequeue(struct nexus_netif_adapter *,
613     uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
614     uint32_t *, uint32_t *, boolean_t, errno_t);
615 extern errno_t nx_netif_native_tx_get_len(struct nexus_netif_adapter *,
616     uint32_t, uint32_t *, uint32_t *, errno_t);
617 extern errno_t nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *,
618     uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
619     uint32_t *, uint32_t *, boolean_t, errno_t);
620 extern errno_t nx_netif_compat_tx_get_len(struct nexus_netif_adapter *,
621     uint32_t, uint32_t *, uint32_t *, errno_t);
622 
623 /*
624  * doorbell dequeue tunable
625  */
626 extern uint32_t nx_netif_doorbell_max_dequeue;
627 
628 /*
629  * Default drop tunable
630  */
631 extern uint32_t nx_netif_filter_default_drop;
632 
633 /*
634  * Flow API
635  */
636 #define NETIF_FLOW_SOURCE       0x0001
637 #define NETIF_FLOW_INJECT       0x0002
638 #define NETIF_FLOW_OUTBOUND     0x0004 /* Assumes inbound if flag is missing */
639 
640 extern errno_t nx_netif_demux(struct nexus_netif_adapter *,
641     struct __kern_packet *, struct __kern_packet **, struct nexus_pkt_stats *,
642     uint32_t);
643 extern errno_t nx_netif_flow_add(struct nx_netif *, nexus_port_t,
644     struct netif_flow_desc *, void *, errno_t (*)(void *, void *, uint32_t),
645     struct netif_flow **);
646 extern errno_t nx_netif_flow_remove(struct nx_netif *, struct netif_flow *);
647 extern void nx_netif_flow_init(struct nx_netif *);
648 extern void nx_netif_flow_fini(struct nx_netif *);
649 extern void nx_netif_flow_enable(struct nx_netif *);
650 extern void nx_netif_flow_disable(struct nx_netif *);
651 extern void nx_netif_snoop(struct nx_netif *, struct __kern_packet *,
652     boolean_t);
653 extern boolean_t nx_netif_validate_macaddr(struct nx_netif *,
654     struct __kern_packet *, uint32_t);
655 extern boolean_t nx_netif_flow_match(struct nx_netif *, struct __kern_packet *,
656     struct netif_flow *, uint32_t);
657 extern struct netif_flow * nx_netif_flow_classify(struct nx_netif *,
658     struct __kern_packet *, uint32_t);
659 extern void nx_netif_flow_release(struct nx_netif *, struct netif_flow *);
660 extern int netif_vp_na_create(struct kern_nexus *, struct chreq *,
661     struct nexus_adapter **);
662 extern errno_t netif_vp_na_channel_event(struct nx_netif *, uint32_t,
663     struct __kern_channel_event *, uint16_t);
664 
665 /*
666  * Disable all checks on inbound/outbound packets on VP adapters
667  */
668 extern uint32_t nx_netif_vp_accept_all;
669 
670 /*
671  * Utility functions
672  */
673 extern struct __kern_packet *nx_netif_alloc_packet(struct kern_pbufpool *,
674     uint32_t, kern_packet_t *);
675 extern void nx_netif_free_packet(struct __kern_packet *);
676 extern void nx_netif_free_packet_chain(struct __kern_packet *, int *);
677 extern void netif_ifp_inc_traffic_class_out_pkt(struct ifnet *, uint32_t,
678     uint32_t, uint32_t);
679 
680 #define NETIF_CONVERT_RX        0x0001
681 #define NETIF_CONVERT_TX        0x0002
682 
683 extern struct __kern_packet *
684     nx_netif_mbuf_to_filter_pkt_chain(struct nexus_netif_adapter *,
685     struct mbuf *, uint32_t);
686 extern struct mbuf *
687     nx_netif_filter_pkt_to_mbuf_chain(struct nexus_netif_adapter *,
688     struct __kern_packet *, uint32_t);
689 
690 extern struct __kern_packet *
691     nx_netif_pkt_to_filter_pkt(struct nexus_netif_adapter *,
692     struct __kern_packet *, uint32_t);
693 extern struct __kern_packet *
694     nx_netif_pkt_to_filter_pkt_chain(struct nexus_netif_adapter *,
695     struct __kern_packet *, uint32_t);
696 extern struct __kern_packet *
697     nx_netif_filter_pkt_to_pkt_chain(struct nexus_netif_adapter *,
698     struct __kern_packet *, uint32_t);
699 
700 extern struct mbuf *
701     nx_netif_pkt_to_mbuf(struct nexus_netif_adapter *,
702     struct __kern_packet *, uint32_t);
703 extern struct __kern_packet *
704     nx_netif_pkt_to_pkt(struct nexus_netif_adapter *,
705     struct __kern_packet *, uint32_t);
706 
707 extern void nx_netif_mbuf_chain_info(struct mbuf *,
708     struct mbuf **, uint32_t *, uint32_t *);
709 extern void nx_netif_pkt_chain_info(struct __kern_packet *,
710     struct __kern_packet **, uint32_t *, uint32_t *);
711 extern int nx_netif_get_max_mtu(ifnet_t, uint32_t *);
712 
713 extern void nx_netif_mit_config(struct nexus_netif_adapter *,
714     boolean_t *, boolean_t *, boolean_t *, boolean_t *);
715 
716 extern void nx_netif_vp_region_params_adjust(struct nexus_adapter *,
717     struct skmem_region_params *);
718 
719 extern void nx_netif_pktap_output(ifnet_t, int, struct __kern_packet *);
720 
721 extern int netif_rx_notify_default(struct __kern_channel_ring *,
722     struct proc *p, uint32_t);
723 extern int netif_rx_notify_fast(struct __kern_channel_ring *,
724     struct proc *p, uint32_t);
725 extern int netif_llw_rx_notify_default(struct __kern_channel_ring *,
726     struct proc *p, uint32_t);
727 extern int netif_llw_rx_notify_fast(struct __kern_channel_ring *,
728     struct proc *p, uint32_t);
729 extern void netif_receive(struct nexus_netif_adapter *,
730     struct __kern_packet *, struct nexus_pkt_stats *);
731 
732 #define NETIF_XMIT_FLAG_CHANNEL  0x0001
733 #define NETIF_XMIT_FLAG_HOST     0x0002
734 #define NETIF_XMIT_FLAG_REDIRECT 0x0004
735 #define NETIF_XMIT_FLAG_PACING   0x0008
736 extern void netif_transmit(struct ifnet *, uint32_t);
737 extern int netif_ring_tx_refill(const kern_channel_ring_t,
738     uint32_t, uint32_t, boolean_t, boolean_t *, boolean_t);
739 extern void netif_hwna_set_mode(struct nexus_adapter *, netif_mode_t,
740     void (*)(struct nexus_adapter *, struct __kern_packet *,
741     struct nexus_pkt_stats *));
742 extern void netif_hwna_clear_mode(struct nexus_adapter *);
743 
744 /*
745  * rxpoll functions
746  */
747 extern errno_t netif_rxpoll_set_params(struct ifnet *,
748     struct ifnet_poll_params *, boolean_t locked);
749 extern void netif_rxpoll_compat_thread_func(void *, wait_result_t);
750 
751 /*
752  * GSO functions
753  */
754 extern int netif_gso_dispatch(struct ifnet *ifp, struct mbuf *m);
755 extern void netif_gso_init(void);
756 extern void netif_gso_fini(void);
757 
758 /*
759  * Logical link functions
760  */
761 extern void nx_netif_llink_retain(struct netif_llink *);
762 extern void nx_netif_llink_release(struct netif_llink **);
763 extern void nx_netif_qset_retain(struct netif_qset *);
764 extern void nx_netif_qset_release(struct netif_qset **);
765 extern void nx_netif_llink_init(struct nx_netif *);
766 extern void nx_netif_llink_fini(struct nx_netif *);
767 extern struct netif_qset * nx_netif_find_qset(struct nx_netif *, uint64_t);
768 extern struct netif_qset * nx_netif_get_default_qset_noref(struct nx_netif *);
769 extern int netif_qset_enqueue(struct netif_qset *, struct __kern_packet *,
770     struct __kern_packet *, uint32_t, uint32_t, uint32_t *, uint32_t *);
771 extern int nx_netif_default_llink_config(struct nx_netif *,
772     struct kern_nexus_netif_llink_init *);
773 extern void nx_netif_llink_config_free(struct nx_netif *);
774 extern int nx_netif_llink_ext_init_default_queues(struct kern_nexus *);
775 extern void nx_netif_llink_ext_fini_default_queues(struct kern_nexus *);
776 extern int nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *,
777     bool);
778 extern int nx_netif_llink_add(struct nx_netif *,
779     struct kern_nexus_netif_llink_init *, struct netif_llink **);
780 extern int nx_netif_llink_remove(struct nx_netif *,
781     kern_nexus_netif_llink_id_t);
782 extern int nx_netif_notify_steering_info(struct nx_netif *,
783     struct netif_qset *, struct ifnet_traffic_descriptor_common *, bool);
784 __END_DECLS
785 #endif /* CONFIG_NEXUS_NETIF */
786 #include <skywalk/nexus/netif/nx_netif_compat.h>
787 #include <skywalk/nexus/netif/nx_netif_host.h>
788 #endif /* _SKYWALK_NEXUS_NETIF_H_ */
789