xref: /xnu-8020.121.3/bsd/skywalk/nexus/netif/nx_netif.h (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2015-2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _SKYWALK_NEXUS_NETIF_H_
30 #define _SKYWALK_NEXUS_NETIF_H_
31 
32 #include <skywalk/os_skywalk_private.h>
33 #include <skywalk/nexus/nexus_pktq.h>
34 
35 #if CONFIG_NEXUS_NETIF
36 
/* nexus provider name for netif instances */
#define NEXUS_PROVIDER_NET_IF           "com.apple.nexus.netif"

#define NX_NETIF_MAXPORTS       128        /* max nexus ports per netif */
#define NX_NETIF_EVENT_RING_NUM      1     /* number of event rings */
#define NX_NETIF_EVENT_RING_SIZE     32    /* default event ring size */
42 
/*
 * Per-port interface filter.  nf_cb_func is the filter callback
 * (see nx_netif_filter_add()); nf_cb_arg is the opaque argument
 * handed back to it.
 */
struct netif_filter {
	STAILQ_ENTRY(netif_filter) nf_link;     /* nif_filter_list linkage */
	nexus_port_t            nf_port;        /* nexus port of this filter */
	uint32_t                nf_refcnt;      /* reference count */
	void                    *nf_cb_arg;     /* opaque callback argument */
	errno_t                 (*nf_cb_func)(void *,
	    struct __kern_packet *, uint32_t);  /* filter callback */
};
STAILQ_HEAD(netif_filter_head, netif_filter);
52 
/*
 * Flow classification descriptor; matching is by EtherType and/or
 * IPv6 local/remote address (see netif_flowtable_type_t below).
 */
struct netif_flow_desc {
	uint16_t        fd_ethertype;   /* EtherType to match */
	struct in6_addr fd_laddr;       /* local IPv6 address */
	struct in6_addr fd_raddr;       /* remote IPv6 address */
};

/* Port info for a netif port: common header plus flow descriptor. */
struct netif_port_info {
	struct nx_port_info_header npi_hdr;     /* common port info header */
	struct netif_flow_desc  npi_fd;         /* flow descriptor */
};
63 
/*
 * A flow registered on a netif (see nx_netif_flow_add()).  Each flow
 * is linked on both the per-netif flow list (nf_link) and the flow
 * table (nf_table_link).
 */
struct netif_flow {
	SLIST_ENTRY(netif_flow) nf_link;        /* nif_flow_list linkage */
	SLIST_ENTRY(netif_flow) nf_table_link;  /* flow table linkage */
	nexus_port_t            nf_port;        /* nexus port of this flow */
	uint32_t                nf_refcnt;      /* reference count */
	struct netif_flow_desc  nf_desc;        /* classification criteria */
	void                    *nf_cb_arg;     /* opaque callback argument */
	errno_t                 (*nf_cb_func)(void *,
	    void *, uint32_t);                  /* flow callback */
};

/* Flow table classification scheme */
typedef enum {
	FT_TYPE_ETHERTYPE,      /* classify by EtherType */
	FT_TYPE_IPV6_ULA        /* classify by IPv6 ULA addresses */
} netif_flowtable_type_t;
79 
/*
 * A flow table instance: an operations vector plus opaque
 * implementation-private state.
 */
struct netif_flowtable {
	struct netif_flowtable_ops      *ft_ops;        /* table operations */
	struct netif_flowtable_ops      *__single ft_ops_alias; /* unused; placeholder removed */
	void                            *ft_internal;   /* implementation state */
};

/* Flow table operation signatures */
typedef int netif_flow_lookup_t(struct netif_flowtable *,
    struct __kern_packet *, uint32_t, struct netif_flow **);
typedef boolean_t netif_flow_match_t(struct netif_flow_desc *,
    struct netif_flow_desc *);
typedef int netif_flow_info_t(struct __kern_packet *,
    struct netif_flow_desc *, uint32_t);
typedef int netif_flow_insert_t(struct netif_flowtable *,
    struct netif_flow *);
typedef void netif_flow_remove_t(struct netif_flowtable *,
    struct netif_flow *);
typedef struct netif_flowtable *netif_flow_table_alloc_t(
	struct netif_flowtable_ops *);
typedef void netif_flow_table_free_t(struct netif_flowtable *);

/* Per-table-type operations vector */
struct netif_flowtable_ops {
	netif_flow_lookup_t           *nfo_lookup;      /* packet -> flow lookup */
	netif_flow_match_t            *nfo_match;       /* compare two flow descs */
	netif_flow_info_t             *nfo_info;        /* extract desc from packet */
	netif_flow_insert_t           *nfo_insert;      /* add flow to table */
	netif_flow_remove_t           *nfo_remove;      /* remove flow from table */
	netif_flow_table_alloc_t      *nfo_table_alloc; /* create a table */
	netif_flow_table_free_t       *nfo_table_free;  /* destroy a table */
};
108 
SLIST_HEAD(netif_flow_head, netif_flow);
/*
 * Netif nexus adapter.
 */
struct nexus_netif_adapter {
	/*
	 * This is an overlay structure on nexus_adapter;
	 * make sure it contains 'up' as the first member.
	 */
	struct nexus_adapter      nifna_up;
	struct nx_netif           *nifna_netif;  /* backpointer to netif instance */

	struct nx_netif_mit       *nifna_tx_mit; /* TX mitigation state */
	struct nx_netif_mit       *nifna_rx_mit; /* RX mitigation state */

	/*
	 * XXX For filter or vpna only
	 */
	union {
		struct netif_filter     *nifna_filter;  /* filter adapter */
		struct netif_flow       *nifna_flow;    /* virtual-port adapter */
	};
};
129 
/*
 * A single driver RX or TX queue belonging to a netif queue set.
 */
struct netif_queue {
	decl_lck_mtx_data(, nq_lock);
	struct netif_qset    *nq_qset; /* backpointer to parent netif qset */
	struct pktq          nq_pktq;  /* queued packets */
	void                 *nq_ctx;  /* opaque context (presumably driver-provided, like nqs_ctx) */
	kern_packet_svc_class_t nq_svc; /* service class of TX queue */
	uint16_t             nq_flags; /* NETIF_QUEUE_* */
}__attribute__((aligned(sizeof(uint64_t))));

/* values for nq_flags */
#define NETIF_QUEUE_EXT_INITED   0x0001 /* nxnpi_queue_init() succeeded */
#define NETIF_QUEUE_IS_RX        0x0002 /* RX queue, else TX */
142 
/*
 * Accessors for entries of a qset's nqs_driver_queues[] array: RX
 * queues occupy indices [0 .. nqs_num_rx_queues-1], followed by the
 * TX queues.
 */
#define _NETIF_QSET_QUEUE(_p, _n)    \
    (struct netif_queue *)(void *)((uint8_t *)((_p)->nqs_driver_queues) + \
    ((_n) * sizeof(struct netif_queue)))
#define NETIF_QSET_RX_QUEUE(_p, _n)    _NETIF_QSET_QUEUE(_p, _n)
#define NETIF_QSET_TX_QUEUE(_p, _n)    \
    _NETIF_QSET_QUEUE(_p, (_p)->nqs_num_rx_queues + (_n))

/*
 * Queue set identifier: internal llink id in bits 16..31, qset index
 * in bits 0..15; the top 32 bits are unused for now.
 */
#define NETIF_QSET_ID_ENCODE(llink_id_internal, qset_idx) \
    ((((llink_id_internal) << 16) | (qset_idx)) & 0xffffffff)
153 
/*
 * A queue set: a group of driver RX/TX queues belonging to a logical
 * link.  The trailing nqs_driver_queues[] array is allocated inline.
 */
struct netif_qset {
	struct netif_llink         *nqs_llink; /* backpointer to parent logical link */
	struct ifclassq            *nqs_ifcq;  /* classq, when AQM is provided */
	SLIST_ENTRY(netif_qset)    nqs_list;   /* nll_qset_list linkage */
	void                       *nqs_ctx; /* context provided by driver */
	uint64_t                   nqs_id;  /* queue set identifier */
	uint8_t                    nqs_idx; /* queue set index */
	uint16_t                   nqs_flags; /* NETIF_QSET_FLAG_* */
	uint8_t                    nqs_num_rx_queues;
	uint8_t                    nqs_num_tx_queues;
	/*
	 * nqs_driver_queues is organized as:
	 * nqs_driver_queues[0 .. nqs_num_rx_queues-1] hold the RX queues;
	 * nqs_driver_queues[nqs_num_rx_queues ..
	 *     nqs_num_rx_queues+nqs_num_tx_queues-1] hold the TX queues.
	 * (NOTE(review): zero-length array is a GNU extension; C99
	 * flexible array member syntax would also work here.)
	 */
	struct netif_queue         nqs_driver_queues[0]
	__attribute__((aligned(sizeof(uint64_t))));
};

/* values for nqs_flags */
#define NETIF_QSET_FLAG_DEFAULT     0x0001 /* default queue set of the logical link */
#define NETIF_QSET_FLAG_AQM         0x0002 /* provides AQM */
#define NETIF_QSET_FLAG_LOW_LATENCY 0x0004 /* provides low latency service */
#define NETIF_QSET_FLAG_EXT_INITED  0x0008 /* nxnpi_qset_init() succeeded */

/* true if this is the logical link's default queue set */
#define NETIF_DEFAULT_QSET(_qs)    ((_qs)->nqs_flags & NETIF_QSET_FLAG_DEFAULT)
180 
/*
 * A logical link: a collection of queue sets on a netif instance.
 * Reference counted via nll_refcnt (nx_netif_llink_retain/release).
 */
struct netif_llink {
	struct nx_netif             *nll_nif; /* backpointer to parent netif instance */
	STAILQ_ENTRY(netif_llink)   nll_link; /* nif_llink_list linkage */
	SLIST_HEAD(, netif_qset)    nll_qset_list; /* queue sets on this link */
	struct netif_qset           *nll_default_qset; /* default queue set */
	struct os_refcnt            nll_refcnt;
#define NETIF_LLINK_ID_DEFAULT    0     /* id of the default logical link */
	kern_nexus_netif_llink_id_t nll_link_id;
	uint16_t                    nll_link_id_internal; /* used in qset id encoding */
	uint16_t                    nll_qset_cnt; /* number of queue sets */
	uint8_t                     nll_state; /* NETIF_LLINK_STATE_* */
	uint8_t                     nll_flags; /* NETIF_LLINK_FLAG_* */
	void                        *nll_ctx; /* context provided by driver */
};
STAILQ_HEAD(netif_llink_head, netif_llink);

/* values for nll_flags */
#define NETIF_LLINK_FLAG_DEFAULT    0x1 /* default logical link */

/* values for nll_state */
#define NETIF_LLINK_STATE_INIT         0x1 /* initialized and ready for use */
#define NETIF_LLINK_STATE_DESTROYED    0x2 /* not available for use */

/* true if this is the netif's default logical link */
#define NETIF_DEFAULT_LLINK(_ll)  ((_ll)->nll_flags & NETIF_LLINK_FLAG_DEFAULT)
205 
SLIST_HEAD(netif_agent_flow_head, netif_agent_flow);
/*
 * A flow added through the netif netagent
 * (see nx_netif_netagent_flow_add()).
 */
struct netif_agent_flow {
	SLIST_ENTRY(netif_agent_flow) naf_link; /* nif_agent_flow_list linkage */
	uuid_t                  naf_flow_uuid;  /* flow UUID */
	uuid_t                  naf_bind_key;   /* port bind key */
	nexus_port_t            naf_nx_port;    /* nexus port of the flow */
	uint16_t                naf_flags;
	pid_t                   naf_pid;        /* owning process */
	union sockaddr_in_4_6   naf_daddr;      /* destination address */
	union sockaddr_in_4_6   naf_saddr;      /* source address */
};
217 
/* cast a nexus_adapter pointer to its netif overlay (see nifna_up) */
#define NIFNA(_na)       ((struct nexus_netif_adapter *)(_na))

/* nif_flags */
/*
 * This is named differently from the flow classification rule
 * (IPV6 ULA) because this gives us the flexibility of using
 * different types of classification in the future.
 */
#define NETIF_FLAG_LOW_LATENCY          0x00000001
#define NETIF_FLAG_COMPAT               0x00000002
#define NETIF_FLAG_LLINK_INITIALIZED    0x00000004
#define NETIF_IS_LOW_LATENCY(n) \
    (((n)->nif_flags & NETIF_FLAG_LOW_LATENCY) != 0)
#define NETIF_IS_COMPAT(n) \
    (((n)->nif_flags & NETIF_FLAG_COMPAT) != 0)
#define NETIF_LLINK_ENABLED(n) \
    (((n)->nif_flags & NETIF_FLAG_LLINK_INITIALIZED) != 0)
/* true if the default-drop tunable is set and the filter subsystem is up */
#define NETIF_DEFAULT_DROP_ENABLED(n) \
    (nx_netif_filter_default_drop != 0 && \
    (((n)->nif_filter_flags & NETIF_FILTER_FLAG_INITIALIZED) != 0))

/* nif_agent_flags */
#define NETIF_AGENT_FLAG_REGISTERED     0x00000001
#define NETIF_AGENT_FLAG_ADDED          0x00000002

/* nif_filter_flags */
#define NETIF_FILTER_FLAG_INITIALIZED   0x00000001
#define NETIF_FILTER_FLAG_ENABLED       0x00000002

/* nif_flow_flags */
#define NETIF_FLOW_FLAG_INITIALIZED     0x00000001
#define NETIF_FLOW_FLAG_ENABLED         0x00000002

/* nif_llink_flags */
#define NETIF_LLINK_FLAG_INITIALIZED    0x00000001
253 
/* Operating mode of a hardware netif; used by netif_hwna_set_mode() */
typedef enum {
	NETIF_MODE_NONE,        /* no special mode */
	NETIF_MODE_FSW,         /* flowswitch mode */
	NETIF_MODE_LLW          /* llw mode (presumably low-latency; see netif_llw_rx_notify_*) */
} netif_mode_t;
260 
/*
 * nx_netif is a descriptor for a netif nexus instance.
 */
struct nx_netif {
	decl_lck_rw_data(, nif_lock);           /* see NETIF_{R,W}LOCK macros */
	struct kern_nexus       *nif_nx;        /* backpointer to the nexus */

	struct nxbind           *nif_dev_nxb;   /* device port binding */
	struct nxbind           *nif_host_nxb;  /* host port binding */
	uuid_t                  nif_uuid;       /* attachment UUID */
	struct netif_stats      nif_stats;      /* netif statistics */
	uint32_t                nif_flags;      /* NETIF_FLAG_* */
	struct os_refcnt        nif_refcnt;     /* nx_netif_retain/release */

	/* netagent state (presumably guarded by nif_agent_lock) */
	decl_lck_mtx_data(, nif_agent_lock);
	struct netif_agent_flow_head nif_agent_flow_list; /* agent flows */
	uint32_t                nif_agent_flow_cnt;
	uint32_t                nif_agent_flags;  /* NETIF_AGENT_FLAG_* */
	netagent_session_t      nif_agent_session;
	uuid_t                  nif_agent_uuid;

	uint32_t                nif_hwassist;
	uint32_t                nif_capabilities;
	uint32_t                nif_capenable;
	uint64_t                nif_input_rate; /* device input rate limit */

	struct ifnet            *nif_ifp;       /* attached interface */
	struct nx_flowswitch    *nif_fsw;       /* attached flowswitch nexus */
	struct sk_nexusadv      *nif_fsw_nxadv; /* flowswitch nexus advisory */
	struct netif_nexus_advisory *nif_netif_nxadv; /* netif nexus advisory */

	/* packet-mbuf copy routines */
	pkt_copy_from_mbuf_t    *nif_pkt_copy_from_mbuf;
	pkt_copy_to_mbuf_t      *nif_pkt_copy_to_mbuf;
	pkt_copy_from_pkt_t     *nif_pkt_copy_from_pkt;

	/* packet filtering (presumably guarded by nif_filter_lock) */
	decl_lck_mtx_data(, nif_filter_lock);
	uint32_t                nif_filter_flags;  /* NETIF_FILTER_FLAG_* */
	uint32_t                nif_filter_vp_cnt;
	uint32_t                nif_filter_cnt;
	struct kern_pbufpool    *nif_filter_pp;
	struct netif_filter_head nif_filter_list;  /* registered filters */
	/* TX traffic already processed by the filter chain, per class */
	union {
		struct nx_mbq   nif_tx_processed_mbq[MBUF_TC_MAX];
		struct nx_pktq  nif_tx_processed_pktq[KPKT_TC_MAX];
	};

	/* virtual port (presumably guarded by nif_flow_lock) */
	decl_lck_mtx_data(, nif_flow_lock);
	uint32_t                nif_vp_cnt;
	uint32_t                nif_flow_flags;    /* NETIF_FLOW_FLAG_* */
	uint32_t                nif_flow_cnt;
	struct netif_flow_head  nif_flow_list;     /* registered flows */
	struct netif_flowtable  *nif_flow_table;   /* flow lookup table */
	struct kern_channel     *nif_hw_ch;
	uint32_t                nif_hw_ch_refcnt;

	/* logical link (guarded by nif_llink_lock) */
	decl_lck_rw_data(, nif_llink_lock);
	struct kern_nexus_netif_llink_init *nif_default_llink_params;
	struct netif_llink         *nif_default_llink;
	STAILQ_HEAD(, netif_llink) nif_llink_list;
	uint16_t                   nif_llink_cnt;

	/* capability configuration callback function and context */
	kern_nexus_capab_interface_advisory_config_fn_t nif_intf_adv_config;
	void *nif_intf_adv_prov_ctx;

#if (DEVELOPMENT || DEBUG)
	struct skoid            nif_skoid;
#endif /* !DEVELOPMENT && !DEBUG */
};

/* fetch the nx_netif instance stored in a kern_nexus */
#define NX_NETIF_PRIVATE(_nx) ((struct nx_netif *)(_nx)->nx_arg)
336 
/* nif_lock (reader-writer) manipulation */
#define NETIF_RWINIT(_nif)                \
	lck_rw_init(&(_nif)->nif_lock, &nexus_lock_group, &nexus_lock_attr)
#define NETIF_WLOCK(_nif)                 \
	lck_rw_lock_exclusive(&(_nif)->nif_lock)
#define NETIF_WUNLOCK(_nif)               \
	lck_rw_unlock_exclusive(&(_nif)->nif_lock)
#define NETIF_WLOCKTORLOCK(_nif)          \
	lck_rw_lock_exclusive_to_shared(&(_nif)->nif_lock)
#define NETIF_RLOCK(_nif)                 \
	lck_rw_lock_shared(&(_nif)->nif_lock)
#define NETIF_RLOCKTOWLOCK(_nif)          \
	lck_rw_lock_shared_to_exclusive(&(_nif)->nif_lock)
#define NETIF_RTRYLOCK(_nif)              \
	lck_rw_try_lock(&(_nif)->nif_lock, LCK_RW_TYPE_SHARED)
#define NETIF_RUNLOCK(_nif)               \
	lck_rw_unlock_shared(&(_nif)->nif_lock)
#define NETIF_UNLOCK(_nif)                \
	lck_rw_done(&(_nif)->nif_lock)
#define NETIF_RWDESTROY(_nif)             \
	lck_rw_destroy(&(_nif)->nif_lock, &nexus_lock_group)
#define NETIF_WLOCK_ASSERT_HELD(_nif)     \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_EXCLUSIVE)
#define NETIF_RLOCK_ASSERT_HELD(_nif)     \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_SHARED)
#define NETIF_LOCK_ASSERT_HELD(_nif)      \
	LCK_RW_ASSERT(&(_nif)->nif_lock, LCK_RW_ASSERT_HELD)
363 
SYSCTL_DECL(_kern_skywalk_netif);

/*
 * Macros to determine if an interface is skywalk capable or skywalk enabled.
 * See the magic field in struct nexus_adapter.
 */
#define SKYWALK_CAPABLE(ifp)                                            \
	(NA(ifp) != NULL && (ifnet_capabilities_supported(ifp) & IFCAP_SKYWALK))

/* set the skywalk capability and enable bits (under the ifnet lock) */
#define SKYWALK_SET_CAPABLE(ifp) do {                                   \
	ifnet_lock_exclusive(ifp);                                      \
	(ifp)->if_capabilities |= IFCAP_SKYWALK;                        \
	(ifp)->if_capenable |= IFCAP_SKYWALK;                           \
	ifnet_lock_done(ifp);                                           \
} while (0)

/* clear the skywalk capability and enable bits (under the ifnet lock) */
#define SKYWALK_CLEAR_CAPABLE(ifp) do {                                 \
	ifnet_lock_exclusive(ifp);                                      \
	(ifp)->if_capabilities &= ~IFCAP_SKYWALK;                       \
	(ifp)->if_capenable &= ~IFCAP_SKYWALK;                          \
	ifnet_lock_done(ifp);                                           \
} while (0)

/* true if the interface is marked skywalk-native */
#define SKYWALK_NATIVE(ifp)                                             \
	((ifp)->if_eflags & IFEF_SKYWALK_NATIVE)
389 
/* Mitigation operating mode (see struct nx_netif_mit below) */
typedef enum {
	MIT_MODE_SIMPLE,
	MIT_MODE_ADVANCED_STATIC,
	MIT_MODE_ADVANCED_DYNAMIC,
} mit_mode_t;

/*
 * Mitigation support.
 */
struct mit_cfg_tbl {
	uint32_t cfg_plowat;            /* packets low watermark */
	uint32_t cfg_phiwat;            /* packets high watermark */
	uint32_t cfg_blowat;            /* bytes low watermark */
	uint32_t cfg_bhiwat;            /* bytes high watermark */
	uint32_t cfg_ival;              /* delay interval (in microsecond) */
};

/* number of entries in the per-interface mitigation config table */
#define NETIF_MIT_CFG_TBL_MAX_CFG       5
408 
/*
 * Per-ring "interrupt" mitigation state; set up by nx_netif_mit_init().
 */
struct nx_netif_mit {
	decl_lck_spin_data(, mit_lock); /* see MIT_SPIN_LOCK macros */
	volatile struct __kern_channel_ring *mit_ckr;  /* kring backpointer */
	uint32_t        mit_flags;      /* NETIF_MITF_* */
	uint32_t        mit_requests;
	uint32_t        mit_interval;

	/*
	 * Adaptive mitigation.
	 */
	uint32_t        mit_cfg_idx_max;        /* highest config selector */
	uint32_t        mit_cfg_idx;            /* current config selector */
	const struct mit_cfg_tbl *mit_cfg;      /* current config mapping */
	mit_mode_t      mit_mode;               /* current mode */
	uint32_t        mit_packets_avg;        /* average # of packets */
	uint32_t        mit_packets_min;        /* smallest # of packets */
	uint32_t        mit_packets_max;        /* largest # of packets */
	uint32_t        mit_bytes_avg;          /* average # of bytes */
	uint32_t        mit_bytes_min;          /* smallest # of bytes */
	uint32_t        mit_bytes_max;          /* largest # of bytes */

	struct pktcntr  mit_sstats;             /* pkts & bytes per sampling */
	struct timespec mit_mode_holdtime;      /* mode holdtime in nsec */
	struct timespec mit_mode_lasttime;      /* last mode change time nsec */
	struct timespec mit_sample_time;        /* sampling holdtime in nsec */
	struct timespec mit_sample_lasttime;    /* last sampling time in nsec */
	struct timespec mit_start_time;         /* time of start work in nsec */

	/* mitigation worker thread */
	struct thread   *mit_thread;
	char            mit_name[MAXTHREADNAMESIZE];    /* thread name */

	const struct ifnet      *mit_netif_ifp;
	/* interface-specific mitigation table */
	struct mit_cfg_tbl mit_tbl[NETIF_MIT_CFG_TBL_MAX_CFG];

#if (DEVELOPMENT || DEBUG)
	struct skoid    mit_skoid;
#endif /* !DEVELOPMENT && !DEBUG */
};

/* values for mit_flags */
#define NETIF_MITF_INITIALIZED  0x00000001      /* has been initialized */
#define NETIF_MITF_SAMPLING     0x00000002      /* busy sampling stats */
#define NETIF_MITF_SIMPLE       0x00000004      /* no stats, no delay */
#define NETIF_MITF_READY        0x10000000      /* thread is ready */
#define NETIF_MITF_RUNNING      0x20000000      /* thread is running */
#define NETIF_MITF_TERMINATING  0x40000000      /* thread is terminating */
#define NETIF_MITF_TERMINATED   0x80000000      /* thread is terminated */
456 
/* mit_lock (spinlock) manipulation */
#define MIT_SPIN_LOCK(_mit)                     \
	lck_spin_lock(&(_mit)->mit_lock)
#define MIT_SPIN_LOCK_ASSERT_HELD(_mit)         \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_OWNED)
#define MIT_SPIN_LOCK_ASSERT_NOTHELD(_mit)      \
	LCK_SPIN_ASSERT(&(_mit)->mit_lock, LCK_ASSERT_NOTOWNED)
#define MIT_SPIN_UNLOCK(_mit)                   \
	lck_spin_unlock(&(_mit)->mit_lock)
465 
/* allocation tags for netif objects */
extern kern_allocation_name_t skmem_tag_netif_filter;
extern kern_allocation_name_t skmem_tag_netif_flow;
extern kern_allocation_name_t skmem_tag_netif_agent_flow;
extern kern_allocation_name_t skmem_tag_netif_llink;
extern kern_allocation_name_t skmem_tag_netif_qset;

__BEGIN_DECLS
/* netif nexus domain and domain provider singletons */
extern struct nxdom nx_netif_dom_s;
extern struct kern_nexus_domain_provider nx_netif_prov_s;

/* nx_netif instance lifecycle and refcounting */
extern struct nx_netif *nx_netif_alloc(zalloc_flags_t);
extern void nx_netif_free(struct nx_netif *);
extern void nx_netif_retain(struct nx_netif *);
extern void nx_netif_release(struct nx_netif *);

/* device kring setup/teardown and adapter lookup */
extern int nx_netif_dev_krings_create(struct nexus_adapter *,
    struct kern_channel *);
extern void nx_netif_dev_krings_delete(struct nexus_adapter *,
    struct kern_channel *, boolean_t);
extern int nx_netif_na_find(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct nxbind *, struct proc *, struct nexus_adapter **,
    boolean_t create);
extern int nx_netif_na_special(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
extern int nx_netif_na_special_common(struct nexus_adapter *,
    struct kern_channel *, struct chreq *, nxspec_cmd_t);
extern int nx_netif_common_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);

/* domain provider callbacks */
extern int nx_netif_prov_init(struct kern_nexus_domain_provider *);
extern int nx_netif_prov_params(struct kern_nexus_domain_provider *,
    const uint32_t, const struct nxprov_params *, struct nxprov_params *,
    struct skmem_region_params[SKMEM_REGIONS]);
extern int nx_netif_prov_mem_new(struct kern_nexus_domain_provider *,
    struct kern_nexus *, struct nexus_adapter *);
extern void nx_netif_prov_fini(struct kern_nexus_domain_provider *);
extern int nx_netif_prov_config(struct kern_nexus_domain_provider *,
    struct kern_nexus *, struct nx_cfg_req *, int, struct proc *,
    kauth_cred_t);
extern int nx_netif_prov_nx_ctor(struct kern_nexus *);
extern void nx_netif_prov_nx_dtor(struct kern_nexus *);
extern int nx_netif_prov_nx_mem_info(struct kern_nexus *,
    struct kern_pbufpool **, struct kern_pbufpool **);
extern size_t nx_netif_prov_nx_mib_get(struct kern_nexus *nx,
    struct nexus_mib_filter *, void *, size_t, struct proc *);
extern int nx_netif_prov_nx_stop(struct kern_nexus *);
512 
extern void nx_netif_reap(struct nexus_netif_adapter *, struct ifnet *,
    uint32_t, boolean_t);

extern void nx_netif_copy_stats(struct nexus_netif_adapter *,
    struct if_netif_stats *);

/* netif adapter lifecycle */
extern struct nexus_netif_adapter * na_netif_alloc(zalloc_flags_t);
extern void na_netif_free(struct nexus_adapter *);
extern void na_netif_finalize(struct nexus_netif_adapter *, struct ifnet *);

/* interface advisory reporting and configuration */
extern int nx_netif_interface_advisory_report(struct nexus_adapter *,
    const struct ifnet_interface_advisory *);
extern void nx_netif_config_interface_advisory(struct kern_nexus *, bool);

/*
 * netif netagent API
 */
extern void nx_netif_agent_init(struct nx_netif *);
extern void nx_netif_agent_fini(struct nx_netif *);
extern int nx_netif_netagent_flow_add(struct nx_netif *, struct nx_flow_req *);
extern int nx_netif_netagent_flow_del(struct nx_netif *, struct nx_flow_req *);

/*
 * "Interrupt" mitigation API. This is used by the netif adapter to reduce
 * the number of "interrupt" requests/wakeup to clients on incoming packets.
 */
extern void nx_netif_mit_init(struct nx_netif *, const struct ifnet *,
    struct nx_netif_mit *, struct __kern_channel_ring *, boolean_t);
extern void nx_netif_mit_cleanup(struct nx_netif_mit *);
extern int nx_netif_mit_tx_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);
extern int nx_netif_mit_rx_intr(struct __kern_channel_ring *, struct proc *,
    uint32_t, uint32_t *);

/*
 * Interface filter API
 */
/* flag bits passed to the filter routines below */
#define NETIF_FILTER_RX         0x0001
#define NETIF_FILTER_TX         0x0002
#define NETIF_FILTER_SOURCE     0x0004
#define NETIF_FILTER_INJECT     0x0008
extern errno_t nx_netif_filter_inject(struct nexus_netif_adapter *,
    struct netif_filter *, struct __kern_packet *, uint32_t);
extern errno_t nx_netif_filter_add(struct nx_netif *, nexus_port_t, void *,
    errno_t (*)(void *, struct __kern_packet *, uint32_t),
    struct netif_filter **);
extern errno_t nx_netif_filter_remove(struct nx_netif *, struct netif_filter *);
extern void nx_netif_filter_init(struct nx_netif *);
extern void nx_netif_filter_fini(struct nx_netif *);
extern void nx_netif_filter_enable(struct nx_netif *);
extern void nx_netif_filter_disable(struct nx_netif *);
562 
/*
 * These callbacks are invoked when a packet chain has traversed the full
 * filter chain.
 */
extern errno_t nx_netif_filter_rx_cb(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);
extern errno_t nx_netif_filter_tx_cb(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);

/*
 * These are called by nx_netif_filter_tx_cb() to feed filtered packets
 * back to driver.
 */
extern errno_t
    nx_netif_filter_tx_processed_mbuf_enqueue(struct nexus_netif_adapter *,
    mbuf_svc_class_t, struct mbuf *);
extern errno_t
    nx_netif_filter_tx_processed_pkt_enqueue(struct nexus_netif_adapter *,
    kern_packet_svc_class_t, struct __kern_packet *);

/*
 * Called by nx_netif_na_find() to create a filter nexus adapter.
 */
extern int netif_filter_na_create(struct kern_nexus *, struct chreq *,
    struct nexus_adapter **);

/*
 * Callbacks from ifnet (native and compat TX dequeue paths)
 */
extern errno_t nx_netif_native_tx_dequeue(struct nexus_netif_adapter *,
    uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
    uint32_t *, uint32_t *, boolean_t, errno_t);
extern errno_t nx_netif_native_tx_get_len(struct nexus_netif_adapter *,
    uint32_t, uint32_t *, uint32_t *, errno_t);
extern errno_t nx_netif_compat_tx_dequeue(struct nexus_netif_adapter *,
    uint32_t, uint32_t, uint32_t, classq_pkt_t *, classq_pkt_t *,
    uint32_t *, uint32_t *, boolean_t, errno_t);
extern errno_t nx_netif_compat_tx_get_len(struct nexus_netif_adapter *,
    uint32_t, uint32_t *, uint32_t *, errno_t);
602 
/*
 * doorbell dequeue tunable
 */
extern uint32_t nx_netif_doorbell_max_dequeue;

/*
 * Default drop tunable (see NETIF_DEFAULT_DROP_ENABLED())
 */
extern uint32_t nx_netif_filter_default_drop;

/*
 * Flow API
 */
/* flag bits passed to the flow routines below */
#define NETIF_FLOW_SOURCE       0x0001
#define NETIF_FLOW_INJECT       0x0002
#define NETIF_FLOW_OUTBOUND     0x0004 /* Assumes inbound if flag is missing */

extern errno_t nx_netif_demux(struct nexus_netif_adapter *,
    struct __kern_packet *, struct __kern_packet **, uint32_t);
extern errno_t nx_netif_flow_add(struct nx_netif *, nexus_port_t,
    struct netif_flow_desc *, void *, errno_t (*)(void *, void *, uint32_t),
    struct netif_flow **);
extern errno_t nx_netif_flow_remove(struct nx_netif *, struct netif_flow *);
extern void nx_netif_flow_init(struct nx_netif *);
extern void nx_netif_flow_fini(struct nx_netif *);
extern void nx_netif_flow_enable(struct nx_netif *);
extern void nx_netif_flow_disable(struct nx_netif *);
extern void nx_netif_snoop(struct nx_netif *, struct __kern_packet *,
    boolean_t);
extern boolean_t nx_netif_validate_macaddr(struct nx_netif *,
    struct __kern_packet *, uint32_t);
extern boolean_t nx_netif_flow_match(struct nx_netif *, struct __kern_packet *,
    struct netif_flow *, uint32_t);
extern struct netif_flow * nx_netif_flow_classify(struct nx_netif *,
    struct __kern_packet *, uint32_t);
extern void nx_netif_flow_release(struct nx_netif *, struct netif_flow *);
extern int netif_vp_na_create(struct kern_nexus *, struct chreq *,
    struct nexus_adapter **);

/*
 * Disable all checks on inbound/outbound packets on VP adapters
 */
extern uint32_t nx_netif_vp_accept_all;
646 
/*
 * Utility functions
 */
extern struct __kern_packet *nx_netif_alloc_packet(struct kern_pbufpool *,
    uint32_t, kern_packet_t *);
extern void nx_netif_free_packet(struct __kern_packet *);
extern void nx_netif_free_packet_chain(struct __kern_packet *, int *);

/* flag bits for the packet/mbuf conversion routines below */
#define NETIF_CONVERT_RX        0x0001
#define NETIF_CONVERT_TX        0x0002

/* packet/mbuf conversion routines */
extern struct __kern_packet *
    nx_netif_mbuf_to_filter_pkt_chain(struct nexus_netif_adapter *,
    struct mbuf *, uint32_t);
extern struct mbuf *
    nx_netif_filter_pkt_to_mbuf_chain(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);

extern struct __kern_packet *
    nx_netif_pkt_to_filter_pkt(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);
extern struct __kern_packet *
    nx_netif_pkt_to_filter_pkt_chain(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);
extern struct __kern_packet *
    nx_netif_filter_pkt_to_pkt_chain(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);

extern struct mbuf *
    nx_netif_pkt_to_mbuf(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);
extern struct __kern_packet *
    nx_netif_pkt_to_pkt(struct nexus_netif_adapter *,
    struct __kern_packet *, uint32_t);

/* chain inspection helpers (tail pointer, counts) */
extern void nx_netif_mbuf_chain_info(struct mbuf *,
    struct mbuf **, uint32_t *, uint32_t *);
extern void nx_netif_pkt_chain_info(struct __kern_packet *,
    struct __kern_packet **, uint32_t *, uint32_t *);
extern int nx_netif_get_max_mtu(ifnet_t, uint32_t *);

extern void nx_netif_mit_config(struct nexus_netif_adapter *,
    boolean_t *, boolean_t *, boolean_t *, boolean_t *);

extern void nx_netif_vp_region_params_adjust(struct nexus_adapter *,
    struct skmem_region_params *);

extern void nx_netif_pktap_output(ifnet_t, int, struct __kern_packet *);
695 
/* RX kring notify callbacks (default/fast paths, plus llw variants) */
extern int netif_rx_notify_default(struct __kern_channel_ring *,
    struct proc *p, uint32_t);
extern int netif_rx_notify_fast(struct __kern_channel_ring *,
    struct proc *p, uint32_t);
extern int netif_llw_rx_notify_default(struct __kern_channel_ring *,
    struct proc *p, uint32_t);
extern int netif_llw_rx_notify_fast(struct __kern_channel_ring *,
    struct proc *p, uint32_t);
extern void netif_receive(struct nexus_netif_adapter *,
    struct __kern_packet *, struct nexus_pkt_stats *);

/* flag bits for netif_transmit() */
#define NETIF_XMIT_FLAG_CHANNEL 0x0001
#define NETIF_XMIT_FLAG_HOST    0x0002
extern void netif_transmit(struct ifnet *, uint32_t);
extern int netif_ring_tx_refill(const kern_channel_ring_t,
    uint32_t, uint32_t, boolean_t, boolean_t *, boolean_t);
/* switch the hardware netif adapter mode (see netif_mode_t above) */
extern void netif_hwna_set_mode(struct nexus_adapter *, netif_mode_t,
    void (*)(struct nexus_adapter *, struct __kern_packet *,
    struct nexus_pkt_stats *));
extern void netif_hwna_clear_mode(struct nexus_adapter *);

/*
 * rxpoll functions
 */
extern errno_t netif_rxpoll_set_params(struct ifnet *,
    struct ifnet_poll_params *, boolean_t locked);
extern void netif_rxpoll_compat_thread_func(void *, wait_result_t);

/*
 * GSO functions
 */
extern int netif_gso_dispatch(struct ifnet *ifp, struct mbuf *m);
extern void netif_gso_init(void);
extern void netif_gso_fini(void);
730 
/*
 * Logical link functions
 */
extern void nx_netif_llink_retain(struct netif_llink *);
extern void nx_netif_llink_release(struct netif_llink **);
extern void nx_netif_qset_retain(struct netif_qset *);
extern void nx_netif_qset_release(struct netif_qset **);
extern void nx_netif_llink_init(struct nx_netif *);
extern void nx_netif_llink_fini(struct nx_netif *);
/* qset lookup by encoded id (see NETIF_QSET_ID_ENCODE) */
extern struct netif_qset * nx_netif_find_qset(struct nx_netif *, uint64_t);
extern struct netif_qset * nx_netif_get_default_qset_noref(struct nx_netif *);
extern int netif_qset_enqueue(struct netif_qset *, struct __kern_packet *,
    struct __kern_packet *, uint32_t, uint32_t, uint32_t *, uint32_t *);
/* default logical link configuration */
extern int nx_netif_default_llink_config(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
extern void nx_netif_llink_config_free(struct nx_netif *);
extern int nx_netif_llink_ext_init_default_queues(struct kern_nexus *);
extern void nx_netif_llink_ext_fini_default_queues(struct kern_nexus *);
extern int nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *,
    bool);
/* add/remove non-default logical links */
extern int nx_netif_llink_add(struct nx_netif *,
    struct kern_nexus_netif_llink_init *, struct netif_llink **);
extern int nx_netif_llink_remove(struct nx_netif *,
    kern_nexus_netif_llink_id_t);

__END_DECLS
757 #endif /* CONFIG_NEXUS_NETIF */
758 #include <skywalk/nexus/netif/nx_netif_compat.h>
759 #include <skywalk/nexus/netif/nx_netif_host.h>
760 #endif /* _SKYWALK_NEXUS_NETIF_H_ */
761