xref: /xnu-8019.80.24/bsd/skywalk/nexus/nexus_adapter.h (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2015-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 /*
30  * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
31  * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
32  *
33  * Redistribution and use in source and binary forms, with or without
34  * modification, are permitted provided that the following conditions
35  * are met:
36  *   1. Redistributions of source code must retain the above copyright
37  *      notice, this list of conditions and the following disclaimer.
38  *   2. Redistributions in binary form must reproduce the above copyright
39  *      notice, this list of conditions and the following disclaimer in the
40  *    documentation and/or other materials provided with the distribution.
41  *
42  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52  * SUCH DAMAGE.
53  */
54 
55 #ifndef _SKYWALK_NEXUS_ADAPTER_H_
56 #define _SKYWALK_NEXUS_ADAPTER_H_
57 
58 #ifdef BSD_KERNEL_PRIVATE
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/os_packet_private.h>
61 
#define NEXUS_ADAPTER_NAMELEN   64      /* length of na_name[] below */

/* forward declarations (pointers to these are used before definition) */
struct chreq;
struct kern_nexus;
struct __kern_channel_ring;
struct nexus_vp_adapter;
struct nexus_upipe_adapter;
69 
/*
 * Nexus adapter types.  Each value identifies the derived adapter
 * structure that embeds struct nexus_adapter as its first field.
 */
typedef enum {
	NA_INVALID = 0,         /* uninitialized */
	NA_PSEUDO,              /* struct nexus_adapter */
#if CONFIG_NEXUS_USER_PIPE
	NA_USER_PIPE,           /* struct nexus_upipe_adapter */
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
	NA_KERNEL_PIPE,         /* struct nexus_kpipe_adapter */
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_MONITOR
	NA_MONITOR,             /* struct nexus_monitor_adapter */
#endif /* CONFIG_NEXUS_MONITOR */
#if CONFIG_NEXUS_NETIF
	NA_NETIF_DEV,           /* struct nexus_netif_adapter (dev) */
	NA_NETIF_HOST,          /* struct nexus_netif_adapter (host) */
	NA_NETIF_COMPAT_DEV,    /* struct nexus_netif_compat_adapter (dev) */
	NA_NETIF_COMPAT_HOST,   /* struct nexus_netif_compat_adapter (host) */
	NA_NETIF_FILTER,        /* struct nexus_netif_adapter (vp) */
	NA_NETIF_VP,            /* struct nexus_netif_adapter (vp) */
#endif /* CONFIG_NEXUS_NETIF */
#if CONFIG_NEXUS_FLOWSWITCH
	NA_FLOWSWITCH_VP,       /* struct nexus_vp_adapter */
#endif /* CONFIG_NEXUS_FLOWSWITCH */
} nexus_adapter_type_t;
94 
/*
 * Commands passed to the adapter's na_special() callback for kernel
 * (special mode) channels; see na_{connect,disconnect,start,stop}_spec().
 */
typedef enum {
	NXSPEC_CMD_CONNECT =    0,
	NXSPEC_CMD_DISCONNECT = 1,
	NXSPEC_CMD_START =      2,
	NXSPEC_CMD_STOP =       3,
} nxspec_cmd_t;
101 
/* modes passed to the adapter's na_activate() callback */
typedef enum {
	NA_ACTIVATE_MODE_ON =   0,      /* activate adapter */
	NA_ACTIVATE_MODE_DEFUNCT,       /* defunct an active adapter */
	NA_ACTIVATE_MODE_OFF,           /* deactivate adapter */
} na_activate_mode_t;
107 
/* packet/byte counters reported via the na_rx() packet-chain callback */
struct nexus_pkt_stats {
	uint64_t nps_pkts;      /* number of packets */
	uint64_t nps_bytes;     /* number of bytes */
};
112 
113 /*
114  * The "struct nexus_adapter" contains all base fields needed to support
115  * Nexus adapter operations.  There are different types of Nexus adapters
116  * (upipe, kpipe, fsw, monitor, vp, ...) so a nexus_adapter is
117  * always the first field in the derived type.
118  */
struct nexus_adapter {
	volatile uint32_t               na_flags;       /* NAF_* flags */
	nexus_adapter_type_t            na_type;        /* nexus type */
	const nexus_meta_type_t         na_md_type;     /* metadata type */
	const nexus_meta_subtype_t      na_md_subtype;  /* metadata subtype */

	nexus_port_t na_nx_port;        /* nexus port of this adapter */

	/*
	 * Number of user-space descriptors using this interface,
	 * which is equal to the number of channel schema objects
	 * in the mapped region.
	 */
	uint32_t na_channels;

	/* number of adapter transmit and receive rings */
	uint32_t na_num_rx_rings;
	uint32_t na_num_tx_rings;

	/* number of ring pairs used by packet allocator */
	uint32_t na_num_allocator_ring_pairs;

	/* number of event rings */
	uint32_t na_num_event_rings;

	uint64_t na_work_ts;            /* when we last worked on it */

	/*
	 * na_{tx,rx,alloc,free,event}_rings are private but allocated
	 * as a contiguous chunk of memory.
	 */
	struct __kern_channel_ring *na_tx_rings; /* array of TX rings. */
	struct __kern_channel_ring *na_rx_rings; /* array of RX rings. */

	/*
	 * na_nx refers to the nexus instance associated with this
	 * nexus adapter; in cases such as the virtual port adapter
	 * of a flow switch nexus used for user pipe, this will
	 * indicate the latter.  The na_nxdom_prov will point to
	 * the actual nexus domain associated with the adapter.
	 */
	struct kern_nexus *na_nx;

	/*
	 * Standard refcount to control the lifetime of the adapter
	 * (it should be equal to the lifetime of the corresponding ifp)
	 */
	volatile uint32_t na_refcount;

	int na_si_users[NR_ALL];         /* # of users per global wait queue */
	struct ch_selinfo na_si[NR_ALL]; /* global wait queues */

	/*
	 * Memory arena.
	 */
	struct skmem_arena *na_arena;

	/*
	 * Number of descriptors in each queue.
	 */
	uint32_t na_num_tx_slots;
	uint32_t na_num_rx_slots;
	uint32_t na_num_allocator_slots;
	uint32_t na_num_event_slots;

	/*
	 * Combined slot count of all rings.
	 * Used for allocating slot_ctx and scratch memory.
	 */
	uint32_t na_total_slots;

	/*
	 * For tracking ring memory allocated by sk_alloc()
	 */
	size_t na_rings_mem_sz;

	/*
	 * Flow advisory (if applicable).
	 */
	const uint32_t na_flowadv_max;  /* max # of flow advisory entries */

	/*
	 * Shareable statistics (if applicable).
	 */
	const nexus_stats_type_t na_stats_type; /* stats type */

	/*
	 * Array of packet allocator and event rings
	 */
	struct __kern_channel_ring *na_alloc_rings;
	struct __kern_channel_ring *na_free_rings;
	struct __kern_channel_ring *na_event_rings;

	uint64_t na_ch_mit_ival;        /* mitigation interval */

	/*
	 * The actual nexus domain associated with the adapter.
	 */
	struct kern_nexus_domain_provider *na_nxdom_prov;

	/*
	 * Array of slot contexts.  This covers enough space to hold
	 * slot contexts of slot_ctx size for all of the TX and RX rings.
	 * It is optional and is requested at na_krings_create() time.
	 */
	struct slot_ctx *na_slot_ctxs;

	/*
	 * Array of packet handles, enough for all slots in the
	 * TX and RX rings of this adapter.  It is automatically
	 * created at na_krings_create() time.
	 */
	kern_packet_t *na_scratch;

	void *na_tailroom; /* space below the rings array (used for leases) */

#if CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF
	/*
	 * Additional information attached to this adapter by other
	 * Skywalk subsystems; currently used by flow switch and netif.
	 */
	void *na_private;

	/*
	 * References to the ifnet and device routines, used by the netif
	 * nexus adapter functions.  A non-NULL na_ifp indicates an io ref
	 * count to the ifnet that needs to be released at adapter detach
	 * time (at which point it will be nullified).
	 */
	struct ifnet *na_ifp;
	/*
	 * lookup table to retrieve the ring corresponding to a service
	 * class. we store the ring index in na_(tx/rx)_rings array.
	 */
	uint8_t na_kring_svc_lut[KPKT_SC_MAX_CLASSES];
#endif /* CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF */

#if CONFIG_NEXUS_USER_PIPE
	uint32_t na_next_pipe;  /* next free slot in the array */
	uint32_t na_max_pipes;  /* size of the array */
	/* array of pipes that have this adapter as a parent */
	struct nexus_upipe_adapter **na_pipes;
#endif /* CONFIG_NEXUS_USER_PIPE */

	char na_name[NEXUS_ADAPTER_NAMELEN];    /* diagnostics */
	uuid_t na_uuid;                         /* adapter instance UUID */

	/*
	 * na_activate() is called to activate, defunct or deactivate a nexus
	 * adapter.  This is invoked by na_bind_channel(), the first time a
	 * channel is opened to the adapter; by na_defunct() when an open
	 * channel gets defunct; as well as by na_unbind_channel() when the
	 * last channel instance opened to the adapter is closed.
	 */
	int (*na_activate)(struct nexus_adapter *, na_activate_mode_t);
	/*
	 * na_special() is an optional callback implemented by nexus types
	 * that support kernel channel (special mode).  This allows the nexus
	 * to override the logic surrounding na_{bind,unbind}_channel() calls.
	 */
	int (*na_special)(struct nexus_adapter *, struct kern_channel *,
	    struct chreq *, nxspec_cmd_t);
	/*
	 * na_txsync() pushes packets to the underlying device;
	 * na_rxsync() collects packets from the underlying device.
	 */
	int (*na_txsync)(struct __kern_channel_ring *kring, struct proc *,
	    uint32_t flags);
	int (*na_rxsync)(struct __kern_channel_ring *kring, struct proc *,
	    uint32_t flags);
#define NA_SYNCF_MONITOR                0x1
#define NA_SYNCF_FORCE_READ             0x2
#define NA_SYNCF_FORCE_RECLAIM          0x4
#define NA_SYNCF_NETIF                  0x8     /* netif normal sync */
#define NA_SYNCF_NETIF_ASYNC            0x10    /* asynchronous doorbell */
#define NA_SYNCF_NETIF_DOORBELL         0x20    /* doorbell request */
#define NA_SYNCF_NETIF_IFSTART          0x40    /* in if_start context */
#define NA_SYNCF_FORCE_UPP_SYNC         0x80    /* force upp sync alloc/free */
#define NA_SYNCF_UPP_PURGE              0x100   /* purge upp alloc pool */
#define NA_SYNCF_SYNC_ONLY              0x200   /* sync only, no doorbell */

	/*
	 * na_notify() is used to act after data have become available,
	 * or the state of the ring has changed.  Depending on the nexus
	 * type, this may involve triggering an event and/or performing
	 * additional work such as calling na_txsync().
	 */
	int (*na_notify)(struct __kern_channel_ring *kring, struct proc *,
	    uint32_t flags);
#define NA_NOTEF_MONITOR        0x1
#define NA_NOTEF_IN_KEVENT      0x2
#define NA_NOTEF_CAN_SLEEP      0x4     /* OK to block in kr_enter() */
#define NA_NOTEF_NETIF          0x8     /* same as NA_SYNCF_NETIF */
#define NA_NOTEF_PUSH           0x100   /* need immediate attention */

	/*
	 * na_channel_event_notify() is used to send events on the user channel.
	 */
	int (*na_channel_event_notify)(struct nexus_adapter *,
	    struct __kern_packet *, struct __kern_channel_event *, uint16_t);
	/*
	 * na_config() is an optional callback for returning nexus-specific
	 * configuration information.  This is implemented by nexus types
	 * that handle dynamically changing configs.
	 */
	int (*na_config)(struct nexus_adapter *,
	    uint32_t *txr, uint32_t *txd, uint32_t *rxr, uint32_t *rxd);
	/*
	 * na_krings_create() creates and initializes the __kern_channel_ring
	 * arrays, as well as initializing the callback routines within;
	 * na_krings_delete() cleans up and destroys the kernel rings.
	 */
	int (*na_krings_create)(struct nexus_adapter *, struct kern_channel *);
	void (*na_krings_delete)(struct nexus_adapter *, struct kern_channel *,
	    boolean_t);
	/*
	 * na_dtor() is the destructor callback that is invoked when the
	 * last reference to the nexus adapter has been released.
	 */
	void (*na_dtor)(struct nexus_adapter *);
	/*
	 * na_free() is the free callback that gets invoked after the
	 * adapter has been destroyed.
	 */
	void (*na_free)(struct nexus_adapter *);

	/*
	 * packet-chain-based callbacks for passing packets up the stack.
	 * The inject variant is used by filters for injecting packets
	 * into the rx path from user space.
	 */
	void (*na_rx)(struct nexus_adapter *,
	    struct __kern_packet *, struct nexus_pkt_stats *);

	/*
	 * Linkage to list of to-be-destroyed nexus adapters.
	 */
	TAILQ_ENTRY(nexus_adapter) na_destroyer_link;
};
358 
/* valid values for na_flags */
#define NAF_ACTIVE              0x1     /* skywalk is active */
#define NAF_HOST_ONLY           0x2     /* host adapter (no device rings) */
#define NAF_SPEC_INIT           0x4     /* na_special() initialized */
#define NAF_NATIVE              0x8     /* skywalk native netif adapter */
#define NAF_MEM_NO_INIT         0x10    /* na_kr_setup() skipped */
#define NAF_SLOT_CONTEXT        0x20    /* na_slot_ctxs is valid */
#define NAF_USER_PKT_POOL       0x40    /* na supports user packet pool */
#define NAF_TX_MITIGATION       0x80    /* na supports TX event mitigation */
#define NAF_RX_MITIGATION       0x100   /* na supports RX event mitigation */
#define NAF_DEFUNCT             0x200   /* no longer in service */
#define NAF_MEM_LOANED          0x400   /* arena owned by another adapter */
#define NAF_REJECT              0x800   /* not accepting channel activities */
#define NAF_EVENT_RING          0x1000  /* NA is providing event ring */
#define NAF_CHANNEL_EVENT_ATTACHED 0x2000 /* kevent registered for ch events */
#define NAF_ASYNC_DTOR          0x4000  /* async destroy */
#define NAF_VIRTUAL_DEVICE      0x8000  /* netif adapter for virtual device */
#define NAF_MODE_FSW            0x10000 /* NA is owned by fsw */
#define NAF_MODE_LLW            0x20000 /* NA is owned by llw */
#define NAF_LOW_LATENCY         0x40000 /* Low latency NA */
#define NAF_DRAINING            0x80000 /* NA is being drained */
/*
 * defunct allowed flag.
 * Currently used only by the parent nexus adapter of user-pipe nexus
 * to indicate that defuncting is allowed on the channels.
 *
 * NOTE: previously defined as 0x80000, which collided with NAF_DRAINING;
 * each flag must occupy its own bit.
 */
#define NAF_DEFUNCT_OK          0x100000
#define NAF_KERNEL_ONLY (1U << 31) /* used internally, not usable by userland */

#define NAF_BITS                                                        \
	"\020\01ACTIVE\02HOST_ONLY\03SPEC_INIT\04NATIVE"                \
	"\05MEM_NO_INIT\06SLOT_CONTEXT\07USER_PKT_POOL"                 \
	"\010TX_MITIGATION\011RX_MITIGATION\012DEFUNCT\013MEM_LOANED"   \
	"\014REJECT\015EVENT_RING\016EVENT_ATTACH\017ASYNC_DTOR"        \
	"\020VIRTUAL\021MODE_FSW\022MODE_LLW\023LOW_LATENCY\024DRAINING" \
	"\040KERNEL_ONLY"
395 
/*
 * Invoke the adapter's na_free() callback.  The adapter must not be
 * linked on the destroyer list at this point (both TAILQ linkage
 * pointers are asserted NULL before freeing).
 */
#define NA_FREE(na) do {                                                \
	ASSERT((na)->na_destroyer_link.tqe_next == NULL);               \
	ASSERT((na)->na_destroyer_link.tqe_prev == NULL);               \
	(na)->na_free(na);                                              \
} while (0)
401 
/*
 * NA returns a pointer to the struct nexus_adapter from the ifp's netif nexus.
 */
#define NA(_ifp)                ((_ifp)->if_na)
406 
407 __attribute__((always_inline))
408 static inline uint32_t
na_get_nslots(const struct nexus_adapter * na,enum txrx t)409 na_get_nslots(const struct nexus_adapter *na, enum txrx t)
410 {
411 	switch (t) {
412 	case NR_TX:
413 		return na->na_num_tx_slots;
414 	case NR_RX:
415 		return na->na_num_rx_slots;
416 	case NR_A:
417 	case NR_F:
418 		return na->na_num_allocator_slots;
419 	case NR_EV:
420 		return na->na_num_event_slots;
421 	default:
422 		VERIFY(0);
423 		/* NOTREACHED */
424 		__builtin_unreachable();
425 	}
426 }
427 
428 __attribute__((always_inline))
429 static inline void
na_set_nslots(struct nexus_adapter * na,enum txrx t,uint32_t v)430 na_set_nslots(struct nexus_adapter *na, enum txrx t, uint32_t v)
431 {
432 	switch (t) {
433 	case NR_TX:
434 		na->na_num_tx_slots = v;
435 		break;
436 	case NR_RX:
437 		na->na_num_rx_slots = v;
438 		break;
439 	case NR_A:
440 	case NR_F:
441 		na->na_num_allocator_slots = v;
442 		break;
443 	case NR_EV:
444 		na->na_num_event_slots = v;
445 		break;
446 	default:
447 		VERIFY(0);
448 		/* NOTREACHED */
449 		__builtin_unreachable();
450 	}
451 }
452 
453 __attribute__((always_inline))
454 static inline uint32_t
na_get_nrings(const struct nexus_adapter * na,enum txrx t)455 na_get_nrings(const struct nexus_adapter *na, enum txrx t)
456 {
457 	switch (t) {
458 	case NR_TX:
459 		return na->na_num_tx_rings;
460 	case NR_RX:
461 		return na->na_num_rx_rings;
462 	case NR_A:
463 	case NR_F:
464 		return na->na_num_allocator_ring_pairs;
465 	case NR_EV:
466 		return na->na_num_event_rings;
467 	default:
468 		VERIFY(0);
469 		/* NOTREACHED */
470 		__builtin_unreachable();
471 	}
472 }
473 
474 __attribute__((always_inline))
475 static inline void
na_set_nrings(struct nexus_adapter * na,enum txrx t,uint32_t v)476 na_set_nrings(struct nexus_adapter *na, enum txrx t, uint32_t v)
477 {
478 	switch (t) {
479 	case NR_TX:
480 		na->na_num_tx_rings = v;
481 		break;
482 	case NR_RX:
483 		na->na_num_rx_rings = v;
484 		break;
485 	case NR_A:
486 	case NR_F:
487 		na->na_num_allocator_ring_pairs = v;
488 		break;
489 	case NR_EV:
490 		na->na_num_event_rings = v;
491 		break;
492 	default:
493 		VERIFY(0);
494 		/* NOTREACHED */
495 		__builtin_unreachable();
496 	}
497 }
498 
499 __attribute__((always_inline))
500 static inline struct __kern_channel_ring *
NAKR(struct nexus_adapter * na,enum txrx t)501 NAKR(struct nexus_adapter *na, enum txrx t)
502 {
503 	switch (t) {
504 	case NR_TX:
505 		return na->na_tx_rings;
506 	case NR_RX:
507 		return na->na_rx_rings;
508 	case NR_A:
509 		return na->na_alloc_rings;
510 	case NR_F:
511 		return na->na_free_rings;
512 	case NR_EV:
513 		return na->na_event_rings;
514 	default:
515 		VERIFY(0);
516 		/* NOTREACHED */
517 		__builtin_unreachable();
518 	}
519 }
520 
/*
 * If the adapter is owned by the kernel, neither another flow switch nor user
 * can use it; if the adapter is owned by a user, only users can share it.
 * Evaluation must be done under SK_LOCK().
 */
#define NA_KERNEL_ONLY(_na)     (((_na)->na_flags & NAF_KERNEL_ONLY) != 0)
#define NA_OWNED_BY_ANY(_na) \
	(NA_KERNEL_ONLY(_na) || ((_na)->na_channels > 0))
#define NA_OWNED_BY_FSW(_na) \
	(((_na)->na_flags & NAF_MODE_FSW) != 0)
#define NA_OWNED_BY_LLW(_na) \
	(((_na)->na_flags & NAF_MODE_LLW) != 0)

/*
 * Whether the adapter has been activated via na_activate() call.
 */
#define NA_IS_ACTIVE(_na)       (((_na)->na_flags & NAF_ACTIVE) != 0)
/* Whether the adapter is defunct (no longer in service). */
#define NA_IS_DEFUNCT(_na)       (((_na)->na_flags & NAF_DEFUNCT) != 0)
/* Whether a kevent has been registered for channel events on this NA. */
#define NA_CHANNEL_EVENT_ATTACHED(_na)   \
    (((_na)->na_flags & NAF_CHANNEL_EVENT_ATTACHED) != 0)
541 /*
542  * Whether channel activities are rejected by the adapter.  This takes the
543  * nexus adapter argument separately, as ch->ch_na may not be set yet.
544  */
545 __attribute__((always_inline))
546 static inline boolean_t
na_reject_channel(struct kern_channel * ch,struct nexus_adapter * na)547 na_reject_channel(struct kern_channel *ch, struct nexus_adapter *na)
548 {
549 	boolean_t reject;
550 
551 	ASSERT(ch->ch_na == NULL || ch->ch_na == na);
552 
553 	if ((na->na_flags & NAF_REJECT) || NX_REJECT_ACT(na->na_nx)) {
554 		/* set trapdoor NAF_REJECT flag */
555 		if (!(na->na_flags & NAF_REJECT)) {
556 			SK_ERR("%s(%d) marked as non-permissive",
557 			    ch->ch_name, ch->ch_pid);
558 			atomic_bitset_32(&na->na_flags, NAF_REJECT);
559 			ch_deactivate(ch);
560 		}
561 		reject = TRUE;
562 	} else {
563 		reject = FALSE;
564 	}
565 
566 	return reject;
567 }
568 
569 #if SK_LOG
570 __attribute__((always_inline))
571 static inline const char *
na_activate_mode2str(na_activate_mode_t m)572 na_activate_mode2str(na_activate_mode_t m)
573 {
574 	switch (m) {
575 	case NA_ACTIVATE_MODE_ON:
576 		return "on";
577 	case NA_ACTIVATE_MODE_DEFUNCT:
578 		return "defunct";
579 	case NA_ACTIVATE_MODE_OFF:
580 		return "off";
581 	default:
582 		VERIFY(0);
583 		/* NOTREACHED */
584 		__builtin_unreachable();
585 	}
586 }
587 #endif /* SK_LOG */
588 
589 __BEGIN_DECLS
590 extern void na_init(void);
591 extern void na_fini(void);
592 
593 extern int na_bind_channel(struct nexus_adapter *na, struct kern_channel *ch,
594     struct chreq *);
595 extern void na_unbind_channel(struct kern_channel *ch);
596 
597 /*
598  * Common routine for all functions that create a nexus adapter. It performs
599  * two main tasks:
600  * - if the na points to an ifp, mark the ifp as Skywalk capable
601  *   using na as its native adapter;
602  * - provide defaults for the setup callbacks and the memory allocator
603  */
604 extern void na_attach_common(struct nexus_adapter *,
605     struct kern_nexus *, struct kern_nexus_domain_provider *);
/*
 * Update the ring parameters (number and size of tx and rx rings).
 * It calls the na_config callback, if available.
 */
610 extern int na_update_config(struct nexus_adapter *na);
611 
612 extern int na_rings_mem_setup(struct nexus_adapter *, uint32_t, boolean_t,
613     struct kern_channel *);
614 extern void na_rings_mem_teardown(struct nexus_adapter *,
615     struct kern_channel *, boolean_t);
616 extern void na_ch_rings_defunct(struct kern_channel *, struct proc *);
617 
618 /* convenience wrappers for na_set_all_rings, used in drivers */
619 extern void na_disable_all_rings(struct nexus_adapter *);
620 extern void na_enable_all_rings(struct nexus_adapter *);
621 extern void na_lock_all_rings(struct nexus_adapter *);
622 extern void na_unlock_all_rings(struct nexus_adapter *);
623 extern int na_interp_ringid(struct nexus_adapter *, ring_id_t, ring_set_t,
624     uint32_t[NR_TXRX], uint32_t[NR_TXRX]);
625 extern struct kern_pbufpool *na_kr_get_pp(struct nexus_adapter *, enum txrx);
626 
627 extern int na_find(struct kern_channel *, struct kern_nexus *,
628     struct chreq *, struct kern_channel *, struct nxbind *,
629     struct proc *, struct nexus_adapter **, boolean_t);
630 extern void na_retain_locked(struct nexus_adapter *na);
631 extern int na_release_locked(struct nexus_adapter *na);
632 
633 extern int na_connect(struct kern_nexus *, struct kern_channel *,
634     struct chreq *, struct kern_channel *, struct nxbind *, struct proc *);
635 extern void na_disconnect(struct kern_nexus *, struct kern_channel *);
636 extern void na_defunct(struct kern_nexus *, struct kern_channel *,
637     struct nexus_adapter *, boolean_t);
638 extern int na_connect_spec(struct kern_nexus *, struct kern_channel *,
639     struct chreq *, struct proc *);
640 extern void na_disconnect_spec(struct kern_nexus *, struct kern_channel *);
641 extern void na_start_spec(struct kern_nexus *, struct kern_channel *);
642 extern void na_stop_spec(struct kern_nexus *, struct kern_channel *);
643 
644 extern int na_pseudo_create(struct kern_nexus *, struct chreq *,
645     struct nexus_adapter **);
646 extern void na_kr_drop(struct nexus_adapter *, boolean_t);
647 extern void na_flowadv_entry_alloc(const struct nexus_adapter *, uuid_t,
648     const flowadv_idx_t);
649 extern void na_flowadv_entry_free(const struct nexus_adapter *, uuid_t,
650     const flowadv_idx_t);
651 extern bool na_flowadv_set(const struct nexus_adapter *,
652     const flowadv_idx_t, const flowadv_token_t);
653 extern boolean_t na_flowadv_clear(const struct kern_channel *,
654     const flowadv_idx_t, const flowadv_token_t);
655 extern void na_flowadv_event(struct __kern_channel_ring *);
656 extern void na_post_event(struct __kern_channel_ring *, boolean_t, boolean_t,
657     boolean_t, uint32_t);
658 
659 extern void na_drain(struct nexus_adapter *, boolean_t);
660 
661 __END_DECLS
662 #endif /* BSD_KERNEL_PRIVATE */
663 #endif /* _SKYWALK_NEXUS_ADAPTER_H_ */
664