1 /*
2 * Copyright (c) 2015-2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 2011-2014 Matteo Landi, Luigi Rizzo. All rights reserved.
31 * Copyright (C) 2013-2014 Universita` di Pisa. All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
45 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
46 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
47 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
48 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
49 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
50 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
51 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
52 * SUCH DAMAGE.
53 */
54
55 #ifndef _SKYWALK_NEXUS_ADAPTER_H_
56 #define _SKYWALK_NEXUS_ADAPTER_H_
57
58 #ifdef BSD_KERNEL_PRIVATE
59 #include <skywalk/os_skywalk_private.h>
60 #include <skywalk/os_packet_private.h>
61
62 #define NEXUS_ADAPTER_NAMELEN 64
63
64 struct chreq;
65 struct kern_nexus;
66 struct __kern_channel_ring;
67 struct nexus_vp_adapter;
68 struct nexus_upipe_adapter;
69
/*
 * Concrete adapter type; stored in na_type and identifies which
 * derived structure embeds the nexus_adapter as its first field.
 */
typedef enum {
	NA_INVALID = 0,         /* uninitialized */
	NA_PSEUDO,              /* struct nexus_adapter */
#if CONFIG_NEXUS_USER_PIPE
	NA_USER_PIPE,           /* struct nexus_upipe_adapter */
#endif /* CONFIG_NEXUS_USER_PIPE */
#if CONFIG_NEXUS_KERNEL_PIPE
	NA_KERNEL_PIPE,         /* struct nexus_kpipe_adapter */
#endif /* CONFIG_NEXUS_KERNEL_PIPE */
#if CONFIG_NEXUS_NETIF
	NA_NETIF_DEV,           /* struct nexus_netif_adapter (dev) */
	NA_NETIF_HOST,          /* struct nexus_netif_adapter (host) */
	NA_NETIF_COMPAT_DEV,    /* struct nexus_netif_compat_adapter (dev) */
	NA_NETIF_COMPAT_HOST,   /* struct nexus_netif_compat_adapter (host) */
	NA_NETIF_FILTER,        /* struct nexus_netif_adapter (vp) */
	NA_NETIF_VP,            /* struct nexus_netif_adapter (vp) */
#endif /* CONFIG_NEXUS_NETIF */
#if CONFIG_NEXUS_FLOWSWITCH
	NA_FLOWSWITCH_VP,       /* struct nexus_vp_adapter */
#endif /* CONFIG_NEXUS_FLOWSWITCH */
} nexus_adapter_type_t;
91
/*
 * Commands passed to the na_special() callback for nexus types that
 * support kernel channels (special mode); see also na_{connect,disconnect,
 * start,stop}_spec().
 */
typedef enum {
	NXSPEC_CMD_CONNECT = 0,
	NXSPEC_CMD_DISCONNECT = 1,
	NXSPEC_CMD_START = 2,
	NXSPEC_CMD_STOP = 3,
} nxspec_cmd_t;
98
/* Modes passed to the na_activate() callback. */
typedef enum {
	NA_ACTIVATE_MODE_ON = 0,        /* activate adapter */
	NA_ACTIVATE_MODE_DEFUNCT,       /* defunct an active adapter */
	NA_ACTIVATE_MODE_OFF,           /* deactivate adapter */
} na_activate_mode_t;
104
/* Packet/byte counters; passed to the na_rx() chain callback. */
struct nexus_pkt_stats {
	uint64_t nps_pkts;      /* packet count */
	uint64_t nps_bytes;     /* byte count */
};
109
110 /*
111 * The "struct nexus_adapter" contains all base fields needed to support
112 * Nexus adapter operations. There are different types of Nexus adapters
113 * (upipe, kpipe, fsw, vp, ...) so a nexus_adapter is
114 * always the first field in the derived type.
115 */
116 struct nexus_adapter {
117 volatile uint32_t na_flags; /* NAF_* flags */
118 nexus_adapter_type_t na_type; /* nexus type */
119 const nexus_meta_type_t na_md_type; /* metadata type */
120 const nexus_meta_subtype_t na_md_subtype; /* metadata subtype */
121
122 nexus_port_t na_nx_port;
123
124 /*
125 * Number of user-space descriptors using this interface,
126 * which is equal to the number of channel schema objects
127 * in the mapped region.
128 */
129 uint32_t na_channels;
130
131 /* number of adapter transmit and receive rings */
132 uint32_t na_num_rx_rings;
133 uint32_t na_num_tx_rings;
134
135 /* number of ring pairs used by packet allocator */
136 uint32_t na_num_allocator_ring_pairs;
137
138 /* number of event rings */
139 uint32_t na_num_event_rings;
140
141 /* number of large buffer alloc rings */
142 uint32_t na_num_large_buf_alloc_rings;
143
144 /* XXX -fbounds-safety: duplicate counts to avoid self-assignments */
145 uint32_t na_rx_rings_cnt;
146 uint32_t na_tx_rings_cnt;
147 uint32_t na_alloc_free_rings_cnt;
148 uint32_t na_event_rings_cnt;
149 uint32_t na_large_buf_alloc_rings_cnt;
150 uint32_t na_slot_ctxs_cnt;
151 uint32_t na_scratch_cnt;
152 uint32_t na_all_rings_cnt;
153
154 uint64_t na_work_ts; /* when we last worked on it */
155
156 /*
157 * na_{tx,rx,alloc,free,event}_rings are private but allocated
158 * as a contiguous chunk of memory.
159 */
160 struct __kern_channel_ring *__counted_by(na_tx_rings_cnt) na_tx_rings; /* array of TX rings. */
161 struct __kern_channel_ring *__counted_by(na_rx_rings_cnt) na_rx_rings; /* array of RX rings. */
162 struct __kern_channel_ring *__counted_by(na_all_rings_cnt) na_all_rings;
163
164 /*
165 * na_nx refers to the nexus instance associated with this
166 * nexus adapter; in cases such as the virtual port adapter
167 * of a flow switch nexus used for user pipe, this will
168 * indicate the latter. The na_nxdom_prov will point to
169 * the actual nexus domain associated with the adapter.
170 */
171 struct kern_nexus *na_nx;
172
173 /*
174 * Standard refcount to control the lifetime of the adapter
175 * (it should be equal to the lifetime of the corresponding ifp)
176 */
177 volatile uint32_t na_refcount;
178
179 int na_si_users[NR_ALL]; /* # of users per global wait queue */
180 struct ch_selinfo na_si[NR_ALL]; /* global wait queues */
181
182 /*
183 * Memory arena.
184 */
185 struct skmem_arena *na_arena;
186
187 /*
188 * Number of descriptors in each queue.
189 */
190 uint32_t na_num_tx_slots;
191 uint32_t na_num_rx_slots;
192 uint32_t na_num_allocator_slots;
193 uint32_t na_num_event_slots;
194 uint32_t na_num_large_buf_alloc_slots;
195
196 /*
197 * Combined slot count of all rings.
198 * Used for allocating slot_ctx and scratch memory.
199 */
200 uint32_t na_total_slots;
201
202 /*
203 * Flow advisory (if applicable).
204 */
205 const uint32_t na_flowadv_max; /* max # of flow advisory entries */
206
207 /*
208 * Shareable statistics (if applicable).
209 */
210 const nexus_stats_type_t na_stats_type; /* stats type */
211
212 /*
213 * Array of packet allocator and event rings
214 */
215 struct __kern_channel_ring *__counted_by(na_alloc_free_rings_cnt)na_alloc_rings;
216 struct __kern_channel_ring *__counted_by(na_alloc_free_rings_cnt)na_free_rings;
217 struct __kern_channel_ring *__counted_by(na_event_rings_cnt)na_event_rings;
218 struct __kern_channel_ring *__counted_by(na_large_buf_alloc_rings_cnt)na_large_buf_alloc_rings;
219
220 uint64_t na_ch_mit_ival; /* mitigation interval */
221
222 /*
223 * The actual nexus domain associated with the adapter.
224 */
225 struct kern_nexus_domain_provider *na_nxdom_prov;
226
227 /*
228 * Array of slot contexts. This covers enough space to hold
229 * slot contexts of slot_ctx size for all of the TX and RX rings,
230 * It is optional and is requested at na_krings_create() time.
231 */
232 struct slot_ctx *__counted_by(na_slot_ctxs_cnt)na_slot_ctxs;
233
234 /*
235 * Array of packet handlers, enough for all slots in the
236 * TX and RX rings of this adapter. It is automatically
237 * created at na_krings_create() time.
238 */
239 kern_packet_t *__counted_by(na_scratch_cnt)na_scratch;
240
241 struct __kern_channel_ring *__counted_by(0) na_tail; /* pointer past the last ring */
242
243 #if CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF
244 /*
245 * Additional information attached to this adapter by other
246 * Skywalk subsystems; currently used by flow switch and netif.
247 */
248 void *na_private;
249
250 /*
251 * References to the ifnet and device routines, used by the netif
252 * nexus adapter functions. A non-NULL na_ifp indicates an io ref
253 * count to the ifnet that needs to be released at adapter detach
254 * time (at which point it will be nullifed).
255 */
256 struct ifnet *na_ifp;
257 /*
258 * lookup table to retrieve the ring corresponding to a service
259 * class. we store the ring index in na_(tx/rx)_rings array.
260 */
261 uint8_t na_kring_svc_lut[KPKT_SC_MAX_CLASSES];
262 #endif /* CONFIG_NEXUS_FLOWSWITCH || CONFIG_NEXUS_NETIF */
263
264 #if CONFIG_NEXUS_USER_PIPE
265 uint32_t na_next_pipe; /* next free slot in the array */
266 uint32_t na_max_pipes; /* size of the array */
267 /* array of pipes that have this adapter as a parent */
268 struct nexus_upipe_adapter **__counted_by(na_max_pipes) na_pipes;
269 #endif /* CONFIG_NEXUS_USER_PIPE */
270
271 char na_name[NEXUS_ADAPTER_NAMELEN]; /* diagnostics */
272 uuid_t na_uuid;
273
274 /*
275 * na_activate() is called to activate, defunct or deactivate a nexus
276 * adapter. This is invoked by na_bind_channel(), the first time a
277 * channel is opened to the adapter; by na_defunct() when an open
278 * channel gets defunct; as well as by na_unbind_channel() when the
279 * last channel instance opened to the adapter is closed.
280 */
281 int (*na_activate)(struct nexus_adapter *, na_activate_mode_t);
282 /*
283 * na_special() is an optional callback implemented by nexus types
284 * that support kernel channel (special mode). This allows the nexus
285 * to override the logic surrounding na_{bind,unbind}_channel() calls.
286 */
287 int (*na_special)(struct nexus_adapter *, struct kern_channel *,
288 struct chreq *, nxspec_cmd_t);
289 /*
290 * na_txsync() pushes packets to the underlying device;
291 * na_rxsync() collects packets from the underlying device.
292 */
293 int (*na_txsync)(struct __kern_channel_ring *kring, struct proc *,
294 uint32_t flags);
295 int (*na_rxsync)(struct __kern_channel_ring *kring, struct proc *,
296 uint32_t flags);
297 #define NA_SYNCF_UNUSED_1 0x1
298 #define NA_SYNCF_FORCE_READ 0x2
299 #define NA_SYNCF_FORCE_RECLAIM 0x4
300 #define NA_SYNCF_NETIF 0x8 /* netif normal sync */
301 #define NA_SYNCF_NETIF_ASYNC 0x10 /* asynchronous doorbell */
302 #define NA_SYNCF_NETIF_DOORBELL 0x20 /* doorbell request */
303 #define NA_SYNCF_NETIF_IFSTART 0x40 /* in if_start context */
304 #define NA_SYNCF_FORCE_UPP_SYNC 0x80 /* force upp sync alloc/free */
305 #define NA_SYNCF_UPP_PURGE 0x100 /* purge upp alloc pool */
306 #define NA_SYNCF_SYNC_ONLY 0x200 /* sync only, no doorbell */
307
308 /*
309 * na_notify() is used to act after data have become available,
310 * or the state of the ring has changed. Depending on the nexus
311 * type, this may involve triggering an event and/or performing
312 * additional work such as calling na_txsync().
313 */
314 int (*na_notify)(struct __kern_channel_ring *kring, struct proc *,
315 uint32_t flags);
316 #define NA_NOTEF_UNUSED_1 0x1
317 #define NA_NOTEF_IN_KEVENT 0x2
318 #define NA_NOTEF_CAN_SLEEP 0x4 /* OK to block in kr_enter() */
319 #define NA_NOTEF_NETIF 0x8 /* same as NA_SYNCF_NETIF */
320 #define NA_NOTEF_PUSH 0x100 /* need immediate attention */
321
322 /*
323 * na_channel_event_notify() is used to send events on the user channel.
324 */
325 int (*na_channel_event_notify)(struct nexus_adapter *,
326 struct __kern_channel_event *, uint16_t);
327 /*
328 * na_config() is an optional callback for returning nexus-specific
329 * configuration information. This is implemented by nexus types
330 * that handle dynamically changing configs.
331 */
332 int (*na_config)(struct nexus_adapter *,
333 uint32_t *txr, uint32_t *txd, uint32_t *rxr, uint32_t *rxd);
334 /*
335 * na_krings_create() creates and initializes the __kern_channel_ring
336 * arrays, as well as initializing the callback routines within;
337 * na_krings_delete() cleans up and destroys the kernel rings.
338 */
339 int (*na_krings_create)(struct nexus_adapter *, struct kern_channel *);
340 void (*na_krings_delete)(struct nexus_adapter *, struct kern_channel *,
341 boolean_t);
342 /*
343 * na_dtor() is the destructor callback that is invoked when the
344 * last reference to the nexus adapter has been released.
345 */
346 void (*na_dtor)(struct nexus_adapter *);
347 /*
348 * na_free() is the free callback that gets invoked after the
349 * adapter has been destroyed.
350 */
351 void (*na_free)(struct nexus_adapter *);
352
353 /*
354 * packet-chain-based callbacks for passing packets up the stack.
355 * The inject variant is used by filters for rejecting packets
356 * into the rx path from user space.
357 */
358 void (*na_rx)(struct nexus_adapter *,
359 struct __kern_packet *, struct nexus_pkt_stats *);
360 };
361
/* valid values for na_flags */
#define NAF_ACTIVE              0x1     /* skywalk is active */
#define NAF_HOST_ONLY           0x2     /* host adapter (no device rings) */
#define NAF_SPEC_INIT           0x4     /* na_special() initialized */
#define NAF_NATIVE              0x8     /* skywalk native netif adapter */
#define NAF_MEM_NO_INIT         0x10    /* na_kr_setup() skipped */
#define NAF_SLOT_CONTEXT        0x20    /* na_slot_ctxs is valid */
#define NAF_USER_PKT_POOL       0x40    /* na supports user packet pool */
#define NAF_TX_MITIGATION       0x80    /* na supports TX event mitigation */
#define NAF_RX_MITIGATION       0x100   /* na supports RX event mitigation */
#define NAF_DEFUNCT             0x200   /* no longer in service */
#define NAF_MEM_LOANED          0x400   /* arena owned by another adapter */
#define NAF_REJECT              0x800   /* not accepting channel activities */
#define NAF_EVENT_RING          0x1000  /* NA is providing event ring */
#define NAF_CHANNEL_EVENT_ATTACHED 0x2000 /* kevent registered for ch events */
/* NOTE: 0x4000 is currently unassigned (see also gap in NAF_BITS below) */
#define NAF_VIRTUAL_DEVICE      0x8000  /* netif adapter for virtual device */
#define NAF_MODE_FSW            0x10000 /* NA is owned by fsw */
#define NAF_MODE_LLW            0x20000 /* NA is owned by llw */
#define NAF_LOW_LATENCY         0x40000 /* Low latency NA */
#define NAF_DRAINING            0x80000 /* NA is being drained */
/*
 * defunct allowed flag.
 * Currently used only by the parent nexus adapter of user-pipe nexus
 * to indicate that defuncting is allowed on the channels.
 */
#define NAF_DEFUNCT_OK          0x100000
#define NAF_KERNEL_ONLY (1U << 31) /* used internally, not usable by userland */

/* bit-string format for printing na_flags (%b-style) */
#define NAF_BITS \
	"\020\01ACTIVE\02HOST_ONLY\03SPEC_INIT\04NATIVE" \
	"\05MEM_NO_INIT\06SLOT_CONTEXT\07USER_PKT_POOL" \
	"\010TX_MITIGATION\011RX_MITIGATION\012DEFUNCT\013MEM_LOANED" \
	"\014REJECT\015EVENT_RING\016EVENT_ATTACH" \
	"\020VIRTUAL\021MODE_FSW\022MODE_LLW\023LOW_LATENCY\024DRAINING" \
	"\025DEFUNCT_OK\040KERNEL_ONLY"

/* invoke the adapter's na_free() callback */
#define NA_FREE(na) do {                                                \
	(na)->na_free(na);                                              \
} while (0)

/*
 * NA returns a pointer to the struct nexus_adapter from the ifp's netif nexus.
 */
#define NA(_ifp)                ((_ifp)->if_na)
406
407 __attribute__((always_inline))
408 static inline uint32_t
na_get_nslots(const struct nexus_adapter * na,enum txrx t)409 na_get_nslots(const struct nexus_adapter *na, enum txrx t)
410 {
411 switch (t) {
412 case NR_TX:
413 return na->na_num_tx_slots;
414 case NR_RX:
415 return na->na_num_rx_slots;
416 case NR_A:
417 case NR_F:
418 return na->na_num_allocator_slots;
419 case NR_EV:
420 return na->na_num_event_slots;
421 case NR_LBA:
422 return na->na_num_large_buf_alloc_slots;
423 default:
424 VERIFY(0);
425 /* NOTREACHED */
426 __builtin_unreachable();
427 }
428 }
429
430 __attribute__((always_inline))
431 static inline void
na_set_nslots(struct nexus_adapter * na,enum txrx t,uint32_t v)432 na_set_nslots(struct nexus_adapter *na, enum txrx t, uint32_t v)
433 {
434 switch (t) {
435 case NR_TX:
436 na->na_num_tx_slots = v;
437 break;
438 case NR_RX:
439 na->na_num_rx_slots = v;
440 break;
441 case NR_A:
442 case NR_F:
443 na->na_num_allocator_slots = v;
444 break;
445 case NR_EV:
446 na->na_num_event_slots = v;
447 break;
448 case NR_LBA:
449 na->na_num_large_buf_alloc_slots = v;
450 break;
451 default:
452 VERIFY(0);
453 /* NOTREACHED */
454 __builtin_unreachable();
455 }
456 }
457
458 __attribute__((always_inline))
459 static inline uint32_t
na_get_nrings(const struct nexus_adapter * na,enum txrx t)460 na_get_nrings(const struct nexus_adapter *na, enum txrx t)
461 {
462 switch (t) {
463 case NR_TX:
464 return na->na_num_tx_rings;
465 case NR_RX:
466 return na->na_num_rx_rings;
467 case NR_A:
468 case NR_F:
469 return na->na_num_allocator_ring_pairs;
470 case NR_EV:
471 return na->na_num_event_rings;
472 case NR_LBA:
473 return na->na_num_large_buf_alloc_rings;
474 default:
475 VERIFY(0);
476 /* NOTREACHED */
477 __builtin_unreachable();
478 }
479 }
480
481 __attribute__((always_inline))
482 static inline void
na_set_nrings(struct nexus_adapter * na,enum txrx t,uint32_t v)483 na_set_nrings(struct nexus_adapter *na, enum txrx t, uint32_t v)
484 {
485 switch (t) {
486 case NR_TX:
487 na->na_num_tx_rings = v;
488 break;
489 case NR_RX:
490 na->na_num_rx_rings = v;
491 break;
492 case NR_A:
493 case NR_F:
494 na->na_num_allocator_ring_pairs = v;
495 break;
496 case NR_EV:
497 na->na_num_event_rings = v;
498 break;
499 case NR_LBA:
500 /* we only support one ring for now */
501 ASSERT(v <= 1);
502 na->na_num_large_buf_alloc_rings = v;
503 break;
504 default:
505 VERIFY(0);
506 /* NOTREACHED */
507 __builtin_unreachable();
508 }
509 }
510
511 __attribute__((always_inline))
512 static inline struct __kern_channel_ring *__header_indexable
NAKR(struct nexus_adapter * na,enum txrx t)513 NAKR(struct nexus_adapter *na, enum txrx t)
514 {
515 switch (t) {
516 case NR_TX:
517 return na->na_tx_rings;
518 case NR_RX:
519 return na->na_rx_rings;
520 case NR_A:
521 return na->na_alloc_rings;
522 case NR_F:
523 return na->na_free_rings;
524 case NR_EV:
525 return na->na_event_rings;
526 case NR_LBA:
527 return na->na_large_buf_alloc_rings;
528 default:
529 VERIFY(0);
530 /* NOTREACHED */
531 __builtin_unreachable();
532 }
533 }
534
/* forge a __single (non-indexable) kring pointer for -fbounds-safety */
#define KR_SINGLE(kr) (__unsafe_forge_single(struct __kern_channel_ring *, (kr)))

/*
 * If the adapter is owned by the kernel, neither another flow switch nor user
 * can use it; if the adapter is owned by a user, only users can share it.
 * Evaluation must be done under SK_LOCK().
 */
#define NA_KERNEL_ONLY(_na)     (((_na)->na_flags & NAF_KERNEL_ONLY) != 0)
#define NA_OWNED_BY_ANY(_na)    \
	(NA_KERNEL_ONLY(_na) || ((_na)->na_channels > 0))
#define NA_OWNED_BY_FSW(_na)    \
	(((_na)->na_flags & NAF_MODE_FSW) != 0)
#define NA_OWNED_BY_LLW(_na)    \
	(((_na)->na_flags & NAF_MODE_LLW) != 0)

/*
 * Whether the adapter has been activated via na_activate() call.
 */
#define NA_IS_ACTIVE(_na)       (((_na)->na_flags & NAF_ACTIVE) != 0)
#define NA_IS_DEFUNCT(_na)      (((_na)->na_flags & NAF_DEFUNCT) != 0)
#define NA_CHANNEL_EVENT_ATTACHED(_na)  \
	(((_na)->na_flags & NAF_CHANNEL_EVENT_ATTACHED) != 0)
557 /*
558 * Whether channel activities are rejected by the adapter. This takes the
559 * nexus adapter argument separately, as ch->ch_na may not be set yet.
560 */
561 __attribute__((always_inline))
562 static inline boolean_t
na_reject_channel(struct kern_channel * ch,struct nexus_adapter * na)563 na_reject_channel(struct kern_channel *ch, struct nexus_adapter *na)
564 {
565 boolean_t reject;
566
567 ASSERT(ch->ch_na == NULL || ch->ch_na == na);
568
569 if ((na->na_flags & NAF_REJECT) || NX_REJECT_ACT(na->na_nx)) {
570 /* set trapdoor NAF_REJECT flag */
571 if (!(na->na_flags & NAF_REJECT)) {
572 SK_ERR("%s(%d) marked as non-permissive",
573 ch->ch_name, ch->ch_pid);
574 os_atomic_or(&na->na_flags, NAF_REJECT, relaxed);
575 ch_deactivate(ch);
576 }
577 reject = TRUE;
578 } else {
579 reject = FALSE;
580 }
581
582 return reject;
583 }
584
#if SK_LOG
/* Human-readable name of an na_activate_mode_t, for logging. */
__attribute__((always_inline))
static inline const char *
na_activate_mode2str(na_activate_mode_t m)
{
	if (m == NA_ACTIVATE_MODE_ON) {
		return "on";
	}
	if (m == NA_ACTIVATE_MODE_DEFUNCT) {
		return "defunct";
	}
	if (m == NA_ACTIVATE_MODE_OFF) {
		return "off";
	}
	VERIFY(0);
	/* NOTREACHED */
	__builtin_unreachable();
}
#endif /* SK_LOG */
604
__BEGIN_DECLS
extern void na_init(void);
extern void na_fini(void);

extern int na_bind_channel(struct nexus_adapter *na, struct kern_channel *ch,
    struct chreq *);
extern void na_unbind_channel(struct kern_channel *ch);

/*
 * Common routine for all functions that create a nexus adapter. It performs
 * two main tasks:
 * - if the na points to an ifp, mark the ifp as Skywalk capable
 *   using na as its native adapter;
 * - provide defaults for the setup callbacks and the memory allocator
 */
extern void na_attach_common(struct nexus_adapter *,
    struct kern_nexus *, struct kern_nexus_domain_provider *);
/*
 * Update the ring parameters (number and size of tx and rx rings).
 * It calls the na_config callback, if available.
 */
extern int na_update_config(struct nexus_adapter *na);

extern int na_rings_mem_setup(struct nexus_adapter *, boolean_t,
    struct kern_channel *);
extern void na_rings_mem_teardown(struct nexus_adapter *,
    struct kern_channel *, boolean_t);
extern void na_ch_rings_defunct(struct kern_channel *, struct proc *);

/* convenience wrappers for na_set_all_rings, used in drivers */
extern void na_disable_all_rings(struct nexus_adapter *);
extern void na_enable_all_rings(struct nexus_adapter *);
extern void na_lock_all_rings(struct nexus_adapter *);
extern void na_unlock_all_rings(struct nexus_adapter *);
extern int na_interp_ringid(struct nexus_adapter *, ring_id_t, ring_set_t,
    uint32_t[NR_TXRX], uint32_t[NR_TXRX]);
extern struct kern_pbufpool *na_kr_get_pp(struct nexus_adapter *, enum txrx);

extern int na_find(struct kern_channel *, struct kern_nexus *, struct chreq *,
    struct nxbind *, struct proc *, struct nexus_adapter **, boolean_t);
/* adapter reference counting; the _locked suffix implies SK_LOCK is held */
extern void na_retain_locked(struct nexus_adapter *na);
extern int na_release_locked(struct nexus_adapter *na);

extern int na_connect(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct nxbind *, struct proc *);
extern void na_disconnect(struct kern_nexus *, struct kern_channel *);
extern void na_defunct(struct kern_nexus *, struct kern_channel *,
    struct nexus_adapter *, boolean_t);
/* kernel channel (special mode) variants; see nxspec_cmd_t */
extern int na_connect_spec(struct kern_nexus *, struct kern_channel *,
    struct chreq *, struct proc *);
extern void na_disconnect_spec(struct kern_nexus *, struct kern_channel *);
extern void na_start_spec(struct kern_nexus *, struct kern_channel *);
extern void na_stop_spec(struct kern_nexus *, struct kern_channel *);

extern int na_pseudo_create(struct kern_nexus *, struct chreq *,
    struct nexus_adapter **);
extern void na_kr_drop(struct nexus_adapter *, boolean_t);
/* flow advisory table management (see na_flowadv_max) */
extern void na_flowadv_entry_alloc(const struct nexus_adapter *, uuid_t,
    const flowadv_idx_t, const uint32_t);
extern void na_flowadv_entry_free(const struct nexus_adapter *, uuid_t,
    const flowadv_idx_t, const uint32_t);
extern bool na_flowadv_set(const struct kern_channel *,
    const flowadv_idx_t, const flowadv_token_t);
extern bool na_flowadv_clear(const struct kern_channel *,
    const flowadv_idx_t, const flowadv_token_t);
extern int na_flowadv_report_congestion_event(const struct kern_channel *ch,
    const flowadv_idx_t fe_idx, const flowadv_token_t flow_token,
    uint32_t congestion_cnt, uint32_t ce_cnt, uint32_t total_pkt_cnt);
extern void na_flowadv_event(struct __kern_channel_ring *);
extern void na_post_event(struct __kern_channel_ring *, boolean_t, boolean_t,
    boolean_t, uint32_t);

extern void na_drain(struct nexus_adapter *, boolean_t);

#if SK_LOG
#define NA_DBGBUF_SIZE 256
extern char * na2str(const struct nexus_adapter *na, char *__counted_by(dsz)dst, size_t dsz);
#endif /* SK_LOG */

__END_DECLS
685 #endif /* BSD_KERNEL_PRIVATE */
686 #endif /* _SKYWALK_NEXUS_ADAPTER_H_ */
687