/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
#include <os/refcnt.h>
#include <sys/sdt.h>

#define NX_NETIF_TAG_QSET "com.apple.skywalk.netif.qset"
static SKMEM_TAG_DEFINE(nx_netif_tag_qset, NX_NETIF_TAG_QSET);

#define NX_NETIF_TAG_LLINK_CFG "com.apple.skywalk.netif.llink.cfg"
static SKMEM_TAG_DEFINE(nx_netif_tag_llink_cfg, NX_NETIF_TAG_LLINK_CFG);

LCK_ATTR_DECLARE(netif_llink_lock_attr, 0, 0);
static LCK_GRP_DECLARE(netif_llink_lock_group, "netif llink locks");

#if (DEVELOPMENT || DEBUG)
static TUNABLE(uint32_t, nx_netif_disable_llink, "sk_disable_llink", 0);
#endif /* (DEVELOPMENT || DEBUG) */

static struct netif_llink *nx_netif_llink_alloc(void);
static void nx_netif_llink_free(struct netif_llink **);
static struct netif_qset *nx_netif_qset_alloc(uint8_t, uint8_t);
static void nx_netif_qset_free(struct netif_qset **);
static void nx_netif_qset_setup_ifclassq(struct netif_llink *,
    struct netif_qset *);
static void nx_netif_qset_teardown_ifclassq(struct netif_qset *);
static void nx_netif_qset_init(struct netif_qset *, struct netif_llink *,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *);
static struct netif_qset *nx_netif_qset_create(struct netif_llink *,
    uint8_t, struct kern_nexus_netif_llink_qset_init *);
static void nx_netif_qset_destroy(struct netif_qset *);
static void nx_netif_llink_initialize(struct netif_llink *, struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_driver_queue_destroy(struct netif_queue *);
static void nx_netif_driver_queue_init(struct netif_qset *,
    struct netif_queue *, kern_packet_svc_class_t, bool);
static struct netif_llink *nx_netif_llink_create_locked(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_default_llink_add(struct nx_netif *);
static int nx_netif_llink_ext_init_queues(struct kern_nexus *,
    struct netif_llink *);
static void nx_netif_llink_ext_fini_queues(struct kern_nexus *,
    struct netif_llink *);

static uint32_t nx_netif_random_qset = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, random_qset,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_random_qset, 0,
    "pick a random qset");
#endif /* DEVELOPMENT || DEBUG */

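/*
 * Overview (a summary of the code below): a netif nexus (struct nx_netif)
 * owns a list of logical links (struct netif_llink); each logical link owns
 * one or more queue sets (struct netif_qset); each queue set embeds its RX
 * and TX driver queues (struct netif_queue). A logical link is immutable
 * once created: its queue sets are created and destroyed only together with
 * the llink itself, so the llink is the only refcounted object in this
 * hierarchy.
 */
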
/* retains a reference for the caller */
static struct netif_llink *
nx_netif_llink_alloc(void)
{
	struct netif_llink *llink;

	llink = sk_alloc_type(struct netif_llink, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_llink);
	os_ref_init(&llink->nll_refcnt, NULL);
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_retain(struct netif_llink *llink)
{
	os_ref_retain(&llink->nll_refcnt);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_free(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;
	struct netif_qset *qset, *tqset;

	VERIFY(llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	*pllink = NULL;
	SLIST_FOREACH_SAFE(qset, &llink->nll_qset_list, nqs_list, tqset) {
		SLIST_REMOVE(&llink->nll_qset_list, qset, netif_qset,
		    nqs_list);
		nx_netif_qset_destroy(qset);
	}
	if (llink->nll_ifcq != NULL) {
		ifclassq_release(&llink->nll_ifcq);
	}

	sk_free_type(struct netif_llink, llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_release(struct netif_llink **pllink)
{
	struct netif_llink *__single llink = *pllink;

	*pllink = NULL;
	if (os_ref_release(&llink->nll_refcnt) == 0) {
		nx_netif_llink_free(&llink);
	}
}
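
/*
 * A minimal lifecycle sketch of the refcounting above:
 *
 *	struct netif_llink *__single llink;
 *
 *	llink = nx_netif_llink_alloc();     refcnt == 1, held by the caller
 *	nx_netif_llink_retain(llink);       refcnt == 2
 *	...
 *	nx_netif_llink_release(&llink);     refcnt == 1, pointer cleared
 *
 * The final release drives the refcnt to zero and calls
 * nx_netif_llink_free(), which requires that the llink was first marked
 * NETIF_LLINK_STATE_DESTROYED (see nx_netif_llink_destroy_locked()).
 */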

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_qset_alloc(uint8_t nrxqs, uint8_t ntxqs)
{
	struct netif_qset *qset;

	static_assert(sizeof(struct netif_queue) % sizeof(uint64_t) == 0);

	qset = sk_alloc_type_header_array(struct netif_qset, struct netif_queue,
	    nrxqs + ntxqs, Z_WAITOK | Z_NOFAIL, nx_netif_tag_qset);

	qset->nqs_num_queues = nrxqs + ntxqs;
	qset->nqs_num_rx_queues = nrxqs;
	qset->nqs_num_tx_queues = ntxqs;
	return qset;
}
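
/*
 * The qset and its driver queues share a single allocation: a netif_qset
 * header followed by (nrxqs + ntxqs) netif_queue entries. The
 * NETIF_QSET_RX_QUEUE()/NETIF_QSET_TX_QUEUE() accessors used below are
 * assumed to index into that trailing array, which is why the static_assert
 * above insists that struct netif_queue stays 64-bit aligned.
 */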

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_retain(struct netif_qset *qset)
{
	/*
	 * A logical link is immutable, i.e. queue sets can't be added to or
	 * removed from it. We rely on this property to simply acquire a
	 * refcnt on the logical link, which is the parent structure of a
	 * qset.
	 */
	nx_netif_llink_retain(qset->nqs_llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_release(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	struct netif_llink *__single llink = qset->nqs_llink;

	*pqset = NULL;
	nx_netif_llink_release(&llink);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_free(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	uint8_t i;

	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_RX_QUEUE(qset, i));
	}
	for (i = 0; i < qset->nqs_num_tx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_TX_QUEUE(qset, i));
	}
	if (qset->nqs_flags & NETIF_QSET_FLAG_AQM) {
		nx_netif_qset_teardown_ifclassq(qset);
	}
	qset->nqs_llink = NULL;
	sk_free_type_header_array(struct netif_qset, struct netif_queue,
	    qset->nqs_num_rx_queues + qset->nqs_num_tx_queues, qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_destroy(struct netif_qset *qset)
{
	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	nx_netif_qset_free(&qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_setup_ifclassq(struct netif_llink *llink,
    struct netif_qset *qset)
{
	uint8_t flags = 0;

	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(llink->nll_ifcq != NULL);

	ifclassq_retain(llink->nll_ifcq);
	qset->nqs_ifcq = llink->nll_ifcq;

	if ((qset->nqs_flags & NETIF_QSET_FLAG_LOW_LATENCY) != 0) {
		flags |= IF_CLASSQ_LOW_LATENCY;
	}
	if ((qset->nqs_flags & NETIF_QSET_FLAG_DEFAULT) != 0) {
		flags |= IF_DEFAULT_GRP;
	}

	ifclassq_setup_group(qset->nqs_ifcq, qset->nqs_idx, flags);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_teardown_ifclassq(struct netif_qset *qset)
{
	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(qset->nqs_ifcq != NULL);

	qset->nqs_flags &= ~NETIF_QSET_FLAG_AQM;
	ifclassq_release(&qset->nqs_ifcq);
}
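
/*
 * All AQM-capable queue sets of a logical link share the llink's single
 * ifclassq; nx_netif_qset_setup_ifclassq() only registers the qset's own
 * scheduling group, keyed by nqs_idx, with IF_CLASSQ_LOW_LATENCY and/or
 * IF_DEFAULT_GRP derived from the qset flags.
 */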

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_init(struct netif_qset *qset, struct netif_llink *llink,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *qset_init)
{
#define _NETIF_QSET_MAX_TXQS 4
	kern_packet_svc_class_t svc[_NETIF_QSET_MAX_TXQS] =
	    {KPKT_SC_BE, KPKT_SC_BK, KPKT_SC_VI, KPKT_SC_VO};
	struct ifnet *ifp = llink->nll_nif->nif_ifp;
	uint8_t i;

	/*
	 * No need to retain a reference for llink, as the logical link is
	 * immutable and qsets are created and destroyed along with the
	 * logical link.
	 */
	qset->nqs_llink = llink;
	qset->nqs_id = NETIF_QSET_ID_ENCODE(llink->nll_link_id_internal, idx);
	qset->nqs_idx = idx;

	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
		qset->nqs_flags |= NETIF_QSET_FLAG_DEFAULT;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY) {
		qset->nqs_flags |= NETIF_QSET_FLAG_LOW_LATENCY;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_AQM) {
		qset->nqs_flags |= NETIF_QSET_FLAG_AQM;
		nx_netif_qset_setup_ifclassq(llink, qset);
	}

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_init(qset, NETIF_QSET_RX_QUEUE(qset, i),
		    KPKT_SC_UNSPEC, true);
	}

	if (ifp->if_output_sched_model & IFNET_SCHED_DRIVER_MANGED_MODELS) {
		VERIFY(qset->nqs_num_tx_queues == _NETIF_QSET_MAX_TXQS);
		VERIFY(IFNET_MODEL_IS_VALID(ifp->if_output_sched_model));
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), svc[i], false);
		}
	} else {
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), KPKT_SC_UNSPEC, false);
		}
	}
}
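
/*
 * For driver-managed output scheduling models, the TX queues are mapped
 * 1:1 onto the service classes in svc[] above, in that order (BE, BK, VI,
 * VO); this is why such models require exactly _NETIF_QSET_MAX_TXQS TX
 * queues. All other models leave the TX service class as KPKT_SC_UNSPEC.
 */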

SK_NO_INLINE_ATTRIBUTE
static struct netif_qset *
nx_netif_qset_create(struct netif_llink *llink, uint8_t idx,
    struct kern_nexus_netif_llink_qset_init *qset_init)
{
	struct netif_qset *qset;

	qset = nx_netif_qset_alloc(qset_init->nlqi_num_rxqs,
	    qset_init->nlqi_num_txqs);
	nx_netif_qset_init(qset, llink, idx, qset_init);
	return qset;
}

static uint16_t
nx_netif_generate_internal_llink_id(struct nx_netif *nif)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;
	uint16_t id;

again:
	id = (uint16_t)(random() % 65536);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (__improbable(llink->nll_link_id_internal == id)) {
			break;
		}
	}
	if (__probable(llink == NULL && id != 0)) {
		return id;
	} else {
		STATS_INC(nifs, NETIF_STATS_LLINK_DUP_INT_ID_GENERATED);
		DTRACE_SKYWALK1(dup__llink__id__internal, uint16_t, id);
		goto again;
	}
}
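
/*
 * The internal llink id is a random, nonzero 16-bit value unique within a
 * nexus. It is the value that NETIF_QSET_ID_ENCODE() packs into a qset id
 * (together with the qset index), and the value that
 * nx_netif_qset_hint_decode() later recovers from a qset hint.
 */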

static void
nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	uint8_t i;
	struct ifnet *ifp = nif->nif_ifp;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);

	llink->nll_nif = nif;
	llink->nll_link_id = llink_init->nli_link_id;
	if (llink_init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) {
		llink->nll_flags |= NETIF_LLINK_FLAG_DEFAULT;
	}
	llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif);
	llink->nll_ctx = llink_init->nli_ctx;
	SLIST_INIT(&llink->nll_qset_list);

	for (i = 0; i < llink_init->nli_num_qsets; i++) {
		if (llink->nll_ifcq == NULL &&
		    (llink_init->nli_qsets[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_AQM)) {
			if (NETIF_DEFAULT_LLINK(llink)) {
				/* use the default AQM queues from ifnet */
				ifclassq_retain(ifp->if_snd);
				llink->nll_ifcq = ifp->if_snd;
			} else {
				llink->nll_ifcq = ifclassq_alloc();
				dlil_ifclassq_setup(ifp, llink->nll_ifcq);
			}
		}

		struct netif_qset *qset = nx_netif_qset_create(llink, i,
		    &llink_init->nli_qsets[i]);
		/* nx_netif_qset_create retains a reference for the caller */
		SLIST_INSERT_HEAD(&llink->nll_qset_list, qset, nqs_list);
		if (NETIF_DEFAULT_QSET(qset)) {
			/* there can only be one default queue set */
			VERIFY(llink->nll_default_qset == NULL);
			llink->nll_default_qset = qset;
		}
	}
	llink->nll_qset_cnt = llink_init->nli_num_qsets;
	/* there should be a default queue set */
	VERIFY(llink->nll_default_qset != NULL);
	llink->nll_state = NETIF_LLINK_STATE_INIT;
}

static void
nx_netif_driver_queue_destroy(struct netif_queue *drvq)
{
	VERIFY(drvq->nq_qset->nqs_llink->nll_state ==
	    NETIF_LLINK_STATE_DESTROYED);

	lck_mtx_lock(&drvq->nq_lock);
	VERIFY(KPKTQ_EMPTY(&drvq->nq_pktq));
	lck_mtx_unlock(&drvq->nq_lock);

	drvq->nq_qset = NULL;
	lck_mtx_destroy(&drvq->nq_lock, &netif_llink_lock_group);
}

static void
nx_netif_driver_queue_init(struct netif_qset *qset,
    struct netif_queue *drvq, kern_packet_svc_class_t svc, bool is_rx)
{
	lck_mtx_init(&drvq->nq_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_mtx_lock(&drvq->nq_lock);
	KPKTQ_INIT(&drvq->nq_pktq);
	lck_mtx_unlock(&drvq->nq_lock);

	/*
	 * No need to retain a reference for qset, as the queue set is
	 * immutable and the driver queue is part of the queue set data
	 * structure.
	 */
	drvq->nq_qset = qset;
	drvq->nq_svc = svc;
	if (is_rx) {
		drvq->nq_flags |= NETIF_QUEUE_IS_RX;
	}
}
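
/*
 * nq_lock serializes access to nq_pktq; taking it around KPKTQ_INIT() here
 * mirrors the locked KPKTQ_EMPTY() check in nx_netif_driver_queue_destroy()
 * above.
 */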

SK_NO_INLINE_ATTRIBUTE
static struct netif_llink *
nx_netif_llink_create_locked(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	llink = nx_netif_llink_alloc();
	nx_netif_llink_initialize(llink, nif, llink_init);
	/* nx_netif_llink_alloc retains a reference for the caller */
	STAILQ_INSERT_TAIL(&nif->nif_llink_list, llink, nll_link);
	nif->nif_llink_cnt++;
	STATS_INC(nifs, NETIF_STATS_LLINK_ADD);
	if (NETIF_DEFAULT_LLINK(llink)) {
		/* there can only be one default logical link */
		VERIFY(nif->nif_default_llink == NULL);
	}
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_destroy_locked(struct nx_netif *nif, struct netif_llink **pllink)
{
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	(*pllink)->nll_state = NETIF_LLINK_STATE_DESTROYED;
	STAILQ_REMOVE(&nif->nif_llink_list, *pllink, netif_llink, nll_link);
	nif->nif_llink_cnt--;
	STATS_INC(nifs, NETIF_STATS_LLINK_REMOVE);
	nx_netif_llink_release(pllink);
}

int
nx_netif_llink_add(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init, struct netif_llink **pllink)
{
	int err;
	struct netif_llink *__single llink;
	struct netif_stats *nifs = &nif->nif_stats;

	*pllink = NULL;
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	/* ensure logical_link_id is unique */
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_init->nli_link_id) {
			SK_ERR("duplicate llink_id 0x%llx",
			    llink_init->nli_link_id);
			STATS_INC(nifs, NETIF_STATS_LLINK_DUP_ID_GIVEN);
			DTRACE_SKYWALK1(dup__id__given, uint64_t,
			    llink_init->nli_link_id);
			lck_rw_unlock_exclusive(&nif->nif_llink_lock);
			return EINVAL;
		}
	}
	llink = nx_netif_llink_create_locked(nif, llink_init);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	VERIFY(llink != NULL);
	ASSERT((llink->nll_flags & NETIF_LLINK_FLAG_DEFAULT) == 0);
	err = nx_netif_llink_ext_init_queues(nif->nif_nx, llink);
	if (err != 0) {
		lck_rw_lock_exclusive(&nif->nif_llink_lock);
		nx_netif_llink_destroy_locked(nif, &llink);
		lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	} else {
		/*
		 * Take an extra reference for the caller, matching the
		 * default llink pattern; the refcnt is 2 after this retain.
		 */
		nx_netif_llink_retain(llink);
		*pllink = llink;
	}
	return err;
}
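
/*
 * A minimal usage sketch for nx_netif_llink_add(), given a struct
 * nx_netif *nif (hypothetical driver-side code; my_link_id, my_ctx and the
 * queue counts are illustrative only):
 *
 *	struct kern_nexus_netif_llink_qset_init qs = {
 *		.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT |
 *		    KERN_NEXUS_NET_LLINK_QSET_AQM,
 *		.nlqi_num_rxqs = 1,
 *		.nlqi_num_txqs = 1,
 *	};
 *	struct kern_nexus_netif_llink_init li = {
 *		.nli_link_id = my_link_id,
 *		.nli_num_qsets = 1,
 *		.nli_qsets = &qs,
 *		.nli_ctx = my_ctx,
 *	};
 *	struct netif_llink *__single llink;
 *	int err = nx_netif_llink_add(nif, &li, &llink);
 *
 * On success the llink's refcnt is 2: one reference is held by the nexus'
 * llink list and one is returned via *pllink. nx_netif_llink_remove()
 * drops both.
 */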

int
nx_netif_llink_remove(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t llink_id)
{
	bool llink_found = false;
	struct netif_llink *__single llink, *__single llink_tmp;
	struct netif_stats *nifs = &nif->nif_stats;

	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_id) {
			llink_found = true;
			llink_tmp = llink;
			break;
		}
	}
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	if (!llink_found) {
		STATS_INC(nifs, NETIF_STATS_LLINK_NOT_FOUND_REMOVE);
		DTRACE_SKYWALK1(not__found, uint64_t, llink_id);
		return ENOENT;
	}
	ASSERT((llink_tmp->nll_flags & NETIF_LLINK_FLAG_DEFAULT) == 0);
	nx_netif_llink_ext_fini_queues(nif->nif_nx, llink_tmp);
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	nx_netif_llink_release(&llink_tmp);
	nx_netif_llink_destroy_locked(nif, &llink);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	return 0;
}

static void
nx_netif_default_llink_add(struct nx_netif *nif)
{
	struct kern_nexus_netif_llink_init llink_init, *pllink_init;
	struct kern_nexus_netif_llink_qset_init qset;
	struct ifnet *ifp = nif->nif_ifp;
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	VERIFY(SKYWALK_NATIVE(ifp));

	llink_init.nli_flags = KERN_NEXUS_NET_LLINK_DEFAULT;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		VERIFY(nif->nif_default_llink_params != NULL);
		pllink_init = nif->nif_default_llink_params;
	} else {
		struct nexus_adapter *devna =
		    nx_port_get_na(nif->nif_nx, NEXUS_PORT_NET_IF_DEV);

		llink_init.nli_link_id = NETIF_LLINK_ID_DEFAULT;
		qset.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT;
		/*
		 * For the legacy mode of operation we assume that AQM is
		 * not needed on a low-latency interface.
		 */
		if (NETIF_IS_LOW_LATENCY(nif)) {
			qset.nlqi_flags |=
			    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY;
		} else {
			qset.nlqi_flags |= KERN_NEXUS_NET_LLINK_QSET_AQM;
		}
		qset.nlqi_num_rxqs =
		    (uint8_t)na_get_nrings(devna, NR_RX);
		qset.nlqi_num_txqs =
		    (uint8_t)na_get_nrings(devna, NR_TX);
		llink_init.nli_num_qsets = 1;
		llink_init.nli_qsets = &qset;
		llink_init.nli_ctx = NULL;
		pllink_init = &llink_init;
	}
	llink = nx_netif_llink_create_locked(nif, pllink_init);
	/* there can only be one default logical link */
	VERIFY(nif->nif_default_llink == NULL);
	nx_netif_llink_retain(llink);
	/* obtain a reference for the default logical link pointer */
	nif->nif_default_llink = llink;
}

static void
nx_netif_default_llink_remove(struct nx_netif *nif)
{
	struct netif_llink *__single llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	ASSERT(nif->nif_default_llink != NULL);
	ASSERT(nif->nif_llink_cnt == 1);
	llink = nif->nif_default_llink;
	nx_netif_llink_release(&nif->nif_default_llink);
	ASSERT(nif->nif_default_llink == NULL);
	nx_netif_llink_destroy_locked(nif, &llink);
}

int
netif_qset_enqueue(struct netif_qset *qset, bool chain,
    struct __kern_packet *pkt_chain, struct __kern_packet *tail, uint32_t cnt,
    uint32_t bytes, uint32_t *flowctl, uint32_t *dropped)
{
	struct __kern_packet *pkt = pkt_chain;
	struct __kern_packet *next;
	struct netif_stats *nifs = &qset->nqs_llink->nll_nif->nif_stats;
	struct ifnet *ifp = qset->nqs_ifcq->ifcq_ifp;
	uint32_t c = 0, b = 0;
	boolean_t pkt_drop = FALSE;
	int err = 0;

	ASSERT(dropped != NULL && flowctl != NULL);

	/* drop packets if logical link state is destroyed */
	if (qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED) {
		pp_free_packet_chain(pkt_chain, (int *)dropped);
		STATS_ADD(nifs, NETIF_STATS_LLINK_TX_DROP_BAD_STATE, *dropped);
		return ENXIO;
	}

	if (chain) {
		/* all packets in this chain should have the same SVC */
		netif_ifp_inc_traffic_class_out_pkt(ifp, pkt_chain->pkt_svc_class,
		    cnt, bytes);

		err = ifnet_enqueue_pkt_chain(ifp, qset->nqs_ifcq, pkt_chain, tail, cnt,
		    bytes, false, &pkt_drop);
		if (__improbable(err != 0)) {
			if ((err == EQFULL || err == EQSUSPENDED)) {
				(*flowctl)++;
			}
			if (pkt_drop) {
				*dropped = cnt;
			}
		}
	} else {
		while (pkt != NULL) {
			next = pkt->pkt_nextpkt;
			pkt->pkt_nextpkt = NULL;
			c++;
			b += pkt->pkt_length;

			netif_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
			    1, pkt->pkt_length);

			err = ifnet_enqueue_pkt(ifp, qset->nqs_ifcq, pkt, false, &pkt_drop);
			if (__improbable(err != 0)) {
				if ((err == EQFULL || err == EQSUSPENDED)) {
					(*flowctl)++;
				}
				if (pkt_drop) {
					(*dropped)++;
				}
			}

			pkt = next;
		}
		VERIFY(c == cnt);
		VERIFY(b == bytes);
	}

	if (*flowctl > 0) {
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_QFULL, *flowctl);
		err = EIO;
	}
	if (*dropped > 0) {
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_DROPPED, *dropped);
		STATS_ADD(nifs, NETIF_STATS_DROP, *dropped);
		err = EIO;
	}
	return err;
}
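
/*
 * Summary of netif_qset_enqueue() semantics: returns ENXIO if the logical
 * link is already destroyed (the whole chain is freed and counted as
 * dropped), EIO if AQM flow-controlled (*flowctl) or dropped (*dropped) any
 * packet, and 0 otherwise. In chain mode the entire chain is accounted
 * against the lead packet's service class, so callers are expected to pass
 * homogeneous chains.
 */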

struct netif_qset *
nx_netif_get_default_qset_noref(struct nx_netif *nif)
{
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	qset = nif->nif_default_llink->nll_default_qset;
	return qset;
}

static void
nx_netif_qset_hint_decode(uint64_t hint,
    uint16_t *link_id_internal, uint16_t *qset_idx)
{
	/* the top 32 bits are unused for now */
	*link_id_internal = (uint16_t)((0xffff0000 & hint) >> 16);
	*qset_idx = (uint16_t)((0x0000ffff & hint));
}
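
/*
 * The corresponding encode side, as a sketch only; NETIF_QSET_ID_ENCODE()
 * is assumed to be the inverse of the decode above:
 *
 *	hint = ((uint64_t)link_id_internal << 16) | qset_idx;
 *
 * i.e. bits 16-31 carry the internal llink id, bits 0-15 the qset index,
 * and the top 32 bits are unused for now.
 */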

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_get_default_qset(struct nx_netif *nif)
{
	struct netif_qset *qset;

	qset = nif->nif_default_llink->nll_default_qset;
	nx_netif_qset_retain(qset);
	return qset;
}

/*
 * Find the qset based on the qset hint. Fall back to the default qset
 * if not found. The random qset is used for experimentation.
 */
struct netif_qset *
nx_netif_find_qset(struct nx_netif *nif, uint64_t hint)
{
	uint16_t ll_id_internal, qset_idx;
	struct netif_llink *llink;
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;
	int i, j, random_id;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	if (!NX_LLINK_PROV(nif->nif_nx) ||
	    (nx_netif_random_qset == 0 && hint == 0)) {
		goto def_qset;
	}
	if (nx_netif_random_qset == 0) {
		nx_netif_qset_hint_decode(hint, &ll_id_internal, &qset_idx);
	} else {
		ll_id_internal = 0;
		qset_idx = 0;
	}
	lck_rw_lock_shared(&nif->nif_llink_lock);
	i = 0;
	random_id = random();
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % nif->nif_llink_cnt) == i) {
			break;
		} else if (llink->nll_link_id_internal == ll_id_internal) {
			break;
		}
		i++;
	}
	if (llink == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	j = 0;
	random_id = random();
	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % llink->nll_qset_cnt) == j) {
			break;
		} else if (qset->nqs_idx == qset_idx) {
			break;
		}
		j++;
	}
	if (qset == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	nx_netif_qset_retain(qset);
	STATS_INC(nifs, NETIF_STATS_LLINK_NONDEF_QSET_USED);
	lck_rw_unlock_shared(&nif->nif_llink_lock);
	if (nx_netif_random_qset != 0) {
		SK_DF(SK_VERB_LLINK, "%s: random qset: qset %p, ifcq %p, "
		    "llink_idx %d, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, i, j);

		DTRACE_SKYWALK5(random__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, int, i, int, j);
	} else {
		SK_DF(SK_VERB_LLINK, "%s: non-default qset: qset %p, ifcq %p, "
		    "ll_id_internal 0x%x, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, ll_id_internal, qset_idx);

		DTRACE_SKYWALK5(nondef__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, uint16_t, ll_id_internal,
		    uint16_t, qset_idx);
	}
	return qset;

def_qset:
	STATS_INC(nifs, NETIF_STATS_LLINK_DEF_QSET_USED);
	qset = nx_netif_get_default_qset(nif);
	ASSERT(qset != NULL);

	SK_DF(SK_VERB_LLINK, "%s: default qset: qset %p, ifcq %p, hint %llx",
	    if_name(nif->nif_ifp), qset, qset->nqs_ifcq, hint);

	DTRACE_SKYWALK4(def__qset, struct nx_netif *, nif, struct netif_qset *,
	    qset, struct ifclassq *, qset->nqs_ifcq, uint64_t, hint);
	return qset;
}

struct netif_qset *
nx_netif_find_qset_with_pkt(struct ifnet *ifp, struct __kern_packet *pkt)
{
	struct nx_netif *nif = NA(ifp)->nifna_netif;
	struct netif_qset *__single qset = NULL;
	uint64_t qset_id;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		/*
		 * Note: an ifp can have either eth traffic rules or inet
		 * traffic rules, but not both.
		 */
		if (ifp->if_eth_traffic_rule_count) {
			if (__probable(pkt->pkt_pflags & PKT_F_PRIV_HAS_QSET_ID)) {
				qset = nx_netif_find_qset(nif, (uint64_t)pkt->pkt_priv);
				ASSERT(qset != NULL);
			} else if (nxctl_eth_traffic_rule_find_qset_id_with_pkt(
				    ifp->if_xname, pkt, &qset_id) == 0) {
				qset = nx_netif_find_qset(nif, qset_id);
				ASSERT(qset != NULL);
			}
		} else if (ifp->if_inet_traffic_rule_count > 0 &&
		    nxctl_inet_traffic_rule_find_qset_id_with_pkt(
			    ifp->if_xname, pkt, &qset_id) == 0) {
			qset = nx_netif_find_qset(nif, qset_id);
			ASSERT(qset != NULL);
		}
	}

	return qset;
}

void
nx_netif_llink_init(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

#if (DEVELOPMENT || DEBUG)
	if (__improbable(nx_netif_disable_llink != 0)) {
		SK_DF(SK_VERB_LLINK, "%s: llink is disabled",
		    if_name(nif->nif_ifp));
		return;
	}
#endif /* (DEVELOPMENT || DEBUG) */

	if (!SKYWALK_NATIVE(ifp)) {
		SK_DF(SK_VERB_LLINK,
		    "%s: llink is supported on native devices only",
		    if_name(ifp));
		return;
	}
	ASSERT(!NETIF_LLINK_ENABLED(nif));
	lck_rw_init(&nif->nif_llink_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	STAILQ_INIT(&nif->nif_llink_list);
	nif->nif_llink_cnt = 0;
	nx_netif_default_llink_add(nif);
	nif->nif_flags |= NETIF_FLAG_LLINK_INITIALIZED;

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	SK_DF(SK_VERB_LLINK, "%s: llink initialized", if_name(ifp));
}

void
nx_netif_llink_fini(struct nx_netif *nif)
{
	if (!NETIF_LLINK_ENABLED(nif)) {
		SK_DF(SK_VERB_LLINK, "%s: llink not initialized",
		    if_name(nif->nif_ifp));
		return;
	}

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	nif->nif_flags &= ~NETIF_FLAG_LLINK_INITIALIZED;
	nx_netif_default_llink_remove(nif);
	ASSERT(nif->nif_llink_cnt == 0);
	ASSERT(STAILQ_EMPTY(&nif->nif_llink_list));

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	nx_netif_llink_config_free(nif);
	lck_rw_destroy(&nif->nif_llink_lock, &netif_llink_lock_group);
	SK_DF(SK_VERB_LLINK, "%s: llink uninitialization done",
	    if_name(nif->nif_ifp));
}

int
nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *init,
    bool default_llink)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	bool has_default_qset = false;
	bool default_llink_flag;
	uint8_t i;

	default_llink_flag =
	    ((init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) != 0);

	if (default_llink != default_llink_flag) {
		SK_ERR("default llink flag incompatible: default_llink(%s), "
		    "default_llink_flag(%s)",
		    default_llink ? "true" : "false",
		    default_llink_flag ? "true" : "false");
		return EINVAL;
	}
	if (init->nli_num_qsets == 0) {
		SK_ERR("num qsets is zero");
		return EINVAL;
	}
	if ((qsinit = init->nli_qsets) == NULL) {
		SK_ERR("qsets is NULL");
		return EINVAL;
	}
	for (i = 0; i < init->nli_num_qsets; i++) {
		if (qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
			if (has_default_qset) {
				SK_ERR("has more than one default qset");
				return EINVAL;
			}
			has_default_qset = true;
		}
		if (qsinit[i].nlqi_num_txqs == 0) {
			SK_ERR("num_txqs == 0");
			return EINVAL;
		}
		if ((qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_WMM_MODE) &&
		    (qsinit[i].nlqi_num_txqs != NEXUS_NUM_WMM_QUEUES)) {
			SK_ERR("invalid wmm mode");
			return EINVAL;
		}
	}
	return 0;
}

int
nx_netif_default_llink_config(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *init)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	int i, err;

	err = nx_netif_validate_llink_config(init, true);
	if (err != 0) {
		return err;
	}
	nif->nif_default_llink_params = sk_alloc_type(
		struct kern_nexus_netif_llink_init,
		Z_WAITOK | Z_NOFAIL, nx_netif_tag_llink_cfg);

	qsinit = sk_alloc_type_array(struct kern_nexus_netif_llink_qset_init,
	    init->nli_num_qsets, Z_WAITOK, nx_netif_tag_llink_cfg);
	if (qsinit == NULL) {
		SK_ERR("failed to alloc kern_nexus_netif_llink_qset_init");
		sk_free_type(struct kern_nexus_netif_llink_init,
		    nif->nif_default_llink_params);
		nif->nif_default_llink_params = NULL;
		return ENOMEM;
	}
	memcpy(nif->nif_default_llink_params, init,
	    __builtin_offsetof(struct kern_nexus_netif_llink_init,
	    nli_qsets));
	for (i = 0; i < init->nli_num_qsets; i++) {
		qsinit[i] = init->nli_qsets[i];
	}
	nif->nif_default_llink_params->nli_qsets = qsinit;
	nif->nif_default_llink_params->nli_num_qsets = init->nli_num_qsets;
	return 0;
}

void
nx_netif_llink_config_free(struct nx_netif *nif)
{
	if (nif->nif_default_llink_params == NULL) {
		return;
	}
	sk_free_type_array_counted_by(struct kern_nexus_netif_llink_qset_init,
	    nif->nif_default_llink_params->nli_num_qsets,
	    nif->nif_default_llink_params->nli_qsets);

	sk_free_type(struct kern_nexus_netif_llink_init,
	    nif->nif_default_llink_params);
	nif->nif_default_llink_params = NULL;
}

static int
nx_netif_llink_ext_init_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	struct netif_stats *nifs = &NX_NETIF_PRIVATE(nx)->nif_stats;
	int err = 0;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_init != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_init != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0);
		err = nxnpi->nxnpi_qset_init(nxprov, nx, llink->nll_ctx,
		    qset->nqs_idx, qset->nqs_id, qset, &qset->nqs_ctx);
		if (err != 0) {
			STATS_INC(nifs, NETIF_STATS_LLINK_QSET_INIT_FAIL);
			SK_ERR("nx: %p, qset: %d, qset init err %d",
			    SK_KVA(nx), qset->nqs_idx, err);
			goto out;
		}
		qset->nqs_flags |= NETIF_QSET_FLAG_EXT_INITED;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, false, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_RXQ_INIT_FAIL);
				SK_ERR("nx: %p qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, true, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_TXQ_INIT_FAIL);
				SK_ERR("nx: %p qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
	}
out:
	if (err != 0) {
		nx_netif_llink_ext_fini_queues(nx, llink);
	}
	return err;
}

static void
nx_netif_llink_ext_fini_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_fini != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_fini != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		if ((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0) {
			continue;
		}
		nxnpi->nxnpi_qset_fini(nxprov, nx, qset->nqs_ctx);
		qset->nqs_flags &= ~NETIF_QSET_FLAG_EXT_INITED;
	}
}

int
nx_netif_llink_ext_init_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	return nx_netif_llink_ext_init_queues(nx, nif->nif_default_llink);
}

void
nx_netif_llink_ext_fini_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	nx_netif_llink_ext_fini_queues(nx, nif->nif_default_llink);
}