/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
#include <os/refcnt.h>
#include <sys/sdt.h>

#define NX_NETIF_TAG_QSET   "com.apple.skywalk.netif.qset"
static SKMEM_TAG_DEFINE(nx_netif_tag_qset, NX_NETIF_TAG_QSET);

#define NX_NETIF_TAG_LLINK_CFG   "com.apple.skywalk.netif.llink.cfg"
static SKMEM_TAG_DEFINE(nx_netif_tag_llink_cfg, NX_NETIF_TAG_LLINK_CFG);

LCK_ATTR_DECLARE(netif_llink_lock_attr, 0, 0);
static LCK_GRP_DECLARE(netif_llink_lock_group, "netif llink locks");

#if (DEVELOPMENT || DEBUG)
static TUNABLE(uint32_t, nx_netif_disable_llink, "sk_disable_llink", 0);
#endif /* (DEVELOPMENT || DEBUG) */

static struct netif_llink *nx_netif_llink_alloc(void);
static void nx_netif_llink_free(struct netif_llink **);
static struct netif_qset *nx_netif_qset_alloc(uint8_t, uint8_t);
static void nx_netif_qset_free(struct netif_qset **);
static void nx_netif_qset_setup_ifclassq(struct netif_llink *,
    struct netif_qset *);
static void nx_netif_qset_teardown_ifclassq(struct netif_qset *);
static void nx_netif_qset_init(struct netif_qset *, struct netif_llink *,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *);
static struct netif_qset *nx_netif_qset_create(struct netif_llink *,
    uint8_t, struct kern_nexus_netif_llink_qset_init *);
static void nx_netif_qset_destroy(struct netif_qset *);
static void nx_netif_llink_initialize(struct netif_llink *, struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_driver_queue_destroy(struct netif_queue *);
static void nx_netif_driver_queue_init(struct netif_qset *,
    struct netif_queue *, kern_packet_svc_class_t, bool);
static struct netif_llink *nx_netif_llink_create_locked(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_default_llink_add(struct nx_netif *);
static int netif_qset_enqueue_single(struct netif_qset *,
    struct __kern_packet *, uint32_t *, uint32_t *);
static int nx_netif_llink_ext_init_queues(struct kern_nexus *,
    struct netif_llink *);
static void nx_netif_llink_ext_fini_queues(struct kern_nexus *,
    struct netif_llink *);

static uint32_t nx_netif_random_qset = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, random_qset,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_random_qset, 0,
    "pick a random qset");
#endif /* DEVELOPMENT || DEBUG */

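/*
 * Object model (as implemented in this file): an nx_netif instance owns a
 * list of logical links (struct netif_llink); each logical link owns a list
 * of queue sets (struct netif_qset), and each queue set embeds its RX and TX
 * driver queues (struct netif_queue) in a single header-array allocation.
 * Logical links are immutable once created: queue sets are created and
 * destroyed only together with their parent logical link, which is why the
 * qset retain/release routines below simply forward to the llink refcount.
 */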
/* retains a reference for the caller */
static struct netif_llink *
nx_netif_llink_alloc(void)
{
	struct netif_llink *llink;

	llink = sk_alloc_type(struct netif_llink, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_llink);
	os_ref_init(&llink->nll_refcnt, NULL);
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_retain(struct netif_llink *llink)
{
	os_ref_retain(&llink->nll_refcnt);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_free(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;
	struct netif_qset *qset, *tqset;

	VERIFY(llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	*pllink = NULL;
	SLIST_FOREACH_SAFE(qset, &llink->nll_qset_list, nqs_list, tqset) {
		SLIST_REMOVE(&llink->nll_qset_list, qset, netif_qset,
		    nqs_list);
		nx_netif_qset_destroy(qset);
	}
	if (llink->nll_ifcq != NULL) {
		ifclassq_release(&llink->nll_ifcq);
	}

	sk_free_type(struct netif_llink, llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_release(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;

	*pllink = NULL;
	if (os_ref_release(&llink->nll_refcnt) == 0) {
		nx_netif_llink_free(&llink);
	}
}

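/*
 * A queue set is allocated as one contiguous block: the netif_qset header is
 * followed by an array of (nrxqs + ntxqs) netif_queue structures, which the
 * NETIF_QSET_RX_QUEUE()/NETIF_QSET_TX_QUEUE() accessors index into (the
 * exact RX/TX ordering within the array is defined by those macros in the
 * netif headers, not here). The _CASSERT below guards the 64-bit alignment
 * of each array element.
 */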
/* retains a reference for the caller */
static struct netif_qset *
nx_netif_qset_alloc(uint8_t nrxqs, uint8_t ntxqs)
{
	struct netif_qset *qset;

	_CASSERT(sizeof(struct netif_queue) % sizeof(uint64_t) == 0);

	qset = sk_alloc_type_header_array(struct netif_qset, struct netif_queue,
	    nrxqs + ntxqs, Z_WAITOK | Z_NOFAIL, nx_netif_tag_qset);

	qset->nqs_num_rx_queues = nrxqs;
	qset->nqs_num_tx_queues = ntxqs;
	return qset;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_retain(struct netif_qset *qset)
{
	/*
	 * The logical link is immutable, i.e. queue sets can't be added to
	 * or removed from it. We rely on this property to simply acquire a
	 * refcnt on the logical link, which is the parent structure of a
	 * qset.
	 */
	nx_netif_llink_retain(qset->nqs_llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_release(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	struct netif_llink *llink = qset->nqs_llink;

	*pqset = NULL;
	nx_netif_llink_release(&llink);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_free(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	uint8_t i;

	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_RX_QUEUE(qset, i));
	}
	for (i = 0; i < qset->nqs_num_tx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_TX_QUEUE(qset, i));
	}
	if (qset->nqs_flags & NETIF_QSET_FLAG_AQM) {
		nx_netif_qset_teardown_ifclassq(qset);
	}
	qset->nqs_llink = NULL;
	sk_free_type_header_array(struct netif_qset, struct netif_queue,
	    qset->nqs_num_rx_queues + qset->nqs_num_tx_queues, qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_destroy(struct netif_qset *qset)
{
	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	nx_netif_qset_free(&qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_setup_ifclassq(struct netif_llink *llink,
    struct netif_qset *qset)
{
	uint8_t flags = 0;

	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(llink->nll_ifcq != NULL);

	ifclassq_retain(llink->nll_ifcq);
	qset->nqs_ifcq = llink->nll_ifcq;

	if ((qset->nqs_flags & NETIF_QSET_FLAG_LOW_LATENCY) != 0) {
		flags |= IF_CLASSQ_LOW_LATENCY;
	}
	if ((qset->nqs_flags & NETIF_QSET_FLAG_DEFAULT) != 0) {
		flags |= IF_DEFAULT_GRP;
	}

	ifclassq_setup_group(qset->nqs_ifcq, qset->nqs_idx, flags);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_teardown_ifclassq(struct netif_qset *qset)
{
	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(qset->nqs_ifcq != NULL);

	qset->nqs_flags &= ~NETIF_QSET_FLAG_AQM;
	ifclassq_release(&qset->nqs_ifcq);
}

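/*
 * Initializes a queue set in place: encodes its externally visible ID from
 * the parent link's internal ID and the qset index, latches the DEFAULT,
 * LOW_LATENCY and AQM flags from the provider-supplied init parameters
 * (setting up the ifclassq group when AQM is requested), and initializes the
 * embedded RX and TX driver queues. When the interface uses the
 * driver-managed output scheduling model, the four TX queues are mapped to
 * the BE/BK/VI/VO service classes; otherwise the service class is left
 * unspecified.
 */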
SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_init(struct netif_qset *qset, struct netif_llink *llink,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *qset_init)
{
#define _NETIF_QSET_MAX_TXQS    4
	kern_packet_svc_class_t svc[_NETIF_QSET_MAX_TXQS] =
	{KPKT_SC_BE, KPKT_SC_BK, KPKT_SC_VI, KPKT_SC_VO};
	struct ifnet *ifp = llink->nll_nif->nif_ifp;
	uint8_t i;

	/*
	 * No need to retain a reference for llink, as the logical link is
	 * immutable and qsets are created and destroyed along with the
	 * logical link.
	 */
	qset->nqs_llink = llink;
	qset->nqs_id = NETIF_QSET_ID_ENCODE(llink->nll_link_id_internal, idx);
	qset->nqs_idx = idx;

	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
		qset->nqs_flags |= NETIF_QSET_FLAG_DEFAULT;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY) {
		qset->nqs_flags |= NETIF_QSET_FLAG_LOW_LATENCY;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_AQM) {
		qset->nqs_flags |= NETIF_QSET_FLAG_AQM;
		nx_netif_qset_setup_ifclassq(llink, qset);
	}

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_init(qset, NETIF_QSET_RX_QUEUE(qset, i),
		    KPKT_SC_UNSPEC, true);
	}

	if (ifp->if_output_sched_model == IFNET_SCHED_MODEL_DRIVER_MANAGED) {
		VERIFY(qset->nqs_num_tx_queues == _NETIF_QSET_MAX_TXQS);
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), svc[i], false);
		}
	} else {
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), KPKT_SC_UNSPEC, false);
		}
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_qset *
nx_netif_qset_create(struct netif_llink *llink, uint8_t idx,
    struct kern_nexus_netif_llink_qset_init *qset_init)
{
	struct netif_qset *qset;

	qset = nx_netif_qset_alloc(qset_init->nlqi_num_rxqs,
	    qset_init->nlqi_num_txqs);
	nx_netif_qset_init(qset, llink, idx, qset_init);
	return qset;
}

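/*
 * Picks a random, nonzero 16-bit internal ID that no other logical link on
 * this netif is using. A collision (or a zero draw) bumps a stat and
 * retries; since the ID space holds 65535 usable values and links are few,
 * the loop terminates quickly with overwhelming probability.
 */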
static uint16_t
nx_netif_generate_internal_llink_id(struct nx_netif *nif)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;
	uint16_t id;

again:
	id = (uint16_t)(random() % 65536);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (__improbable(llink->nll_link_id_internal == id)) {
			break;
		}
	}
	if (__probable(llink == NULL && id != 0)) {
		return id;
	} else {
		STATS_INC(nifs, NETIF_STATS_LLINK_DUP_INT_ID_GENERATED);
		DTRACE_SKYWALK1(dup__llink__id__internal, uint16_t, id);
		goto again;
	}
}

static void
nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	uint8_t i;
	struct ifnet *ifp = nif->nif_ifp;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);

	llink->nll_nif = nif;
	llink->nll_link_id = llink_init->nli_link_id;
	if (llink_init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) {
		llink->nll_flags |= NETIF_LLINK_FLAG_DEFAULT;
	}
	llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif);
	llink->nll_ctx = llink_init->nli_ctx;
	SLIST_INIT(&llink->nll_qset_list);

	for (i = 0; i < llink_init->nli_num_qsets; i++) {
		if (llink->nll_ifcq == NULL &&
		    (llink_init->nli_qsets[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_AQM)) {
			if (NETIF_DEFAULT_LLINK(llink)) {
				/* use the default AQM queues from ifnet */
				ifclassq_retain(ifp->if_snd);
				llink->nll_ifcq = ifp->if_snd;
			} else {
				llink->nll_ifcq = ifclassq_alloc();
				dlil_ifclassq_setup(ifp, llink->nll_ifcq);
			}
		}

		struct netif_qset *qset = nx_netif_qset_create(llink, i,
		    &llink_init->nli_qsets[i]);
		/* nx_netif_qset_create retains a reference for the caller */
		SLIST_INSERT_HEAD(&llink->nll_qset_list, qset, nqs_list);
		if (NETIF_DEFAULT_QSET(qset)) {
			/* there can only be one default queue set */
			VERIFY(llink->nll_default_qset == NULL);
			llink->nll_default_qset = qset;
		}
	}
	llink->nll_qset_cnt = llink_init->nli_num_qsets;
	/* there should be a default queue set */
	VERIFY(llink->nll_default_qset != NULL);
	llink->nll_state = NETIF_LLINK_STATE_INIT;
}

static void
nx_netif_driver_queue_destroy(struct netif_queue *drvq)
{
	VERIFY(drvq->nq_qset->nqs_llink->nll_state ==
	    NETIF_LLINK_STATE_DESTROYED);

	lck_mtx_lock(&drvq->nq_lock);
	VERIFY(KPKTQ_EMPTY(&drvq->nq_pktq));
	lck_mtx_unlock(&drvq->nq_lock);

	drvq->nq_qset = NULL;
	lck_mtx_destroy(&drvq->nq_lock, &netif_llink_lock_group);
}

static void
nx_netif_driver_queue_init(struct netif_qset *qset,
    struct netif_queue *drvq, kern_packet_svc_class_t svc, bool is_rx)
{
	lck_mtx_init(&drvq->nq_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_mtx_lock(&drvq->nq_lock);
	KPKTQ_INIT(&drvq->nq_pktq);
	lck_mtx_unlock(&drvq->nq_lock);

	/*
	 * No need to retain a reference for qset, as the queue set is
	 * immutable and the driver queue is part of the queue set data
	 * structure.
	 */
	drvq->nq_qset = qset;
	drvq->nq_svc = svc;
	if (is_rx) {
		drvq->nq_flags |= NETIF_QUEUE_IS_RX;
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_llink *
nx_netif_llink_create_locked(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	llink = nx_netif_llink_alloc();
	nx_netif_llink_initialize(llink, nif, llink_init);
	/* nx_netif_llink_alloc retains a reference for the caller */
	STAILQ_INSERT_TAIL(&nif->nif_llink_list, llink, nll_link);
	nif->nif_llink_cnt++;
	STATS_INC(nifs, NETIF_STATS_LLINK_ADD);
	if (NETIF_DEFAULT_LLINK(llink)) {
		/* there can only be one default logical link */
		VERIFY(nif->nif_default_llink == NULL);
	}
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_destroy_locked(struct nx_netif *nif, struct netif_llink **pllink)
{
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	(*pllink)->nll_state = NETIF_LLINK_STATE_DESTROYED;
	STAILQ_REMOVE(&nif->nif_llink_list, *pllink, netif_llink, nll_link);
	nif->nif_llink_cnt--;
	STATS_INC(nifs, NETIF_STATS_LLINK_REMOVE);
	nx_netif_llink_release(pllink);
}

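/*
 * External llink add: the link ID must be unique. The llink is created and
 * published on the list under the exclusive llink lock, but the provider's
 * queue-init callbacks run after the lock is dropped; if they fail, the
 * half-constructed llink is torn down again. On success an extra reference
 * is retained for the caller via *pllink.
 */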
int
nx_netif_llink_add(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init, struct netif_llink **pllink)
{
	int err;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	*pllink = NULL;
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	/* ensure logical_link_id is unique */
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_init->nli_link_id) {
			SK_ERR("duplicate llink_id 0x%llx",
			    llink_init->nli_link_id);
			STATS_INC(nifs, NETIF_STATS_LLINK_DUP_ID_GIVEN);
			DTRACE_SKYWALK1(dup__id__given, uint64_t,
			    llink_init->nli_link_id);
			lck_rw_unlock_exclusive(&nif->nif_llink_lock);
			return EINVAL;
		}
	}
	llink = nx_netif_llink_create_locked(nif, llink_init);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	VERIFY(llink != NULL);
	err = nx_netif_llink_ext_init_queues(nif->nif_nx, llink);
	if (err != 0) {
		lck_rw_lock_exclusive(&nif->nif_llink_lock);
		nx_netif_llink_destroy_locked(nif, &llink);
		lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	} else {
		/* increment reference for the caller */
		nx_netif_llink_retain(llink);
		*pllink = llink;
	}
	return err;
}

int
nx_netif_llink_remove(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t llink_id)
{
	bool llink_found = false;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_id) {
			llink_found = true;
			break;
		}
	}
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	if (!llink_found) {
		STATS_INC(nifs, NETIF_STATS_LLINK_NOT_FOUND_REMOVE);
		DTRACE_SKYWALK1(not__found, uint64_t, llink_id);
		return ENOENT;
	}
	nx_netif_llink_ext_fini_queues(nif->nif_nx, llink);
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	nx_netif_llink_destroy_locked(nif, &llink);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	return 0;
}

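/*
 * Creates the one default logical link for the interface. When the nexus
 * provider supplies logical-link parameters (NX_LLINK_PROV), those are used
 * verbatim; otherwise a single default queue set is synthesized on the
 * stack, with its RX/TX queue counts taken from the device adapter's ring
 * counts and AQM enabled unless the interface is low-latency.
 */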
static void
nx_netif_default_llink_add(struct nx_netif *nif)
{
	struct kern_nexus_netif_llink_init llink_init, *pllink_init;
	struct kern_nexus_netif_llink_qset_init qset;
	struct ifnet *ifp = nif->nif_ifp;
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	VERIFY(SKYWALK_NATIVE(ifp));

	llink_init.nli_flags = KERN_NEXUS_NET_LLINK_DEFAULT;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		VERIFY(nif->nif_default_llink_params != NULL);
		pllink_init = nif->nif_default_llink_params;
	} else {
		struct nexus_adapter *devna =
		    nx_port_get_na(nif->nif_nx, NEXUS_PORT_NET_IF_DEV);

		llink_init.nli_link_id = NETIF_LLINK_ID_DEFAULT;
		qset.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT;
		/*
		 * For the legacy mode of operation we assume that AQM is
		 * not needed on a low-latency interface.
		 */
		if (NETIF_IS_LOW_LATENCY(nif)) {
			qset.nlqi_flags |=
			    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY;
		} else {
			qset.nlqi_flags |= KERN_NEXUS_NET_LLINK_QSET_AQM;
		}
		qset.nlqi_num_rxqs =
		    (uint8_t)na_get_nrings(devna, NR_RX);
		qset.nlqi_num_txqs =
		    (uint8_t)na_get_nrings(devna, NR_TX);
		llink_init.nli_num_qsets = 1;
		llink_init.nli_qsets = &qset;
		llink_init.nli_ctx = NULL;
		pllink_init = &llink_init;
	}
	llink = nx_netif_llink_create_locked(nif, pllink_init);
	/* there can only be one default logical link */
	VERIFY(nif->nif_default_llink == NULL);
	/* obtain a reference for the default logical link pointer */
	nx_netif_llink_retain(llink);
	nif->nif_default_llink = llink;
}

static void
nx_netif_default_llink_remove(struct nx_netif *nif)
{
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	ASSERT(nif->nif_default_llink != NULL);
	ASSERT(nif->nif_llink_cnt == 1);
	llink = nif->nif_default_llink;
	nx_netif_llink_release(&nif->nif_default_llink);
	ASSERT(nif->nif_default_llink == NULL);
	nx_netif_llink_destroy_locked(nif, &llink);
}

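/*
 * AQM enqueue path for a single packet: charge the packet to the
 * interface's per-traffic-class output counters, then hand it to the queue
 * set's ifclassq via ifnet_enqueue_ifcq_pkt(). EQFULL/EQSUSPENDED are
 * counted as flow-control events and a dropped packet bumps the drop
 * counter; both are reported back to the caller through the optional
 * flowctl/dropped out-parameters.
 */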
static int
netif_qset_enqueue_single(struct netif_qset *qset, struct __kern_packet *pkt,
    uint32_t *flowctl, uint32_t *dropped)
{
	struct ifnet *ifp = qset->nqs_ifcq->ifcq_ifp;
	boolean_t pkt_drop = FALSE;
	int err;

	/*
	 * We are using the first 4 bytes of flow_id as the AQM flow
	 * identifier.
	 */
	ASSERT(!uuid_is_null(pkt->pkt_flow_id));
	netif_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
	    1, pkt->pkt_length);

	if (__improbable(pkt->pkt_trace_id != 0)) {
		KDBG(SK_KTRACE_PKT_TX_FSW | DBG_FUNC_END, pkt->pkt_trace_id);
		KDBG(SK_KTRACE_PKT_TX_AQM | DBG_FUNC_START, pkt->pkt_trace_id);
	}

	/* only the native path is supported */
	ASSERT((pkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
	ASSERT(pkt->pkt_mbuf == NULL);

	err = ifnet_enqueue_ifcq_pkt(ifp, qset->nqs_ifcq, pkt, false,
	    &pkt_drop);
	if (__improbable(err != 0)) {
		if ((err == EQFULL || err == EQSUSPENDED) && flowctl != NULL) {
			(*flowctl)++;
		}
		if (pkt_drop && dropped != NULL) {
			(*dropped)++;
		}
	}
	return err;
}

int
netif_qset_enqueue(struct netif_qset *qset, struct __kern_packet *pkt_chain,
    struct __kern_packet *tail, uint32_t cnt, uint32_t bytes, uint32_t *flowctl,
    uint32_t *dropped)
{
#pragma unused(tail)
	struct __kern_packet *pkt = pkt_chain;
	struct __kern_packet *next;
	struct netif_stats *nifs = &qset->nqs_llink->nll_nif->nif_stats;
	uint32_t c = 0, b = 0, drop_cnt = 0, flowctl_cnt = 0;
	int err = 0;

	/* drop the packets if the logical link has been destroyed */
	if (qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED) {
		pp_free_packet_chain(pkt_chain, (int *)&drop_cnt);
		STATS_ADD(nifs, NETIF_STATS_LLINK_TX_DROP_BAD_STATE, drop_cnt);
		if (dropped != NULL) {
			*dropped = drop_cnt;
		}
		return ENXIO;
	}

	/* we don't support chains for now */
	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;
		c++;
		b += pkt->pkt_length;

		(void) netif_qset_enqueue_single(qset, pkt, &flowctl_cnt,
		    &drop_cnt);
		pkt = next;
	}
	VERIFY(c == cnt);
	VERIFY(b == bytes);
	if (flowctl != NULL && flowctl_cnt > 0) {
		*flowctl = flowctl_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_QFULL, flowctl_cnt);
		err = EIO;
	}
	if (dropped != NULL && drop_cnt > 0) {
		*dropped = drop_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_DROPPED, drop_cnt);
		err = EIO;
	}
	return err;
}

struct netif_qset *
nx_netif_get_default_qset_noref(struct nx_netif *nif)
{
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	qset = nif->nif_default_llink->nll_default_qset;
	return qset;
}

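/*
 * A qset hint packs the parent link's internal ID into bits 16-31 and the
 * qset index into bits 0-15 of the low 32 bits (the top 32 bits are
 * currently unused). For example, a hint of 0x00ab0002 selects qset index 2
 * on the logical link whose internal ID is 0xab.
 */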
static void
nx_netif_qset_hint_decode(uint64_t hint,
    uint16_t *link_id_internal, uint16_t *qset_idx)
{
	/* The top 32 bits are unused for now */
	*link_id_internal = (uint16_t)((0xffff0000 & hint) >> 16);
	*qset_idx = (uint16_t)(0x0000ffff & hint);
}

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_get_default_qset(struct nx_netif *nif)
{
	struct netif_qset *qset;

	qset = nif->nif_default_llink->nll_default_qset;
	nx_netif_qset_retain(qset);
	return qset;
}

/*
 * Find the qset based on the qset hint. Fall back to the default qset
 * if not found. The random qset is used for experimentation.
 */
struct netif_qset *
nx_netif_find_qset(struct nx_netif *nif, uint64_t hint)
{
	uint16_t ll_id_internal, qset_idx;
	struct netif_llink *llink;
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;
	int i, j, random_id;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	if (!NX_LLINK_PROV(nif->nif_nx) ||
	    (nx_netif_random_qset == 0 && hint == 0)) {
		goto def_qset;
	}
	if (nx_netif_random_qset == 0) {
		nx_netif_qset_hint_decode(hint, &ll_id_internal, &qset_idx);
	} else {
		ll_id_internal = 0;
		qset_idx = 0;
	}
	lck_rw_lock_shared(&nif->nif_llink_lock);
	i = 0;
	random_id = random();
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % nif->nif_llink_cnt) == i) {
			break;
		} else if (llink->nll_link_id_internal == ll_id_internal) {
			break;
		}
		i++;
	}
	if (llink == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	j = 0;
	random_id = random();
	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % llink->nll_qset_cnt) == j) {
			break;
		} else if (qset->nqs_idx == qset_idx) {
			break;
		}
		j++;
	}
	if (qset == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	nx_netif_qset_retain(qset);
	STATS_INC(nifs, NETIF_STATS_LLINK_NONDEF_QSET_USED);
	lck_rw_unlock_shared(&nif->nif_llink_lock);
	if (nx_netif_random_qset != 0) {
		SK_DF(SK_VERB_LLINK, "%s: random qset: qset %p, ifcq %p, "
		    "llink_idx %d, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, i, j);

		DTRACE_SKYWALK5(random__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, int, i, int, j);
	} else {
		SK_DF(SK_VERB_LLINK, "%s: non-default qset: qset %p, ifcq %p, "
		    "ll_id_internal 0x%x, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, ll_id_internal, qset_idx);

		DTRACE_SKYWALK5(nondef__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, uint16_t, ll_id_internal,
		    uint16_t, qset_idx);
	}
	return qset;

def_qset:
	STATS_INC(nifs, NETIF_STATS_LLINK_DEF_QSET_USED);
	qset = nx_netif_get_default_qset(nif);
	ASSERT(qset != NULL);

	SK_DF(SK_VERB_LLINK, "%s: default qset: qset %p, ifcq %p, hint %llx",
	    if_name(nif->nif_ifp), qset, qset->nqs_ifcq, hint);

	DTRACE_SKYWALK4(def__qset, struct nx_netif *, nif, struct netif_qset *,
	    qset, struct ifclassq *, qset->nqs_ifcq, uint64_t, hint);
	return qset;
}

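/*
 * Logical-link support is set up once per netif attach: it requires a
 * Skywalk-native device (and can be disabled on DEVELOPMENT/DEBUG kernels
 * via the sk_disable_llink boot-arg). Initialization creates the llink list
 * and the default logical link, then marks the nexus llink-enabled;
 * nx_netif_llink_fini below undoes this in reverse order.
 */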
void
nx_netif_llink_init(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

#if (DEVELOPMENT || DEBUG)
	if (__improbable(nx_netif_disable_llink != 0)) {
		SK_DF(SK_VERB_LLINK, "%s: llink is disabled",
		    if_name(nif->nif_ifp));
		return;
	}
#endif /* (DEVELOPMENT || DEBUG) */

	if (!SKYWALK_NATIVE(ifp)) {
		SK_DF(SK_VERB_LLINK,
		    "%s: llink is supported on native devices only",
		    if_name(ifp));
		return;
	}
	ASSERT(!NETIF_LLINK_ENABLED(nif));
	lck_rw_init(&nif->nif_llink_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	STAILQ_INIT(&nif->nif_llink_list);
	nif->nif_llink_cnt = 0;
	nx_netif_default_llink_add(nif);
	nif->nif_flags |= NETIF_FLAG_LLINK_INITIALIZED;

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	SK_DF(SK_VERB_LLINK, "%s: llink initialized", if_name(ifp));
}

void
nx_netif_llink_fini(struct nx_netif *nif)
{
	if (!NETIF_LLINK_ENABLED(nif)) {
		SK_DF(SK_VERB_LLINK, "%s: llink not initialized",
		    if_name(nif->nif_ifp));
		return;
	}

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	nif->nif_flags &= ~NETIF_FLAG_LLINK_INITIALIZED;
	nx_netif_default_llink_remove(nif);
	ASSERT(nif->nif_llink_cnt == 0);
	ASSERT(STAILQ_EMPTY(&nif->nif_llink_list));

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	nx_netif_llink_config_free(nif);
	lck_rw_destroy(&nif->nif_llink_lock, &netif_llink_lock_group);
	SK_DF(SK_VERB_LLINK, "%s: llink uninitialization done",
	    if_name(nif->nif_ifp));
}

int
nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *init,
    bool default_llink)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	bool has_default_qset = false;
	bool default_llink_flag;
	uint8_t i;

	default_llink_flag =
	    ((init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) != 0);

	if (default_llink != default_llink_flag) {
		SK_ERR("default llink flag incompatible: default_llink(%s), "
		    "default_llink_flag(%s)",
		    default_llink ? "true" : "false",
		    default_llink_flag ? "true" : "false");
		return EINVAL;
	}
	if (init->nli_num_qsets == 0) {
		SK_ERR("num qsets is zero");
		return EINVAL;
	}
	if ((qsinit = init->nli_qsets) == NULL) {
		SK_ERR("qsets is NULL");
		return EINVAL;
	}
	for (i = 0; i < init->nli_num_qsets; i++) {
		if (qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
			if (has_default_qset) {
				SK_ERR("more than one default qset");
				return EINVAL;
			}
			if (qsinit[i].nlqi_num_rxqs == 0) {
				SK_ERR("num_rxqs == 0");
				return EINVAL;
			}
			has_default_qset = true;
		}
		if (qsinit[i].nlqi_num_txqs == 0) {
			SK_ERR("num_txqs == 0");
			return EINVAL;
		}
		if ((qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_WMM_MODE) &&
		    (qsinit[i].nlqi_num_txqs != NEXUS_NUM_WMM_QUEUES)) {
			SK_ERR("invalid wmm mode");
			return EINVAL;
		}
	}
	return 0;
}

int
nx_netif_default_llink_config(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *init)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	int i, err;

	err = nx_netif_validate_llink_config(init, true);
	if (err != 0) {
		return err;
	}
	nif->nif_default_llink_params = sk_alloc_type(
		struct kern_nexus_netif_llink_init,
		Z_WAITOK | Z_NOFAIL, nx_netif_tag_llink_cfg);

	qsinit = sk_alloc_type_array(struct kern_nexus_netif_llink_qset_init,
	    init->nli_num_qsets, Z_WAITOK, nx_netif_tag_llink_cfg);
	if (qsinit == NULL) {
		SK_ERR("failed to alloc kern_nexus_netif_llink_qset_init");
		sk_free_type(struct kern_nexus_netif_llink_init,
		    nif->nif_default_llink_params);
		nif->nif_default_llink_params = NULL;
		return ENOMEM;
	}
	memcpy(nif->nif_default_llink_params, init,
	    __builtin_offsetof(struct kern_nexus_netif_llink_init,
	    nli_qsets));
	for (i = 0; i < init->nli_num_qsets; i++) {
		qsinit[i] = init->nli_qsets[i];
	}
	nif->nif_default_llink_params->nli_qsets = qsinit;
	return 0;
}

void
nx_netif_llink_config_free(struct nx_netif *nif)
{
	if (nif->nif_default_llink_params == NULL) {
		return;
	}
	sk_free_type_array(struct kern_nexus_netif_llink_qset_init,
	    nif->nif_default_llink_params->nli_num_qsets,
	    nif->nif_default_llink_params->nli_qsets);
	nif->nif_default_llink_params->nli_qsets = NULL;

	sk_free_type(struct kern_nexus_netif_llink_init,
	    nif->nif_default_llink_params);
	nif->nif_default_llink_params = NULL;
}

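/*
 * Drives the provider's qset_init and queue_init callbacks for every queue
 * set on the logical link, handing back the provider contexts recorded in
 * nqs_ctx/nq_ctx. The EXT_INITED flags track exactly which objects were
 * initialized, so a mid-way failure unwinds cleanly through
 * nx_netif_llink_ext_fini_queues(), which only finalizes what was set up.
 */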
static int
nx_netif_llink_ext_init_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	struct netif_stats *nifs = &NX_NETIF_PRIVATE(nx)->nif_stats;
	int err = 0;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_init != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_init != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0);
		err = nxnpi->nxnpi_qset_init(nxprov, nx, llink->nll_ctx,
		    qset->nqs_idx, qset->nqs_id, qset, &qset->nqs_ctx);
		if (err != 0) {
			STATS_INC(nifs, NETIF_STATS_LLINK_QSET_INIT_FAIL);
			SK_ERR("nx: 0x%llx, qset: %d, qset init err %d",
			    SK_KVA(nx), qset->nqs_idx, err);
			goto out;
		}
		qset->nqs_flags |= NETIF_QSET_FLAG_EXT_INITED;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, false, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_RXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, true, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_TXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
	}
out:
	if (err != 0) {
		nx_netif_llink_ext_fini_queues(nx, llink);
	}
	return err;
}

static void
nx_netif_llink_ext_fini_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_fini != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_fini != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		if ((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0) {
			continue;
		}
		nxnpi->nxnpi_qset_fini(nxprov, nx, qset->nqs_ctx);
		qset->nqs_flags &= ~NETIF_QSET_FLAG_EXT_INITED;
	}
}

int
nx_netif_llink_ext_init_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	return nx_netif_llink_ext_init_queues(nx, nif->nif_default_llink);
}

void
nx_netif_llink_ext_fini_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	nx_netif_llink_ext_fini_queues(nx, nif->nif_default_llink);
}