/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
#include <os/refcnt.h>
#include <sys/sdt.h>

#define NX_NETIF_TAG_QSET   "com.apple.skywalk.netif.qset"
static SKMEM_TAG_DEFINE(nx_netif_tag_qset, NX_NETIF_TAG_QSET);

#define NX_NETIF_TAG_LLINK_CFG   "com.apple.skywalk.netif.llink.cfg"
static SKMEM_TAG_DEFINE(nx_netif_tag_llink_cfg, NX_NETIF_TAG_LLINK_CFG);

LCK_ATTR_DECLARE(netif_llink_lock_attr, 0, 0);
static LCK_GRP_DECLARE(netif_llink_lock_group, "netif llink locks");

#if (DEVELOPMENT || DEBUG)
static TUNABLE(uint32_t, nx_netif_disable_llink, "sk_disable_llink", 0);
#endif /* (DEVELOPMENT || DEBUG) */
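
/*
 * Note: on DEVELOPMENT/DEBUG kernels the "sk_disable_llink" boot-arg
 * above disables logical link support entirely; nx_netif_llink_init()
 * checks it before setting anything up.
 */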

static struct netif_llink *nx_netif_llink_alloc(void);
static void nx_netif_llink_free(struct netif_llink **);
static struct netif_qset *nx_netif_qset_alloc(uint8_t, uint8_t);
static void nx_netif_qset_free(struct netif_qset **);
static void nx_netif_qset_setup_ifclassq(struct netif_llink *,
    struct netif_qset *);
static void nx_netif_qset_teardown_ifclassq(struct netif_qset *);
static void nx_netif_qset_init(struct netif_qset *, struct netif_llink *,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *);
static struct netif_qset *nx_netif_qset_create(struct netif_llink *,
    uint8_t, struct kern_nexus_netif_llink_qset_init *);
static void nx_netif_qset_destroy(struct netif_qset *);
static void nx_netif_llink_initialize(struct netif_llink *, struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_driver_queue_destroy(struct netif_queue *);
static void nx_netif_driver_queue_init(struct netif_qset *,
    struct netif_queue *, kern_packet_svc_class_t, bool);
static struct netif_llink *nx_netif_llink_create_locked(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_default_llink_add(struct nx_netif *);
static int nx_netif_llink_ext_init_queues(struct kern_nexus *,
    struct netif_llink *);
static void nx_netif_llink_ext_fini_queues(struct kern_nexus *,
    struct netif_llink *);

static uint32_t nx_netif_random_qset = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, random_qset,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_random_qset, 0,
    "pick a random qset");
#endif /* DEVELOPMENT || DEBUG */

/* retains a reference for the caller */
static struct netif_llink *
nx_netif_llink_alloc(void)
{
	struct netif_llink *llink;

	llink = sk_alloc_type(struct netif_llink, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_llink);
	os_ref_init(&llink->nll_refcnt, NULL);
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_retain(struct netif_llink *llink)
{
	os_ref_retain(&llink->nll_refcnt);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_free(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;
	struct netif_qset *qset, *tqset;

	VERIFY(llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	*pllink = NULL;
	SLIST_FOREACH_SAFE(qset, &llink->nll_qset_list, nqs_list, tqset) {
		SLIST_REMOVE(&llink->nll_qset_list, qset, netif_qset,
		    nqs_list);
		nx_netif_qset_destroy(qset);
	}
	if (llink->nll_ifcq != NULL) {
		ifclassq_release(&llink->nll_ifcq);
	}

	sk_free_type(struct netif_llink, llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_release(struct netif_llink **pllink)
{
	struct netif_llink *__single llink = *pllink;

	*pllink = NULL;
	if (os_ref_release(&llink->nll_refcnt) == 0) {
		nx_netif_llink_free(&llink);
	}
}
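
/*
 * Illustrative refcount lifecycle (a sketch only; 'ref' below is a
 * hypothetical second pointer to the same llink):
 *
 *	llink = nx_netif_llink_alloc();      refcnt == 1
 *	nx_netif_llink_retain(llink);        refcnt == 2, ref = llink
 *	nx_netif_llink_release(&ref);        refcnt == 1, ref set to NULL
 *	nx_netif_llink_release(&llink);      refcnt == 0, nx_netif_llink_free()
 *
 * nx_netif_llink_free() asserts that the llink has already been moved to
 * NETIF_LLINK_STATE_DESTROYED by nx_netif_llink_destroy_locked().
 */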

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_qset_alloc(uint8_t nrxqs, uint8_t ntxqs)
{
	struct netif_qset *qset;

	_CASSERT(sizeof(struct netif_queue) % sizeof(uint64_t) == 0);

	qset = sk_alloc_type_header_array(struct netif_qset, struct netif_queue,
	    nrxqs + ntxqs, Z_WAITOK | Z_NOFAIL, nx_netif_tag_qset);

	qset->nqs_num_queues = nrxqs + ntxqs;
	qset->nqs_num_rx_queues = nrxqs;
	qset->nqs_num_tx_queues = ntxqs;
	return qset;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_retain(struct netif_qset *qset)
{
	/*
	 * A logical link is immutable, i.e. queue sets can't be added to or
	 * removed from it. We rely on this property and simply acquire a
	 * refcnt on the logical link, which is the parent structure of a
	 * qset.
	 */
	nx_netif_llink_retain(qset->nqs_llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_release(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	struct netif_llink *__single llink = qset->nqs_llink;

	*pqset = NULL;
	nx_netif_llink_release(&llink);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_free(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	uint8_t i;

	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_RX_QUEUE(qset, i));
	}
	for (i = 0; i < qset->nqs_num_tx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_TX_QUEUE(qset, i));
	}
	if (qset->nqs_flags & NETIF_QSET_FLAG_AQM) {
		nx_netif_qset_teardown_ifclassq(qset);
	}
	qset->nqs_llink = NULL;
	sk_free_type_header_array(struct netif_qset, struct netif_queue,
	    qset->nqs_num_rx_queues + qset->nqs_num_tx_queues, qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_destroy(struct netif_qset *qset)
{
	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	nx_netif_qset_free(&qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_setup_ifclassq(struct netif_llink *llink,
    struct netif_qset *qset)
{
	uint8_t flags = 0;

	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(llink->nll_ifcq != NULL);

	ifclassq_retain(llink->nll_ifcq);
	qset->nqs_ifcq = llink->nll_ifcq;

	if ((qset->nqs_flags & NETIF_QSET_FLAG_LOW_LATENCY) != 0) {
		flags |= IF_CLASSQ_LOW_LATENCY;
	}
	if ((qset->nqs_flags & NETIF_QSET_FLAG_DEFAULT) != 0) {
		flags |= IF_DEFAULT_GRP;
	}

	ifclassq_setup_group(qset->nqs_ifcq, qset->nqs_idx, flags);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_teardown_ifclassq(struct netif_qset *qset)
{
	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(qset->nqs_ifcq != NULL);

	qset->nqs_flags &= ~NETIF_QSET_FLAG_AQM;
	ifclassq_release(&qset->nqs_ifcq);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_init(struct netif_qset *qset, struct netif_llink *llink,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *qset_init)
{
#define _NETIF_QSET_MAX_TXQS    4
	kern_packet_svc_class_t svc[_NETIF_QSET_MAX_TXQS] =
	{KPKT_SC_BE, KPKT_SC_BK, KPKT_SC_VI, KPKT_SC_VO};
	struct ifnet *ifp = llink->nll_nif->nif_ifp;
	uint8_t i;

	/*
	 * no need to retain a reference for llink, as the logical link is
	 * immutable and qsets are created and destroyed along with the
	 * logical link.
	 */
	qset->nqs_llink = llink;
	qset->nqs_id = NETIF_QSET_ID_ENCODE(llink->nll_link_id_internal, idx);
	qset->nqs_idx = idx;

	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
		qset->nqs_flags |= NETIF_QSET_FLAG_DEFAULT;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY) {
		qset->nqs_flags |= NETIF_QSET_FLAG_LOW_LATENCY;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_AQM) {
		qset->nqs_flags |= NETIF_QSET_FLAG_AQM;
		nx_netif_qset_setup_ifclassq(llink, qset);
	}

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_init(qset, NETIF_QSET_RX_QUEUE(qset, i),
		    KPKT_SC_UNSPEC, true);
	}

	if (ifp->if_output_sched_model == IFNET_SCHED_MODEL_DRIVER_MANAGED) {
		VERIFY(qset->nqs_num_tx_queues == _NETIF_QSET_MAX_TXQS);
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), svc[i], false);
		}
	} else {
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), KPKT_SC_UNSPEC, false);
		}
	}
}
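
/*
 * When the interface uses the driver-managed output scheduling model,
 * the qset's four TX queues map positionally onto the BE/BK/VI/VO
 * service classes above, presumably matching the four WMM access
 * categories (compare the NEXUS_NUM_WMM_QUEUES check in
 * nx_netif_validate_llink_config()). Otherwise every TX queue is
 * initialized with KPKT_SC_UNSPEC.
 */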

SK_NO_INLINE_ATTRIBUTE
static struct netif_qset *
nx_netif_qset_create(struct netif_llink *llink, uint8_t idx,
    struct kern_nexus_netif_llink_qset_init *qset_init)
{
	struct netif_qset *qset;

	qset = nx_netif_qset_alloc(qset_init->nlqi_num_rxqs,
	    qset_init->nlqi_num_txqs);
	nx_netif_qset_init(qset, llink, idx, qset_init);
	return qset;
}

static uint16_t
nx_netif_generate_internal_llink_id(struct nx_netif *nif)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;
	uint16_t id;

again:
	id = (uint16_t)(random() % 65536);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (__improbable(llink->nll_link_id_internal == id)) {
			break;
		}
	}
	if (__probable(llink == NULL && id != 0)) {
		return id;
	} else {
		STATS_INC(nifs, NETIF_STATS_LLINK_DUP_INT_ID_GENERATED);
		DTRACE_SKYWALK1(dup__llink__id__internal, uint16_t, id);
		goto again;
	}
}
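
/*
 * The internal id generated above is distinct from the provider-visible
 * nli_link_id: it is a nonzero 16-bit value unique within the interface,
 * gets packed into every qset id via NETIF_QSET_ID_ENCODE(), and is what
 * nx_netif_find_qset() later matches against a decoded qset hint.
 */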

static void
nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	uint8_t i;
	struct ifnet *ifp = nif->nif_ifp;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);

	llink->nll_nif = nif;
	llink->nll_link_id = llink_init->nli_link_id;
	if (llink_init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) {
		llink->nll_flags |= NETIF_LLINK_FLAG_DEFAULT;
	}
	llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif);
	llink->nll_ctx = llink_init->nli_ctx;
	SLIST_INIT(&llink->nll_qset_list);

	for (i = 0; i < llink_init->nli_num_qsets; i++) {
		if (llink->nll_ifcq == NULL &&
		    (llink_init->nli_qsets[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_AQM)) {
			if (NETIF_DEFAULT_LLINK(llink)) {
				/* use the default AQM queues from ifnet */
				ifclassq_retain(ifp->if_snd);
				llink->nll_ifcq = ifp->if_snd;
			} else {
				llink->nll_ifcq = ifclassq_alloc();
				dlil_ifclassq_setup(ifp, llink->nll_ifcq);
			}
		}

		struct netif_qset *qset = nx_netif_qset_create(llink, i,
		    &llink_init->nli_qsets[i]);
		/* nx_netif_qset_create retains a reference for the caller */
		SLIST_INSERT_HEAD(&llink->nll_qset_list, qset, nqs_list);
		if (NETIF_DEFAULT_QSET(qset)) {
			/* there can only be one default queue set */
			VERIFY(llink->nll_default_qset == NULL);
			llink->nll_default_qset = qset;
		}
	}
	llink->nll_qset_cnt = llink_init->nli_num_qsets;
	/* there should be a default queue set */
	VERIFY(llink->nll_default_qset != NULL);
	llink->nll_state = NETIF_LLINK_STATE_INIT;
}

static void
nx_netif_driver_queue_destroy(struct netif_queue *drvq)
{
	VERIFY(drvq->nq_qset->nqs_llink->nll_state ==
	    NETIF_LLINK_STATE_DESTROYED);

	lck_mtx_lock(&drvq->nq_lock);
	VERIFY(KPKTQ_EMPTY(&drvq->nq_pktq));
	lck_mtx_unlock(&drvq->nq_lock);

	drvq->nq_qset = NULL;
	lck_mtx_destroy(&drvq->nq_lock, &netif_llink_lock_group);
}

static void
nx_netif_driver_queue_init(struct netif_qset *qset,
    struct netif_queue *drvq, kern_packet_svc_class_t svc, bool is_rx)
{
	lck_mtx_init(&drvq->nq_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_mtx_lock(&drvq->nq_lock);
	KPKTQ_INIT(&drvq->nq_pktq);
	lck_mtx_unlock(&drvq->nq_lock);

	/*
	 * no need to retain a reference for qset, as the queue set is
	 * immutable and the driver queue is part of the queue set data
	 * structure.
	 */
	drvq->nq_qset = qset;
	drvq->nq_svc = svc;
	if (is_rx) {
		drvq->nq_flags |= NETIF_QUEUE_IS_RX;
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_llink *
nx_netif_llink_create_locked(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	llink = nx_netif_llink_alloc();
	nx_netif_llink_initialize(llink, nif, llink_init);
	/* nx_netif_llink_alloc retains a reference for the caller */
	STAILQ_INSERT_TAIL(&nif->nif_llink_list, llink, nll_link);
	nif->nif_llink_cnt++;
	STATS_INC(nifs, NETIF_STATS_LLINK_ADD);
	if (NETIF_DEFAULT_LLINK(llink)) {
		/* there can only be one default logical link */
		VERIFY(nif->nif_default_llink == NULL);
	}
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_destroy_locked(struct nx_netif *nif, struct netif_llink **pllink)
{
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	(*pllink)->nll_state = NETIF_LLINK_STATE_DESTROYED;
	STAILQ_REMOVE(&nif->nif_llink_list, *pllink, netif_llink, nll_link);
	nif->nif_llink_cnt--;
	STATS_INC(nifs, NETIF_STATS_LLINK_REMOVE);
	nx_netif_llink_release(pllink);
}

int
nx_netif_llink_add(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init, struct netif_llink **pllink)
{
	int err;
	struct netif_llink *__single llink;
	struct netif_stats *nifs = &nif->nif_stats;

	*pllink = NULL;
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	/* ensure logical_link_id is unique */
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_init->nli_link_id) {
			SK_ERR("duplicate llink_id 0x%llx",
			    llink_init->nli_link_id);
			STATS_INC(nifs, NETIF_STATS_LLINK_DUP_ID_GIVEN);
			DTRACE_SKYWALK1(dup__id__given, uint64_t,
			    llink_init->nli_link_id);
			lck_rw_unlock_exclusive(&nif->nif_llink_lock);
			return EINVAL;
		}
	}
	llink = nx_netif_llink_create_locked(nif, llink_init);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	VERIFY(llink != NULL);
	err = nx_netif_llink_ext_init_queues(nif->nif_nx, llink);
	if (err != 0) {
		lck_rw_lock_exclusive(&nif->nif_llink_lock);
		nx_netif_llink_destroy_locked(nif, &llink);
		lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	} else {
		/* increment reference for the caller */
		nx_netif_llink_retain(llink);
		*pllink = llink;
	}
	return err;
}
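
/*
 * Note the lock discipline shared by nx_netif_llink_add() and
 * nx_netif_llink_remove(): the llink is linked/unlinked under the
 * exclusive nif_llink_lock, but the provider's queue init/fini callbacks
 * run with the lock dropped, presumably because they may block. On init
 * failure the half-constructed llink is torn down again under the lock.
 */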

int
nx_netif_llink_remove(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t llink_id)
{
	bool llink_found = false;
	struct netif_llink *__single llink;
	struct netif_stats *nifs = &nif->nif_stats;

	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_id) {
			llink_found = true;
			break;
		}
	}
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	if (!llink_found) {
		STATS_INC(nifs, NETIF_STATS_LLINK_NOT_FOUND_REMOVE);
		DTRACE_SKYWALK1(not__found, uint64_t, llink_id);
		return ENOENT;
	}
	nx_netif_llink_ext_fini_queues(nif->nif_nx, llink);
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	nx_netif_llink_destroy_locked(nif, &llink);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	return 0;
}

static void
nx_netif_default_llink_add(struct nx_netif *nif)
{
	struct kern_nexus_netif_llink_init llink_init, *pllink_init;
	struct kern_nexus_netif_llink_qset_init qset;
	struct ifnet *ifp = nif->nif_ifp;
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	VERIFY(SKYWALK_NATIVE(ifp));

	llink_init.nli_flags = KERN_NEXUS_NET_LLINK_DEFAULT;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		VERIFY(nif->nif_default_llink_params != NULL);
		pllink_init = nif->nif_default_llink_params;
	} else {
		struct nexus_adapter *devna =
		    nx_port_get_na(nif->nif_nx, NEXUS_PORT_NET_IF_DEV);

		llink_init.nli_link_id = NETIF_LLINK_ID_DEFAULT;
		qset.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT;
		/*
		 * For the legacy mode of operation we assume that AQM
		 * is not needed on a low-latency interface.
		 */
		if (NETIF_IS_LOW_LATENCY(nif)) {
			qset.nlqi_flags |=
			    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY;
		} else {
			qset.nlqi_flags |= KERN_NEXUS_NET_LLINK_QSET_AQM;
		}
		qset.nlqi_num_rxqs =
		    (uint8_t)na_get_nrings(devna, NR_RX);
		qset.nlqi_num_txqs =
		    (uint8_t)na_get_nrings(devna, NR_TX);
		llink_init.nli_num_qsets = 1;
		llink_init.nli_qsets = &qset;
		llink_init.nli_ctx = NULL;
		pllink_init = &llink_init;
	}
	llink = nx_netif_llink_create_locked(nif, pllink_init);
	/* there can only be one default logical link */
	VERIFY(nif->nif_default_llink == NULL);
	nx_netif_llink_retain(llink);
	/* obtain a reference for the default logical link pointer */
	nif->nif_default_llink = llink;
}
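
/*
 * Illustrative shape of the implicit legacy configuration built above for
 * a non-llink provider (values are examples only): one default llink
 * carrying a single default qset whose RX/TX queue counts mirror the
 * device adapter's ring counts, e.g. nlqi_num_rxqs == 4 and
 * nlqi_num_txqs == 4 on a 4-ring device, with AQM enabled unless the
 * interface is marked low-latency.
 */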

static void
nx_netif_default_llink_remove(struct nx_netif *nif)
{
	struct netif_llink *__single llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	ASSERT(nif->nif_default_llink != NULL);
	ASSERT(nif->nif_llink_cnt == 1);
	llink = nif->nif_default_llink;
	nx_netif_llink_release(&nif->nif_default_llink);
	ASSERT(nif->nif_default_llink == NULL);
	nx_netif_llink_destroy_locked(nif, &llink);
}

int
netif_qset_enqueue(struct netif_qset *qset, bool chain,
    struct __kern_packet *pkt_chain, struct __kern_packet *tail, uint32_t cnt,
    uint32_t bytes, uint32_t *flowctl, uint32_t *dropped)
{
	struct __kern_packet *pkt = pkt_chain;
	struct __kern_packet *next;
	struct netif_stats *nifs = &qset->nqs_llink->nll_nif->nif_stats;
	struct ifnet *ifp = qset->nqs_ifcq->ifcq_ifp;
	uint32_t c = 0, b = 0;
	boolean_t pkt_drop = FALSE;
	int err = 0;

	ASSERT(dropped != NULL && flowctl != NULL);

	/* drop packets if logical link state is destroyed */
	if (qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED) {
		pp_free_packet_chain(pkt_chain, (int *)dropped);
		STATS_ADD(nifs, NETIF_STATS_LLINK_TX_DROP_BAD_STATE, *dropped);
		return ENXIO;
	}

	if (chain) {
		/* all packets in this chain should have the same SVC */
		netif_ifp_inc_traffic_class_out_pkt(ifp, pkt_chain->pkt_svc_class,
		    cnt, bytes);

		err = ifnet_enqueue_pkt_chain(ifp, pkt_chain, tail, cnt,
		    bytes, false, &pkt_drop);
		if (__improbable(err != 0)) {
			if (err == EQFULL || err == EQSUSPENDED) {
				(*flowctl)++;
			}
			if (pkt_drop) {
				*dropped = cnt;
			}
		}
	} else {
		while (pkt != NULL) {
			next = pkt->pkt_nextpkt;
			pkt->pkt_nextpkt = NULL;
			c++;
			b += pkt->pkt_length;

			netif_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
			    1, pkt->pkt_length);

			err = ifnet_enqueue_pkt(ifp, pkt, false, &pkt_drop);
			if (__improbable(err != 0)) {
				if (err == EQFULL || err == EQSUSPENDED) {
					(*flowctl)++;
				}
				if (pkt_drop) {
					(*dropped)++;
				}
			}

			pkt = next;
		}
		VERIFY(c == cnt);
		VERIFY(b == bytes);
	}

	if (*flowctl > 0) {
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_QFULL, *flowctl);
		err = EIO;
	}
	if (*dropped > 0) {
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_DROPPED, *dropped);
		STATS_ADD(nifs, NETIF_STATS_DROP, *dropped);
		err = EIO;
	}
	return err;
}
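
/*
 * Return semantics for netif_qset_enqueue(): ENXIO means the llink was
 * already destroyed and the whole chain was freed; EIO means at least one
 * packet hit flow control or was dropped, with counts reported through
 * *flowctl and *dropped; other errnos from ifnet_enqueue_pkt(_chain)()
 * may also propagate. A zero return means everything was enqueued.
 */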

struct netif_qset *
nx_netif_get_default_qset_noref(struct nx_netif *nif)
{
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	qset = nif->nif_default_llink->nll_default_qset;
	return qset;
}

static void
nx_netif_qset_hint_decode(uint64_t hint,
    uint16_t *link_id_internal, uint16_t *qset_idx)
{
	/* The top 32 bits are unused for now */
	*link_id_internal = (uint16_t)((0xffff0000 & hint) >> 16);
	*qset_idx = (uint16_t)((0x0000ffff & hint));
}
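
/*
 * Hint layout, as decoded above (upper 32 bits currently unused):
 *
 *	63            32 31            16 15             0
 *	+---------------+----------------+----------------+
 *	|    unused     | llink internal |    qset idx    |
 *	|               |       id       |                |
 *	+---------------+----------------+----------------+
 */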

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_get_default_qset(struct nx_netif *nif)
{
	struct netif_qset *qset;

	qset = nif->nif_default_llink->nll_default_qset;
	nx_netif_qset_retain(qset);
	return qset;
}

/*
 * Find the qset based on the qset hint. Fall back to the default qset
 * if not found. The random qset is used for experimentation.
 */
struct netif_qset *
nx_netif_find_qset(struct nx_netif *nif, uint64_t hint)
{
	uint16_t ll_id_internal, qset_idx;
	struct netif_llink *llink;
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;
	int i, j, random_id;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	if (!NX_LLINK_PROV(nif->nif_nx) ||
	    (nx_netif_random_qset == 0 && hint == 0)) {
		goto def_qset;
	}
	if (nx_netif_random_qset == 0) {
		nx_netif_qset_hint_decode(hint, &ll_id_internal, &qset_idx);
	} else {
		ll_id_internal = 0;
		qset_idx = 0;
	}
	lck_rw_lock_shared(&nif->nif_llink_lock);
	i = 0;
	random_id = random();
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % nif->nif_llink_cnt) == i) {
			break;
		} else if (llink->nll_link_id_internal == ll_id_internal) {
			break;
		}
		i++;
	}
	if (llink == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	j = 0;
	random_id = random();
	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % llink->nll_qset_cnt) == j) {
			break;
		} else if (qset->nqs_idx == qset_idx) {
			break;
		}
		j++;
	}
	if (qset == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	nx_netif_qset_retain(qset);
	STATS_INC(nifs, NETIF_STATS_LLINK_NONDEF_QSET_USED);
	lck_rw_unlock_shared(&nif->nif_llink_lock);
	if (nx_netif_random_qset != 0) {
		SK_DF(SK_VERB_LLINK, "%s: random qset: qset %p, ifcq %p, "
		    "llink_idx %d, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, i, j);

		DTRACE_SKYWALK5(random__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, int, i, int, j);
	} else {
		SK_DF(SK_VERB_LLINK, "%s: non-default qset: qset %p, ifcq %p, "
		    "ll_id_internal 0x%x, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, ll_id_internal, qset_idx);

		DTRACE_SKYWALK5(nondef__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, uint16_t, ll_id_internal,
		    uint16_t, qset_idx);
	}
	return qset;

def_qset:
	STATS_INC(nifs, NETIF_STATS_LLINK_DEF_QSET_USED);
	qset = nx_netif_get_default_qset(nif);
	ASSERT(qset != NULL);

	SK_DF(SK_VERB_LLINK, "%s: default qset: qset %p, ifcq %p, hint %llx",
	    if_name(nif->nif_ifp), qset, qset->nqs_ifcq, hint);

	DTRACE_SKYWALK4(def__qset, struct nx_netif *, nif, struct netif_qset *,
	    qset, struct ifclassq *, qset->nqs_ifcq, uint64_t, hint);
	return qset;
}

void
nx_netif_llink_init(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

#if (DEVELOPMENT || DEBUG)
	if (__improbable(nx_netif_disable_llink != 0)) {
		SK_DF(SK_VERB_LLINK, "%s: llink is disabled",
		    if_name(nif->nif_ifp));
		return;
	}
#endif /* (DEVELOPMENT || DEBUG) */

	if (!SKYWALK_NATIVE(ifp)) {
		SK_DF(SK_VERB_LLINK,
		    "%s: llink is supported on native devices only",
		    if_name(ifp));
		return;
	}
	ASSERT(!NETIF_LLINK_ENABLED(nif));
	lck_rw_init(&nif->nif_llink_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	STAILQ_INIT(&nif->nif_llink_list);
	nif->nif_llink_cnt = 0;
	nx_netif_default_llink_add(nif);
	nif->nif_flags |= NETIF_FLAG_LLINK_INITIALIZED;

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	SK_DF(SK_VERB_LLINK, "%s: llink initialized", if_name(ifp));
}

void
nx_netif_llink_fini(struct nx_netif *nif)
{
	if (!NETIF_LLINK_ENABLED(nif)) {
		SK_DF(SK_VERB_LLINK, "%s: llink not initialized",
		    if_name(nif->nif_ifp));
		return;
	}

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	nif->nif_flags &= ~NETIF_FLAG_LLINK_INITIALIZED;
	nx_netif_default_llink_remove(nif);
	ASSERT(nif->nif_llink_cnt == 0);
	ASSERT(STAILQ_EMPTY(&nif->nif_llink_list));

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	nx_netif_llink_config_free(nif);
	lck_rw_destroy(&nif->nif_llink_lock, &netif_llink_lock_group);
	SK_DF(SK_VERB_LLINK, "%s: llink uninitialization done",
	    if_name(nif->nif_ifp));
}

int
nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *init,
    bool default_llink)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	bool has_default_qset = false;
	bool default_llink_flag;
	uint8_t i;

	default_llink_flag =
	    ((init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) != 0);

	if (default_llink != default_llink_flag) {
		SK_ERR("default llink flag incompatible: default_llink(%s), "
		    "default_llink_flag(%s)",
		    default_llink ? "true" : "false",
		    default_llink_flag ? "true" : "false");
		return EINVAL;
	}
	if (init->nli_num_qsets == 0) {
		SK_ERR("num qsets is zero");
		return EINVAL;
	}
	if ((qsinit = init->nli_qsets) == NULL) {
		SK_ERR("qsets is NULL");
		return EINVAL;
	}
	for (i = 0; i < init->nli_num_qsets; i++) {
		if (qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
			if (has_default_qset) {
				SK_ERR("has more than one default qset");
				return EINVAL;
			}
			if (qsinit[i].nlqi_num_rxqs == 0) {
				SK_ERR("num_rxqs == 0");
				return EINVAL;
			}
			has_default_qset = true;
		}
		if (qsinit[i].nlqi_num_txqs == 0) {
			SK_ERR("num_txqs == 0");
			return EINVAL;
		}
		if ((qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_WMM_MODE) &&
		    (qsinit[i].nlqi_num_txqs != NEXUS_NUM_WMM_QUEUES)) {
			SK_ERR("invalid wmm mode");
			return EINVAL;
		}
	}
	return 0;
}

int
nx_netif_default_llink_config(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *init)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	int i, err;

	err = nx_netif_validate_llink_config(init, true);
	if (err != 0) {
		return err;
	}
	nif->nif_default_llink_params = sk_alloc_type(
		struct kern_nexus_netif_llink_init,
		Z_WAITOK | Z_NOFAIL, nx_netif_tag_llink_cfg);

	qsinit = sk_alloc_type_array(struct kern_nexus_netif_llink_qset_init,
	    init->nli_num_qsets, Z_WAITOK, nx_netif_tag_llink_cfg);
	if (qsinit == NULL) {
		SK_ERR("failed to alloc kern_nexus_netif_llink_qset_init");
		sk_free_type(struct kern_nexus_netif_llink_init,
		    nif->nif_default_llink_params);
		nif->nif_default_llink_params = NULL;
		return ENOMEM;
	}
	memcpy(nif->nif_default_llink_params, init,
	    __builtin_offsetof(struct kern_nexus_netif_llink_init,
	    nli_qsets));
	for (i = 0; i < init->nli_num_qsets; i++) {
		qsinit[i] = init->nli_qsets[i];
	}
	nif->nif_default_llink_params->nli_qsets = qsinit;
	nif->nif_default_llink_params->nli_num_qsets = init->nli_num_qsets;
	return 0;
}
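
/*
 * The copy above is deliberately two-part: the memcpy duplicates only the
 * fixed header of kern_nexus_netif_llink_init (everything up to, but not
 * including, nli_qsets), and the qset array is then deep-copied into a
 * separately allocated buffer. The caller's init structure therefore does
 * not need to outlive this call.
 */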

void
nx_netif_llink_config_free(struct nx_netif *nif)
{
	if (nif->nif_default_llink_params == NULL) {
		return;
	}
	sk_free_type_array_counted_by(struct kern_nexus_netif_llink_qset_init,
	    nif->nif_default_llink_params->nli_num_qsets,
	    nif->nif_default_llink_params->nli_qsets);

	sk_free_type(struct kern_nexus_netif_llink_init,
	    nif->nif_default_llink_params);
	nif->nif_default_llink_params = NULL;
}

static int
nx_netif_llink_ext_init_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	struct netif_stats *nifs = &NX_NETIF_PRIVATE(nx)->nif_stats;
	int err = 0;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_init != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_init != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0);
		err = nxnpi->nxnpi_qset_init(nxprov, nx, llink->nll_ctx,
		    qset->nqs_idx, qset->nqs_id, qset, &qset->nqs_ctx);
		if (err != 0) {
			STATS_INC(nifs, NETIF_STATS_LLINK_QSET_INIT_FAIL);
			SK_ERR("nx: 0x%llx, qset: %d, qset init err %d",
			    SK_KVA(nx), qset->nqs_idx, err);
			goto out;
		}
		qset->nqs_flags |= NETIF_QSET_FLAG_EXT_INITED;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, false, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_RXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, true, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_TXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
	}
out:
	if (err != 0) {
		nx_netif_llink_ext_fini_queues(nx, llink);
	}
	return err;
}
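
/*
 * Partial failure is unwound by reusing the fini path: the
 * NETIF_QSET_FLAG_EXT_INITED / NETIF_QUEUE_EXT_INITED flags record exactly
 * which provider callbacks succeeded, so nx_netif_llink_ext_fini_queues()
 * below can skip anything that was never initialized.
 */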

static void
nx_netif_llink_ext_fini_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_fini != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_fini != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		if ((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0) {
			continue;
		}
		nxnpi->nxnpi_qset_fini(nxprov, nx, qset->nqs_ctx);
		qset->nqs_flags &= ~NETIF_QSET_FLAG_EXT_INITED;
	}
}

int
nx_netif_llink_ext_init_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	return nx_netif_llink_ext_init_queues(nx, nif->nif_default_llink);
}

void
nx_netif_llink_ext_fini_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	nx_netif_llink_ext_fini_queues(nx, nif->nif_default_llink);
}