/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
#include <os/refcnt.h>
#include <sys/sdt.h>

#define NX_NETIF_TAG_QSET   "com.apple.skywalk.netif.qset"
static SKMEM_TAG_DEFINE(nx_netif_tag_qset, NX_NETIF_TAG_QSET);

#define NX_NETIF_TAG_LLINK_CFG   "com.apple.skywalk.netif.llink.cfg"
static SKMEM_TAG_DEFINE(nx_netif_tag_llink_cfg, NX_NETIF_TAG_LLINK_CFG);

LCK_ATTR_DECLARE(netif_llink_lock_attr, 0, 0);
static LCK_GRP_DECLARE(netif_llink_lock_group, "netif llink locks");

#if (DEVELOPMENT || DEBUG)
static TUNABLE(uint32_t, nx_netif_disable_llink, "sk_disable_llink", 0);
#endif /* (DEVELOPMENT || DEBUG) */

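/*
 * Note (DEVELOPMENT/DEBUG builds only): booting with the "sk_disable_llink"
 * boot-arg set to a non-zero value makes nx_netif_llink_init() below return
 * early, so the netif comes up without any logical link state.
 */
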
static struct netif_llink *nx_netif_llink_alloc(void);
static void nx_netif_llink_free(struct netif_llink **);
static struct netif_qset *nx_netif_qset_alloc(uint8_t, uint8_t);
static void nx_netif_qset_free(struct netif_qset **);
static void nx_netif_qset_setup_ifclassq(struct netif_llink *,
    struct netif_qset *);
static void nx_netif_qset_teardown_ifclassq(struct netif_qset *);
static void nx_netif_qset_init(struct netif_qset *, struct netif_llink *,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *);
static struct netif_qset *nx_netif_qset_create(struct netif_llink *,
    uint8_t, struct kern_nexus_netif_llink_qset_init *);
static void nx_netif_qset_destroy(struct netif_qset *);
static void nx_netif_llink_initialize(struct netif_llink *, struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_driver_queue_destroy(struct netif_queue *);
static void nx_netif_driver_queue_init(struct netif_qset *,
    struct netif_queue *, kern_packet_svc_class_t, bool);
static struct netif_llink *nx_netif_llink_create_locked(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_default_llink_add(struct nx_netif *);
static int netif_qset_enqueue_single(struct netif_qset *,
    struct __kern_packet *, uint32_t *, uint32_t *);
static int nx_netif_llink_ext_init_queues(struct kern_nexus *,
    struct netif_llink *);
static void nx_netif_llink_ext_fini_queues(struct kern_nexus *,
    struct netif_llink *);

static uint32_t nx_netif_random_qset = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, random_qset,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_random_qset, 0,
    "pick a random qset");
#endif /* DEVELOPMENT || DEBUG */

/* retains a reference for the caller */
static struct netif_llink *
nx_netif_llink_alloc(void)
{
	struct netif_llink *llink;

	llink = sk_alloc_type(struct netif_llink, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_llink);
	os_ref_init(&llink->nll_refcnt, NULL);
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_retain(struct netif_llink *llink)
{
	os_ref_retain(&llink->nll_refcnt);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_free(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;
	struct netif_qset *qset, *tqset;

	VERIFY(llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	*pllink = NULL;
	SLIST_FOREACH_SAFE(qset, &llink->nll_qset_list, nqs_list, tqset) {
		SLIST_REMOVE(&llink->nll_qset_list, qset, netif_qset,
		    nqs_list);
		nx_netif_qset_destroy(qset);
	}
	if (llink->nll_ifcq != NULL) {
		ifclassq_release(&llink->nll_ifcq);
	}

	sk_free_type(struct netif_llink, llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_release(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;

	*pllink = NULL;
	if (os_ref_release(&llink->nll_refcnt) == 0) {
		nx_netif_llink_free(&llink);
	}
}

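/*
 * Reference-counting sketch (derived from the routines above):
 * nx_netif_llink_alloc() hands back a link whose refcnt is 1, owned by the
 * caller; every nx_netif_llink_retain() must be paired with a
 * nx_netif_llink_release(), and the release that drops the count to zero
 * frees the link, its queue sets and its ifclassq reference via
 * nx_netif_llink_free().
 */
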
/* retains a reference for the caller */
static struct netif_qset *
nx_netif_qset_alloc(uint8_t nrxqs, uint8_t ntxqs)
{
	struct netif_qset *qset;

	_CASSERT(sizeof(struct netif_queue) % sizeof(uint64_t) == 0);

	qset = sk_alloc_type_header_array(struct netif_qset, struct netif_queue,
	    nrxqs + ntxqs, Z_WAITOK | Z_NOFAIL, nx_netif_tag_qset);

	qset->nqs_num_rx_queues = nrxqs;
	qset->nqs_num_tx_queues = ntxqs;
	return qset;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_retain(struct netif_qset *qset)
{
	/*
	 * A logical link is immutable, i.e. queue sets can't be added to or
	 * removed from it. We rely on this property and simply acquire a
	 * refcnt on the logical link, which is the parent structure of a
	 * qset.
	 */
	nx_netif_llink_retain(qset->nqs_llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_release(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	struct netif_llink *llink = qset->nqs_llink;

	*pqset = NULL;
	nx_netif_llink_release(&llink);
}

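/*
 * Note: a qset carries no refcnt of its own. nx_netif_qset_retain() and
 * nx_netif_qset_release() pin and unpin the owning logical link, so holding
 * a qset pointer keeps the entire llink, including all of its queue sets,
 * alive.
 */
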
SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_free(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	uint8_t i;

	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_RX_QUEUE(qset, i));
	}
	for (i = 0; i < qset->nqs_num_tx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_TX_QUEUE(qset, i));
	}
	if (qset->nqs_flags & NETIF_QSET_FLAG_AQM) {
		nx_netif_qset_teardown_ifclassq(qset);
	}
	qset->nqs_llink = NULL;
	sk_free_type_header_array(struct netif_qset, struct netif_queue,
	    qset->nqs_num_rx_queues + qset->nqs_num_tx_queues, qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_destroy(struct netif_qset *qset)
{
	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	nx_netif_qset_free(&qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_setup_ifclassq(struct netif_llink *llink,
    struct netif_qset *qset)
{
	uint8_t flags = 0;

	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(llink->nll_ifcq != NULL);

	ifclassq_retain(llink->nll_ifcq);
	qset->nqs_ifcq = llink->nll_ifcq;

	if ((qset->nqs_flags & NETIF_QSET_FLAG_LOW_LATENCY) != 0) {
		flags |= IF_CLASSQ_LOW_LATENCY;
	}
	if ((qset->nqs_flags & NETIF_QSET_FLAG_DEFAULT) != 0) {
		flags |= IF_DEFAULT_GRP;
	}

	ifclassq_setup_group(qset->nqs_ifcq, qset->nqs_idx, flags);
}

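/*
 * All AQM-capable qsets of a logical link share the llink's single
 * ifclassq; each qset registers itself as a group keyed by its qset index
 * (nqs_idx), carrying the low-latency and/or default-group flags derived
 * from its own qset flags.
 */
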
SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_teardown_ifclassq(struct netif_qset *qset)
{
	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);
	ASSERT(qset->nqs_ifcq != NULL);

	qset->nqs_flags &= ~NETIF_QSET_FLAG_AQM;
	ifclassq_release(&qset->nqs_ifcq);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_init(struct netif_qset *qset, struct netif_llink *llink,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *qset_init)
{
#define _NETIF_QSET_MAX_TXQS    4
	kern_packet_svc_class_t svc[_NETIF_QSET_MAX_TXQS] =
	{KPKT_SC_VO, KPKT_SC_VI, KPKT_SC_BE, KPKT_SC_BK};
	struct ifnet *ifp = llink->nll_nif->nif_ifp;
	uint8_t i;

	/*
	 * no need to retain a reference for llink, as the logical link is
	 * immutable and qsets are created and destroyed along with the
	 * logical link.
	 */
	qset->nqs_llink = llink;
	qset->nqs_id = NETIF_QSET_ID_ENCODE(llink->nll_link_id_internal, idx);
	qset->nqs_idx = idx;

	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
		qset->nqs_flags |= NETIF_QSET_FLAG_DEFAULT;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY) {
		qset->nqs_flags |= NETIF_QSET_FLAG_LOW_LATENCY;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_AQM) {
		qset->nqs_flags |= NETIF_QSET_FLAG_AQM;
		nx_netif_qset_setup_ifclassq(llink, qset);
	}

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_init(qset, NETIF_QSET_RX_QUEUE(qset, i),
		    KPKT_SC_UNSPEC, true);
	}

	/*
	 * TODO:
	 * Could be more flexible here to allow an arbitrary number of queues.
	 */
	if (qset->nqs_num_tx_queues > 1) {
		VERIFY(qset->nqs_num_tx_queues == _NETIF_QSET_MAX_TXQS);
		VERIFY(ifp->if_output_sched_model ==
		    IFNET_SCHED_MODEL_DRIVER_MANAGED);
		for (i = 0; i < _NETIF_QSET_MAX_TXQS; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), svc[i], false);
		}
	} else {
		nx_netif_driver_queue_init(qset, NETIF_QSET_TX_QUEUE(qset, 0),
		    KPKT_SC_UNSPEC, false);
	}
}

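/*
 * TX queue to service-class mapping in nx_netif_qset_init(): a multi-queue
 * set must have exactly _NETIF_QSET_MAX_TXQS (4) TX queues, bound in order
 * to KPKT_SC_VO, KPKT_SC_VI, KPKT_SC_BE and KPKT_SC_BK under the
 * driver-managed scheduling model; a single TX queue (and every RX queue)
 * is left unclassified as KPKT_SC_UNSPEC.
 */
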
SK_NO_INLINE_ATTRIBUTE
static struct netif_qset *
nx_netif_qset_create(struct netif_llink *llink, uint8_t idx,
    struct kern_nexus_netif_llink_qset_init *qset_init)
{
	struct netif_qset *qset;

	qset = nx_netif_qset_alloc(qset_init->nlqi_num_rxqs,
	    qset_init->nlqi_num_txqs);
	nx_netif_qset_init(qset, llink, idx, qset_init);
	return qset;
}

static uint16_t
nx_netif_generate_internal_llink_id(struct nx_netif *nif)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;
	uint16_t id;

again:
	id = (uint16_t)(random() % 65536);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (__improbable(llink->nll_link_id_internal == id)) {
			break;
		}
	}
	if (__probable(llink == NULL && id != 0)) {
		return id;
	} else {
		STATS_INC(nifs, NETIF_STATS_LLINK_DUP_INT_ID_GENERATED);
		DTRACE_SKYWALK1(dup__llink__id__internal, uint16_t, id);
		goto again;
	}
}

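/*
 * The internal id is a random 16-bit value that must be non-zero and unique
 * across the interface's logical links; a collision (or a zero draw) bumps
 * NETIF_STATS_LLINK_DUP_INT_ID_GENERATED and retries, so zero is
 * effectively reserved as "no internal id".
 */
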
static void
nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	uint8_t i;
	struct ifnet *ifp = nif->nif_ifp;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);

	llink->nll_nif = nif;
	llink->nll_link_id = llink_init->nli_link_id;
	if (llink_init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) {
		llink->nll_flags |= NETIF_LLINK_FLAG_DEFAULT;
	}
	llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif);
	llink->nll_ctx = llink_init->nli_ctx;
	SLIST_INIT(&llink->nll_qset_list);

	for (i = 0; i < llink_init->nli_num_qsets; i++) {
		if (llink->nll_ifcq == NULL &&
		    (llink_init->nli_qsets[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_AQM)) {
			if (NETIF_DEFAULT_LLINK(llink)) {
				/* use the default AQM queues from ifnet */
				ifclassq_retain(ifp->if_snd);
				llink->nll_ifcq = ifp->if_snd;
			} else {
				llink->nll_ifcq = ifclassq_alloc();
				dlil_ifclassq_setup(ifp, llink->nll_ifcq);
			}
		}

		struct netif_qset *qset = nx_netif_qset_create(llink, i,
		    &llink_init->nli_qsets[i]);
		/* nx_netif_qset_create retains a reference for the caller */
		SLIST_INSERT_HEAD(&llink->nll_qset_list, qset, nqs_list);
		if (NETIF_DEFAULT_QSET(qset)) {
			/* there can only be one default queue set */
			VERIFY(llink->nll_default_qset == NULL);
			llink->nll_default_qset = qset;
		}
	}
	llink->nll_qset_cnt = llink_init->nli_num_qsets;
	/* there should be a default queue set */
	VERIFY(llink->nll_default_qset != NULL);
	llink->nll_state = NETIF_LLINK_STATE_INIT;
}

static void
nx_netif_driver_queue_destroy(struct netif_queue *drvq)
{
	VERIFY(drvq->nq_qset->nqs_llink->nll_state ==
	    NETIF_LLINK_STATE_DESTROYED);

	lck_mtx_lock(&drvq->nq_lock);
	VERIFY(KPKTQ_EMPTY(&drvq->nq_pktq));
	lck_mtx_unlock(&drvq->nq_lock);

	drvq->nq_qset = NULL;
	lck_mtx_destroy(&drvq->nq_lock, &netif_llink_lock_group);
}

static void
nx_netif_driver_queue_init(struct netif_qset *qset,
    struct netif_queue *drvq, kern_packet_svc_class_t svc, bool is_rx)
{
	lck_mtx_init(&drvq->nq_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_mtx_lock(&drvq->nq_lock);
	KPKTQ_INIT(&drvq->nq_pktq);
	lck_mtx_unlock(&drvq->nq_lock);

	/*
	 * no need to retain a reference for qset, as the queue set is
	 * immutable and the driver queue is part of the queue set data
	 * structure.
	 */
	drvq->nq_qset = qset;
	drvq->nq_svc = svc;
	if (is_rx) {
		drvq->nq_flags |= NETIF_QUEUE_IS_RX;
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_llink *
nx_netif_llink_create_locked(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	llink = nx_netif_llink_alloc();
	nx_netif_llink_initialize(llink, nif, llink_init);
	/* nx_netif_llink_alloc retains a reference for the caller */
	STAILQ_INSERT_TAIL(&nif->nif_llink_list, llink, nll_link);
	nif->nif_llink_cnt++;
	STATS_INC(nifs, NETIF_STATS_LLINK_ADD);
	if (NETIF_DEFAULT_LLINK(llink)) {
		/* there can only be one default logical link */
		VERIFY(nif->nif_default_llink == NULL);
	}
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_destroy_locked(struct nx_netif *nif, struct netif_llink **pllink)
{
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	(*pllink)->nll_state = NETIF_LLINK_STATE_DESTROYED;
	STAILQ_REMOVE(&nif->nif_llink_list, *pllink, netif_llink, nll_link);
	nif->nif_llink_cnt--;
	STATS_INC(nifs, NETIF_STATS_LLINK_REMOVE);
	nx_netif_llink_release(pllink);
}

int
nx_netif_llink_add(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init, struct netif_llink **pllink)
{
	int err;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	*pllink = NULL;
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	/* ensure logical_link_id is unique */
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_init->nli_link_id) {
			SK_ERR("duplicate llink_id 0x%llx",
			    llink_init->nli_link_id);
			STATS_INC(nifs, NETIF_STATS_LLINK_DUP_ID_GIVEN);
			DTRACE_SKYWALK1(dup__id__given, uint64_t,
			    llink_init->nli_link_id);
			lck_rw_unlock_exclusive(&nif->nif_llink_lock);
			return EINVAL;
		}
	}
	llink = nx_netif_llink_create_locked(nif, llink_init);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	VERIFY(llink != NULL);
	err = nx_netif_llink_ext_init_queues(nif->nif_nx, llink);
	if (err != 0) {
		lck_rw_lock_exclusive(&nif->nif_llink_lock);
		nx_netif_llink_destroy_locked(nif, &llink);
		lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	} else {
		/* increment reference for the caller */
		nx_netif_llink_retain(llink);
		*pllink = llink;
	}
	return err;
}

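/*
 * Usage sketch (hypothetical caller, not part of this file): a provider
 * adding a logical link owns the reference returned via *pllink and must
 * drop it once it is done with the link:
 *
 *	struct netif_llink *llink;
 *	int err = nx_netif_llink_add(nif, &llink_init, &llink);
 *	if (err == 0) {
 *		... use llink ...
 *		nx_netif_llink_release(&llink);
 *	}
 */
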
int
nx_netif_llink_remove(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t llink_id)
{
	bool llink_found = false;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_id) {
			llink_found = true;
			break;
		}
	}
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	if (!llink_found) {
		STATS_INC(nifs, NETIF_STATS_LLINK_NOT_FOUND_REMOVE);
		DTRACE_SKYWALK1(not__found, uint64_t, llink_id);
		return ENOENT;
	}
	nx_netif_llink_ext_fini_queues(nif->nif_nx, llink);
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	nx_netif_llink_destroy_locked(nif, &llink);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	return 0;
}

static void
nx_netif_default_llink_add(struct nx_netif *nif)
{
	struct kern_nexus_netif_llink_init llink_init, *pllink_init;
	struct kern_nexus_netif_llink_qset_init qset;
	struct ifnet *ifp = nif->nif_ifp;
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	VERIFY(SKYWALK_NATIVE(ifp));

	llink_init.nli_flags = KERN_NEXUS_NET_LLINK_DEFAULT;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		VERIFY(nif->nif_default_llink_params != NULL);
		pllink_init = nif->nif_default_llink_params;
	} else {
		struct nexus_adapter *devna =
		    nx_port_get_na(nif->nif_nx, NEXUS_PORT_NET_IF_DEV);

		llink_init.nli_link_id = NETIF_LLINK_ID_DEFAULT;
		qset.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT;
		/*
		 * For the legacy mode of operation we assume that AQM
		 * is not needed on a low-latency interface.
		 */
		if (NETIF_IS_LOW_LATENCY(nif)) {
			qset.nlqi_flags |=
			    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY;
		} else {
			qset.nlqi_flags |= KERN_NEXUS_NET_LLINK_QSET_AQM;
		}
		qset.nlqi_num_rxqs =
		    (uint8_t)na_get_nrings(devna, NR_RX);
		qset.nlqi_num_txqs =
		    (uint8_t)na_get_nrings(devna, NR_TX);
		llink_init.nli_num_qsets = 1;
		llink_init.nli_qsets = &qset;
		llink_init.nli_ctx = NULL;
		pllink_init = &llink_init;
	}
	llink = nx_netif_llink_create_locked(nif, pllink_init);
	/* there can only be one default logical link */
	VERIFY(nif->nif_default_llink == NULL);
	/* obtain a reference for the default logical link pointer */
	nx_netif_llink_retain(llink);
	nif->nif_default_llink = llink;
}

static void
nx_netif_default_llink_remove(struct nx_netif *nif)
{
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	ASSERT(nif->nif_default_llink != NULL);
	ASSERT(nif->nif_llink_cnt == 1);
	llink = nif->nif_default_llink;
	nx_netif_llink_release(&nif->nif_default_llink);
	ASSERT(nif->nif_default_llink == NULL);
	nx_netif_llink_destroy_locked(nif, &llink);
}

__attribute__((always_inline))
static inline void
netif_ifp_inc_traffic_class_out_pkt(struct ifnet *ifp, uint32_t svc,
    uint32_t cnt, uint32_t len)
{
	switch (svc) {
	case PKT_TC_BE:
		ifp->if_tc.ifi_obepackets += cnt;
		ifp->if_tc.ifi_obebytes += len;
		break;
	case PKT_TC_BK:
		ifp->if_tc.ifi_obkpackets += cnt;
		ifp->if_tc.ifi_obkbytes += len;
		break;
	case PKT_TC_VI:
		ifp->if_tc.ifi_ovipackets += cnt;
		ifp->if_tc.ifi_ovibytes += len;
		break;
	case PKT_TC_VO:
		ifp->if_tc.ifi_ovopackets += cnt;
		ifp->if_tc.ifi_ovobytes += len;
		break;
	default:
		break;
	}
}

static int
netif_qset_enqueue_single(struct netif_qset *qset, struct __kern_packet *pkt,
    uint32_t *flowctl, uint32_t *dropped)
{
	struct ifnet *ifp = qset->nqs_ifcq->ifcq_ifp;
	boolean_t pkt_drop = FALSE;
	int err;

	/*
	 * we are using the first 4 bytes of flow_id as the AQM flow
	 * identifier.
	 */
	ASSERT(!uuid_is_null(pkt->pkt_flow_id));
	netif_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
	    1, pkt->pkt_length);

	if (__improbable(pkt->pkt_trace_id != 0)) {
		KDBG(SK_KTRACE_PKT_TX_FSW | DBG_FUNC_END, pkt->pkt_trace_id);
		KDBG(SK_KTRACE_PKT_TX_AQM | DBG_FUNC_START, pkt->pkt_trace_id);
	}

	/* Only native path is supported */
	ASSERT((pkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
	ASSERT(pkt->pkt_mbuf == NULL);

	err = ifnet_enqueue_ifcq_pkt(ifp, qset->nqs_ifcq, pkt, false,
	    &pkt_drop);
	if (__improbable(err != 0)) {
		if ((err == EQFULL || err == EQSUSPENDED) && flowctl != NULL) {
			(*flowctl)++;
		}
		if (pkt_drop && dropped != NULL) {
			(*dropped)++;
		}
	}
	return err;
}

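/*
 * Note: EQFULL and EQSUSPENDED from ifnet_enqueue_ifcq_pkt() signal AQM
 * flow control (queue full or suspended) rather than an unconditional
 * loss; an actual drop is reported separately through pkt_drop, which is
 * why the two outcomes are counted independently above.
 */
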
int
netif_qset_enqueue(struct netif_qset *qset, struct __kern_packet *pkt_chain,
    struct __kern_packet *tail, uint32_t cnt, uint32_t bytes, uint32_t *flowctl,
    uint32_t *dropped)
{
#pragma unused(tail)
	struct __kern_packet *pkt = pkt_chain;
	struct __kern_packet *next;
	struct netif_stats *nifs = &qset->nqs_llink->nll_nif->nif_stats;
	uint32_t c = 0, b = 0, drop_cnt = 0, flowctl_cnt = 0;
	int err = 0;

	/* drop packets if logical link state is destroyed */
	if (qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED) {
		pp_free_packet_chain(pkt_chain, (int *)&drop_cnt);
		STATS_ADD(nifs, NETIF_STATS_LLINK_TX_DROP_BAD_STATE, drop_cnt);
		if (dropped != NULL) {
			*dropped = drop_cnt;
		}
		return ENXIO;
	}

	/* We don't support chains for now */
	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;
		c++;
		b += pkt->pkt_length;

		(void) netif_qset_enqueue_single(qset, pkt, &flowctl_cnt,
		    &drop_cnt);
		pkt = next;
	}
	VERIFY(c == cnt);
	VERIFY(b == bytes);
	if (flowctl != NULL && flowctl_cnt > 0) {
		*flowctl = flowctl_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_QFULL, flowctl_cnt);
		err = EIO;
	}
	if (dropped != NULL && drop_cnt > 0) {
		*dropped = drop_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_DROPPED, drop_cnt);
		err = EIO;
	}
	return err;
}

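/*
 * Return contract (as implemented above): ENXIO if the logical link is
 * already destroyed (the whole chain is freed), EIO if any packet was
 * flow-controlled or dropped by AQM (with counts reported via *flowctl
 * and *dropped), and 0 otherwise. Despite taking a chain plus cnt/bytes
 * totals, packets are currently enqueued one at a time.
 */
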
struct netif_qset *
nx_netif_get_default_qset_noref(struct nx_netif *nif)
{
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	qset = nif->nif_default_llink->nll_default_qset;
	return qset;
}

static void
nx_netif_qset_hint_decode(uint64_t hint,
    uint16_t *link_id_internal, uint16_t *qset_idx)
{
	/* The top 32 bits are unused for now */
	*link_id_internal = (uint16_t)((0xffff0000 & hint) >> 16);
	*qset_idx = (uint16_t)((0x0000ffff & hint));
}

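/*
 * Worked example: hint 0x002a0003 decodes to link_id_internal 0x002a
 * (bits 16-31) and qset_idx 0x0003 (bits 0-15); the upper 32 bits are
 * ignored. NETIF_QSET_ID_ENCODE(), used in nx_netif_qset_init() above,
 * presumably packs nqs_id with the same layout.
 */
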
/* retains a reference for the caller */
static struct netif_qset *
nx_netif_get_default_qset(struct nx_netif *nif)
{
	struct netif_qset *qset;

	qset = nif->nif_default_llink->nll_default_qset;
	nx_netif_qset_retain(qset);
	return qset;
}

/*
 * Find the qset based on the qset hint. Fall back to the default qset
 * if not found. The random qset is used for experimentation.
 */
struct netif_qset *
nx_netif_find_qset(struct nx_netif *nif, uint64_t hint)
{
	uint16_t ll_id_internal, qset_idx;
	struct netif_llink *llink;
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;
	int i, j, random_id;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	if (!NX_LLINK_PROV(nif->nif_nx) ||
	    (nx_netif_random_qset == 0 && hint == 0)) {
		goto def_qset;
	}
	if (nx_netif_random_qset == 0) {
		nx_netif_qset_hint_decode(hint, &ll_id_internal, &qset_idx);
	} else {
		ll_id_internal = 0;
		qset_idx = 0;
	}
	lck_rw_lock_shared(&nif->nif_llink_lock);
	i = 0;
	random_id = random();
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % nif->nif_llink_cnt) == i) {
			break;
		} else if (llink->nll_link_id_internal == ll_id_internal) {
			break;
		}
		i++;
	}
	if (llink == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	j = 0;
	random_id = random();
	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % llink->nll_qset_cnt) == j) {
			break;
		} else if (qset->nqs_idx == qset_idx) {
			break;
		}
		j++;
	}
	if (qset == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	nx_netif_qset_retain(qset);
	STATS_INC(nifs, NETIF_STATS_LLINK_NONDEF_QSET_USED);
	lck_rw_unlock_shared(&nif->nif_llink_lock);
	if (nx_netif_random_qset != 0) {
		SK_DF(SK_VERB_LLINK, "%s: random qset: qset %p, ifcq %p, "
		    "llink_idx %d, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, i, j);

		DTRACE_SKYWALK5(random__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, int, i, int, j);
	} else {
		SK_DF(SK_VERB_LLINK, "%s: non-default qset: qset %p, ifcq %p, "
		    "ll_id_internal 0x%x, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, ll_id_internal, qset_idx);

		DTRACE_SKYWALK5(nondef__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, uint16_t, ll_id_internal,
		    uint16_t, qset_idx);
	}
	return qset;

def_qset:
	STATS_INC(nifs, NETIF_STATS_LLINK_DEF_QSET_USED);
	qset = nx_netif_get_default_qset(nif);
	ASSERT(qset != NULL);

	SK_DF(SK_VERB_LLINK, "%s: default qset: qset %p, ifcq %p, hint %llx",
	    if_name(nif->nif_ifp), qset, qset->nqs_ifcq, hint);

	DTRACE_SKYWALK4(def__qset, struct nx_netif *, nif, struct netif_qset *,
	    qset, struct ifclassq *, qset->nqs_ifcq, uint64_t, hint);
	return qset;
}

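/*
 * Experimentation knob: on DEVELOPMENT/DEBUG kernels, setting the
 * kern.skywalk.netif.random_qset sysctl to a non-zero value makes
 * nx_netif_find_qset() ignore the hint and pick a pseudo-random logical
 * link and qset, which exercises the non-default qset paths.
 */
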
void
nx_netif_llink_init(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

#if (DEVELOPMENT || DEBUG)
	if (__improbable(nx_netif_disable_llink != 0)) {
		SK_DF(SK_VERB_LLINK, "%s: llink is disabled",
		    if_name(nif->nif_ifp));
		return;
	}
#endif /* (DEVELOPMENT || DEBUG) */

	if (!SKYWALK_NATIVE(ifp)) {
		SK_DF(SK_VERB_LLINK,
		    "%s: llink is supported on native devices only",
		    if_name(ifp));
		return;
	}
	ASSERT(!NETIF_LLINK_ENABLED(nif));
	lck_rw_init(&nif->nif_llink_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	STAILQ_INIT(&nif->nif_llink_list);
	nif->nif_llink_cnt = 0;
	nx_netif_default_llink_add(nif);
	nif->nif_flags |= NETIF_FLAG_LLINK_INITIALIZED;

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	SK_DF(SK_VERB_LLINK, "%s: llink initialized", if_name(ifp));
}

void
nx_netif_llink_fini(struct nx_netif *nif)
{
	if (!NETIF_LLINK_ENABLED(nif)) {
		SK_DF(SK_VERB_LLINK, "%s: llink not initialized",
		    if_name(nif->nif_ifp));
		return;
	}

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	nif->nif_flags &= ~NETIF_FLAG_LLINK_INITIALIZED;
	nx_netif_default_llink_remove(nif);
	ASSERT(nif->nif_llink_cnt == 0);
	ASSERT(STAILQ_EMPTY(&nif->nif_llink_list));

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	nx_netif_llink_config_free(nif);
	lck_rw_destroy(&nif->nif_llink_lock, &netif_llink_lock_group);
	SK_DF(SK_VERB_LLINK, "%s: llink uninitialization done",
	    if_name(nif->nif_ifp));
}

int
nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *init,
    bool default_llink)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	bool has_default_qset = false;
	bool default_llink_flag;
	uint8_t i;

	default_llink_flag =
	    ((init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) != 0);

	if (default_llink != default_llink_flag) {
		SK_ERR("default llink flag incompatible: default_llink(%s), "
		    "default_llink_flag(%s)",
		    default_llink ? "true" : "false",
		    default_llink_flag ? "true" : "false");
		return EINVAL;
	}
	if (init->nli_num_qsets == 0) {
		SK_ERR("num qsets is zero");
		return EINVAL;
	}
	if ((qsinit = init->nli_qsets) == NULL) {
		SK_ERR("qsets is NULL");
		return EINVAL;
	}
	for (i = 0; i < init->nli_num_qsets; i++) {
		if (qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
			if (has_default_qset) {
				SK_ERR("has more than one default qset");
				return EINVAL;
			}
			if (qsinit[i].nlqi_num_rxqs == 0) {
				SK_ERR("num_rxqs == 0");
				return EINVAL;
			}
			has_default_qset = true;
		}
		if (qsinit[i].nlqi_num_txqs == 0) {
			SK_ERR("num_txqs == 0");
			return EINVAL;
		}
		if ((qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_WMM_MODE) &&
		    (qsinit[i].nlqi_num_txqs != NEXUS_NUM_WMM_QUEUES)) {
			SK_ERR("invalid wmm mode");
			return EINVAL;
		}
	}
	return 0;
}

int
nx_netif_default_llink_config(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *init)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	int i, err;

	err = nx_netif_validate_llink_config(init, true);
	if (err != 0) {
		return err;
	}
	nif->nif_default_llink_params = sk_alloc_type(
		struct kern_nexus_netif_llink_init,
		Z_WAITOK | Z_NOFAIL, nx_netif_tag_llink_cfg);

	qsinit = sk_alloc_type_array(struct kern_nexus_netif_llink_qset_init,
	    init->nli_num_qsets, Z_WAITOK, nx_netif_tag_llink_cfg);
	if (qsinit == NULL) {
		SK_ERR("failed to alloc kern_nexus_netif_llink_qset_init");
		sk_free_type(struct kern_nexus_netif_llink_init,
		    nif->nif_default_llink_params);
		nif->nif_default_llink_params = NULL;
		return ENOMEM;
	}
	memcpy(nif->nif_default_llink_params, init,
	    __builtin_offsetof(struct kern_nexus_netif_llink_init,
	    nli_qsets));
	for (i = 0; i < init->nli_num_qsets; i++) {
		qsinit[i] = init->nli_qsets[i];
	}
	nif->nif_default_llink_params->nli_qsets = qsinit;
	return 0;
}

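/*
 * Copy semantics above: the memcpy() duplicates only the fixed-size head
 * of the init structure (every field laid out before nli_qsets, per
 * __builtin_offsetof), while the qset descriptors are deep-copied into a
 * separately allocated array so the cached parameters do not alias the
 * caller's memory.
 */
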
void
nx_netif_llink_config_free(struct nx_netif *nif)
{
	if (nif->nif_default_llink_params == NULL) {
		return;
	}
	sk_free_type_array(struct kern_nexus_netif_llink_qset_init,
	    nif->nif_default_llink_params->nli_num_qsets,
	    nif->nif_default_llink_params->nli_qsets);
	nif->nif_default_llink_params->nli_qsets = NULL;

	sk_free_type(struct kern_nexus_netif_llink_init,
	    nif->nif_default_llink_params);
	nif->nif_default_llink_params = NULL;
}

static int
nx_netif_llink_ext_init_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	struct netif_stats *nifs = &NX_NETIF_PRIVATE(nx)->nif_stats;
	int err = 0;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_init != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_init != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0);
		err = nxnpi->nxnpi_qset_init(nxprov, nx, llink->nll_ctx,
		    qset->nqs_idx, qset->nqs_id, qset, &qset->nqs_ctx);
		if (err != 0) {
			STATS_INC(nifs, NETIF_STATS_LLINK_QSET_INIT_FAIL);
			SK_ERR("nx: 0x%llx, qset: %d, qset init err %d",
			    SK_KVA(nx), qset->nqs_idx, err);
			goto out;
		}
		qset->nqs_flags |= NETIF_QSET_FLAG_EXT_INITED;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, false, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_RXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, true, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_TXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
	}
out:
	if (err != 0) {
		nx_netif_llink_ext_fini_queues(nx, llink);
	}
	return err;
}

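/*
 * Error handling above is all-or-nothing: if any qset or queue callback
 * fails part-way, nx_netif_llink_ext_fini_queues() walks the whole llink
 * and finalizes exactly those qsets/queues whose EXT_INITED flag was set,
 * leaving the llink as if the external init had never run.
 */
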
static void
nx_netif_llink_ext_fini_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_fini != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_fini != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		if ((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0) {
			continue;
		}
		nxnpi->nxnpi_qset_fini(nxprov, nx, qset->nqs_ctx);
		qset->nqs_flags &= ~NETIF_QSET_FLAG_EXT_INITED;
	}
}

int
nx_netif_llink_ext_init_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	return nx_netif_llink_ext_init_queues(nx, nif->nif_default_llink);
}

void
nx_netif_llink_ext_fini_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	nx_netif_llink_ext_fini_queues(nx, nif->nif_default_llink);
}