xref: /xnu-8019.80.24/bsd/skywalk/nexus/netif/nx_netif_llink.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
/*
 * Copyright (c) 2020-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <skywalk/os_skywalk_private.h>
#include <skywalk/nexus/netif/nx_netif.h>
#include <pexpert/pexpert.h> /* for PE_parse_boot_argn */
#include <os/refcnt.h>
#include <sys/sdt.h>

#define NX_NETIF_TAG_QSET   "com.apple.skywalk.netif.qset"
static kern_allocation_name_t nx_netif_tag_qset;

#define NX_NETIF_TAG_LLINK_CFG   "com.apple.skywalk.netif.llink.cfg"
static kern_allocation_name_t nx_netif_tag_llink_cfg;

LCK_ATTR_DECLARE(netif_llink_lock_attr, 0, 0);
static LCK_GRP_DECLARE(netif_llink_lock_group, "netif llink locks");

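/*
 * Nonzero disables logical-link support. On DEVELOPMENT/DEBUG kernels
 * this can be set via the "sk_disable_llink" boot-arg (parsed in
 * nx_netif_llink_module_init() below).
 */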
static uint32_t nx_netif_disable_llink = 0;

static struct netif_llink *nx_netif_llink_alloc(void);
static void nx_netif_llink_free(struct netif_llink **);
static struct netif_qset *nx_netif_qset_alloc(uint8_t, uint8_t);
static void nx_netif_qset_free(struct netif_qset **);
static void nx_netif_qset_setup_ifclassq(struct ifnet *, struct netif_llink *,
    struct netif_qset *);
static void nx_netif_qset_teardown_ifclassq(struct netif_qset *);
static void nx_netif_qset_init(struct netif_qset *, struct netif_llink *,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *);
static struct netif_qset *nx_netif_qset_create(struct netif_llink *,
    uint8_t, struct kern_nexus_netif_llink_qset_init *);
static void nx_netif_qset_destroy(struct netif_qset *);
static void nx_netif_llink_initialize(struct netif_llink *, struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_driver_queue_destroy(struct netif_queue *);
static void nx_netif_driver_queue_init(struct netif_qset *,
    struct netif_queue *, kern_packet_svc_class_t, bool);
static struct netif_llink *nx_netif_llink_create_locked(struct nx_netif *,
    struct kern_nexus_netif_llink_init *);
static void nx_netif_default_llink_add(struct nx_netif *);
static int netif_qset_enqueue_single(struct netif_qset *,
    struct __kern_packet *, uint32_t *, uint32_t *);
static int nx_netif_llink_ext_init_queues(struct kern_nexus *,
    struct netif_llink *);
static void nx_netif_llink_ext_fini_queues(struct kern_nexus *,
    struct netif_llink *);

static uint32_t nx_netif_random_qset = 0;
#if (DEVELOPMENT || DEBUG)
SYSCTL_UINT(_kern_skywalk_netif, OID_AUTO, random_qset,
    CTLFLAG_RW | CTLFLAG_LOCKED, &nx_netif_random_qset, 0,
    "pick a random qset");
#endif /* DEVELOPMENT || DEBUG */

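/*
 * Overview: a netif nexus can be carved into logical links
 * (struct netif_llink), each owning one or more queue sets
 * (struct netif_qset). A qset bundles RX and TX driver queues
 * (struct netif_queue) and, optionally, an AQM ifclassq. Logical
 * links are immutable once created: qsets are only added at creation
 * time and are torn down with the link, which is what lets a qset
 * piggyback on its llink's refcount (see nx_netif_qset_retain()).
 */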
void
nx_netif_llink_module_init(void)
{
	ASSERT(nx_netif_tag_qset == NULL);
	nx_netif_tag_qset =
	    kern_allocation_name_allocate(NX_NETIF_TAG_QSET, 0);
	ASSERT(nx_netif_tag_qset != NULL);

	ASSERT(nx_netif_tag_llink_cfg == NULL);
	nx_netif_tag_llink_cfg =
	    kern_allocation_name_allocate(NX_NETIF_TAG_LLINK_CFG, 0);
	ASSERT(nx_netif_tag_llink_cfg != NULL);

#if (DEVELOPMENT || DEBUG)
	PE_parse_boot_argn("sk_disable_llink", &nx_netif_disable_llink,
	    sizeof(nx_netif_disable_llink));
#endif /* DEVELOPMENT || DEBUG */
}

void
nx_netif_llink_module_fini(void)
{
	if (nx_netif_tag_qset != NULL) {
		kern_allocation_name_release(nx_netif_tag_qset);
		nx_netif_tag_qset = NULL;
	}
	if (nx_netif_tag_llink_cfg != NULL) {
		kern_allocation_name_release(nx_netif_tag_llink_cfg);
		nx_netif_tag_llink_cfg = NULL;
	}
}

/* retains a reference for the caller */
static struct netif_llink *
nx_netif_llink_alloc(void)
{
	struct netif_llink *llink;

	llink = sk_alloc_type(struct netif_llink, Z_WAITOK | Z_NOFAIL,
	    skmem_tag_netif_llink);
	os_ref_init(&llink->nll_refcnt, NULL);
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_retain(struct netif_llink *llink)
{
	os_ref_retain(&llink->nll_refcnt);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_free(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;
	struct netif_qset *qset, *tqset;

	VERIFY(llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	*pllink = NULL;
	SLIST_FOREACH_SAFE(qset, &llink->nll_qset_list, nqs_list, tqset) {
		SLIST_REMOVE(&llink->nll_qset_list, qset, netif_qset,
		    nqs_list);
		nx_netif_qset_destroy(qset);
	}
	sk_free_type(struct netif_llink, llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_llink_release(struct netif_llink **pllink)
{
	struct netif_llink *llink = *pllink;

	*pllink = NULL;
	if (os_ref_release(&llink->nll_refcnt) == 0) {
		nx_netif_llink_free(&llink);
	}
}

/* retains a reference for the caller */
static struct netif_qset *
nx_netif_qset_alloc(uint8_t nrxqs, uint8_t ntxqs)
{
	struct netif_qset *qset;

	_CASSERT(sizeof(struct netif_queue) % sizeof(uint64_t) == 0);

	qset = sk_alloc_type_header_array(struct netif_qset, struct netif_queue,
	    nrxqs + ntxqs, Z_WAITOK | Z_NOFAIL, nx_netif_tag_qset);

	qset->nqs_num_rx_queues = nrxqs;
	qset->nqs_num_tx_queues = ntxqs;
	return qset;
}

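/*
 * Note: the qset and its driver queues come from a single header-array
 * allocation; the netif_qset header is followed by nrxqs + ntxqs
 * netif_queue structures, RX queues first. The NETIF_QSET_RX_QUEUE()
 * and NETIF_QSET_TX_QUEUE() accessors (defined in the netif headers)
 * are assumed to index that trailing array, with TX queue i at slot
 * nqs_num_rx_queues + i.
 */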
SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_retain(struct netif_qset *qset)
{
	/*
	 * A logical link is immutable, i.e. queue sets can't be added to
	 * or removed from it. We rely on this property to simply acquire
	 * a refcnt on the logical link, which is the parent structure of
	 * a qset.
	 */
	nx_netif_llink_retain(qset->nqs_llink);
}

SK_NO_INLINE_ATTRIBUTE
void
nx_netif_qset_release(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	struct netif_llink *llink = qset->nqs_llink;

	*pqset = NULL;
	nx_netif_llink_release(&llink);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_free(struct netif_qset **pqset)
{
	struct netif_qset *qset = *pqset;
	uint8_t i;

	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_RX_QUEUE(qset, i));
	}
	for (i = 0; i < qset->nqs_num_tx_queues; i++) {
		nx_netif_driver_queue_destroy(NETIF_QSET_TX_QUEUE(qset, i));
	}
	if (qset->nqs_ifcq != NULL) {
		nx_netif_qset_teardown_ifclassq(qset);
	}
	qset->nqs_llink = NULL;
	sk_free_type_header_array(struct netif_qset, struct netif_queue,
	    qset->nqs_num_rx_queues + qset->nqs_num_tx_queues, qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_destroy(struct netif_qset *qset)
{
	VERIFY(qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED);
	nx_netif_qset_free(&qset);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_setup_ifclassq(struct ifnet *ifp, struct netif_llink *llink,
    struct netif_qset *qset)
{
	if (NETIF_DEFAULT_LLINK(llink) && NETIF_DEFAULT_QSET(qset)) {
		/* use the default AQM queues from ifnet */
		ifclassq_retain(ifp->if_snd);
		qset->nqs_ifcq = ifp->if_snd;
		return;
	}
	qset->nqs_ifcq = ifclassq_alloc();
	VERIFY(qset->nqs_ifcq != NULL);
	dlil_ifclassq_setup(ifp, qset->nqs_ifcq);
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_teardown_ifclassq(struct netif_qset *qset)
{
	ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_AQM) != 0);

	if (NETIF_DEFAULT_LLINK(qset->nqs_llink) && NETIF_DEFAULT_QSET(qset)) {
		ifclassq_release(&qset->nqs_ifcq);
		return;
	}
	/* Drain and destroy send queue */
	ifclassq_teardown(qset->nqs_ifcq);
	ifclassq_release(&qset->nqs_ifcq);
}

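/*
 * Note: the default qset of the default llink borrows ifp->if_snd
 * (retained in nx_netif_qset_setup_ifclassq() above), so teardown only
 * drops that reference; the ifnet still owns the queue. A non-default
 * qset owns a private ifclassq, which must be drained before release.
 */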
SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_qset_init(struct netif_qset *qset, struct netif_llink *llink,
    uint8_t idx, struct kern_nexus_netif_llink_qset_init *qset_init)
{
#define _NETIF_QSET_MAX_TXQS    4
	kern_packet_svc_class_t svc[_NETIF_QSET_MAX_TXQS] =
	{KPKT_SC_VO, KPKT_SC_VI, KPKT_SC_BE, KPKT_SC_BK};
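	/*
	 * In the driver-managed case below, TX queue i is assigned
	 * svc[i], i.e. the queues are ordered VO, VI, BE, BK.
	 */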
	struct ifnet *ifp = llink->nll_nif->nif_ifp;
	uint8_t i;

	/*
	 * No need to retain a reference for llink, as the logical link is
	 * immutable and qsets are created and destroyed along with the
	 * logical link.
	 */
	qset->nqs_llink = llink;

	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
		qset->nqs_flags |= NETIF_QSET_FLAG_DEFAULT;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY) {
		qset->nqs_flags |= NETIF_QSET_FLAG_LOW_LATENCY;
	}
	if (qset_init->nlqi_flags & KERN_NEXUS_NET_LLINK_QSET_AQM) {
		nx_netif_qset_setup_ifclassq(ifp, llink, qset);
		qset->nqs_flags |= NETIF_QSET_FLAG_AQM;
	}
	qset->nqs_id = NETIF_QSET_ID_ENCODE(llink->nll_link_id_internal, idx);
	qset->nqs_idx = idx;

	for (i = 0; i < qset->nqs_num_rx_queues; i++) {
		nx_netif_driver_queue_init(qset, NETIF_QSET_RX_QUEUE(qset, i),
		    KPKT_SC_UNSPEC, true);
	}

	/*
	 * TODO:
	 * Could be more flexible here to allow an arbitrary number of queues.
	 */
	if (qset->nqs_num_tx_queues > 1) {
		VERIFY(qset->nqs_num_tx_queues == _NETIF_QSET_MAX_TXQS);
		VERIFY(ifp->if_output_sched_model ==
		    IFNET_SCHED_MODEL_DRIVER_MANAGED);
		for (i = 0; i < _NETIF_QSET_MAX_TXQS; i++) {
			nx_netif_driver_queue_init(qset,
			    NETIF_QSET_TX_QUEUE(qset, i), svc[i], false);
		}
	} else {
		nx_netif_driver_queue_init(qset, NETIF_QSET_TX_QUEUE(qset, 0),
		    KPKT_SC_UNSPEC, false);
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_qset *
nx_netif_qset_create(struct netif_llink *llink, uint8_t idx,
    struct kern_nexus_netif_llink_qset_init *qset_init)
{
	struct netif_qset *qset;

	qset = nx_netif_qset_alloc(qset_init->nlqi_num_rxqs,
	    qset_init->nlqi_num_txqs);
	nx_netif_qset_init(qset, llink, idx, qset_init);
	return qset;
}

static uint16_t
nx_netif_generate_internal_llink_id(struct nx_netif *nif)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;
	uint16_t id;

again:
	id = (uint16_t)(random() % 65536);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (__improbable(llink->nll_link_id_internal == id)) {
			break;
		}
	}
	if (__probable(llink == NULL && id != 0)) {
		return id;
	} else {
		STATS_INC(nifs, NETIF_STATS_LLINK_DUP_INT_ID_GENERATED);
		DTRACE_SKYWALK1(dup__llink__id__internal, uint16_t, id);
		goto again;
	}
}

static void
nx_netif_llink_initialize(struct netif_llink *llink, struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	uint8_t i;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);

	llink->nll_nif = nif;
	llink->nll_link_id = llink_init->nli_link_id;
	if (llink_init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) {
		llink->nll_flags |= NETIF_LLINK_FLAG_DEFAULT;
	}
	llink->nll_link_id_internal = nx_netif_generate_internal_llink_id(nif);
	llink->nll_ctx = llink_init->nli_ctx;
	SLIST_INIT(&llink->nll_qset_list);

	for (i = 0; i < llink_init->nli_num_qsets; i++) {
		struct netif_qset *qset = nx_netif_qset_create(llink, i,
		    &llink_init->nli_qsets[i]);
		/* nx_netif_qset_create retains a reference for the caller */
		SLIST_INSERT_HEAD(&llink->nll_qset_list, qset, nqs_list);
		if (NETIF_DEFAULT_QSET(qset)) {
			/* there can only be one default queue set */
			VERIFY(llink->nll_default_qset == NULL);
			llink->nll_default_qset = qset;
		}
	}
	llink->nll_qset_cnt = llink_init->nli_num_qsets;
	/* there should be a default queue set */
	VERIFY(llink->nll_default_qset != NULL);
	llink->nll_state = NETIF_LLINK_STATE_INIT;
}

static void
nx_netif_driver_queue_destroy(struct netif_queue *drvq)
{
	VERIFY(drvq->nq_qset->nqs_llink->nll_state ==
	    NETIF_LLINK_STATE_DESTROYED);

	lck_mtx_lock(&drvq->nq_lock);
	VERIFY(KPKTQ_EMPTY(&drvq->nq_pktq));
	lck_mtx_unlock(&drvq->nq_lock);

	drvq->nq_qset = NULL;
	lck_mtx_destroy(&drvq->nq_lock, &netif_llink_lock_group);
}

static void
nx_netif_driver_queue_init(struct netif_qset *qset,
    struct netif_queue *drvq, kern_packet_svc_class_t svc, bool is_rx)
{
	lck_mtx_init(&drvq->nq_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_mtx_lock(&drvq->nq_lock);
	KPKTQ_INIT(&drvq->nq_pktq);
	lck_mtx_unlock(&drvq->nq_lock);

	/*
	 * No need to retain a reference for qset, as the queue set is
	 * immutable and the driver queue is part of the queue set data
	 * structure.
	 */
	drvq->nq_qset = qset;
	drvq->nq_svc = svc;
	if (is_rx) {
		drvq->nq_flags |= NETIF_QUEUE_IS_RX;
	}
}

SK_NO_INLINE_ATTRIBUTE
static struct netif_llink *
nx_netif_llink_create_locked(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init)
{
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	llink = nx_netif_llink_alloc();
	nx_netif_llink_initialize(llink, nif, llink_init);
	/* nx_netif_llink_alloc retains a reference for the caller */
	STAILQ_INSERT_TAIL(&nif->nif_llink_list, llink, nll_link);
	nif->nif_llink_cnt++;
	STATS_INC(nifs, NETIF_STATS_LLINK_ADD);
	if (NETIF_DEFAULT_LLINK(llink)) {
		/* there can only be one default logical link */
		VERIFY(nif->nif_default_llink == NULL);
	}
	return llink;
}

SK_NO_INLINE_ATTRIBUTE
static void
nx_netif_llink_destroy_locked(struct nx_netif *nif, struct netif_llink **pllink)
{
	struct netif_stats *nifs = &nif->nif_stats;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	(*pllink)->nll_state = NETIF_LLINK_STATE_DESTROYED;
	STAILQ_REMOVE(&nif->nif_llink_list, *pllink, netif_llink, nll_link);
	nif->nif_llink_cnt--;
	STATS_INC(nifs, NETIF_STATS_LLINK_REMOVE);
	nx_netif_llink_release(pllink);
}

int
nx_netif_llink_add(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *llink_init, struct netif_llink **pllink)
{
	int err;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	*pllink = NULL;
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	/* ensure logical_link_id is unique */
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_init->nli_link_id) {
			SK_ERR("duplicate llink_id 0x%llx",
			    llink_init->nli_link_id);
			STATS_INC(nifs, NETIF_STATS_LLINK_DUP_ID_GIVEN);
			DTRACE_SKYWALK1(dup__id__given, uint64_t,
			    llink_init->nli_link_id);
			lck_rw_unlock_exclusive(&nif->nif_llink_lock);
			return EINVAL;
		}
	}
	llink = nx_netif_llink_create_locked(nif, llink_init);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	VERIFY(llink != NULL);
	err = nx_netif_llink_ext_init_queues(nif->nif_nx, llink);
	if (err != 0) {
		lck_rw_lock_exclusive(&nif->nif_llink_lock);
		nx_netif_llink_destroy_locked(nif, &llink);
		lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	} else {
		/* increment reference for the caller */
		nx_netif_llink_retain(llink);
		*pllink = llink;
	}
	return err;
}

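/*
 * Illustrative sketch (not from the original source): a driver that
 * wants an extra low-latency logical link with a single qset might
 * call nx_netif_llink_add() roughly as below. The example function
 * and its parameters are hypothetical.
 */
#if 0
static int
example_add_low_latency_llink(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t link_id, void *drv_ctx,
    struct netif_llink **out)
{
	struct kern_nexus_netif_llink_qset_init qset = {
		.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT |
		    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY,
		.nlqi_num_rxqs = 1,
		.nlqi_num_txqs = 1,
	};
	struct kern_nexus_netif_llink_init init = {
		.nli_link_id = link_id,
		.nli_num_qsets = 1,
		.nli_qsets = &qset,
		.nli_ctx = drv_ctx,
	};

	/* on success, *out holds a reference the caller must release */
	return nx_netif_llink_add(nif, &init, out);
}
#endif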
int
nx_netif_llink_remove(struct nx_netif *nif,
    kern_nexus_netif_llink_id_t llink_id)
{
	bool llink_found = false;
	struct netif_llink *llink;
	struct netif_stats *nifs = &nif->nif_stats;

	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (llink->nll_link_id == llink_id) {
			llink_found = true;
			break;
		}
	}
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	if (!llink_found) {
		STATS_INC(nifs, NETIF_STATS_LLINK_NOT_FOUND_REMOVE);
		DTRACE_SKYWALK1(not__found, uint64_t, llink_id);
		return ENOENT;
	}
	nx_netif_llink_ext_fini_queues(nif->nif_nx, llink);
	lck_rw_lock_exclusive(&nif->nif_llink_lock);
	nx_netif_llink_destroy_locked(nif, &llink);
	lck_rw_unlock_exclusive(&nif->nif_llink_lock);
	return 0;
}

static void
nx_netif_default_llink_add(struct nx_netif *nif)
{
	struct kern_nexus_netif_llink_init llink_init, *pllink_init;
	struct kern_nexus_netif_llink_qset_init qset;
	struct ifnet *ifp = nif->nif_ifp;
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	VERIFY(SKYWALK_NATIVE(ifp));

	llink_init.nli_flags = KERN_NEXUS_NET_LLINK_DEFAULT;

	if (NX_LLINK_PROV(nif->nif_nx)) {
		VERIFY(nif->nif_default_llink_params != NULL);
		pllink_init = nif->nif_default_llink_params;
	} else {
		struct nexus_adapter *devna =
		    nx_port_get_na(nif->nif_nx, NEXUS_PORT_NET_IF_DEV);

		llink_init.nli_link_id = NETIF_LLINK_ID_DEFAULT;
		qset.nlqi_flags = KERN_NEXUS_NET_LLINK_QSET_DEFAULT;
		/*
		 * For the legacy mode of operation we assume that AQM is
		 * not needed on a low-latency interface.
		 */
		if (NETIF_IS_LOW_LATENCY(nif)) {
			qset.nlqi_flags |=
			    KERN_NEXUS_NET_LLINK_QSET_LOW_LATENCY;
		} else {
			qset.nlqi_flags |= KERN_NEXUS_NET_LLINK_QSET_AQM;
		}
		qset.nlqi_num_rxqs =
		    (uint8_t)na_get_nrings(devna, NR_RX);
		qset.nlqi_num_txqs =
		    (uint8_t)na_get_nrings(devna, NR_TX);
		llink_init.nli_num_qsets = 1;
		llink_init.nli_qsets = &qset;
		pllink_init = &llink_init;
	}
	llink = nx_netif_llink_create_locked(nif, pllink_init);
	/* there can only be one default logical link */
	VERIFY(nif->nif_default_llink == NULL);
	/* obtain a reference for the default logical link pointer */
	nx_netif_llink_retain(llink);
	nif->nif_default_llink = llink;
}

static void
nx_netif_default_llink_remove(struct nx_netif *nif)
{
	struct netif_llink *llink;

	LCK_RW_ASSERT(&nif->nif_llink_lock, LCK_RW_ASSERT_EXCLUSIVE);
	ASSERT(nif->nif_default_llink != NULL);
	ASSERT(nif->nif_llink_cnt == 1);
	llink = nif->nif_default_llink;
	nx_netif_llink_release(&nif->nif_default_llink);
	ASSERT(nif->nif_default_llink == NULL);
	nx_netif_llink_destroy_locked(nif, &llink);
}

__attribute__((always_inline))
static inline void
netif_ifp_inc_traffic_class_out_pkt(struct ifnet *ifp, uint32_t svc,
    uint32_t cnt, uint32_t len)
{
	switch (svc) {
	case PKT_TC_BE:
		ifp->if_tc.ifi_obepackets += cnt;
		ifp->if_tc.ifi_obebytes += len;
		break;
	case PKT_TC_BK:
		ifp->if_tc.ifi_obkpackets += cnt;
		ifp->if_tc.ifi_obkbytes += len;
		break;
	case PKT_TC_VI:
		ifp->if_tc.ifi_ovipackets += cnt;
		ifp->if_tc.ifi_ovibytes += len;
		break;
	case PKT_TC_VO:
		ifp->if_tc.ifi_ovopackets += cnt;
		ifp->if_tc.ifi_ovobytes += len;
		break;
	default:
		break;
	}
}

static int
netif_qset_enqueue_single(struct netif_qset *qset, struct __kern_packet *pkt,
    uint32_t *flowctl, uint32_t *dropped)
{
	struct ifnet *ifp = qset->nqs_ifcq->ifcq_ifp;
	boolean_t pkt_drop = FALSE;
	int err;

	/*
	 * We are using the first 4 bytes of flow_id as the AQM flow
	 * identifier.
	 */
	ASSERT(!uuid_is_null(pkt->pkt_flow_id));
	netif_ifp_inc_traffic_class_out_pkt(ifp, pkt->pkt_svc_class,
	    1, pkt->pkt_length);

	if (__improbable(pkt->pkt_trace_id != 0)) {
		KDBG(SK_KTRACE_PKT_TX_FSW | DBG_FUNC_END, pkt->pkt_trace_id);
		KDBG(SK_KTRACE_PKT_TX_AQM | DBG_FUNC_START, pkt->pkt_trace_id);
	}

	/* Only the native path is supported */
	ASSERT((pkt->pkt_pflags & PKT_F_MBUF_DATA) == 0);
	ASSERT(pkt->pkt_mbuf == NULL);

	err = ifnet_enqueue_ifcq_pkt(ifp, qset->nqs_ifcq, pkt, false,
	    &pkt_drop);
	if (__improbable(err != 0)) {
		if ((err == EQFULL || err == EQSUSPENDED) && flowctl != NULL) {
			(*flowctl)++;
		}
		if (pkt_drop && dropped != NULL) {
			(*dropped)++;
		}
	}
	return err;
}

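/*
 * Note: in netif_qset_enqueue_single() above, EQFULL and EQSUSPENDED
 * from AQM count as flow control rather than drops; the chain walker
 * below folds both tallies into a single EIO, so callers consult
 * *flowctl and *dropped for the specifics.
 */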
int
netif_qset_enqueue(struct netif_qset *qset, struct __kern_packet *pkt_chain,
    struct __kern_packet *tail, uint32_t cnt, uint32_t bytes, uint32_t *flowctl,
    uint32_t *dropped)
{
#pragma unused(tail)
	struct __kern_packet *pkt = pkt_chain;
	struct __kern_packet *next;
	struct netif_stats *nifs = &qset->nqs_llink->nll_nif->nif_stats;
	uint32_t c = 0, b = 0, drop_cnt = 0, flowctl_cnt = 0;
	int err = 0;

	/* drop packets if the logical link has been destroyed */
	if (qset->nqs_llink->nll_state == NETIF_LLINK_STATE_DESTROYED) {
		pp_free_packet_chain(pkt_chain, (int *)&drop_cnt);
		STATS_ADD(nifs, NETIF_STATS_LLINK_TX_DROP_BAD_STATE, drop_cnt);
		if (dropped != NULL) {
			*dropped = drop_cnt;
		}
		return ENXIO;
	}

	/* chains aren't supported yet; enqueue one packet at a time */
	while (pkt != NULL) {
		next = pkt->pkt_nextpkt;
		pkt->pkt_nextpkt = NULL;
		c++;
		b += pkt->pkt_length;

		(void) netif_qset_enqueue_single(qset, pkt, &flowctl_cnt,
		    &drop_cnt);
		pkt = next;
	}
	VERIFY(c == cnt);
	VERIFY(b == bytes);
	if (flowctl != NULL && flowctl_cnt > 0) {
		*flowctl = flowctl_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_QFULL, flowctl_cnt);
		err = EIO;
	}
	if (dropped != NULL && drop_cnt > 0) {
		*dropped = drop_cnt;
		STATS_ADD(nifs, NETIF_STATS_LLINK_AQM_DROPPED, drop_cnt);
		err = EIO;
	}
	return err;
}

struct netif_qset *
nx_netif_get_default_qset_noref(struct nx_netif *nif)
{
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	qset = nif->nif_default_llink->nll_default_qset;
	return qset;
}

static void
nx_netif_qset_hint_decode(uint64_t hint,
    uint16_t *link_id_internal, uint16_t *qset_idx)
{
	/* the top 32 bits are unused for now */
	*link_id_internal = (uint16_t)((0xffff0000 & hint) >> 16);
	*qset_idx = (uint16_t)(0x0000ffff & hint);
}

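/*
 * Note: this is the inverse of NETIF_QSET_ID_ENCODE() used in
 * nx_netif_qset_init(); that macro is assumed to pack the internal
 * link id into bits 16-31 and the qset index into bits 0-15, roughly:
 *
 *	qset_id = ((uint64_t)link_id_internal << 16) | qset_idx;
 */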
/* retains a reference for the caller */
static struct netif_qset *
nx_netif_get_default_qset(struct nx_netif *nif)
{
	struct netif_qset *qset;

	qset = nif->nif_default_llink->nll_default_qset;
	nx_netif_qset_retain(qset);
	return qset;
}

/*
 * Find the qset based on the qset hint. Fall back to the default qset
 * if not found. The random qset is used for experimentation.
 */
struct netif_qset *
nx_netif_find_qset(struct nx_netif *nif, uint64_t hint)
{
	uint16_t ll_id_internal, qset_idx;
	struct netif_llink *llink;
	struct netif_qset *qset;
	struct netif_stats *nifs = &nif->nif_stats;
	int i, j, random_id;

	ASSERT(NETIF_LLINK_ENABLED(nif));
	if (__improbable(nif->nif_default_llink->nll_state !=
	    NETIF_LLINK_STATE_INIT)) {
		STATS_INC(nifs, NETIF_STATS_LLINK_QSET_BAD_STATE);
		DTRACE_SKYWALK1(llink__bad__state, struct nx_netif *, nif);
		return NULL;
	}
	if (!NX_LLINK_PROV(nif->nif_nx) ||
	    (nx_netif_random_qset == 0 && hint == 0)) {
		goto def_qset;
	}
	if (nx_netif_random_qset == 0) {
		nx_netif_qset_hint_decode(hint, &ll_id_internal, &qset_idx);
	} else {
		ll_id_internal = 0;
		qset_idx = 0;
	}
	lck_rw_lock_shared(&nif->nif_llink_lock);
	i = 0;
	random_id = random();
	STAILQ_FOREACH(llink, &nif->nif_llink_list, nll_link) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % nif->nif_llink_cnt) == i) {
			break;
		} else if (llink->nll_link_id_internal == ll_id_internal) {
			break;
		}
		i++;
	}
	if (llink == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	j = 0;
	random_id = random();
	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		if (nx_netif_random_qset != 0 &&
		    (random_id % llink->nll_qset_cnt) == j) {
			break;
		} else if (qset->nqs_idx == qset_idx) {
			break;
		}
		j++;
	}
	if (qset == NULL) {
		STATS_INC(nifs, NETIF_STATS_LLINK_HINT_NOT_USEFUL);
		lck_rw_unlock_shared(&nif->nif_llink_lock);
		goto def_qset;
	}
	nx_netif_qset_retain(qset);
	STATS_INC(nifs, NETIF_STATS_LLINK_NONDEF_QSET_USED);
	lck_rw_unlock_shared(&nif->nif_llink_lock);
	if (nx_netif_random_qset != 0) {
		SK_DF(SK_VERB_LLINK, "%s: random qset: qset %p, ifcq %p, "
		    "llink_idx %d, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, i, j);

		DTRACE_SKYWALK5(random__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, int, i, int, j);
	} else {
		SK_DF(SK_VERB_LLINK, "%s: non-default qset: qset %p, ifcq %p, "
		    "ll_id_internal 0x%x, qset_idx %d", if_name(nif->nif_ifp),
		    qset, qset->nqs_ifcq, ll_id_internal, qset_idx);

		DTRACE_SKYWALK5(nondef__qset, struct nx_netif *, nif,
		    struct netif_qset *, qset, struct ifclassq *,
		    qset->nqs_ifcq, uint16_t, ll_id_internal,
		    uint16_t, qset_idx);
	}
	return qset;

def_qset:
	STATS_INC(nifs, NETIF_STATS_LLINK_DEF_QSET_USED);
	qset = nx_netif_get_default_qset(nif);
	ASSERT(qset != NULL);

	SK_DF(SK_VERB_LLINK, "%s: default qset: qset %p, ifcq %p, hint %llx",
	    if_name(nif->nif_ifp), qset, qset->nqs_ifcq, hint);

	DTRACE_SKYWALK4(def__qset, struct nx_netif *, nif, struct netif_qset *,
	    qset, struct ifclassq *, qset->nqs_ifcq, uint64_t, hint);
	return qset;
}

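/*
 * Note: nx_netif_find_qset() returns a retained qset which the caller
 * must drop via nx_netif_qset_release(); the _noref variant above
 * returns a borrowed pointer that is only valid for as long as the
 * default logical link stays alive.
 */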
void
nx_netif_llink_init(struct nx_netif *nif)
{
	ifnet_t ifp = nif->nif_ifp;

	if (__improbable(nx_netif_disable_llink != 0)) {
		SK_DF(SK_VERB_LLINK, "%s: llink is disabled",
		    if_name(nif->nif_ifp));
		return;
	}

	if (!SKYWALK_NATIVE(ifp)) {
		SK_DF(SK_VERB_LLINK,
		    "%s: llink is supported on native devices only",
		    if_name(ifp));
		return;
	}
	ASSERT(!NETIF_LLINK_ENABLED(nif));
	lck_rw_init(&nif->nif_llink_lock, &netif_llink_lock_group,
	    &netif_llink_lock_attr);

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	STAILQ_INIT(&nif->nif_llink_list);
	nif->nif_llink_cnt = 0;
	nx_netif_default_llink_add(nif);
	nif->nif_flags |= NETIF_FLAG_LLINK_INITIALIZED;

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	SK_DF(SK_VERB_LLINK, "%s: llink initialized", if_name(ifp));
}

void
nx_netif_llink_fini(struct nx_netif *nif)
{
	if (!NETIF_LLINK_ENABLED(nif)) {
		SK_DF(SK_VERB_LLINK, "%s: llink not initialized",
		    if_name(nif->nif_ifp));
		return;
	}

	lck_rw_lock_exclusive(&nif->nif_llink_lock);

	nif->nif_flags &= ~NETIF_FLAG_LLINK_INITIALIZED;
	nx_netif_default_llink_remove(nif);
	ASSERT(nif->nif_llink_cnt == 0);
	ASSERT(STAILQ_EMPTY(&nif->nif_llink_list));

	lck_rw_unlock_exclusive(&nif->nif_llink_lock);

	nx_netif_llink_config_free(nif);
	lck_rw_destroy(&nif->nif_llink_lock, &netif_llink_lock_group);
	SK_DF(SK_VERB_LLINK, "%s: llink teardown done",
	    if_name(nif->nif_ifp));
}

int
nx_netif_validate_llink_config(struct kern_nexus_netif_llink_init *init,
    bool default_llink)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	bool has_default_qset = false;
	bool default_llink_flag;
	uint8_t i;

	default_llink_flag =
	    ((init->nli_flags & KERN_NEXUS_NET_LLINK_DEFAULT) != 0);

	if (default_llink != default_llink_flag) {
		SK_ERR("default llink flag incompatible: default_llink(%s), "
		    "default_llink_flag(%s)",
		    default_llink ? "true" : "false",
		    default_llink_flag ? "true" : "false");
		return EINVAL;
	}
	if (init->nli_num_qsets == 0) {
		SK_ERR("num qsets is zero");
		return EINVAL;
	}
	if ((qsinit = init->nli_qsets) == NULL) {
		SK_ERR("qsets is NULL");
		return EINVAL;
	}
	for (i = 0; i < init->nli_num_qsets; i++) {
		if (qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_DEFAULT) {
			if (has_default_qset) {
				SK_ERR("has more than one default qset");
				return EINVAL;
			}
			if (qsinit[i].nlqi_num_rxqs == 0) {
				SK_ERR("num_rxqs == 0");
				return EINVAL;
			}
			has_default_qset = true;
		}
		if (qsinit[i].nlqi_num_txqs == 0) {
			SK_ERR("num_txqs == 0");
			return EINVAL;
		}
		if ((qsinit[i].nlqi_flags &
		    KERN_NEXUS_NET_LLINK_QSET_WMM_MODE) &&
		    (qsinit[i].nlqi_num_txqs != NEXUS_NUM_WMM_QUEUES)) {
			SK_ERR("invalid wmm mode");
			return EINVAL;
		}
	}
	return 0;
}

int
nx_netif_default_llink_config(struct nx_netif *nif,
    struct kern_nexus_netif_llink_init *init)
{
	struct kern_nexus_netif_llink_qset_init *qsinit;
	int i, err;

	err = nx_netif_validate_llink_config(init, true);
	if (err != 0) {
		return err;
	}
	nif->nif_default_llink_params = sk_alloc_type(
		struct kern_nexus_netif_llink_init,
		Z_WAITOK | Z_NOFAIL, nx_netif_tag_llink_cfg);

	qsinit = sk_alloc_type_array(struct kern_nexus_netif_llink_qset_init,
	    init->nli_num_qsets, Z_WAITOK, nx_netif_tag_llink_cfg);
	if (qsinit == NULL) {
		SK_ERR("failed to alloc kern_nexus_netif_llink_qset_init");
		sk_free_type(struct kern_nexus_netif_llink_init,
		    nif->nif_default_llink_params);
		nif->nif_default_llink_params = NULL;
		return ENOMEM;
	}
	memcpy(nif->nif_default_llink_params, init,
	    __builtin_offsetof(struct kern_nexus_netif_llink_init,
	    nli_qsets));
	for (i = 0; i < init->nli_num_qsets; i++) {
		qsinit[i] = init->nli_qsets[i];
	}
	nif->nif_default_llink_params->nli_qsets = qsinit;
	return 0;
}

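/*
 * Note: the qset array is deep-copied above because init->nli_qsets is
 * caller-owned; the saved params point at our own allocation, which
 * nx_netif_llink_config_free() below releases.
 */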
void
nx_netif_llink_config_free(struct nx_netif *nif)
{
	if (nif->nif_default_llink_params == NULL) {
		return;
	}
	sk_free_type_array(struct kern_nexus_netif_llink_qset_init,
	    nif->nif_default_llink_params->nli_num_qsets,
	    nif->nif_default_llink_params->nli_qsets);
	nif->nif_default_llink_params->nli_qsets = NULL;

	sk_free_type(struct kern_nexus_netif_llink_init,
	    nif->nif_default_llink_params);
	nif->nif_default_llink_params = NULL;
}

static int
nx_netif_llink_ext_init_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	struct netif_stats *nifs = &NX_NETIF_PRIVATE(nx)->nif_stats;
	int err = 0;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_init != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_init != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		ASSERT((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0);
		err = nxnpi->nxnpi_qset_init(nxprov, nx, llink->nll_ctx,
		    qset->nqs_idx, qset->nqs_id, qset, &qset->nqs_ctx);
		if (err != 0) {
			STATS_INC(nifs, NETIF_STATS_LLINK_QSET_INIT_FAIL);
			SK_ERR("nx: 0x%llx, qset: %d, qset init err %d",
			    SK_KVA(nx), qset->nqs_idx, err);
			goto out;
		}
		qset->nqs_flags |= NETIF_QSET_FLAG_EXT_INITED;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, false, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_RXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);

			ASSERT((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0);
			err = nxnpi->nxnpi_queue_init(nxprov, nx, qset->nqs_ctx,
			    i, true, drvq, &drvq->nq_ctx);
			if (err != 0) {
				STATS_INC(nifs, NETIF_STATS_LLINK_TXQ_INIT_FAIL);
				SK_ERR("nx: 0x%llx qset: %d queue_init err %d",
				    SK_KVA(nx), qset->nqs_idx, err);
				goto out;
			}
			drvq->nq_flags |= NETIF_QUEUE_EXT_INITED;
		}
	}
out:
	if (err != 0) {
		nx_netif_llink_ext_fini_queues(nx, llink);
	}
	return err;
}

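/*
 * Note: the NETIF_QSET_FLAG_EXT_INITED and NETIF_QUEUE_EXT_INITED
 * flags let nx_netif_llink_ext_fini_queues() below run safely on a
 * partially initialized logical link, which the error path above
 * relies on.
 */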
static void
nx_netif_llink_ext_fini_queues(struct kern_nexus *nx, struct netif_llink *llink)
{
	struct kern_nexus_provider *nxprov = NX_PROV(nx);
	struct kern_nexus_netif_provider_init *nxnpi;
	struct netif_qset *qset;
	uint8_t i;

	nxnpi = &nxprov->nxprov_netif_ext;
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_qset_fini != NULL);
	ASSERT(nxprov->nxprov_netif_ext.nxnpi_queue_fini != NULL);

	SLIST_FOREACH(qset, &llink->nll_qset_list, nqs_list) {
		struct netif_queue *drvq;

		for (i = 0; i < qset->nqs_num_rx_queues; i++) {
			drvq = NETIF_QSET_RX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		for (i = 0; i < qset->nqs_num_tx_queues; i++) {
			drvq = NETIF_QSET_TX_QUEUE(qset, i);
			if ((drvq->nq_flags & NETIF_QUEUE_EXT_INITED) == 0) {
				continue;
			}
			nxnpi->nxnpi_queue_fini(nxprov, nx, drvq->nq_ctx);
			drvq->nq_flags &= ~NETIF_QUEUE_EXT_INITED;
		}
		if ((qset->nqs_flags & NETIF_QSET_FLAG_EXT_INITED) == 0) {
			continue;
		}
		nxnpi->nxnpi_qset_fini(nxprov, nx, qset->nqs_ctx);
		qset->nqs_flags &= ~NETIF_QSET_FLAG_EXT_INITED;
	}
}

int
nx_netif_llink_ext_init_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	return nx_netif_llink_ext_init_queues(nx, nif->nif_default_llink);
}

void
nx_netif_llink_ext_fini_default_queues(struct kern_nexus *nx)
{
	struct nx_netif *nif = NX_NETIF_PRIVATE(nx);
	nx_netif_llink_ext_fini_queues(nx, nif->nif_default_llink);
}
1110