xref: /xnu-8019.80.24/bsd/net/pktsched/pktsched_fq_codel.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/types.h>
30 #include <sys/param.h>
31 #include <kern/zalloc.h>
32 #include <net/ethernet.h>
33 #include <net/if_var.h>
34 #include <net/if.h>
35 #include <net/classq/classq.h>
36 #include <net/classq/classq_fq_codel.h>
37 #include <net/pktsched/pktsched_fq_codel.h>
38 #include <os/log.h>
39 
40 #define FQ_CODEL_DEFAULT_QUANTUM 1500
41 
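/*
 * Per-class DRR quantum weights, expressed relative to the base
 * interface quantum computed in fq_if_calc_quantum() below (roughly
 * one maximum-sized frame).  The video classes (AV/RV/VI) get twice
 * the base quantum per round, while the latency-sensitive VO/CTL
 * classes get a 2/5 share: with the 1500-byte default quantum that
 * works out to 3000 bytes per round for VI and 600 for VO/CTL.
 */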
#define FQ_CODEL_QUANTUM_BK_SYS(_q)    (_q)
#define FQ_CODEL_QUANTUM_BK(_q)        (_q)
#define FQ_CODEL_QUANTUM_BE(_q)        (_q)
#define FQ_CODEL_QUANTUM_RD(_q)        (_q)
#define FQ_CODEL_QUANTUM_OAM(_q)       (_q)
#define FQ_CODEL_QUANTUM_AV(_q)        (_q * 2)
#define FQ_CODEL_QUANTUM_RV(_q)        (_q * 2)
#define FQ_CODEL_QUANTUM_VI(_q)        (_q * 2)
#define FQ_CODEL_QUANTUM_VO(_q)        ((_q * 2) / 5)
#define FQ_CODEL_QUANTUM_CTL(_q)       ((_q * 2) / 5)

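/*
 * FQ_CODEL_DRR_MAX_* bounds how many flows' worth of quantum a class
 * may add to its budget in a single refill (see the budget update in
 * fq_if_dequeue_classq_multi()); higher-priority classes are allowed
 * a deeper refill per round.
 */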
#define FQ_CODEL_DRR_MAX_BK_SYS    2
#define FQ_CODEL_DRR_MAX_BK        2
#define FQ_CODEL_DRR_MAX_BE        4
#define FQ_CODEL_DRR_MAX_RD        4
#define FQ_CODEL_DRR_MAX_OAM       4
#define FQ_CODEL_DRR_MAX_AV        6
#define FQ_CODEL_DRR_MAX_RV        6
#define FQ_CODEL_DRR_MAX_VI        6
#define FQ_CODEL_DRR_MAX_VO        8
#define FQ_CODEL_DRR_MAX_CTL       8

static ZONE_DECLARE(fq_if_zone, "pktsched_fq_if", sizeof(fq_if_t), ZC_ZFREE_CLEARMEM);

typedef STAILQ_HEAD(, flowq) flowq_dqlist_t;

static fq_if_t *fq_if_alloc(struct ifnet *, struct ifclassq *, classq_pkt_type_t);
static void fq_if_destroy(fq_if_t *fqs);
static void fq_if_classq_init(fq_if_t *fqs, uint32_t priority,
    uint16_t quantum, uint32_t drr_max, uint32_t svc_class);
static void fq_if_dequeue(fq_if_t *, fq_if_classq_t *, uint32_t,
    int64_t, classq_pkt_t *, classq_pkt_t *, uint32_t *,
    uint32_t *, flowq_dqlist_t *, boolean_t drvmgmt);
void fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat);
static void fq_if_purge(fq_if_t *);
static void fq_if_purge_classq(fq_if_t *, fq_if_classq_t *);
static void fq_if_purge_flow(fq_if_t *, fq_t *, u_int32_t *, u_int32_t *);
static void fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t *fq_cl,
    bool add_to_old);
static void fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl,
    fq_t *fq, bool remove_hash, bool destroy);

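/*
 * Flows are kept in a hash table of FQ_IF_HASH_TABLE_SIZE buckets;
 * FQ_IF_FLOW_HASH_ID() picks a bucket from the tag bits of a packet's
 * flow hash (see fq_if_hash_pkt()).
 */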
#define FQ_IF_FLOW_HASH_ID(_flowid_) \
	(((_flowid_) >> FQ_IF_HASH_TAG_SHIFT) & FQ_IF_HASH_TAG_MASK)

#define FQ_IF_CLASSQ_IDLE(_fcl_) \
	(STAILQ_EMPTY(&(_fcl_)->fcl_new_flows) && \
	STAILQ_EMPTY(&(_fcl_)->fcl_old_flows))

typedef void (* fq_if_append_pkt_t)(classq_pkt_t *, classq_pkt_t *);
typedef boolean_t (* fq_getq_flow_t)(fq_if_t *, fq_if_classq_t *, fq_t *,
    int64_t, u_int32_t, classq_pkt_t *, classq_pkt_t *, u_int32_t *,
    u_int32_t *, boolean_t *, u_int32_t);

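/*
 * The scheduler handles two packet representations: classic mbufs and,
 * when SKYWALK is built in, kernel packets.  The append/getq helpers
 * below are chosen at runtime from fqs_ptype so that the common
 * dequeue logic stays representation-agnostic.
 */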
static void
fq_if_append_mbuf(classq_pkt_t *pkt, classq_pkt_t *next_pkt)
{
	pkt->cp_mbuf->m_nextpkt = next_pkt->cp_mbuf;
}

#if SKYWALK
static void
fq_if_append_pkt(classq_pkt_t *pkt, classq_pkt_t *next_pkt)
{
	pkt->cp_kpkt->pkt_nextpkt = next_pkt->cp_kpkt;
}
#endif /* SKYWALK */

#if SKYWALK
static boolean_t
fq_getq_flow_kpkt(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    int64_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *head,
    classq_pkt_t *tail, u_int32_t *byte_cnt, u_int32_t *pkt_cnt,
    boolean_t *qempty, u_int32_t pflags)
{
	u_int32_t plen;
	pktsched_pkt_t pkt;
	boolean_t limit_reached = FALSE;
	struct ifclassq *ifq = fqs->fqs_ifq;
	struct ifnet *ifp = ifq->ifcq_ifp;

	/*
	 * Assert to make sure pflags is part of PKT_F_COMMON_MASK;
	 * all common flags need to be declared in that mask.
	 */
	ASSERT((pflags & ~PKT_F_COMMON_MASK) == 0);

	while (fq->fq_deficit > 0 && limit_reached == FALSE &&
	    !KPKTQ_EMPTY(&fq->fq_kpktq)) {
		_PKTSCHED_PKT_INIT(&pkt);
		fq_getq_flow(fqs, fq, &pkt);
		ASSERT(pkt.pktsched_ptype == QP_PACKET);

		plen = pktsched_get_pkt_len(&pkt);
		fq->fq_deficit -= plen;
		pkt.pktsched_pkt_kpkt->pkt_pflags |= pflags;

		if (head->cp_kpkt == NULL) {
			*head = pkt.pktsched_pkt;
		} else {
			ASSERT(tail->cp_kpkt != NULL);
			ASSERT(tail->cp_kpkt->pkt_nextpkt == NULL);
			tail->cp_kpkt->pkt_nextpkt = pkt.pktsched_pkt_kpkt;
		}
		*tail = pkt.pktsched_pkt;
		tail->cp_kpkt->pkt_nextpkt = NULL;
		fq_cl->fcl_stat.fcl_dequeue++;
		fq_cl->fcl_stat.fcl_dequeue_bytes += plen;
		*pkt_cnt += 1;
		*byte_cnt += plen;

		ifclassq_set_packet_metadata(ifq, ifp, &pkt.pktsched_pkt);

		/* Check if the limit is reached */
		if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) {
			limit_reached = TRUE;
		}
	}

	*qempty = KPKTQ_EMPTY(&fq->fq_kpktq);
	return limit_reached;
}
#endif /* SKYWALK */

static boolean_t
fq_getq_flow_mbuf(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    int64_t byte_limit, u_int32_t pkt_limit, classq_pkt_t *head,
    classq_pkt_t *tail, u_int32_t *byte_cnt, u_int32_t *pkt_cnt,
    boolean_t *qempty, u_int32_t pflags)
{
	u_int32_t plen;
	pktsched_pkt_t pkt;
	boolean_t limit_reached = FALSE;
	struct ifclassq *ifq = fqs->fqs_ifq;
	struct ifnet *ifp = ifq->ifcq_ifp;

	while (fq->fq_deficit > 0 && limit_reached == FALSE &&
	    !MBUFQ_EMPTY(&fq->fq_mbufq)) {
		_PKTSCHED_PKT_INIT(&pkt);
		fq_getq_flow(fqs, fq, &pkt);
		ASSERT(pkt.pktsched_ptype == QP_MBUF);

		plen = pktsched_get_pkt_len(&pkt);
		fq->fq_deficit -= plen;
		pkt.pktsched_pkt_mbuf->m_pkthdr.pkt_flags |= pflags;

		if (head->cp_mbuf == NULL) {
			*head = pkt.pktsched_pkt;
		} else {
			ASSERT(tail->cp_mbuf != NULL);
			ASSERT(tail->cp_mbuf->m_nextpkt == NULL);
			tail->cp_mbuf->m_nextpkt = pkt.pktsched_pkt_mbuf;
		}
		*tail = pkt.pktsched_pkt;
		tail->cp_mbuf->m_nextpkt = NULL;
		fq_cl->fcl_stat.fcl_dequeue++;
		fq_cl->fcl_stat.fcl_dequeue_bytes += plen;
		*pkt_cnt += 1;
		*byte_cnt += plen;

		ifclassq_set_packet_metadata(ifq, ifp, &pkt.pktsched_pkt);

		/* Check if the limit is reached */
		if (*pkt_cnt >= pkt_limit || *byte_cnt >= byte_limit) {
			limit_reached = TRUE;
		}
	}

	*qempty = MBUFQ_EMPTY(&fq->fq_mbufq);
	return limit_reached;
}

fq_if_t *
fq_if_alloc(struct ifnet *ifp, struct ifclassq *ifq, classq_pkt_type_t ptype)
{
	fq_if_t *fqs;

	fqs = zalloc_flags(fq_if_zone, Z_WAITOK | Z_ZERO);
	fqs->fqs_ifq = ifq;
	fqs->fqs_ptype = ptype;

	/* Calculate target queue delay */
	ifclassq_calc_target_qdelay(ifp, &fqs->fqs_target_qdelay);

	/* Calculate update interval */
	ifclassq_calc_update_interval(&fqs->fqs_update_interval);

	/* Configure packet drop limit across all queues */
	fqs->fqs_pkt_droplimit = IFCQ_PKT_DROP_LIMIT(ifq);
	STAILQ_INIT(&fqs->fqs_fclist);
	return fqs;
}

void
fq_if_destroy(fq_if_t *fqs)
{
	fq_if_purge(fqs);
	fqs->fqs_ifq = NULL;
	zfree(fq_if_zone, fqs);
}

static inline uint8_t
fq_if_service_to_priority(fq_if_t *fqs, mbuf_svc_class_t svc)
{
	uint8_t pri;

	if (fqs->fqs_flags & FQS_DRIVER_MANAGED) {
		switch (svc) {
		case MBUF_SC_BK_SYS:
		case MBUF_SC_BK:
			pri = FQ_IF_BK_INDEX;
			break;
		case MBUF_SC_BE:
		case MBUF_SC_RD:
		case MBUF_SC_OAM:
			pri = FQ_IF_BE_INDEX;
			break;
		case MBUF_SC_AV:
		case MBUF_SC_RV:
		case MBUF_SC_VI:
		case MBUF_SC_SIG:
			pri = FQ_IF_VI_INDEX;
			break;
		case MBUF_SC_VO:
		case MBUF_SC_CTL:
			pri = FQ_IF_VO_INDEX;
			break;
		default:
			pri = FQ_IF_BE_INDEX; /* Use best effort by default */
			break;
		}
		return pri;
	}

	/* scheduler is not managed by the driver */
	switch (svc) {
	case MBUF_SC_BK_SYS:
		pri = FQ_IF_BK_SYS_INDEX;
		break;
	case MBUF_SC_BK:
		pri = FQ_IF_BK_INDEX;
		break;
	case MBUF_SC_BE:
		pri = FQ_IF_BE_INDEX;
		break;
	case MBUF_SC_RD:
		pri = FQ_IF_RD_INDEX;
		break;
	case MBUF_SC_OAM:
		pri = FQ_IF_OAM_INDEX;
		break;
	case MBUF_SC_AV:
		pri = FQ_IF_AV_INDEX;
		break;
	case MBUF_SC_RV:
		pri = FQ_IF_RV_INDEX;
		break;
	case MBUF_SC_VI:
		pri = FQ_IF_VI_INDEX;
		break;
	case MBUF_SC_SIG:
		pri = FQ_IF_SIG_INDEX;
		break;
	case MBUF_SC_VO:
		pri = FQ_IF_VO_INDEX;
		break;
	case MBUF_SC_CTL:
		pri = FQ_IF_CTL_INDEX;
		break;
	default:
		pri = FQ_IF_BE_INDEX; /* Use best effort by default */
		break;
	}
	return pri;
}

static void
fq_if_classq_init(fq_if_t *fqs, uint32_t pri, uint16_t quantum,
    uint32_t drr_max, uint32_t svc_class)
{
	fq_if_classq_t *fq_cl;
	VERIFY(pri < FQ_IF_MAX_CLASSES);
	fq_cl = &fqs->fqs_classq[pri];

	VERIFY(fq_cl->fcl_quantum == 0);
	fq_cl->fcl_quantum = quantum;
	fq_cl->fcl_pri = pri;
	fq_cl->fcl_drr_max = drr_max;
	fq_cl->fcl_service_class = svc_class;
	STAILQ_INIT(&fq_cl->fcl_new_flows);
	STAILQ_INIT(&fq_cl->fcl_old_flows);
}

int
fq_if_enqueue_classq(struct ifclassq *ifq, classq_pkt_t *head,
    classq_pkt_t *tail, uint32_t cnt, uint32_t bytes, boolean_t *pdrop)
{
	uint8_t pri;
	fq_if_t *fqs;
	fq_if_classq_t *fq_cl;
	int ret;
	mbuf_svc_class_t svc;
	pktsched_pkt_t pkt;

	pktsched_pkt_encap_chain(&pkt, head, tail, cnt, bytes);

	fqs = (fq_if_t *)ifq->ifcq_disc;
	svc = pktsched_get_pkt_svc(&pkt);
	pri = fq_if_service_to_priority(fqs, svc);
	VERIFY(pri < FQ_IF_MAX_CLASSES);
	fq_cl = &fqs->fqs_classq[pri];

	if (__improbable(svc == MBUF_SC_BK_SYS && fqs->fqs_throttle == 1)) {
		/* BK_SYS is currently throttled */
		atomic_add_32(&fq_cl->fcl_stat.fcl_throttle_drops, 1);
		pktsched_free_pkt(&pkt);
		*pdrop = TRUE;
		ret = EQSUSPENDED;
		goto done;
	}

	IFCQ_LOCK_SPIN(ifq);
	ret = fq_addq(fqs, &pkt, fq_cl);
	if (!(fqs->fqs_flags & FQS_DRIVER_MANAGED) &&
	    !FQ_IF_CLASSQ_IDLE(fq_cl)) {
		if (((fqs->fqs_bitmaps[FQ_IF_ER] | fqs->fqs_bitmaps[FQ_IF_EB]) &
		    (1 << pri)) == 0) {
			/*
			 * this group is not in ER or EB groups,
			 * mark it as IB
			 */
			pktsched_bit_set(pri, &fqs->fqs_bitmaps[FQ_IF_IB]);
		}
	}

	if (__improbable(ret != 0)) {
		if (ret == CLASSQEQ_SUCCESS_FC) {
			/* packet enqueued, return advisory feedback */
			ret = EQFULL;
			*pdrop = FALSE;
		} else if (ret == CLASSQEQ_COMPRESSED) {
			ret = 0;
			*pdrop = FALSE;
		} else {
			IFCQ_UNLOCK(ifq);
			*pdrop = TRUE;
			pktsched_free_pkt(&pkt);
			switch (ret) {
			case CLASSQEQ_DROP:
				ret = ENOBUFS;
				goto done;
			case CLASSQEQ_DROP_FC:
				ret = EQFULL;
				goto done;
			case CLASSQEQ_DROP_SP:
				ret = EQSUSPENDED;
				goto done;
			default:
				VERIFY(0);
				/* NOTREACHED */
				__builtin_unreachable();
			}
			/* NOTREACHED */
			__builtin_unreachable();
		}
	} else {
		*pdrop = FALSE;
	}
	IFCQ_ADD_LEN(ifq, cnt);
	IFCQ_INC_BYTES(ifq, bytes);
	IFCQ_UNLOCK(ifq);
done:
#if DEBUG || DEVELOPMENT
	if (__improbable((ret == EQFULL) && (ifclassq_flow_control_adv == 0))) {
		ret = 0;
	}
#endif /* DEBUG || DEVELOPMENT */
	return ret;
}

void
fq_if_dequeue_classq(struct ifclassq *ifq, classq_pkt_t *pkt)
{
	(void) fq_if_dequeue_classq_multi(ifq, 1,
	    CLASSQ_DEQUEUE_MAX_BYTE_LIMIT, pkt, NULL, NULL, NULL);
}

void
fq_if_dequeue_sc_classq(struct ifclassq *ifq, mbuf_svc_class_t svc,
    classq_pkt_t *pkt)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
	uint32_t total_pktcnt = 0, total_bytecnt = 0;
	fq_if_classq_t *fq_cl;
	uint8_t pri;

	pri = fq_if_service_to_priority(fqs, svc);
	fq_cl = &fqs->fqs_classq[pri];

	fq_if_dequeue(fqs, fq_cl, 1, CLASSQ_DEQUEUE_MAX_BYTE_LIMIT,
	    pkt, NULL, &total_pktcnt, &total_bytecnt, NULL, TRUE);

	IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
}

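/*
 * When dequeueing batches across many flows, each flow's packet chain
 * is first parked on the flow itself (fq_dq_head/fq_dq_tail) and the
 * flow is linked onto a dequeue list; fq_dqlist_get_packet_list()
 * later splices all the per-flow chains into the caller's single
 * packet list.
 */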
static inline void
fq_dqlist_add(flowq_dqlist_t *fq_dqlist_head, fq_t *fq)
{
	ASSERT(fq->fq_dq_head.cp_mbuf == NULL);
	ASSERT(!fq->fq_in_dqlist);
	STAILQ_INSERT_TAIL(fq_dqlist_head, fq, fq_dqlink);
	fq->fq_in_dqlist = true;
}

static inline void
fq_dqlist_remove(flowq_dqlist_t *fq_dqlist_head, fq_t *fq, classq_pkt_t *head,
    classq_pkt_t *tail)
{
	ASSERT(fq->fq_in_dqlist);
	if (fq->fq_dq_head.cp_mbuf == NULL) {
		goto done;
	}

	if (head->cp_mbuf == NULL) {
		*head = fq->fq_dq_head;
	} else {
		ASSERT(tail->cp_mbuf != NULL);

		switch (fq->fq_ptype) {
		case QP_MBUF:
			ASSERT(tail->cp_mbuf->m_nextpkt == NULL);
			tail->cp_mbuf->m_nextpkt = fq->fq_dq_head.cp_mbuf;
			ASSERT(fq->fq_dq_tail.cp_mbuf->m_nextpkt == NULL);
			break;
#if SKYWALK
		case QP_PACKET:
			ASSERT(tail->cp_kpkt->pkt_nextpkt == NULL);
			tail->cp_kpkt->pkt_nextpkt = fq->fq_dq_head.cp_kpkt;
			ASSERT(fq->fq_dq_tail.cp_kpkt->pkt_nextpkt == NULL);
			break;
#endif /* SKYWALK */
		default:
			VERIFY(0);
			/* NOTREACHED */
			__builtin_unreachable();
		}
	}
	*tail = fq->fq_dq_tail;
done:
	STAILQ_REMOVE(fq_dqlist_head, fq, flowq, fq_dqlink);
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;
	if (fq->fq_flags & FQF_DESTROYED) {
		fq_destroy(fq);
	}
}

static inline void
fq_dqlist_get_packet_list(flowq_dqlist_t *fq_dqlist_head, classq_pkt_t *head,
    classq_pkt_t *tail)
{
	fq_t *fq, *tfq;

	STAILQ_FOREACH_SAFE(fq, fq_dqlist_head, fq_dqlink, tfq) {
		fq_dqlist_remove(fq_dqlist_head, fq, head, tail);
	}
}

int
fq_if_dequeue_classq_multi(struct ifclassq *ifq, u_int32_t maxpktcnt,
    u_int32_t maxbytecnt, classq_pkt_t *first_packet,
    classq_pkt_t *last_packet, u_int32_t *retpktcnt,
    u_int32_t *retbytecnt)
{
	uint32_t total_pktcnt = 0, total_bytecnt = 0;
	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	classq_pkt_t tmp = CLASSQ_PKT_INITIALIZER(tmp);
	fq_if_append_pkt_t append_pkt;
	flowq_dqlist_t fq_dqlist_head;
	fq_if_classq_t *fq_cl;
	fq_if_t *fqs;
	int pri;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	fqs = (fq_if_t *)ifq->ifcq_disc;
	STAILQ_INIT(&fq_dqlist_head);

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		append_pkt = fq_if_append_mbuf;
		break;

#if SKYWALK
	case QP_PACKET:
		append_pkt = fq_if_append_pkt;
		break;
#endif /* SKYWALK */

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

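	/*
	 * Service loop over the per-state class bitmaps: classes in ER
	 * are serviced first; when ER is empty the highest-priority EB
	 * class is promoted into ER; and once both ER and EB are empty,
	 * the IB set (classes that ran out of budget while still
	 * backlogged) is promoted to EB to start a new round.
	 */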
	for (;;) {
		uint32_t pktcnt = 0, bytecnt = 0;
		classq_pkt_t head = CLASSQ_PKT_INITIALIZER(head);
		classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);

		if (fqs->fqs_bitmaps[FQ_IF_ER] == 0 &&
		    fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
			fqs->fqs_bitmaps[FQ_IF_EB] = fqs->fqs_bitmaps[FQ_IF_IB];
			fqs->fqs_bitmaps[FQ_IF_IB] = 0;
			if (fqs->fqs_bitmaps[FQ_IF_EB] == 0) {
				break;
			}
		}
		pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_ER]);
		if (pri == 0) {
			/*
			 * There are no ER flows, move the highest
			 * priority one from EB if there are any in that
			 * category
			 */
			pri = pktsched_ffs(fqs->fqs_bitmaps[FQ_IF_EB]);
			VERIFY(pri > 0);
			pktsched_bit_clr((pri - 1),
			    &fqs->fqs_bitmaps[FQ_IF_EB]);
			pktsched_bit_set((pri - 1),
			    &fqs->fqs_bitmaps[FQ_IF_ER]);
		}
		pri--; /* index starts at 0 */
		fq_cl = &fqs->fqs_classq[pri];

		if (fq_cl->fcl_budget <= 0) {
			/* Update the budget */
			fq_cl->fcl_budget += (min(fq_cl->fcl_drr_max,
			    fq_cl->fcl_stat.fcl_flows_cnt) *
			    fq_cl->fcl_quantum);
			if (fq_cl->fcl_budget <= 0) {
				goto state_change;
			}
		}
		fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
		    (maxbytecnt - total_bytecnt), &head, &tail, &pktcnt,
		    &bytecnt, &fq_dqlist_head, FALSE);
		if (head.cp_mbuf != NULL) {
			ASSERT(STAILQ_EMPTY(&fq_dqlist_head));
			if (first.cp_mbuf == NULL) {
				first = head;
			} else {
				ASSERT(last.cp_mbuf != NULL);
				append_pkt(&last, &head);
			}
			last = tail;
			append_pkt(&last, &tmp);
		}
		fq_cl->fcl_budget -= bytecnt;
		total_pktcnt += pktcnt;
		total_bytecnt += bytecnt;

		/*
		 * If the class has exceeded the budget but still has data
		 * to send, move it to IB
		 */
state_change:
		if (!FQ_IF_CLASSQ_IDLE(fq_cl)) {
			if (fq_cl->fcl_budget <= 0) {
				pktsched_bit_set(pri,
				    &fqs->fqs_bitmaps[FQ_IF_IB]);
				pktsched_bit_clr(pri,
				    &fqs->fqs_bitmaps[FQ_IF_ER]);
			}
		} else {
			pktsched_bit_clr(pri, &fqs->fqs_bitmaps[FQ_IF_ER]);
			VERIFY(((fqs->fqs_bitmaps[FQ_IF_ER] |
			    fqs->fqs_bitmaps[FQ_IF_EB] |
			    fqs->fqs_bitmaps[FQ_IF_IB]) & (1 << pri)) == 0);
			fq_cl->fcl_budget = 0;
		}
		if (total_pktcnt >= maxpktcnt || total_bytecnt >= maxbytecnt) {
			break;
		}
	}

	fq_dqlist_get_packet_list(&fq_dqlist_head, &first, &last);

	if (__probable(first_packet != NULL)) {
		*first_packet = first;
	}
	if (last_packet != NULL) {
		*last_packet = last;
	}
	if (retpktcnt != NULL) {
		*retpktcnt = total_pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = total_bytecnt;
	}

	IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);
	return 0;
}

int
fq_if_dequeue_sc_classq_multi(struct ifclassq *ifq, mbuf_svc_class_t svc,
    u_int32_t maxpktcnt, u_int32_t maxbytecnt, classq_pkt_t *first_packet,
    classq_pkt_t *last_packet, u_int32_t *retpktcnt, u_int32_t *retbytecnt)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;
	uint8_t pri;
	u_int32_t total_pktcnt = 0, total_bytecnt = 0;
	fq_if_classq_t *fq_cl;
	classq_pkt_t first = CLASSQ_PKT_INITIALIZER(first);
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	fq_if_append_pkt_t append_pkt;
	flowq_dqlist_t fq_dqlist_head;

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		append_pkt = fq_if_append_mbuf;
		break;

#if SKYWALK
	case QP_PACKET:
		append_pkt = fq_if_append_pkt;
		break;
#endif /* SKYWALK */

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	STAILQ_INIT(&fq_dqlist_head);
	pri = fq_if_service_to_priority(fqs, svc);
	fq_cl = &fqs->fqs_classq[pri];
	/*
	 * Now we have the queue for a particular service class. We need
	 * to dequeue as many packets as needed, first from the new flows
	 * and then from the old flows.
	 */
	while (total_pktcnt < maxpktcnt && total_bytecnt < maxbytecnt &&
	    fq_cl->fcl_stat.fcl_pkt_cnt > 0) {
		classq_pkt_t head = CLASSQ_PKT_INITIALIZER(head);
		classq_pkt_t tail = CLASSQ_PKT_INITIALIZER(tail);
		u_int32_t pktcnt = 0, bytecnt = 0;

		fq_if_dequeue(fqs, fq_cl, (maxpktcnt - total_pktcnt),
		    (maxbytecnt - total_bytecnt), &head, &tail, &pktcnt,
		    &bytecnt, &fq_dqlist_head, TRUE);
		if (head.cp_mbuf != NULL) {
			if (first.cp_mbuf == NULL) {
				first = head;
			} else {
				ASSERT(last.cp_mbuf != NULL);
				append_pkt(&last, &head);
			}
			last = tail;
		}
		total_pktcnt += pktcnt;
		total_bytecnt += bytecnt;
	}

	fq_dqlist_get_packet_list(&fq_dqlist_head, &first, &last);

	if (__probable(first_packet != NULL)) {
		*first_packet = first;
	}
	if (last_packet != NULL) {
		*last_packet = last;
	}
	if (retpktcnt != NULL) {
		*retpktcnt = total_pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = total_bytecnt;
	}

	IFCQ_XMIT_ADD(ifq, total_pktcnt, total_bytecnt);

	return 0;
}

static void
fq_if_purge_flow(fq_if_t *fqs, fq_t *fq, u_int32_t *pktsp,
    u_int32_t *bytesp)
{
	fq_if_classq_t *fq_cl;
	u_int32_t pkts, bytes;
	pktsched_pkt_t pkt;

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	pkts = bytes = 0;
	_PKTSCHED_PKT_INIT(&pkt);
	for (;;) {
		fq_getq_flow(fqs, fq, &pkt);
		if (pkt.pktsched_pkt_mbuf == NULL) {
			VERIFY(pkt.pktsched_ptype == QP_INVALID);
			break;
		}
		pkts++;
		bytes += pktsched_get_pkt_len(&pkt);
		pktsched_free_pkt(&pkt);
	}
	IFCQ_DROP_ADD(fqs->fqs_ifq, pkts, bytes);

	if (fq->fq_flags & FQF_NEW_FLOW) {
		fq_if_empty_new_flow(fq, fq_cl, false);
	} else if (fq->fq_flags & FQF_OLD_FLOW) {
		fq_if_empty_old_flow(fqs, fq_cl, fq, false, true);
	}

	fq_if_destroy_flow(fqs, fq_cl, fq, true);

	if (FQ_IF_CLASSQ_IDLE(fq_cl)) {
		int i;
		for (i = FQ_IF_ER; i < FQ_IF_MAX_STATE; i++) {
			pktsched_bit_clr(fq_cl->fcl_pri,
			    &fqs->fqs_bitmaps[i]);
		}
	}
	if (pktsp != NULL) {
		*pktsp = pkts;
	}
	if (bytesp != NULL) {
		*bytesp = bytes;
	}
}

static void
fq_if_purge_classq(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	fq_t *fq, *tfq;
	/*
	 * Take each flow from new/old flow list and flush mbufs
	 * in that flow
	 */
	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) {
		fq_if_purge_flow(fqs, fq, NULL, NULL);
	}
	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) {
		fq_if_purge_flow(fqs, fq, NULL, NULL);
	}
	VERIFY(STAILQ_EMPTY(&fq_cl->fcl_new_flows));
	VERIFY(STAILQ_EMPTY(&fq_cl->fcl_old_flows));

	STAILQ_INIT(&fq_cl->fcl_new_flows);
	STAILQ_INIT(&fq_cl->fcl_old_flows);
	fq_cl->fcl_budget = 0;
}

static void
fq_if_purge(fq_if_t *fqs)
{
	int i;

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	for (i = 0; i < FQ_IF_MAX_CLASSES; i++) {
		fq_if_purge_classq(fqs, &fqs->fqs_classq[i]);
	}

	VERIFY(STAILQ_EMPTY(&fqs->fqs_fclist));

	fqs->fqs_large_flow = NULL;
	for (i = 0; i < FQ_IF_HASH_TABLE_SIZE; i++) {
		VERIFY(SLIST_EMPTY(&fqs->fqs_flows[i]));
	}

	bzero(&fqs->fqs_bitmaps, sizeof(fqs->fqs_bitmaps));

	IFCQ_LEN(fqs->fqs_ifq) = 0;
	IFCQ_BYTES(fqs->fqs_ifq) = 0;
}

static void
fq_if_purge_sc(fq_if_t *fqs, cqrq_purge_sc_t *req)
{
	fq_t *fq;

	IFCQ_LOCK_ASSERT_HELD(fqs->fqs_ifq);
	req->packets = req->bytes = 0;
	VERIFY(req->flow != 0);

	/* packet type is needed only if we want to create a flow queue */
	fq = fq_if_hash_pkt(fqs, req->flow, req->sc, 0, FALSE, QP_INVALID);

	if (fq != NULL) {
		fq_if_purge_flow(fqs, fq, &req->packets, &req->bytes);
	}
}

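/*
 * The base DRR quantum is sized to roughly one maximum-sized frame
 * for the link: MTU plus Ethernet header for Ethernet, the plain MTU
 * for cellular/tunnel families, and the TSO MTU when hardware TSO is
 * in use, so a TSO burst can be serviced within a single round.
 */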
static uint16_t
fq_if_calc_quantum(struct ifnet *ifp)
{
	uint16_t quantum;

	switch (ifp->if_family) {
	case IFNET_FAMILY_ETHERNET:
		VERIFY((ifp->if_mtu + ETHER_HDR_LEN) <= UINT16_MAX);
		quantum = (uint16_t)ifp->if_mtu + ETHER_HDR_LEN;
		break;

	case IFNET_FAMILY_CELLULAR:
	case IFNET_FAMILY_IPSEC:
	case IFNET_FAMILY_UTUN:
		VERIFY(ifp->if_mtu <= UINT16_MAX);
		quantum = (uint16_t)ifp->if_mtu;
		break;

	default:
		quantum = FQ_CODEL_DEFAULT_QUANTUM;
		break;
	}

	/*
	 * XXX: Skywalk native interface doesn't support HW TSO offload.
	 */
	if (((ifp->if_eflags & IFEF_SKYWALK_NATIVE) == 0) &&
	    ((ifp->if_hwassist & IFNET_TSOF) != 0)) {
		VERIFY(ifp->if_tso_v4_mtu <= UINT16_MAX);
		VERIFY(ifp->if_tso_v6_mtu <= UINT16_MAX);
		quantum = (uint16_t)MAX(ifp->if_tso_v4_mtu, ifp->if_tso_v6_mtu);
		quantum = (quantum != 0) ? quantum : IF_MAXMTU;
	}

	quantum = MAX(FQ_CODEL_DEFAULT_QUANTUM, quantum);
#if DEBUG || DEVELOPMENT
	quantum = (fq_codel_quantum != 0) ? fq_codel_quantum : quantum;
#endif /* DEBUG || DEVELOPMENT */
	return quantum;
}

static void
fq_if_mtu_update(fq_if_t *fqs)
{
#define _FQ_CLASSQ_UPDATE_QUANTUM(_fqs, _s, _q)    \
	(_fqs)->fqs_classq[FQ_IF_ ## _s ## _INDEX].fcl_quantum = \
	FQ_CODEL_QUANTUM_ ## _s(_q)

	uint16_t quantum;

	quantum = fq_if_calc_quantum(fqs->fqs_ifq->ifcq_ifp);

	if ((fqs->fqs_flags & FQS_DRIVER_MANAGED) != 0) {
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, BK, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, BE, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, VI, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, VO, quantum);
	} else {
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, BK_SYS, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, BK, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, BE, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, RD, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, OAM, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, AV, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, RV, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, VI, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, VO, quantum);
		_FQ_CLASSQ_UPDATE_QUANTUM(fqs, CTL, quantum);
	}
#undef _FQ_CLASSQ_UPDATE_QUANTUM
}

static void
fq_if_event(fq_if_t *fqs, cqev_t ev)
{
	IFCQ_LOCK_ASSERT_HELD(fqs->fqs_ifq);

	switch (ev) {
	case CLASSQ_EV_LINK_UP:
	case CLASSQ_EV_LINK_DOWN:
		fq_if_purge(fqs);
		break;
	case CLASSQ_EV_LINK_MTU:
		fq_if_mtu_update(fqs);
		break;
	default:
		break;
	}
}

static void
fq_if_classq_suspend(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	fq_if_purge_classq(fqs, fq_cl);
	fqs->fqs_throttle = 1;
	fq_cl->fcl_stat.fcl_throttle_on++;
}

static void
fq_if_classq_resume(fq_if_t *fqs, fq_if_classq_t *fq_cl)
{
	VERIFY(FQ_IF_CLASSQ_IDLE(fq_cl));
	fqs->fqs_throttle = 0;
	fq_cl->fcl_stat.fcl_throttle_off++;
}

static int
fq_if_throttle(fq_if_t *fqs, cqrq_throttle_t *tr)
{
	struct ifclassq *ifq = fqs->fqs_ifq;
	uint8_t index;
#if !MACH_ASSERT
#pragma unused(ifq)
#endif
	IFCQ_LOCK_ASSERT_HELD(ifq);

	if (!tr->set) {
		tr->level = fqs->fqs_throttle;
		return 0;
	}

	if (tr->level == fqs->fqs_throttle) {
		return EALREADY;
	}

	/* Throttling is allowed on BK_SYS class only */
	index = fq_if_service_to_priority(fqs, MBUF_SC_BK_SYS);
	switch (tr->level) {
	case IFNET_THROTTLE_OFF:
		fq_if_classq_resume(fqs, &fqs->fqs_classq[index]);
		break;
	case IFNET_THROTTLE_OPPORTUNISTIC:
		fq_if_classq_suspend(fqs, &fqs->fqs_classq[index]);
		break;
	default:
		break;
	}
	return 0;
}

void
fq_if_stat_sc(fq_if_t *fqs, cqrq_stat_sc_t *stat)
{
	uint8_t pri;
	fq_if_classq_t *fq_cl;

	if (stat == NULL) {
		return;
	}

	pri = fq_if_service_to_priority(fqs, stat->sc);
	fq_cl = &fqs->fqs_classq[pri];
	stat->packets = (uint32_t)fq_cl->fcl_stat.fcl_pkt_cnt;
	stat->bytes = (uint32_t)fq_cl->fcl_stat.fcl_byte_cnt;
}

int
fq_if_request_classq(struct ifclassq *ifq, cqrq_t rq, void *arg)
{
	int err = 0;
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);

	/*
	 * These are usually slow operations, convert the lock ahead of time
	 */
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	switch (rq) {
	case CLASSQRQ_PURGE:
		fq_if_purge(fqs);
		break;
	case CLASSQRQ_PURGE_SC:
		fq_if_purge_sc(fqs, (cqrq_purge_sc_t *)arg);
		break;
	case CLASSQRQ_EVENT:
		fq_if_event(fqs, (cqev_t)arg);
		break;
	case CLASSQRQ_THROTTLE:
		fq_if_throttle(fqs, (cqrq_throttle_t *)arg);
		break;
	case CLASSQRQ_STAT_SC:
		fq_if_stat_sc(fqs, (cqrq_stat_sc_t *)arg);
		break;
	}
	return err;
}

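/*
 * Attach point for the FQ-CoDel discipline: one fq_if_classq_t is set
 * up per service class.  In driver-managed mode only the four
 * BK/BE/VI/VO classes are used; otherwise the full ten-class
 * hierarchy is initialized.
 */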
int
fq_if_setup_ifclassq(struct ifclassq *ifq, u_int32_t flags,
    classq_pkt_type_t ptype)
{
#pragma unused(flags)
#define _FQ_CLASSQ_INIT(_fqs, _s, _q)                         \
	fq_if_classq_init((_fqs), FQ_IF_ ## _s ## _INDEX,     \
	FQ_CODEL_QUANTUM_ ## _s(_q), FQ_CODEL_DRR_MAX_ ## _s, \
	MBUF_SC_ ## _s )

	struct ifnet *ifp = ifq->ifcq_ifp;
	fq_if_t *fqs = NULL;
	uint16_t quantum;
	int err = 0;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(ifq->ifcq_disc == NULL);
	VERIFY(ifq->ifcq_type == PKTSCHEDT_NONE);

	fqs = fq_if_alloc(ifp, ifq, ptype);
	if (fqs == NULL) {
		return ENOMEM;
	}

	quantum = fq_if_calc_quantum(ifp);

	if (flags & PKTSCHEDF_QALG_DRIVER_MANAGED) {
		fqs->fqs_flags |= FQS_DRIVER_MANAGED;
		_FQ_CLASSQ_INIT(fqs, BK, quantum);
		_FQ_CLASSQ_INIT(fqs, BE, quantum);
		_FQ_CLASSQ_INIT(fqs, VI, quantum);
		_FQ_CLASSQ_INIT(fqs, VO, quantum);
	} else {
		/* SIG shares same INDEX with VI */
		_CASSERT(SCIDX_SIG == SCIDX_VI);
		_CASSERT(FQ_IF_SIG_INDEX == FQ_IF_VI_INDEX);

		_FQ_CLASSQ_INIT(fqs, BK_SYS, quantum);
		_FQ_CLASSQ_INIT(fqs, BK, quantum);
		_FQ_CLASSQ_INIT(fqs, BE, quantum);
		_FQ_CLASSQ_INIT(fqs, RD, quantum);
		_FQ_CLASSQ_INIT(fqs, OAM, quantum);
		_FQ_CLASSQ_INIT(fqs, AV, quantum);
		_FQ_CLASSQ_INIT(fqs, RV, quantum);
		_FQ_CLASSQ_INIT(fqs, VI, quantum);
		_FQ_CLASSQ_INIT(fqs, VO, quantum);
		_FQ_CLASSQ_INIT(fqs, CTL, quantum);
	}

	err = ifclassq_attach(ifq, PKTSCHEDT_FQ_CODEL, fqs);
	if (err != 0) {
		os_log_error(OS_LOG_DEFAULT, "%s: error from ifclassq_attach, "
		    "failed to attach fq_if: %d\n", __func__, err);
		fq_if_destroy(fqs);
	}
	return err;
#undef _FQ_CLASSQ_INIT
}

fq_t *
fq_if_hash_pkt(fq_if_t *fqs, u_int32_t flowid, mbuf_svc_class_t svc_class,
    u_int64_t now, boolean_t create, classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	flowq_list_t *fq_list;
	fq_if_classq_t *fq_cl;
	u_int8_t fqs_hash_id;
	u_int8_t scidx;

	scidx = fq_if_service_to_priority(fqs, svc_class);

	fqs_hash_id = FQ_IF_FLOW_HASH_ID(flowid);

	fq_list = &fqs->fqs_flows[fqs_hash_id];

	SLIST_FOREACH(fq, fq_list, fq_hashlink) {
		if (fq->fq_flowhash == flowid &&
		    fq->fq_sc_index == scidx) {
			break;
		}
	}
	if (fq == NULL && create == TRUE) {
#if SKYWALK
		ASSERT((ptype == QP_MBUF) || (ptype == QP_PACKET));
#else /* !SKYWALK */
		ASSERT(ptype == QP_MBUF);
#endif /* !SKYWALK */

		/* If the flow is not already on the list, allocate it */
		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		fq = fq_alloc(ptype);
		if (fq != NULL) {
			fq->fq_flowhash = flowid;
			fq->fq_sc_index = scidx;
			fq->fq_updatetime = now + fqs->fqs_update_interval;
			fq_cl = &fqs->fqs_classq[scidx];
			fq->fq_flags = FQF_FLOWCTL_CAPABLE;
			SLIST_INSERT_HEAD(fq_list, fq, fq_hashlink);
			fq_cl->fcl_stat.fcl_flows_cnt++;
		}
	}

	/*
	 * If getq time is not set because this is the first packet or after
	 * idle time, set it now so that we can detect a stall.
	 */
	if (fq != NULL && fq->fq_getqtime == 0) {
		fq->fq_getqtime = now;
	}

	return fq;
}

void
fq_if_destroy_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    bool destroy_now)
{
	u_int8_t hash_id;
	hash_id = FQ_IF_FLOW_HASH_ID(fq->fq_flowhash);
	SLIST_REMOVE(&fqs->fqs_flows[hash_id], fq, flowq,
	    fq_hashlink);
	fq_cl->fcl_stat.fcl_flows_cnt--;
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	if (__improbable(fq->fq_flags & FQF_FLOWCTL_ON)) {
		fq_if_flow_feedback(fqs, fq, fq_cl);
	}
	fq->fq_flags |= FQF_DESTROYED;
	if (destroy_now) {
		fq_destroy(fq);
	}
}

inline boolean_t
fq_if_at_drop_limit(fq_if_t *fqs)
{
	return (IFCQ_LEN(fqs->fqs_ifq) >= fqs->fqs_pkt_droplimit) ?
	       TRUE : FALSE;
}

static void
fq_if_empty_old_flow(fq_if_t *fqs, fq_if_classq_t *fq_cl, fq_t *fq,
    bool remove_hash, bool destroy)
{
	/*
	 * Remove the flow queue if it is empty
	 * and delete it
	 */
	STAILQ_REMOVE(&fq_cl->fcl_old_flows, fq, flowq,
	    fq_actlink);
	fq->fq_flags &= ~FQF_OLD_FLOW;
	fq_cl->fcl_stat.fcl_oldflows_cnt--;
	VERIFY(fq->fq_bytes == 0);

	if (remove_hash) {
		/* Remove from the hash list */
		fq_if_destroy_flow(fqs, fq_cl, fq, destroy);
	}
}

static void
fq_if_empty_new_flow(fq_t *fq, fq_if_classq_t *fq_cl, bool add_to_old)
{
	/* Move to the end of old queue list */
	STAILQ_REMOVE(&fq_cl->fcl_new_flows, fq,
	    flowq, fq_actlink);
	fq->fq_flags &= ~FQF_NEW_FLOW;
	fq_cl->fcl_stat.fcl_newflows_cnt--;

	if (add_to_old) {
		STAILQ_INSERT_TAIL(&fq_cl->fcl_old_flows, fq,
		    fq_actlink);
		fq->fq_flags |= FQF_OLD_FLOW;
		fq_cl->fcl_stat.fcl_oldflows_cnt++;
	}
}

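/*
 * Overflow policy: when the scheduler reaches its packet drop limit
 * (see fq_if_at_drop_limit()), a packet is dropped from the head of
 * the currently largest flow (fqs_large_flow), penalizing the
 * heaviest user of the queue.
 */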
inline void
fq_if_drop_packet(fq_if_t *fqs)
{
	fq_t *fq = fqs->fqs_large_flow;
	fq_if_classq_t *fq_cl;
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;

	if (fq == NULL) {
		return;
	}
	/* queue cannot be empty on the largest flow */
	VERIFY(!fq_empty(fq));

	fq_cl = &fqs->fqs_classq[fq->fq_sc_index];
	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	ASSERT(pkt.pktsched_ptype != QP_INVALID);

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	if (fq_empty(fq)) {
		fqs->fqs_large_flow = NULL;
		if (fq->fq_flags & FQF_OLD_FLOW) {
			fq_if_empty_old_flow(fqs, fq_cl, fq, true, true);
		} else {
			VERIFY(fq->fq_flags & FQF_NEW_FLOW);
			fq_if_empty_new_flow(fq, fq_cl, true);
		}
	}
	IFCQ_DROP_ADD(fqs->fqs_ifq, 1, pktsched_get_pkt_len(&pkt));

	pktsched_free_pkt(&pkt);
	fq_cl->fcl_stat.fcl_drop_overflow++;
}

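/*
 * Track the largest backlogged flow: a flow only becomes a candidate
 * once its backlog crosses FQ_IF_LARGE_FLOW_BYTE_LIMIT, and the
 * cached candidate is invalidated as soon as it drains back below
 * that limit.
 */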
inline void
fq_if_is_flow_heavy(fq_if_t *fqs, fq_t *fq)
{
	fq_t *prev_fq;

	if (fqs->fqs_large_flow != NULL &&
	    fqs->fqs_large_flow->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
		fqs->fqs_large_flow = NULL;
	}

	if (fq == NULL || fq->fq_bytes < FQ_IF_LARGE_FLOW_BYTE_LIMIT) {
		return;
	}

	prev_fq = fqs->fqs_large_flow;
	if (prev_fq == NULL) {
		if (!fq_empty(fq)) {
			fqs->fqs_large_flow = fq;
		}
		return;
	} else if (fq->fq_bytes > prev_fq->fq_bytes) {
		fqs->fqs_large_flow = fq;
	}
}

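/*
 * Flow control: when a flow-controllable flow builds up too much
 * backlog, an advisory entry is queued on fqs_fclist so the source of
 * the flow can be asked to back off; fq_if_flow_feedback() releases
 * the entry again once the flow drains.
 */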
boolean_t
fq_if_add_fcentry(fq_if_t *fqs, pktsched_pkt_t *pkt, uint8_t flowsrc,
    fq_t *fq, fq_if_classq_t *fq_cl)
{
	struct flowadv_fcentry *fce;

#if DEBUG || DEVELOPMENT
	if (__improbable(ifclassq_flow_control_adv == 0)) {
		os_log(OS_LOG_DEFAULT, "%s: skipped flow control", __func__);
		return TRUE;
	}
#endif /* DEBUG || DEVELOPMENT */

	STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) {
		if ((uint8_t)fce->fce_flowsrc_type == flowsrc &&
		    fce->fce_flowid == fq->fq_flowhash) {
			/* Already on flowcontrol list */
			return TRUE;
		}
	}
	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	fce = pktsched_alloc_fcentry(pkt, fqs->fqs_ifq->ifcq_ifp, M_WAITOK);
	if (fce != NULL) {
		/* XXX Add number of bytes in the queue */
		STAILQ_INSERT_TAIL(&fqs->fqs_fclist, fce, fce_link);
		fq_cl->fcl_stat.fcl_flow_control++;
		os_log(OS_LOG_DEFAULT, "%s: num: %d, scidx: %d, flowsrc: %d, "
		    "flow: 0x%x, iface: %s\n", __func__,
		    fq_cl->fcl_stat.fcl_flow_control,
		    fq->fq_sc_index, fce->fce_flowsrc_type, fq->fq_flowhash,
		    if_name(fqs->fqs_ifq->ifcq_ifp));
	}
	return (fce != NULL) ? TRUE : FALSE;
}

static void
fq_if_remove_fcentry(fq_if_t *fqs, struct flowadv_fcentry *fce)
{
	STAILQ_REMOVE(&fqs->fqs_fclist, fce, flowadv_fcentry, fce_link);
	STAILQ_NEXT(fce, fce_link) = NULL;
	flowadv_add_entry(fce);
}

void
fq_if_flow_feedback(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl)
{
	struct flowadv_fcentry *fce = NULL;

	IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
	STAILQ_FOREACH(fce, &fqs->fqs_fclist, fce_link) {
		if (fce->fce_flowid == fq->fq_flowhash) {
			break;
		}
	}
	if (fce != NULL) {
		fq_cl->fcl_stat.fcl_flow_feedback++;
		os_log(OS_LOG_DEFAULT, "%s: num: %d, scidx: %d, flowsrc: %d, "
		    "flow: 0x%x, iface: %s\n", __func__,
		    fq_cl->fcl_stat.fcl_flow_feedback, fq->fq_sc_index,
		    fce->fce_flowsrc_type, fce->fce_flowid,
		    if_name(fqs->fqs_ifq->ifcq_ifp));
		fq_if_remove_fcentry(fqs, fce);
	}
	fq->fq_flags &= ~FQF_FLOWCTL_ON;
}

void
fq_if_dequeue(fq_if_t *fqs, fq_if_classq_t *fq_cl, uint32_t pktlimit,
    int64_t bytelimit, classq_pkt_t *top, classq_pkt_t *bottom,
    uint32_t *retpktcnt, uint32_t *retbytecnt, flowq_dqlist_t *fq_dqlist,
    boolean_t drvmgmt)
{
	fq_t *fq = NULL, *tfq = NULL;
	flowq_stailq_t temp_stailq;
	uint32_t pktcnt, bytecnt;
	boolean_t qempty, limit_reached = FALSE;
	classq_pkt_t last = CLASSQ_PKT_INITIALIZER(last);
	fq_getq_flow_t fq_getq_flow_fn;
	classq_pkt_t *head, *tail;

	switch (fqs->fqs_ptype) {
	case QP_MBUF:
		fq_getq_flow_fn = fq_getq_flow_mbuf;
		break;

#if SKYWALK
	case QP_PACKET:
		fq_getq_flow_fn = fq_getq_flow_kpkt;
		break;
#endif /* SKYWALK */

	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/*
	 * maximum byte limit should not be greater than the budget for
	 * this class
	 */
	if (bytelimit > fq_cl->fcl_budget && !drvmgmt) {
		bytelimit = fq_cl->fcl_budget;
	}

	VERIFY(pktlimit > 0 && bytelimit > 0 && top != NULL);
	pktcnt = bytecnt = 0;
	STAILQ_INIT(&temp_stailq);

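	/*
	 * Classic FQ-CoDel service order: drain the new-flows list first
	 * (packets are marked PKTF_NEW_FLOW), then the old-flows list.
	 * A new flow that empties or exhausts its deficit is demoted to
	 * the tail of the old list; an old flow that exhausts its
	 * deficit is recharged with one quantum and rotated to the tail.
	 */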
	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_new_flows, fq_actlink, tfq) {
		ASSERT((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
		    FQF_NEW_FLOW);

		if (fq_dqlist != NULL) {
			if (!fq->fq_in_dqlist) {
				fq_dqlist_add(fq_dqlist, fq);
			}
			head = &fq->fq_dq_head;
			tail = &fq->fq_dq_tail;
		} else {
			ASSERT(!fq->fq_in_dqlist);
			head = top;
			tail = &last;
		}

		limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
		    pktlimit, head, tail, &bytecnt, &pktcnt, &qempty,
		    PKTF_NEW_FLOW);

		if (fq->fq_deficit <= 0 || qempty) {
			fq_if_empty_new_flow(fq, fq_cl, true);
		}
		fq->fq_deficit += fq_cl->fcl_quantum;
		if (limit_reached) {
			goto done;
		}
	}

	STAILQ_FOREACH_SAFE(fq, &fq_cl->fcl_old_flows, fq_actlink, tfq) {
		VERIFY((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) ==
		    FQF_OLD_FLOW);
		bool destroy = true;

		if (fq_dqlist != NULL) {
			if (!fq->fq_in_dqlist) {
				fq_dqlist_add(fq_dqlist, fq);
			}
			head = &fq->fq_dq_head;
			tail = &fq->fq_dq_tail;
			destroy = false;
		} else {
			ASSERT(!fq->fq_in_dqlist);
			head = top;
			tail = &last;
		}

		limit_reached = fq_getq_flow_fn(fqs, fq_cl, fq, bytelimit,
		    pktlimit, head, tail, &bytecnt, &pktcnt, &qempty, 0);

		if (qempty) {
			fq_if_empty_old_flow(fqs, fq_cl, fq, true, destroy);
		} else if (fq->fq_deficit <= 0) {
			STAILQ_REMOVE(&fq_cl->fcl_old_flows, fq,
			    flowq, fq_actlink);
			/*
			 * Move to the end of the old queues list. We do not
			 * need to update the flow count since this flow
			 * will be added to the tail again
			 */
			STAILQ_INSERT_TAIL(&temp_stailq, fq, fq_actlink);
			fq->fq_deficit += fq_cl->fcl_quantum;
		}
		if (limit_reached) {
			break;
		}
	}

done:
	if (!STAILQ_EMPTY(&fq_cl->fcl_old_flows)) {
		STAILQ_CONCAT(&fq_cl->fcl_old_flows, &temp_stailq);
	} else if (!STAILQ_EMPTY(&temp_stailq)) {
		fq_cl->fcl_old_flows = temp_stailq;
	}
	if (last.cp_mbuf != NULL) {
		VERIFY(top->cp_mbuf != NULL);
		if (bottom != NULL) {
			*bottom = last;
		}
	}
	if (retpktcnt != NULL) {
		*retpktcnt = pktcnt;
	}
	if (retbytecnt != NULL) {
		*retbytecnt = bytecnt;
	}
}

void
fq_if_teardown_ifclassq(struct ifclassq *ifq)
{
	fq_if_t *fqs = (fq_if_t *)ifq->ifcq_disc;

	IFCQ_LOCK_ASSERT_HELD(ifq);
	VERIFY(fqs != NULL && ifq->ifcq_type == PKTSCHEDT_FQ_CODEL);
	fq_if_destroy(fqs);
	ifq->ifcq_disc = NULL;
	ifclassq_detach(ifq);
}

static void
fq_export_flowstats(fq_if_t *fqs, fq_t *fq,
    struct fq_codel_flowstats *flowstat)
{
	bzero(flowstat, sizeof(*flowstat));
	flowstat->fqst_min_qdelay = (uint32_t)fq->fq_min_qdelay;
	flowstat->fqst_bytes = fq->fq_bytes;
	flowstat->fqst_flowhash = fq->fq_flowhash;
	if (fq->fq_flags & FQF_NEW_FLOW) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_NEW_FLOW;
	}
	if (fq->fq_flags & FQF_OLD_FLOW) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_OLD_FLOW;
	}
	if (fq->fq_flags & FQF_DELAY_HIGH) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_DELAY_HIGH;
	}
	if (fq->fq_flags & FQF_FLOWCTL_ON) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_FLOWCTL_ON;
	}
	if (fqs->fqs_large_flow == fq) {
		flowstat->fqst_flags |= FQ_FLOWSTATS_LARGE_FLOW;
	}
}

int
fq_if_getqstats_ifclassq(struct ifclassq *ifq, u_int32_t qid,
    struct if_ifclassq_stats *ifqs)
{
	struct fq_codel_classstats *fcls;
	fq_if_classq_t *fq_cl;
	fq_if_t *fqs;
	fq_t *fq = NULL;
	u_int32_t i, flowstat_cnt;

	if (qid >= FQ_IF_MAX_CLASSES) {
		return EINVAL;
	}

	fqs = (fq_if_t *)ifq->ifcq_disc;
	fcls = &ifqs->ifqs_fq_codel_stats;

	fq_cl = &fqs->fqs_classq[qid];

	fcls->fcls_pri = fq_cl->fcl_pri;
	fcls->fcls_service_class = fq_cl->fcl_service_class;
	fcls->fcls_quantum = fq_cl->fcl_quantum;
	fcls->fcls_drr_max = fq_cl->fcl_drr_max;
	fcls->fcls_budget = fq_cl->fcl_budget;
	fcls->fcls_target_qdelay = fqs->fqs_target_qdelay;
	fcls->fcls_update_interval = fqs->fqs_update_interval;
	fcls->fcls_flow_control = fq_cl->fcl_stat.fcl_flow_control;
	fcls->fcls_flow_feedback = fq_cl->fcl_stat.fcl_flow_feedback;
	fcls->fcls_dequeue_stall = fq_cl->fcl_stat.fcl_dequeue_stall;
	fcls->fcls_drop_overflow = fq_cl->fcl_stat.fcl_drop_overflow;
	fcls->fcls_drop_early = fq_cl->fcl_stat.fcl_drop_early;
	fcls->fcls_drop_memfailure = fq_cl->fcl_stat.fcl_drop_memfailure;
	fcls->fcls_flows_cnt = fq_cl->fcl_stat.fcl_flows_cnt;
	fcls->fcls_newflows_cnt = fq_cl->fcl_stat.fcl_newflows_cnt;
	fcls->fcls_oldflows_cnt = fq_cl->fcl_stat.fcl_oldflows_cnt;
	fcls->fcls_pkt_cnt = fq_cl->fcl_stat.fcl_pkt_cnt;
	fcls->fcls_flow_control_fail = fq_cl->fcl_stat.fcl_flow_control_fail;
	fcls->fcls_dequeue = fq_cl->fcl_stat.fcl_dequeue;
	fcls->fcls_dequeue_bytes = fq_cl->fcl_stat.fcl_dequeue_bytes;
	fcls->fcls_byte_cnt = fq_cl->fcl_stat.fcl_byte_cnt;
	fcls->fcls_throttle_on = fq_cl->fcl_stat.fcl_throttle_on;
	fcls->fcls_throttle_off = fq_cl->fcl_stat.fcl_throttle_off;
	fcls->fcls_throttle_drops = fq_cl->fcl_stat.fcl_throttle_drops;
	fcls->fcls_dup_rexmts = fq_cl->fcl_stat.fcl_dup_rexmts;
	fcls->fcls_pkts_compressible = fq_cl->fcl_stat.fcl_pkts_compressible;
	fcls->fcls_pkts_compressed = fq_cl->fcl_stat.fcl_pkts_compressed;
	fcls->fcls_min_qdelay = fq_cl->fcl_stat.fcl_min_qdelay;
	fcls->fcls_max_qdelay = fq_cl->fcl_stat.fcl_max_qdelay;
	fcls->fcls_avg_qdelay = fq_cl->fcl_stat.fcl_avg_qdelay;

	/* Gather per flow stats */
	flowstat_cnt = min((fcls->fcls_newflows_cnt +
	    fcls->fcls_oldflows_cnt), FQ_IF_MAX_FLOWSTATS);
	i = 0;
	STAILQ_FOREACH(fq, &fq_cl->fcl_new_flows, fq_actlink) {
		if (i >= fcls->fcls_newflows_cnt || i >= flowstat_cnt) {
			break;
		}

		/* leave space for a few old flows */
		if ((flowstat_cnt - i) < fcls->fcls_oldflows_cnt &&
		    i >= (FQ_IF_MAX_FLOWSTATS >> 1)) {
			break;
		}
		fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
		i++;
	}
	STAILQ_FOREACH(fq, &fq_cl->fcl_old_flows, fq_actlink) {
		if (i >= flowstat_cnt) {
			break;
		}
		fq_export_flowstats(fqs, fq, &fcls->fcls_flowstats[i]);
		i++;
	}
	VERIFY(i <= flowstat_cnt);
	fcls->fcls_flowstats_cnt = i;
	return 0;
}