/* xnu-8792.41.9/bsd/net/classq/classq_fq_codel.c */
/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The migration of a flow queue between the different states is summarised
 * in the state diagram below. (RFC 8290)
 *
 * +-----------------+                +------------------+
 * |                 |     Empty      |                  |
 * |     Empty       |<---------------+       Old        +----+
 * |                 |                |                  |    |
 * +-------+---------+                +------------------+    |
 *         |                             ^            ^       |Credits
 *         |Arrival                      |            |       |Exhausted
 *         v                             |            |       |
 * +-----------------+                   |            |       |
 * |                 |      Empty or     |            |       |
 * |      New        +-------------------+            +-------+
 * |                 | Credits Exhausted
 * +-----------------+
 *
 * In this implementation of FQ-CODEL, a flow queue is a dynamically
 * allocated object. An active flow queue goes through the above cycle of
 * state transitions very often. To avoid the cost of frequent flow queue
 * object allocation/free, this implementation retains the flow queue object
 * in the [Empty] state on an Empty flow queue list, with an active reference
 * in the flow queue hash table. The flow queue objects on the Empty flow
 * queue list have an associated age and are purged accordingly.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/sdt.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

#include <netinet/tcp_var.h>

static uint32_t flowq_size;                     /* size of flowq */
static struct mcache *flowq_cache = NULL;       /* mcache for flowq */

#define FQ_ZONE_MAX     (32 * 1024)     /* across all interfaces */

#define DTYPE_NODROP    0       /* no drop */
#define DTYPE_FORCED    1       /* a "forced" drop */
#define DTYPE_EARLY     2       /* an "unforced" (early) drop */

static uint32_t pkt_compressor = 1;
#if (DEBUG || DEVELOPMENT)
SYSCTL_NODE(_net_classq, OID_AUTO, flow_q, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "FQ-CODEL parameters");

SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, pkt_compressor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_compressor, 0, "enable pkt compression");
#endif /* (DEBUG || DEVELOPMENT) */

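/*
 * One-time module init: create the cache that backs all flow queue
 * allocations, and sanity-check the AQM ktrace event codes at compile time.
 */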
void
fq_codel_init(void)
{
	if (flowq_cache != NULL) {
		return;
	}

	flowq_size = sizeof(fq_t);
	flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t),
	    0, MCR_SLEEP);
	if (flowq_cache == NULL) {
		panic("%s: failed to allocate flowq_cache", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	_CASSERT(AQM_KTRACE_AON_FLOW_HIGH_DELAY == 0x8300004);
	_CASSERT(AQM_KTRACE_AON_THROTTLE == 0x8300008);
	_CASSERT(AQM_KTRACE_AON_FLOW_OVERWHELMING == 0x830000c);
	_CASSERT(AQM_KTRACE_AON_FLOW_DQ_STALL == 0x8300010);

	_CASSERT(AQM_KTRACE_STATS_FLOW_ENQUEUE == 0x8310004);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DEQUEUE == 0x8310008);
	_CASSERT(AQM_KTRACE_STATS_FLOW_CTL == 0x831000c);
	_CASSERT(AQM_KTRACE_STATS_FLOW_ALLOC == 0x8310010);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DESTROY == 0x8310014);
}

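/*
 * Reclaim memory from the flow queue cache; `purge' requests a full reap
 * rather than an opportunistic one.
 */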
void
fq_codel_reap_caches(boolean_t purge)
{
	mcache_reap_now(flowq_cache, purge);
}

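/*
 * Allocate and zero-initialize a flow queue for the given packet type
 * (mbuf or Skywalk packet). Returns NULL if the cache allocation fails.
 */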
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = mcache_alloc(flowq_cache, MCR_SLEEP);
	if (fq == NULL) {
		log(LOG_ERR, "%s: unable to allocate from flowq_cache\n", __func__);
		return NULL;
	}

	bzero(fq, flowq_size);
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
#if SKYWALK
	else {
		VERIFY(ptype == QP_PACKET);
		KPKTQ_INIT(&fq->fq_kpktq);
	}
#endif /* SKYWALK */
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;
	return fq;
}

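/*
 * Release a flow queue back to the cache. The queue must be empty, off the
 * dequeue list, carry no new/old/empty flow state and account for no bytes.
 */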
void
fq_destroy(fq_t *fq, classq_pkt_type_t ptype)
{
	VERIFY(!fq->fq_in_dqlist);
	VERIFY(fq_empty(fq, ptype));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW |
	    FQF_EMPTY_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	mcache_free(flowq_cache, fq);
}

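/*
 * Flag a flow as delay-high if nothing has been dequeued from it for a full
 * update interval even though it holds enough bytes to be flow controlled.
 */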
static inline void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime, update_interval;
	if (FQ_IS_DELAY_HIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq, fqs->fqs_ptype) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}

	update_interval = FQ_UPDATE_INTERVAL(flowq);
	maxgetqtime = flowq->fq_getqtime + update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue for an entire update interval's
		 * worth of time, which means the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
		os_log_error(OS_LOG_DEFAULT, "%s: dequeue stall num: %d, "
		    "scidx: %d, flow: 0x%x, iface: %s", __func__,
		    fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
		    flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp));
		KDBG(AQM_KTRACE_AON_FLOW_DQ_STALL, flowq->fq_flowhash,
		    AQM_KTRACE_FQ_GRP_SC_IDX(flowq), flowq->fq_bytes,
		    (*now) - flowq->fq_getqtime);
	}
}

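/*
 * Drop a single packet from the head of the flow queue, updating the
 * interface drop statistics and freeing the packet.
 */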
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}


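/*
 * Packet compression: if the incoming packet carries the same compression
 * generation count as the packet at the tail of the flow queue, the queued
 * packet is superseded; drop and free it so the new packet takes its place,
 * inheriting the old packet's timestamp. Returns CLASSQEQ_COMPRESSED when a
 * queued packet was elided, 0 otherwise.
 */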
static int
fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
    pktsched_pkt_t *pkt)
{
	classq_pkt_type_t ptype = fqs->fqs_ptype;
	uint32_t comp_gencnt = 0;
	uint64_t *pkt_timestamp;
	uint64_t old_timestamp = 0;
	uint32_t old_pktlen = 0;
	struct ifclassq *ifq = fqs->fqs_ifq;

	if (__improbable(pkt_compressor == 0)) {
		return 0;
	}

	pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
	    &comp_gencnt);

	if (comp_gencnt == 0) {
		return 0;
	}

	fq_cl->fcl_stat.fcl_pkts_compressible++;

	if (fq_empty(fq, fqs->fqs_ptype)) {
		return 0;
	}

	if (ptype == QP_MBUF) {
		struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);

		if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
			return 0;
		}

		/* If we got this far, we should merge/replace the segment */
		MBUFQ_REMOVE(&fq->fq_mbufq, m);
		old_pktlen = m_pktlen(m);
		old_timestamp = m->m_pkthdr.pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		m_freem(m);
	}
#if SKYWALK
	else {
		struct __kern_packet *kpkt = KPKTQ_LAST(&fq->fq_kpktq);

		if (comp_gencnt != kpkt->pkt_comp_gencnt) {
			return 0;
		}

		/* If we got this far, we should merge/replace the segment */
		KPKTQ_REMOVE(&fq->fq_kpktq, kpkt);
		old_pktlen = kpkt->pkt_length;
		old_timestamp = kpkt->pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		pp_free_packet(*(struct kern_pbufpool **)(uintptr_t)&
		    (((struct __kern_quantum *)kpkt)->qum_pp),
		    (uint64_t)kpkt);
	}
#endif /* SKYWALK */

	fq->fq_bytes -= old_pktlen;
	fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, old_pktlen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, old_pktlen);

	*pkt_timestamp = old_timestamp;

	return CLASSQEQ_COMPRESSED;
}

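/*
 * Enqueue a packet (or chain) on its flow queue: hash the packet to a flow,
 * run the dequeue stall, flow control and overflow checks, then either queue
 * the chain or drop it. Returns a CLASSQEQ_* code for the caller.
 */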
int
fq_addq(fq_if_t *fqs, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
    fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, cnt;
	uint8_t pkt_proto, pkt_flowsrc;
	fq_tfc_type_t tfc_type = FQ_TFC_C;

	cnt = pkt->pktsched_pcnt;
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);

	/*
	 * XXX We do not walk the chain to set this flag on every packet.
	 * The flag is only used for debugging; nothing is affected if it
	 * is not set.
	 */
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/*
	 * Timestamps for every packet must be set prior to entering this path.
	 */
	now = *pkt_timestamp;
	ASSERT(now > 0);

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, true, tfc_type);
	if (__improbable(fq == NULL)) {
		DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_group == fq_grp);
	VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);

	KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
	    AQM_KTRACE_FQ_GRP_SC_IDX(fq),
	    fq->fq_bytes, pktsched_get_pkt_len(pkt));

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	if (__improbable(FQ_IS_DELAY_HIGH(fq) || FQ_IS_OVERWHELMING(fq))) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the chain.
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early += cnt;
				IFCQ_DROP_ADD(fqs->fqs_ifq, cnt, pktsched_get_pkt_len(pkt));
			}
			DTRACE_IP6(flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		} else {
			/*
			 * Need to drop packets to make room for the new
			 * ones. Try to drop from the head of the queue
			 * instead of the latest packets.
			 */
			if (!fq_empty(fq, fqs->fqs_ptype)) {
				uint32_t i;

				for (i = 0; i < cnt; i++) {
					fq_head_drop(fqs, fq);
				}
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early += cnt;

			DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		}
	}

	/* Set the return code correctly */
	if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * if we could not flow control the flow, it is
			 * better to drop
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
		DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
	}

	/*
	 * If the queue length hits the queue limit, drop a chain with the
	 * same number of packets from the front of the queue for a flow with
	 * maximum number of bytes. This will penalize heavy and unresponsive
	 * flows. It will also avoid a tail drop.
	 */
	if (__improbable(droptype == DTYPE_NODROP &&
	    fq_if_at_drop_limit(fqs))) {
		uint32_t i;

		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			DTRACE_IP5(large__flow, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    pktsched_pkt_t *, pkt, uint32_t, cnt);

			for (i = 0; i < cnt; i++) {
				fq_head_drop(fqs, fq);
			}
			fq_cl->fcl_stat.fcl_drop_overflow += cnt;

			/*
			 * TCP and QUIC will react to the loss of those head dropped pkts
			 * and adjust send rate.
			 */
			if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
			    (*pkt_flags & PKTF_FLOW_ADV) &&
			    (pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
					fq->fq_flags |= FQF_FLOWCTL_ON;
					FQ_SET_OVERWHELMING(fq);
					fq_cl->fcl_stat.fcl_overwhelming++;
					/* deliver flow control advisory error */
					ret = CLASSQEQ_SUCCESS_FC;
				}
			}
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow += cnt;
				ret = CLASSQEQ_DROP;

				DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
				    fq_if_classq_t *, fq_cl, fq_t *, fq,
				    pktsched_pkt_t *, pkt, uint32_t, cnt);

				/*
				 * if this fq was freshly created and there
				 * is nothing to enqueue, move it to empty list
				 */
				if (fq_empty(fq, fqs->fqs_ptype) &&
				    !(fq->fq_flags & (FQF_NEW_FLOW |
				    FQF_OLD_FLOW))) {
					fq_if_move_to_empty_flow(fqs, fq_cl,
					    fq, now);
					fq = NULL;
				}
			} else {
				DTRACE_IP5(different__large__flow,
				    fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
				    fq_t *, fq, pktsched_pkt_t *, pkt,
				    uint32_t, cnt);

				for (i = 0; i < cnt; i++) {
					fq_if_drop_packet(fqs, now);
				}
			}
		}
	}

	if (__probable(droptype == DTYPE_NODROP)) {
		uint32_t chain_len = pktsched_get_pkt_len(pkt);

		/*
		 * We do not compress if we are enqueuing a chain.
		 * Traversing the chain to look for acks would defeat the
		 * purpose of batch enqueueing.
		 */
		if (cnt == 1) {
			ret = fq_compressor(fqs, fq, fq_cl, pkt);
			if (ret != CLASSQEQ_COMPRESSED) {
				ret = CLASSQEQ_SUCCESS;
			} else {
				fq_cl->fcl_stat.fcl_pkts_compressed++;
			}
		}
		DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
		    fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
		fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
		    pkt->pktsched_ptype);

		fq->fq_bytes += chain_len;
		fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
		fq_cl->fcl_stat.fcl_pkt_cnt += cnt;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

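/*
 * Dequeue a single packet from a flow queue and update the byte/packet
 * accounting, without any of the queue delay bookkeeping done by
 * fq_getq_flow() below.
 */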
void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p, fqs->fqs_ptype);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &FQ_CLASSQ(fq);
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq, fqs->fqs_ptype)) {
		fq->fq_getqtime = 0;
	}
}

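/*
 * Dequeue a packet from a flow queue and run the CoDel-style control logic:
 * measure the packet's queue delay, track per-class min/max/avg delay, mark
 * or clear the delay-high state once per update interval, and give flow
 * control feedback when the flow becomes healthy again.
 */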
void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt, uint64_t now)
{
	fq_if_classq_t *fq_cl;
	int64_t qdelay = 0;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &FQ_CLASSQ(fq);

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}

	/* Update min/max/avg qdelay for the respective class */
	if (fq_cl->fcl_stat.fcl_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq_cl->fcl_stat.fcl_min_qdelay)) {
		fq_cl->fcl_stat.fcl_min_qdelay = qdelay;
	}

	if (fq_cl->fcl_stat.fcl_max_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay > fq_cl->fcl_stat.fcl_max_qdelay)) {
		fq_cl->fcl_stat.fcl_max_qdelay = qdelay;
	}

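	/*
	 * Maintain a running average of queue delay for the class:
	 * avg' = (avg * n + qdelay) / (n + 1), with the counters reset
	 * whenever they would overflow.
	 */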
	uint64_t num_dequeues = fq_cl->fcl_stat.fcl_dequeue;

	if (num_dequeues == 0) {
		fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
	} else if (qdelay > 0) {
		uint64_t res = 0;
		if (os_add_overflow(num_dequeues, 1, &res)) {
			/* Reset the dequeue num and dequeue bytes */
			fq_cl->fcl_stat.fcl_dequeue = num_dequeues = 0;
			fq_cl->fcl_stat.fcl_dequeue_bytes = 0;
			fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			os_log_info(OS_LOG_DEFAULT, "%s: dequeue num overflow, "
			    "flow: 0x%x, iface: %s", __func__, fq->fq_flowhash,
			    if_name(fqs->fqs_ifq->ifcq_ifp));
		} else {
			uint64_t product = 0;
			if (os_mul_overflow(fq_cl->fcl_stat.fcl_avg_qdelay,
			    num_dequeues, &product) || os_add_overflow(product, qdelay, &res)) {
				fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			} else {
				fq_cl->fcl_stat.fcl_avg_qdelay = res /
				    (num_dequeues + 1);
			}
		}
	}

	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > FQ_TARGET_DELAY(fq)) {
			if (!FQ_IS_DELAY_HIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
				os_log_error(OS_LOG_DEFAULT,
				    "%s: high delay idx: %d, %llu, flow: 0x%x, "
				    "iface: %s", __func__, fq->fq_sc_index,
				    fq->fq_min_qdelay, fq->fq_flowhash,
				    if_name(fqs->fqs_ifq->ifcq_ifp));
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + FQ_UPDATE_INTERVAL(fq);
		fq->fq_min_qdelay = 0;
	}
	if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs)) {
		FQ_CLEAR_OVERWHELMING(fq);
	}
	if (!FQ_IS_DELAY_HIGH(fq) || fq_empty(fq, fqs->fqs_ptype)) {
		FQ_CLEAR_DELAY_HIGH(fq);
	}

	if ((fq->fq_flags & FQF_FLOWCTL_ON) &&
	    !FQ_IS_DELAY_HIGH(fq) && !FQ_IS_OVERWHELMING(fq)) {
		fq_if_flow_feedback(fqs, fq, fq_cl);
	}

	if (fq_empty(fq, fqs->fqs_ptype)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}
725