/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The migration of a flow queue between the different states is summarised
 * in the state diagram below. (RFC 8290)
 *
 * +-----------------+                +------------------+
 * |                 |     Empty      |                  |
 * |     Empty       |<---------------+       Old        +----+
 * |                 |                |                  |    |
 * +-------+---------+                +------------------+    |
 *         |                             ^            ^       |Credits
 *         |Arrival                      |            |       |Exhausted
 *         v                             |            |       |
 * +-----------------+                   |            |       |
 * |                 |      Empty or     |            |       |
 * |      New        +-------------------+            +-------+
 * |                 | Credits Exhausted
 * +-----------------+
 *
 * In this implementation of FQ-CODEL, a flow queue is a dynamically
 * allocated object. An active flow queue goes through the above cycle of
 * state transitions very often. To avoid the cost of frequent flow queue
 * object allocation/free, this implementation retains the flow queue object
 * in the [Empty] state on an Empty flow queue list, with an active reference
 * in the flow queue hash table. The flow queue objects on the Empty flow
 * queue list have an associated age and are purged accordingly.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/sdt.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

#include <netinet/tcp_var.h>

#define FQ_ZONE_MAX     (32 * 1024)     /* across all interfaces */

#define DTYPE_NODROP    0       /* no drop */
#define DTYPE_FORCED    1       /* a "forced" drop */
#define DTYPE_EARLY     2       /* an "unforced" (early) drop */
static uint32_t pkt_compressor = 1;
static uint64_t l4s_ce_threshold = 0;
#if (DEBUG || DEVELOPMENT)
SYSCTL_NODE(_net_classq, OID_AUTO, flow_q, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "FQ-CODEL parameters");

SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, pkt_compressor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_compressor, 0, "enable pkt compression");

SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, l4s_ce_threshold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_ce_threshold,
    "L4S CE threshold");
#endif /* (DEBUG || DEVELOPMENT) */

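/*
 * Compile-time checks that pin the AQM ktrace event codes to fixed values,
 * so they cannot drift from what tools decoding these KDBG codes expect.
 */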
void
fq_codel_init(void)
{
	_CASSERT(AQM_KTRACE_AON_FLOW_HIGH_DELAY == 0x8300004);
	_CASSERT(AQM_KTRACE_AON_THROTTLE == 0x8300008);
	_CASSERT(AQM_KTRACE_AON_FLOW_OVERWHELMING == 0x830000c);
	_CASSERT(AQM_KTRACE_AON_FLOW_DQ_STALL == 0x8300010);

	_CASSERT(AQM_KTRACE_STATS_FLOW_ENQUEUE == 0x8310004);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DEQUEUE == 0x8310008);
	_CASSERT(AQM_KTRACE_STATS_FLOW_CTL == 0x831000c);
	_CASSERT(AQM_KTRACE_STATS_FLOW_ALLOC == 0x8310010);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DESTROY == 0x8310014);
}

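/*
 * Allocate and initialize a flow queue object. The backing packet queue is
 * initialized according to the packet type (mbuf or, with SKYWALK, kernel
 * packet) used by this scheduler instance.
 */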
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;

	fq = kalloc_type(fq_t, Z_WAITOK_ZERO);
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
#if SKYWALK
	else {
		VERIFY(ptype == QP_PACKET);
		KPKTQ_INIT(&fq->fq_kpktq);
	}
#endif /* SKYWALK */
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;

	return fq;
}

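/*
 * Free a flow queue object. The queue must already be empty, off the
 * dequeue list, and detached from the new/old/empty flow lists.
 */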
void
fq_destroy(fq_t *fq, classq_pkt_type_t ptype)
{
	VERIFY(!fq->fq_in_dqlist);
	VERIFY(fq_empty(fq, ptype));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW |
	    FQF_EMPTY_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	kfree_type(fq_t, fq);
}

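/*
 * Detect a dequeue stall: the flow holds at least
 * FQ_MIN_FC_THRESHOLD_BYTES but nothing has been dequeued from it for more
 * than an update interval. Flows that are already marked high-delay, empty,
 * or nearly drained are left alone.
 */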
static inline void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime, update_interval;
	if (FQ_IS_DELAY_HIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq, fqs->fqs_ptype) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}

	update_interval = FQ_UPDATE_INTERVAL(flowq);
	maxgetqtime = flowq->fq_getqtime + update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue in an update interval worth of
		 * time; the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
		os_log_error(OS_LOG_DEFAULT, "%s: dequeue stall num: %d, "
		    "scidx: %d, flow: 0x%x, iface: %s", __func__,
		    fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
		    flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp));
		KDBG(AQM_KTRACE_AON_FLOW_DQ_STALL, flowq->fq_flowhash,
		    AQM_KTRACE_FQ_GRP_SC_IDX(flowq), flowq->fq_bytes,
		    (*now) - flowq->fq_getqtime);
	}
}

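/*
 * Drop one packet from the head of the flow queue and charge the drop to
 * the interface classq statistics. Head drops are used in preference to
 * tail drops so that the loss signal reaches responsive senders sooner.
 */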
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}

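/*
 * Opportunistic compression at enqueue time: if the packet being enqueued
 * carries the same comp_gencnt as the packet currently at the tail of the
 * flow queue, the tail packet is deemed superseded (e.g. a pure ACK made
 * redundant by a newer ACK), so it is removed and freed, and its timestamp
 * is carried over to the new packet. Returns CLASSQEQ_COMPRESSED when a
 * packet was elided, 0 otherwise.
 */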
static int
fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
    pktsched_pkt_t *pkt)
{
	classq_pkt_type_t ptype = fqs->fqs_ptype;
	uint32_t comp_gencnt = 0;
	uint64_t *pkt_timestamp;
	uint64_t old_timestamp = 0;
	uint32_t old_pktlen = 0;
	struct ifclassq *ifq = fqs->fqs_ifq;

	if (__improbable(pkt_compressor == 0)) {
		return 0;
	}

	pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
	    &comp_gencnt);

	if (comp_gencnt == 0) {
		return 0;
	}

	fq_cl->fcl_stat.fcl_pkts_compressible++;

	if (fq_empty(fq, fqs->fqs_ptype)) {
		return 0;
	}

	if (ptype == QP_MBUF) {
		struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);

		if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
			return 0;
		}

		/* If we got this far, we should merge/replace the segment */
		MBUFQ_REMOVE(&fq->fq_mbufq, m);
		old_pktlen = m_pktlen(m);
		old_timestamp = m->m_pkthdr.pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		m_freem(m);
	}
#if SKYWALK
	else {
		struct __kern_packet *kpkt = KPKTQ_LAST(&fq->fq_kpktq);

		if (comp_gencnt != kpkt->pkt_comp_gencnt) {
			return 0;
		}

		/* If we got this far, we should merge/replace the segment */
		KPKTQ_REMOVE(&fq->fq_kpktq, kpkt);
		old_pktlen = kpkt->pkt_length;
		old_timestamp = kpkt->pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		pp_free_packet(*(struct kern_pbufpool **)(uintptr_t)&
		    (((struct __kern_quantum *)kpkt)->qum_pp),
		    (uint64_t)kpkt);
	}
#endif /* SKYWALK */

	fq->fq_bytes -= old_pktlen;
	fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, old_pktlen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, old_pktlen);

	*pkt_timestamp = old_timestamp;

	return CLASSQEQ_COMPRESSED;
}

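/*
 * Enqueue a packet (or chain of packets) on its flow queue. This is the
 * FQ-CODEL enqueue path: it hashes the packet to a flow, detects dequeue
 * stalls, applies flow-control advisories or head drops when the flow or
 * the interface queue is congested, optionally compresses against the
 * queue tail, and finally links a newly active flow onto the new-flows
 * list with a full quantum of deficit credits.
 */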
int
fq_addq(fq_if_t *fqs, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
    fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, cnt;
	uint8_t pkt_proto, pkt_flowsrc;
	fq_tfc_type_t tfc_type = FQ_TFC_C;

	cnt = pkt->pktsched_pcnt;
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);

	/*
	 * XXX Not walking the chain to set this flag on every packet.
	 * This flag is only used for debugging. Nothing is affected if it's
	 * not set.
	 */
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	if (ifclassq_enable_l4s) {
		tfc_type = pktsched_is_pkt_l4s(pkt) ? FQ_TFC_L4S : FQ_TFC_C;
	}

	/*
	 * Timestamps for every packet must be set prior to entering this path.
	 */
	now = *pkt_timestamp;
	ASSERT(now > 0);

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, true, tfc_type);
	if (__improbable(fq == NULL)) {
		DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_group == fq_grp);
	VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);

	KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
	    AQM_KTRACE_FQ_GRP_SC_IDX(fq),
	    fq->fq_bytes, pktsched_get_pkt_len(pkt));

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	/*
	 * Skip the dropping part if it's L4S. Flow control or ECN marking
	 * decisions will be made at dequeue time.
	 */
	if (ifclassq_enable_l4s && tfc_type == FQ_TFC_L4S) {
		fq_cl->fcl_stat.fcl_l4s_pkts++;
		droptype = DTYPE_NODROP;
		goto no_drop;
	}

	/*
	 * If L4S is not enabled, a fq should always be labeled as classic.
	 */
	VERIFY(fq->fq_tfc_type == FQ_TFC_C);

	if (__improbable(FQ_IS_DELAY_HIGH(fq) || FQ_IS_OVERWHELMING(fq))) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the chain.
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early += cnt;
				IFCQ_DROP_ADD(fqs->fqs_ifq, cnt, pktsched_get_pkt_len(pkt));
			}
			DTRACE_IP6(flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		} else {
			/*
			 * Need to drop packets to make room for the new
			 * ones. Try to drop from the head of the queue
			 * instead of the latest packets.
			 */
			if (!fq_empty(fq, fqs->fqs_ptype)) {
				uint32_t i;

				for (i = 0; i < cnt; i++) {
					fq_head_drop(fqs, fq);
				}
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early += cnt;

			DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		}
	}

	/* Set the return code correctly */
	if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * If we could not flow control the flow, it is
			 * better to drop.
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
		DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
	}

	/*
	 * If the queue length hits the queue limit, drop a chain with the
	 * same number of packets from the front of the queue for the flow
	 * with the maximum number of bytes. This will penalize heavy and
	 * unresponsive flows. It will also avoid a tail drop.
	 */
	if (__improbable(droptype == DTYPE_NODROP &&
	    fq_if_at_drop_limit(fqs))) {
		uint32_t i;

		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			DTRACE_IP5(large__flow, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    pktsched_pkt_t *, pkt, uint32_t, cnt);

			for (i = 0; i < cnt; i++) {
				fq_head_drop(fqs, fq);
			}
			fq_cl->fcl_stat.fcl_drop_overflow += cnt;

			/*
			 * TCP and QUIC will react to the loss of those head
			 * dropped pkts and adjust send rate.
			 */
			if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
			    (*pkt_flags & PKTF_FLOW_ADV) &&
			    (pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
					fq->fq_flags |= FQF_FLOWCTL_ON;
					FQ_SET_OVERWHELMING(fq);
					fq_cl->fcl_stat.fcl_overwhelming++;
					/* deliver flow control advisory error */
					ret = CLASSQEQ_SUCCESS_FC;
				}
			}
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow += cnt;
				ret = CLASSQEQ_DROP;

				DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
				    fq_if_classq_t *, fq_cl, fq_t *, fq,
				    pktsched_pkt_t *, pkt, uint32_t, cnt);

				/*
				 * If this fq was freshly created and there
				 * is nothing to enqueue, move it to the
				 * empty list.
				 */
				if (fq_empty(fq, fqs->fqs_ptype) &&
				    !(fq->fq_flags & (FQF_NEW_FLOW |
				    FQF_OLD_FLOW))) {
					fq_if_move_to_empty_flow(fqs, fq_cl,
					    fq, now);
					fq = NULL;
				}
			} else {
				DTRACE_IP5(different__large__flow,
				    fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
				    fq_t *, fq, pktsched_pkt_t *, pkt,
				    uint32_t, cnt);

				for (i = 0; i < cnt; i++) {
					fq_if_drop_packet(fqs, now);
				}
			}
		}
	}

no_drop:
	if (__probable(droptype == DTYPE_NODROP)) {
		uint32_t chain_len = pktsched_get_pkt_len(pkt);
		int ret_compress = 0;

		/*
		 * We do not compress if we are enqueuing a chain.
		 * Traversing the chain to look for acks would defeat the
		 * purpose of batch enqueueing.
		 */
		if (cnt == 1) {
			ret_compress = fq_compressor(fqs, fq, fq_cl, pkt);
			if (ret_compress == CLASSQEQ_COMPRESSED) {
				fq_cl->fcl_stat.fcl_pkts_compressed++;
			}
		}
		DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
		    fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
		fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
		    pkt->pktsched_ptype);

		fq->fq_bytes += chain_len;
		fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
		fq_cl->fcl_stat.fcl_pkt_cnt += cnt;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of the new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

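/*
 * Dequeue one packet from the flow queue and update the per-flow,
 * per-class, group, and interface counters. This internal variant does not
 * touch the AQM state (queue delay tracking, flow control); callers that
 * need those updates use fq_getq_flow() instead.
 */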
void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p, fqs->fqs_ptype);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &FQ_CLASSQ(fq);
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq, fqs->fqs_ptype)) {
		fq->fq_getqtime = 0;
	}
}

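/*
 * Dequeue one packet from the flow queue and run the AQM bookkeeping:
 * compute the packet's queueing delay, fold it into the per-class
 * min/max/average delay statistics, CE-mark L4S packets whose delay
 * exceeds the marking threshold, run the CoDel-style target/interval check
 * for classic traffic, and lift flow control once the flow has recovered.
 */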
void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt, uint64_t now)
{
	fq_if_classq_t *fq_cl;
	int64_t qdelay = 0;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	uint8_t pkt_flowsrc;
	boolean_t l4s_pkt;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, &pkt_flowsrc,
	    NULL, NULL);
	l4s_pkt = pktsched_is_pkt_l4s(pkt);

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &FQ_CLASSQ(fq);

	/* Update min/max/avg qdelay for the respective class */
	if (fq_cl->fcl_stat.fcl_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq_cl->fcl_stat.fcl_min_qdelay)) {
		fq_cl->fcl_stat.fcl_min_qdelay = qdelay;
	}

	if (fq_cl->fcl_stat.fcl_max_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay > fq_cl->fcl_stat.fcl_max_qdelay)) {
		fq_cl->fcl_stat.fcl_max_qdelay = qdelay;
	}

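	/*
	 * The average is maintained incrementally:
	 *   avg' = (avg * n + qdelay) / (n + 1)
	 * where n is the number of dequeues so far. If either the dequeue
	 * count or the weighted sum would overflow, the statistics restart
	 * from the current sample.
	 */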
	uint64_t num_dequeues = fq_cl->fcl_stat.fcl_dequeue;

	if (num_dequeues == 0) {
		fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
	} else if (qdelay > 0) {
		uint64_t res = 0;
		if (os_add_overflow(num_dequeues, 1, &res)) {
			/* Reset the dequeue num and dequeue bytes */
			fq_cl->fcl_stat.fcl_dequeue = num_dequeues = 0;
			fq_cl->fcl_stat.fcl_dequeue_bytes = 0;
			fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			os_log_info(OS_LOG_DEFAULT, "%s: dequeue num overflow, "
			    "flow: 0x%x, iface: %s", __func__, fq->fq_flowhash,
			    if_name(fqs->fqs_ifq->ifcq_ifp));
		} else {
			uint64_t product = 0;
			if (os_mul_overflow(fq_cl->fcl_stat.fcl_avg_qdelay,
			    num_dequeues, &product) || os_add_overflow(product, qdelay, &res)) {
				fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			} else {
				fq_cl->fcl_stat.fcl_avg_qdelay = res /
				    (num_dequeues + 1);
			}
		}
	}

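	/*
	 * For L4S traffic, congestion is signaled by CE-marking the packet
	 * once its queueing delay exceeds the (shallow) threshold, rather
	 * than by the classic target/interval logic below.
	 */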
	if (ifclassq_enable_l4s && l4s_pkt) {
		if ((l4s_ce_threshold != 0 && qdelay > l4s_ce_threshold) ||
		    (l4s_ce_threshold == 0 && qdelay > FQ_TARGET_DELAY(fq))) {
			if (pktsched_mark_ecn(pkt) == 0) {
				fq_cl->fcl_stat.fcl_ce_marked++;
			} else {
				fq_cl->fcl_stat.fcl_ce_mark_failures++;
			}
		}
		/* skip steps not needed for L4S traffic */
		goto out;
	}

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}

	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > FQ_TARGET_DELAY(fq)) {
			if (!FQ_IS_DELAY_HIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
				os_log_error(OS_LOG_DEFAULT,
				    "%s: high delay idx: %d, %llu, flow: 0x%x, "
				    "iface: %s", __func__, fq->fq_sc_index,
				    fq->fq_min_qdelay, fq->fq_flowhash,
				    if_name(fqs->fqs_ifq->ifcq_ifp));
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + FQ_UPDATE_INTERVAL(fq);
		fq->fq_min_qdelay = 0;
	}

out:
	if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs)) {
		FQ_CLEAR_OVERWHELMING(fq);
	}
	if (!FQ_IS_DELAY_HIGH(fq) || fq_empty(fq, fqs->fqs_ptype)) {
		FQ_CLEAR_DELAY_HIGH(fq);
	}

	if ((fq->fq_flags & FQF_FLOWCTL_ON) &&
	    !FQ_IS_DELAY_HIGH(fq) && !FQ_IS_OVERWHELMING(fq)) {
		fq_if_flow_feedback(fqs, fq, fq_cl);
	}

	if (fq_empty(fq, fqs->fqs_ptype)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}
741