/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The migration of a flow queue between the different states is summarized
 * in the state diagram below (RFC 8290):
 *
 * +-----------------+                +------------------+
 * |                 |     Empty      |                  |
 * |     Empty       |<---------------+       Old        +----+
 * |                 |                |                  |    |
 * +-------+---------+                +------------------+    |
 *         |                             ^            ^       |Credits
 *         |Arrival                      |            |       |Exhausted
 *         v                             |            |       |
 * +-----------------+                   |            |       |
 * |                 |      Empty or     |            |       |
 * |      New        +-------------------+            +-------+
 * |                 | Credits Exhausted
 * +-----------------+
 *
 * In this implementation of FQ-CODEL, a flow queue is a dynamically
 * allocated object. An active flow queue goes through the above cycle of
 * state transitions very often. To avoid the cost of frequent flow queue
 * object allocation/free, this implementation retains the flow queue object
 * in the [Empty] state on an Empty flow queue list, with an active reference
 * in the flow queue hash table. The flow queue objects on the Empty flow
 * queue list have an associated age and are purged accordingly.
 */
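
/*
 * As an illustrative, non-authoritative sketch (the real logic lives in
 * fq_addq() below and in the pktsched_fq_codel.c scheduler), the state
 * transitions above roughly correspond to the following flag and list
 * manipulations; FQF_NEW_FLOW, FQF_OLD_FLOW, FQF_EMPTY_FLOW and the
 * fcl_new_flows list are the names used in this file:
 *
 *	// [Empty]/idle -> [New]: on arrival, fq_addq() activates the queue
 *	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
 *		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
 *		fq->fq_flags |= FQF_NEW_FLOW;
 *		fq->fq_deficit = fq_cl->fcl_quantum;
 *	}
 *
 *	// [New] -> [Old]: the scheduler moves the flow when it empties or
 *	// its deficit (credits) is exhausted; [Old] -> [Empty]: when an old
 *	// flow empties, fq_if_move_to_empty_flow() parks the object on the
 *	// empty list instead of freeing it, to be aged out later.
 */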

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/sdt.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

#include <netinet/tcp_var.h>

#define FQ_ZONE_MAX     (32 * 1024)     /* across all interfaces */

#define DTYPE_NODROP    0       /* no drop */
#define DTYPE_FORCED    1       /* a "forced" drop */
#define DTYPE_EARLY     2       /* an "unforced" (early) drop */

static uint32_t pkt_compressor = 1;
static uint64_t l4s_ce_threshold = 0; /* in usec */
static uint32_t l4s_local_ce_report = 0;
static uint64_t pkt_pacing_leeway = 0; /* in usec */
static uint64_t max_pkt_pacing_interval = 3 * NSEC_PER_SEC;
static uint64_t l4s_min_delay_threshold = 20 * NSEC_PER_MSEC; /* 20 ms */
#if (DEBUG || DEVELOPMENT)
SYSCTL_NODE(_net_classq, OID_AUTO, flow_q, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "FQ-CODEL parameters");

SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, pkt_compressor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_compressor, 0, "enable pkt compression");

SYSCTL_QUAD(_net_classq, OID_AUTO, l4s_ce_threshold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_ce_threshold,
    "L4S CE threshold");

SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, l4s_local_ce_report,
    CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_local_ce_report, 0,
    "enable L4S local CE report");

SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, pkt_pacing_leeway,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_pacing_leeway, "packet pacing leeway");

SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, max_pkt_pacing_interval,
    CTLFLAG_RW | CTLFLAG_LOCKED, &max_pkt_pacing_interval, "max packet pacing interval");

SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, l4s_min_delay_threshold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_min_delay_threshold, "l4s min delay threshold");
#endif /* (DEBUG || DEVELOPMENT) */

void
fq_codel_init(void)
{
	_CASSERT(AQM_KTRACE_AON_FLOW_HIGH_DELAY == 0x8300004);
	_CASSERT(AQM_KTRACE_AON_THROTTLE == 0x8300008);
	_CASSERT(AQM_KTRACE_AON_FLOW_OVERWHELMING == 0x830000c);
	_CASSERT(AQM_KTRACE_AON_FLOW_DQ_STALL == 0x8300010);

	_CASSERT(AQM_KTRACE_STATS_FLOW_ENQUEUE == 0x8310004);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DEQUEUE == 0x8310008);
	_CASSERT(AQM_KTRACE_STATS_FLOW_CTL == 0x831000c);
	_CASSERT(AQM_KTRACE_STATS_FLOW_ALLOC == 0x8310010);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DESTROY == 0x8310014);
	_CASSERT(AQM_KTRACE_STATS_FLOW_REPORT_CE == 0x8310018);
	_CASSERT(AQM_KTRACE_STATS_GET_QLEN == 0x831001c);
}

fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;

	fq = kalloc_type(fq_t, Z_WAITOK_ZERO);
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
#if SKYWALK
	else {
		VERIFY(ptype == QP_PACKET);
		KPKTQ_INIT(&fq->fq_kpktq);
	}
#endif /* SKYWALK */
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;

	return fq;
}

void
fq_destroy(fq_t *fq, classq_pkt_type_t ptype)
{
	VERIFY(!fq->fq_in_dqlist);
	VERIFY(fq_empty(fq, ptype));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW |
	    FQF_EMPTY_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	kfree_type(fq_t, fq);
}
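
/*
 * A minimal sketch of the allocation/teardown contract, assuming an mbuf
 * classq (illustrative only; real flow queues are created on demand by
 * fq_if_hash_pkt() and torn down from the empty-flow purge path, which
 * also maintain the hash table linkage):
 *
 *	fq_t *fq = fq_alloc(QP_MBUF);
 *	if (fq == NULL) {
 *		return ENOMEM;          // allocation may fail; caller drops
 *	}
 *	...packets flow through fq_addq()/fq_getq_flow()...
 *	// fq_destroy() asserts the queue is drained, off the dequeue list,
 *	// and carries no NEW/OLD/EMPTY flags, so detach it first.
 *	fq_destroy(fq, QP_MBUF);
 */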

static inline void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime, update_interval;
	if (FQ_IS_DELAY_HIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq, fqs->fqs_ptype) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}

	update_interval = FQ_UPDATE_INTERVAL(flowq);
	maxgetqtime = flowq->fq_getqtime + update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue in an update interval's worth of
		 * time, which means that the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
		os_log_error(OS_LOG_DEFAULT, "%s:num: %d, "
		    "scidx: %d, flow: 0x%x, iface: %s grp: %hhu", __func__,
		    fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
		    flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp),
		    FQ_GROUP(flowq)->fqg_index);
		KDBG(AQM_KTRACE_AON_FLOW_DQ_STALL, flowq->fq_flowhash,
		    AQM_KTRACE_FQ_GRP_SC_IDX(flowq), flowq->fq_bytes,
		    (*now) - flowq->fq_getqtime);
	}
}

void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}


static int
fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
    pktsched_pkt_t *pkt)
{
	classq_pkt_type_t ptype = fqs->fqs_ptype;
	uint32_t comp_gencnt = 0;
	uint64_t *pkt_timestamp;
	uint64_t old_timestamp = 0;
	uint32_t old_pktlen = 0;
	struct ifclassq *ifq = fqs->fqs_ifq;

	if (__improbable(pkt_compressor == 0)) {
		return 0;
	}

	pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
	    &comp_gencnt, NULL);

	if (comp_gencnt == 0) {
		return 0;
	}

	fq_cl->fcl_stat.fcl_pkts_compressible++;

	if (fq_empty(fq, fqs->fqs_ptype)) {
		return 0;
	}

	if (ptype == QP_MBUF) {
		struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);

		if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
			return 0;
		}

		/* If we get here, we should merge/replace the segment */
		MBUFQ_REMOVE(&fq->fq_mbufq, m);
		old_pktlen = m_pktlen(m);
		old_timestamp = m->m_pkthdr.pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		m_freem(m);
	}
#if SKYWALK
	else {
		struct __kern_packet *kpkt = KPKTQ_LAST(&fq->fq_kpktq);

		if (comp_gencnt != kpkt->pkt_comp_gencnt) {
			return 0;
		}

		/* If we get here, we should merge/replace the segment */
		KPKTQ_REMOVE(&fq->fq_kpktq, kpkt);
		old_pktlen = kpkt->pkt_length;
		old_timestamp = kpkt->pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		pp_free_packet(*(struct kern_pbufpool **)(uintptr_t)&
		    (((struct __kern_quantum *)kpkt)->qum_pp),
		    (uint64_t)kpkt);
	}
#endif /* SKYWALK */

	fq->fq_bytes -= old_pktlen;
	fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, old_pktlen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, old_pktlen);

	*pkt_timestamp = old_timestamp;

	return CLASSQEQ_COMPRESSED;
}
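
/*
 * Sketch of the net effect of fq_compressor() (illustrative): when a
 * transport marks successive packets of a flow with the same non-zero
 * comp_gencnt, a newly enqueued packet supersedes the one still sitting
 * at the tail of the flow queue. Assuming packet B arrives while A, with
 * the same gencnt, is queued:
 *
 *	flow queue tail: [ ... | A ]    enqueue(B), A.gencnt == B.gencnt
 *	  -> A is unlinked and freed, all byte/packet counters adjusted
 *	  -> B inherits A's enqueue timestamp (*pkt_timestamp)
 *	  -> fq_addq() sees CLASSQEQ_COMPRESSED and enqueues B as usual,
 *	     leaving [ ... | B ]
 *
 * so at most one packet of a compressible run occupies the queue.
 */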

int
fq_addq(fq_if_t *fqs, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
    fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, cnt;
	uint8_t pkt_proto, pkt_flowsrc;
	fq_tfc_type_t tfc_type = FQ_TFC_C;

	cnt = pkt->pktsched_pcnt;
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL, NULL);

	/*
	 * XXX Not walking the chain to set this flag on every packet.
	 * This flag is only used for debugging. Nothing is affected if it's
	 * not set.
	 */
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	if (ifclassq_enable_l4s) {
		tfc_type = pktsched_is_pkt_l4s(pkt) ? FQ_TFC_L4S : FQ_TFC_C;
	}

	/*
	 * Timestamps for every packet must be set prior to entering this path.
	 */
	now = *pkt_timestamp;
	ASSERT(now > 0);

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, true, tfc_type);
	if (__improbable(fq == NULL)) {
		DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_group == fq_grp);
	VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);

	KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
	    AQM_KTRACE_FQ_GRP_SC_IDX(fq),
	    fq->fq_bytes, pktsched_get_pkt_len(pkt));

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	/*
	 * Skip the dropping part if it's L4S. The flow control or ECN
	 * marking decision will be made at dequeue time.
	 */
	if (ifclassq_enable_l4s && tfc_type == FQ_TFC_L4S) {
		fq_cl->fcl_stat.fcl_l4s_pkts++;
		droptype = DTYPE_NODROP;
	}

	if (__improbable(FQ_IS_DELAY_HIGH(fq) || FQ_IS_OVERWHELMING(fq))) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the chain.
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early += cnt;
				IFCQ_DROP_ADD(fqs->fqs_ifq, cnt, pktsched_get_pkt_len(pkt));
			}
			DTRACE_IP6(flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		} else {
			/*
			 * Need to drop packets to make room for the new
			 * ones. Try to drop from the head of the queue
			 * instead of the latest packets.
			 */
			if (!fq_empty(fq, fqs->fqs_ptype)) {
				uint32_t i;

				for (i = 0; i < cnt; i++) {
					fq_head_drop(fqs, fq);
				}
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early += cnt;

			DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		}
	}

	/* Set the return code correctly */
	if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * If we could not flow control the flow, it is
			 * better to drop.
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
		DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
	}

	/*
	 * If the queue length hits the queue limit, drop a chain with the
	 * same number of packets from the front of the queue of the flow
	 * with the maximum number of bytes. This will penalize heavy and
	 * unresponsive flows. It will also avoid a tail drop.
	 */
	if (__improbable(droptype == DTYPE_NODROP &&
	    fq_if_at_drop_limit(fqs))) {
		uint32_t i;

		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			DTRACE_IP5(large__flow, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    pktsched_pkt_t *, pkt, uint32_t, cnt);

			for (i = 0; i < cnt; i++) {
				fq_head_drop(fqs, fq);
			}
			fq_cl->fcl_stat.fcl_drop_overflow += cnt;

			/*
			 * TCP and QUIC will react to the loss of those
			 * head-dropped pkts and adjust the send rate.
			 */
			if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
			    (*pkt_flags & PKTF_FLOW_ADV) &&
			    (pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
					fq->fq_flags |= FQF_FLOWCTL_ON;
					FQ_SET_OVERWHELMING(fq);
					fq_cl->fcl_stat.fcl_overwhelming++;
					/* deliver flow control advisory error */
					ret = CLASSQEQ_SUCCESS_FC;
				}
			}
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow += cnt;
				ret = CLASSQEQ_DROP;

				DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
				    fq_if_classq_t *, fq_cl, fq_t *, fq,
				    pktsched_pkt_t *, pkt, uint32_t, cnt);

				/*
				 * If this fq was freshly created and there
				 * is nothing to enqueue, move it to the
				 * empty list.
				 */
				if (fq_empty(fq, fqs->fqs_ptype) &&
				    !(fq->fq_flags & (FQF_NEW_FLOW |
				    FQF_OLD_FLOW))) {
					fq_if_move_to_empty_flow(fqs, fq_cl,
					    fq, now);
					fq = NULL;
				}
			} else {
				DTRACE_IP5(different__large__flow,
				    fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
				    fq_t *, fq, pktsched_pkt_t *, pkt,
				    uint32_t, cnt);

				for (i = 0; i < cnt; i++) {
					fq_if_drop_packet(fqs, now);
				}
			}
		}
	}

	fq_cl->fcl_flags &= ~FCL_PACED;

	if (__probable(droptype == DTYPE_NODROP)) {
		uint32_t chain_len = pktsched_get_pkt_len(pkt);
		int ret_compress = 0;

		/*
		 * We do not compress if we are enqueuing a chain.
		 * Traversing the chain to look for acks would defeat the
		 * purpose of batch enqueueing.
		 */
		if (cnt == 1) {
			ret_compress = fq_compressor(fqs, fq, fq_cl, pkt);
			if (ret_compress == CLASSQEQ_COMPRESSED) {
				fq_cl->fcl_stat.fcl_pkts_compressed++;
			}
		}
		DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
		    fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
		fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
		    pkt->pktsched_ptype);

		fq->fq_bytes += chain_len;
		fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
		fq_cl->fcl_stat.fcl_pkt_cnt += cnt;

		/*
		 * Check if this queue will qualify to be the next
		 * victim queue.
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of the new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}
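
/*
 * How a caller is expected to act on fq_addq()'s return value, as a hedged
 * sketch (the authoritative handling lives in the fq_if_ enqueue path, not
 * here; the CLASSQEQ_* codes come from the classq headers):
 *
 *	switch (fq_addq(fqs, fq_grp, pkt, fq_cl)) {
 *	case CLASSQEQ_SUCCESS:          // enqueued, nothing more to do
 *		break;
 *	case CLASSQEQ_SUCCESS_FC:       // enqueued; assert flow control
 *		break;                  // advisory back to the flow source
 *	case CLASSQEQ_DROP:             // not enqueued; free the chain
 *	case CLASSQEQ_DROP_FC:          // dropped and flow controlled
 *		pktsched_free_pkt(pkt);
 *		break;
 *	}
 */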

void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p, fqs->fqs_ptype);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	fq->fq_next_tx_time = FQ_INVALID_TX_TS;

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &FQ_CLASSQ(fq);
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	fq_cl->fcl_flags &= ~FCL_PACED;

	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq, fqs->fqs_ptype)) {
		fq->fq_getqtime = 0;
	}
}

/*
 * fq_get_next_tx_time returns FQ_INVALID_TX_TS when there is no tx time
 * in the fq.
 */
static uint64_t
fq_get_next_tx_time(fq_if_t *fqs, fq_t *fq)
{
	uint64_t tx_time = FQ_INVALID_TX_TS;

	/*
	 * Check the cached value in the fq first.
	 */
	if (fq->fq_next_tx_time != FQ_INVALID_TX_TS) {
		return fq->fq_next_tx_time;
	}

	switch (fqs->fqs_ptype) {
	case QP_MBUF: {
		struct mbuf *m;
		if ((m = MBUFQ_FIRST(&fq->fq_mbufq)) != NULL) {
			struct m_tag *tag;
			tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
			    KERNEL_TAG_TYPE_AQM);
			if (tag != NULL) {
				tx_time = *(uint64_t *)tag->m_tag_data;
			}
		}
		break;
	}
	case QP_PACKET: {
		struct __kern_packet *p = KPKTQ_FIRST(&fq->fq_kpktq);
		if (__probable(p != NULL && (p->pkt_pflags & PKT_F_OPT_TX_TIMESTAMP) != 0)) {
			tx_time = p->pkt_com_opt->__po_pkt_tx_time;
		}
		break;
	}
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	/*
	 * Cache the tx time in the fq. The cache is cleared on dequeue or
	 * drop from the fq.
	 */
	fq->fq_next_tx_time = tx_time;

	return tx_time;
}

/*
 * fq_tx_time_ready returns true when the fq is empty so that it doesn't
 * affect caller logic that handles an empty flow.
 */
boolean_t
fq_tx_time_ready(fq_if_t *fqs, fq_t *fq, uint64_t now, uint64_t *ready_time)
{
	uint64_t pkt_tx_time;
	fq_if_classq_t *fq_cl = &FQ_CLASSQ(fq);

	if (!ifclassq_enable_pacing || !ifclassq_enable_l4s || fq->fq_tfc_type != FQ_TFC_L4S) {
		return TRUE;
	}

	pkt_tx_time = fq_get_next_tx_time(fqs, fq);
	if (ready_time != NULL) {
		*ready_time = pkt_tx_time;
	}

	if (pkt_tx_time <= now + pkt_pacing_leeway ||
	    pkt_tx_time == FQ_INVALID_TX_TS) {
		return TRUE;
	}

	/*
	 * Ignore the tx time if it's scheduled too far in the future.
	 */
	if (pkt_tx_time > max_pkt_pacing_interval + now) {
		fq_cl->fcl_stat.fcl_ignore_tx_time++;
		return TRUE;
	}

	ASSERT(pkt_tx_time != FQ_INVALID_TX_TS);
	return FALSE;
}
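
/*
 * Sketch of the intended caller pattern (illustrative; the real check is
 * made by the scheduler before dequeueing from a paced L4S flow). Because
 * an empty flow reports TRUE, pacing can be tested before the usual
 * empty-flow handling without disturbing it:
 *
 *	uint64_t ready_time;
 *	if (!fq_tx_time_ready(fqs, fq, now, &ready_time)) {
 *		// earliest tx time not reached; keep the class paced and
 *		// revisit the flow no earlier than ready_time
 *		fq_cl->fcl_flags |= FCL_PACED;
 *	} else {
 *		fq_getq_flow(fqs, fq, &pkt, now);
 *	}
 */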

void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt, uint64_t now)
{
	fq_if_classq_t *fq_cl = &FQ_CLASSQ(fq);
	int64_t qdelay = 0;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp, pkt_tx_time = 0, pacing_delay = 0;
	uint64_t fq_min_delay_threshold = FQ_TARGET_DELAY(fq);
	uint8_t pkt_flowsrc;
	boolean_t l4s_pkt;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, &pkt_flowsrc,
	    NULL, NULL, &pkt_tx_time);
	l4s_pkt = pktsched_is_pkt_l4s(pkt);
	if (ifclassq_enable_pacing && ifclassq_enable_l4s) {
		if (pkt_tx_time > *pkt_timestamp) {
			pacing_delay = pkt_tx_time - *pkt_timestamp;
			fq_cl->fcl_stat.fcl_paced_pkts++;
			DTRACE_SKYWALK3(aqm__pacing__delta, uint64_t, now - pkt_tx_time,
			    fq_if_t *, fqs, fq_t *, fq);
		}
#if (DEVELOPMENT || DEBUG)
		else {
			DTRACE_SKYWALK5(aqm__miss__pacing__delay, uint64_t, *pkt_timestamp,
			    uint64_t, pkt_tx_time, uint64_t, now, fq_if_t *,
			    fqs, fq_t *, fq);
		}
#endif // (DEVELOPMENT || DEBUG)
	}

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}

	/* Update min/max/avg qdelay for the respective class */
	if (fq_cl->fcl_stat.fcl_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq_cl->fcl_stat.fcl_min_qdelay)) {
		fq_cl->fcl_stat.fcl_min_qdelay = qdelay;
	}

	if (fq_cl->fcl_stat.fcl_max_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay > fq_cl->fcl_stat.fcl_max_qdelay)) {
		fq_cl->fcl_stat.fcl_max_qdelay = qdelay;
	}

	uint64_t num_dequeues = fq_cl->fcl_stat.fcl_dequeue;

	/*
	 * Running average: avg' = (avg * n + qdelay) / (n + 1), computed
	 * with overflow-checked arithmetic; on overflow the average
	 * restarts from the current sample.
	 */
	if (num_dequeues == 0) {
		fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
	} else if (qdelay > 0) {
		uint64_t res = 0;
		if (os_add_overflow(num_dequeues, 1, &res)) {
			/* Reset the dequeue num and dequeue bytes */
			fq_cl->fcl_stat.fcl_dequeue = num_dequeues = 0;
			fq_cl->fcl_stat.fcl_dequeue_bytes = 0;
			fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			os_log_info(OS_LOG_DEFAULT, "%s: dequeue num overflow, "
			    "flow: 0x%x, iface: %s", __func__, fq->fq_flowhash,
			    if_name(fqs->fqs_ifq->ifcq_ifp));
		} else {
			uint64_t product = 0;
			if (os_mul_overflow(fq_cl->fcl_stat.fcl_avg_qdelay,
			    num_dequeues, &product) || os_add_overflow(product, qdelay, &res)) {
				fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			} else {
				fq_cl->fcl_stat.fcl_avg_qdelay = res /
				    (num_dequeues + 1);
			}
		}
	}

	fq->fq_pkts_since_last_report++;
	if (ifclassq_enable_l4s && l4s_pkt) {
		/*
		 * A safeguard to make sure that L4S is not going to build a
		 * huge queue if we encounter unexpected problems (e.g., if
		 * ACKs don't arrive in a timely manner due to congestion in
		 * the reverse path).
		 */
		fq_min_delay_threshold = l4s_min_delay_threshold;

		if ((l4s_ce_threshold != 0 && qdelay > l4s_ce_threshold + pacing_delay) ||
		    (l4s_ce_threshold == 0 && qdelay > FQ_TARGET_DELAY(fq) + pacing_delay)) {
			DTRACE_SKYWALK4(aqm__mark__ce, uint64_t, qdelay, uint64_t, pacing_delay,
			    fq_if_t *, fqs, fq_t *, fq);
			KDBG(AQM_KTRACE_STATS_FLOW_REPORT_CE, fq->fq_flowhash,
			    AQM_KTRACE_FQ_GRP_SC_IDX(fq), qdelay, pacing_delay);
			/*
			 * The packet buffer that pktsched_mark_ecn writes to can be pageable.
			 * Since it is not safe to write to pageable memory while preemption
			 * is disabled, convert the spin lock into a mutex.
			 */
			IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
			if (__improbable(l4s_local_ce_report != 0) &&
			    (*pkt_flags & PKTF_FLOW_ADV) != 0 &&
			    fq_if_report_ce(fqs, pkt, 1, fq->fq_pkts_since_last_report)) {
				fq->fq_pkts_since_last_report = 0;
				fq_cl->fcl_stat.fcl_ce_reported++;
			} else if (pktsched_mark_ecn(pkt) == 0) {
				fq_cl->fcl_stat.fcl_ce_marked++;
			} else {
				fq_cl->fcl_stat.fcl_ce_mark_failures++;
			}
		}
	}

	ASSERT(pacing_delay <= INT64_MAX);
	qdelay = MAX(0, qdelay - (int64_t)pacing_delay);
	if (fq->fq_min_qdelay == 0 ||
	    (u_int64_t)qdelay < fq->fq_min_qdelay) {
		fq->fq_min_qdelay = qdelay;
	}

	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > fq_min_delay_threshold) {
			if (!FQ_IS_DELAY_HIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
				os_log_error(OS_LOG_DEFAULT,
				    "%s: scidx: %d, %llu, flow: 0x%x, "
				    "iface: %s, grp: %hhu\n", __func__, fq->fq_sc_index,
				    fq->fq_min_qdelay, fq->fq_flowhash,
				    if_name(fqs->fqs_ifq->ifcq_ifp),
				    FQ_GROUP(fq)->fqg_index);
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + FQ_UPDATE_INTERVAL(fq);
		fq->fq_min_qdelay = 0;
	}

	if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs)) {
		FQ_CLEAR_OVERWHELMING(fq);
	}
	if (!FQ_IS_DELAY_HIGH(fq) || fq_empty(fq, fqs->fqs_ptype)) {
		FQ_CLEAR_DELAY_HIGH(fq);
	}

	if ((fq->fq_flags & FQF_FLOWCTL_ON) &&
	    !FQ_IS_DELAY_HIGH(fq) && !FQ_IS_OVERWHELMING(fq)) {
		fq_if_flow_feedback(fqs, fq, fq_cl);
	}

	if (fq_empty(fq, fqs->fqs_ptype)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}
884