1 /*
2 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
 30  * The migration of a flow queue between the different states is summarized
 31  * in the state diagram below (see RFC 8290).
32 *
33 * +-----------------+ +------------------+
34 * | | Empty | |
35 * | Empty |<---------------+ Old +----+
36 * | | | | |
37 * +-------+---------+ +------------------+ |
38 * | ^ ^ |Credits
39 * |Arrival | | |Exhausted
40 * v | | |
41 * +-----------------+ | | |
42 * | | Empty or | | |
43 * | New +-------------------+ +-------+
44 * | | Credits Exhausted
45 * +-----------------+
46 *
 47  * In this implementation of FQ-CoDel, a flow queue is a dynamically
 48  * allocated object. An active flow queue goes through the above cycle of
 49  * state transitions very often. To avoid the cost of frequently allocating
 50  * and freeing flow queue objects, this implementation keeps the flow queue
 51  * object in the [Empty] state on an Empty flow queue list, with an active
 52  * reference in the flow queue hash table. The flow queue objects on the
 53  * Empty flow queue list have an associated age and are purged accordingly.
54 */
55
56 #include <sys/cdefs.h>
57 #include <sys/param.h>
58 #include <sys/mbuf.h>
59 #include <sys/socket.h>
60 #include <sys/sockio.h>
61 #include <sys/systm.h>
62 #include <sys/syslog.h>
63 #include <sys/proc.h>
64 #include <sys/errno.h>
65 #include <sys/kernel.h>
66 #include <sys/kauth.h>
67 #include <sys/sdt.h>
68 #include <kern/zalloc.h>
69 #include <netinet/in.h>
70
71 #include <net/classq/classq.h>
72 #include <net/classq/if_classq.h>
73 #include <net/pktsched/pktsched.h>
74 #include <net/pktsched/pktsched_fq_codel.h>
75 #include <net/classq/classq_fq_codel.h>
76 #include <net/droptap.h>
77
78 #include <netinet/tcp_var.h>
79
80 #define FQ_ZONE_MAX (32 * 1024) /* across all interfaces */
81
82 #define DTYPE_NODROP 0 /* no drop */
83 #define DTYPE_FORCED 1 /* a "forced" drop */
84 #define DTYPE_EARLY 2 /* an "unforced" (early) drop */
85
86 static uint32_t pkt_compressor = 1;
87 static uint64_t l4s_ce_threshold = 0; /* in usec */
88 static uint32_t l4s_local_ce_report = 0;
89 static uint64_t pkt_pacing_leeway = 0; /* in usec */
90 static uint64_t max_pkt_pacing_interval = 3 * NSEC_PER_SEC;
91 static uint64_t l4s_min_delay_threshold = 20 * NSEC_PER_MSEC; /* 20 ms */
92 #if (DEBUG || DEVELOPMENT)
93 SYSCTL_NODE(_net_classq, OID_AUTO, flow_q, CTLFLAG_RW | CTLFLAG_LOCKED,
94 0, "FQ-CODEL parameters");
95
96 SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, pkt_compressor,
97 CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_compressor, 0, "enable pkt compression");
98
 99 SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, l4s_ce_threshold,
100 CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_ce_threshold,
101 "L4S CE threshold");
102
103 SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, l4s_local_ce_report,
104 CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_local_ce_report, 0,
105 "enable L4S local CE report");
106
107 SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, pkt_pacing_leeway,
108 CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_pacing_leeway, "packet pacing leeway");
109
110 SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, max_pkt_pacing_interval,
111 CTLFLAG_RW | CTLFLAG_LOCKED, &max_pkt_pacing_interval, "max packet pacing interval");
112
113 SYSCTL_QUAD(_net_classq_flow_q, OID_AUTO, l4s_min_delay_threshold,
114 CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_min_delay_threshold, "l4s min delay threshold");
115 #endif /* (DEBUG || DEVELOPMENT) */
116
117 void
118 fq_codel_init(void)
119 {
120 static_assert(AQM_KTRACE_AON_FLOW_HIGH_DELAY == 0x8300004);
121 static_assert(AQM_KTRACE_AON_THROTTLE == 0x8300008);
122 static_assert(AQM_KTRACE_AON_FLOW_OVERWHELMING == 0x830000c);
123 static_assert(AQM_KTRACE_AON_FLOW_DQ_STALL == 0x8300010);
124
125 static_assert(AQM_KTRACE_STATS_FLOW_ENQUEUE == 0x8310004);
126 static_assert(AQM_KTRACE_STATS_FLOW_DEQUEUE == 0x8310008);
127 static_assert(AQM_KTRACE_STATS_FLOW_CTL == 0x831000c);
128 static_assert(AQM_KTRACE_STATS_FLOW_ALLOC == 0x8310010);
129 static_assert(AQM_KTRACE_STATS_FLOW_DESTROY == 0x8310014);
130 static_assert(AQM_KTRACE_STATS_FLOW_REPORT_CE == 0x8310018);
131 static_assert(AQM_KTRACE_STATS_GET_QLEN == 0x831001c);
132 static_assert(AQM_KTRACE_TX_NOT_READY == 0x8310020);
133 static_assert(AQM_KTRACE_TX_PACEMAKER == 0x8310024);
134 static_assert(AQM_KTRACE_PKT_DROP == 0x8310028);
135 static_assert(AQM_KTRACE_OK_TO_DROP == 0x831002c);
136 static_assert(AQM_KTRACE_CONGESTION_INC == 0x8310030);
137 static_assert(AQM_KTRACE_CONGESTION_NOTIFIED == 0x8310034);
138 }
139
140 fq_t *
141 fq_alloc(classq_pkt_type_t ptype)
142 {
143 fq_t *fq = NULL;
144
145 fq = kalloc_type(fq_t, Z_WAITOK_ZERO);
146 if (ptype == QP_MBUF) {
147 MBUFQ_INIT(&fq->fq_mbufq);
148 }
149 #if SKYWALK
150 else {
151 VERIFY(ptype == QP_PACKET);
152 KPKTQ_INIT(&fq->fq_kpktq);
153 }
154 #endif /* SKYWALK */
155 CLASSQ_PKT_INIT(&fq->fq_dq_head);
156 CLASSQ_PKT_INIT(&fq->fq_dq_tail);
157 fq->fq_in_dqlist = false;
158
159 return fq;
160 }
161
162 void
163 fq_destroy(fq_t *fq, classq_pkt_type_t ptype)
164 {
165 VERIFY(!fq->fq_in_dqlist);
166 VERIFY(fq_empty(fq, ptype));
167 VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW |
168 FQF_EMPTY_FLOW)));
169 VERIFY(fq->fq_bytes == 0);
170 kfree_type(fq_t, fq);
171 }
172
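/*
 * Flag a flow as suffering a dequeue stall: no packet has left the queue
 * for a full update interval even though the flow is holding at least
 * FQ_MIN_FC_THRESHOLD_BYTES of data.
 */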
173 static inline void
174 fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
175 u_int64_t *now)
176 {
177 u_int64_t maxgetqtime, update_interval;
178 if (FQ_IS_DELAY_HIGH(flowq) || flowq->fq_getqtime == 0 ||
179 fq_empty(flowq, fqs->fqs_ptype) ||
180 flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
181 return;
182 }
183
184 update_interval = FQ_UPDATE_INTERVAL(flowq);
185 maxgetqtime = flowq->fq_getqtime + update_interval;
186 if ((*now) > maxgetqtime) {
187 /*
188 		 * There was no dequeue for an entire update interval,
189 		 * which means that the queue is stalled.
190 */
191 FQ_SET_DELAY_HIGH(flowq);
192 fq_cl->fcl_stat.fcl_dequeue_stall++;
193 os_log_error(OS_LOG_DEFAULT, "%s:num: %d, "
194 "scidx: %d, flow: 0x%x, iface: %s grp: %hhu", __func__,
195 fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
196 flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp),
197 FQ_GROUP(flowq)->fqg_index);
198 KDBG(AQM_KTRACE_AON_FLOW_DQ_STALL, flowq->fq_flowhash,
199 AQM_KTRACE_FQ_GRP_SC_IDX(flowq), flowq->fq_bytes,
200 (*now) - flowq->fq_getqtime);
201 }
202 }
203
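/*
 * Drop a single packet from the head of the flow queue, charge it to the
 * interface queue drop statistics and free it.
 */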
204 void
205 fq_head_drop(fq_if_t *fqs, fq_t *fq)
206 {
207 pktsched_pkt_t pkt;
208 volatile uint32_t *__single pkt_flags;
209 uint64_t *__single pkt_timestamp;
210 struct ifclassq *ifq = fqs->fqs_ifq;
211
212 _PKTSCHED_PKT_INIT(&pkt);
213 fq_getq_flow_internal(fqs, fq, &pkt);
214 if (pkt.pktsched_pkt_mbuf == NULL) {
215 return;
216 }
217
218 pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
219 NULL, NULL, NULL);
220
221 *pkt_timestamp = 0;
222 switch (pkt.pktsched_ptype) {
223 case QP_MBUF:
224 *pkt_flags &= ~PKTF_PRIV_GUARDED;
225 break;
226 #if SKYWALK
227 case QP_PACKET:
228 /* sanity check */
229 ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
230 break;
231 #endif /* SKYWALK */
232 default:
233 VERIFY(0);
234 /* NOTREACHED */
235 __builtin_unreachable();
236 }
237
238 IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
239 IFCQ_CONVERT_LOCK(ifq);
240 pktsched_free_pkt(&pkt);
241 }
242
243
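/*
 * Enqueue-time packet compression: if the packet being enqueued carries the
 * same compression generation count as the packet at the tail of the flow
 * queue, the queued packet is superseded. The old packet is removed and
 * freed, its byte/packet accounting is rolled back, and its enqueue
 * timestamp is inherited by the new packet. This is primarily used to
 * coalesce redundant TCP ACKs. Returns CLASSQEQ_COMPRESSED when a packet
 * was replaced, 0 otherwise.
 */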
244 static int
245 fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
246 pktsched_pkt_t *pkt)
247 {
248 classq_pkt_type_t ptype = fqs->fqs_ptype;
249 uint32_t comp_gencnt = 0;
250 uint64_t *__single pkt_timestamp;
251 uint64_t old_timestamp = 0;
252 uint32_t old_pktlen = 0;
253 struct ifclassq *ifq = fqs->fqs_ifq;
254
255 if (__improbable(pkt_compressor == 0)) {
256 return 0;
257 }
258
259 pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
260 &comp_gencnt, NULL);
261
262 if (comp_gencnt == 0) {
263 return 0;
264 }
265
266 fq_cl->fcl_stat.fcl_pkts_compressible++;
267
268 if (fq_empty(fq, fqs->fqs_ptype)) {
269 return 0;
270 }
271
272 if (ptype == QP_MBUF) {
273 struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);
274
275 if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
276 return 0;
277 }
278
279 		/* If we get here, we should merge/replace the segment */
280 MBUFQ_REMOVE(&fq->fq_mbufq, m);
281 old_pktlen = m_pktlen(m);
282 old_timestamp = m->m_pkthdr.pkt_timestamp;
283
284 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
285
286 if (__improbable(droptap_verbose > 0)) {
287 droptap_output_mbuf(m, DROP_REASON_AQM_COMPRESSED, NULL, 0, 0,
288 fqs->fqs_ifq->ifcq_ifp);
289 }
290
291 m_freem(m);
292 }
293 #if SKYWALK
294 else {
295 struct __kern_packet *kpkt = KPKTQ_LAST(&fq->fq_kpktq);
296
297 if (comp_gencnt != kpkt->pkt_comp_gencnt) {
298 return 0;
299 }
300
301 		/* If we get here, we should merge/replace the segment */
302 KPKTQ_REMOVE(&fq->fq_kpktq, kpkt);
303 old_pktlen = kpkt->pkt_length;
304 old_timestamp = kpkt->pkt_timestamp;
305
306 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
307
308 if (__improbable(droptap_verbose > 0)) {
309 droptap_output_packet(SK_PKT2PH(kpkt), DROP_REASON_AQM_COMPRESSED, NULL, 0, 0,
310 fqs->fqs_ifq->ifcq_ifp, 0, NULL, -1, NULL, 0, 0);
311 }
312
313 struct kern_pbufpool *pp =
314 __DECONST(struct kern_pbufpool *, ((struct __kern_quantum *)kpkt)->qum_pp);
315 pp_free_packet(pp, (uint64_t)kpkt);
316 }
317 #endif /* SKYWALK */
318
319 fq->fq_bytes -= old_pktlen;
320 fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
321 fq_cl->fcl_stat.fcl_pkt_cnt--;
322 IFCQ_DEC_LEN(ifq);
323 IFCQ_DEC_BYTES(ifq, old_pktlen);
324
325 FQ_GRP_DEC_LEN(fq);
326 FQ_GRP_DEC_BYTES(fq, old_pktlen);
327
328 *pkt_timestamp = old_timestamp;
329
330 return CLASSQEQ_COMPRESSED;
331 }
332
333 int
334 fq_codel_enq_legacy(void *fqs_p, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
335 fq_if_classq_t *fq_cl)
336 {
337 fq_if_t *fqs = (fq_if_t *)fqs_p;
338 int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
339 u_int64_t now;
340 fq_t *fq = NULL;
341 uint64_t *__single pkt_timestamp;
342 volatile uint32_t *__single pkt_flags;
343 uint32_t pkt_flowid, cnt;
344 uint8_t pkt_proto, pkt_flowsrc;
345 fq_tfc_type_t tfc_type = FQ_TFC_C;
346
347 cnt = pkt->pktsched_pcnt;
348 pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
349 &pkt_flowsrc, &pkt_proto, NULL, NULL);
350
351 /*
352 * XXX Not walking the chain to set this flag on every packet.
353 * This flag is only used for debugging. Nothing is affected if it's
354 * not set.
355 */
356 switch (pkt->pktsched_ptype) {
357 case QP_MBUF:
358 /* See comments in <rdar://problem/14040693> */
359 VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
360 *pkt_flags |= PKTF_PRIV_GUARDED;
361 break;
362 #if SKYWALK
363 case QP_PACKET:
364 /* sanity check */
365 ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
366 break;
367 #endif /* SKYWALK */
368 default:
369 VERIFY(0);
370 /* NOTREACHED */
371 __builtin_unreachable();
372 }
373
374 if (fq_codel_enable_l4s) {
375 tfc_type = pktsched_is_pkt_l4s(pkt) ? FQ_TFC_L4S : FQ_TFC_C;
376 }
377
378 /*
379 * Timestamps for every packet must be set prior to entering this path.
380 */
381 now = *pkt_timestamp;
382 ASSERT(now > 0);
383
384 /* find the flowq for this packet */
385 fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
386 now, pkt_proto, pkt_flowsrc, tfc_type, true);
387 if (__improbable(fq == NULL)) {
388 DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
389 /* drop the packet if we could not allocate a flow queue */
390 fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
391 return CLASSQEQ_DROP;
392 }
393 VERIFY(fq->fq_group == fq_grp);
394 VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);
395
396 KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
397 AQM_KTRACE_FQ_GRP_SC_IDX(fq),
398 fq->fq_bytes, pktsched_get_pkt_len(pkt));
399
400 fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);
401
402 /*
403 	 * Skip the dropping part if it's L4S. The flow control or ECN marking
404 	 * decision will be made at dequeue time.
405 */
406 if (fq_codel_enable_l4s && tfc_type == FQ_TFC_L4S) {
407 fq_cl->fcl_stat.fcl_l4s_pkts += cnt;
408 droptype = DTYPE_NODROP;
409 }
410
411 if (__improbable(FQ_IS_DELAY_HIGH(fq) || FQ_IS_OVERWHELMING(fq))) {
412 if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
413 (*pkt_flags & PKTF_FLOW_ADV)) {
414 fc_adv = 1;
415 /*
416 * If the flow is suspended or it is not
417 * TCP/QUIC, drop the chain.
418 */
419 if ((pkt_proto != IPPROTO_TCP) &&
420 (pkt_proto != IPPROTO_QUIC)) {
421 droptype = DTYPE_EARLY;
422 fq_cl->fcl_stat.fcl_drop_early += cnt;
423 IFCQ_DROP_ADD(fqs->fqs_ifq, cnt, pktsched_get_pkt_len(pkt));
424 }
425 DTRACE_IP6(flow__adv, fq_if_t *, fqs,
426 fq_if_classq_t *, fq_cl, fq_t *, fq,
427 int, droptype, pktsched_pkt_t *, pkt,
428 uint32_t, cnt);
429 } else {
430 /*
431 * Need to drop packets to make room for the new
432 * ones. Try to drop from the head of the queue
433 * instead of the latest packets.
434 */
435 if (!fq_empty(fq, fqs->fqs_ptype)) {
436 uint32_t i;
437
438 for (i = 0; i < cnt; i++) {
439 fq_head_drop(fqs, fq);
440 }
441 droptype = DTYPE_NODROP;
442 } else {
443 droptype = DTYPE_EARLY;
444 }
445 fq_cl->fcl_stat.fcl_drop_early += cnt;
446
447 DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
448 fq_if_classq_t *, fq_cl, fq_t *, fq,
449 int, droptype, pktsched_pkt_t *, pkt,
450 uint32_t, cnt);
451 }
452 }
453
454 /* Set the return code correctly */
455 if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
456 if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
457 fq->fq_flags |= FQF_FLOWCTL_ON;
458 /* deliver flow control advisory error */
459 if (droptype == DTYPE_NODROP) {
460 ret = CLASSQEQ_SUCCESS_FC;
461 } else {
462 /* dropped due to flow control */
463 ret = CLASSQEQ_DROP_FC;
464 }
465 } else {
466 /*
467 * if we could not flow control the flow, it is
468 * better to drop
469 */
470 droptype = DTYPE_FORCED;
471 ret = CLASSQEQ_DROP_FC;
472 fq_cl->fcl_stat.fcl_flow_control_fail++;
473 }
474 DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
475 }
476
477 /*
478 * If the queue length hits the queue limit, drop a chain with the
479 * same number of packets from the front of the queue for a flow with
480 * maximum number of bytes. This will penalize heavy and unresponsive
481 * flows. It will also avoid a tail drop.
482 */
483 if (__improbable(droptype == DTYPE_NODROP &&
484 fq_if_at_drop_limit(fqs))) {
485 uint32_t i;
486
487 if (fqs->fqs_large_flow == fq) {
488 /*
489 * Drop from the head of the current fq. Since a
490 * new packet will be added to the tail, it is ok
491 * to leave fq in place.
492 */
493 DTRACE_IP5(large__flow, fq_if_t *, fqs,
494 fq_if_classq_t *, fq_cl, fq_t *, fq,
495 pktsched_pkt_t *, pkt, uint32_t, cnt);
496
497 for (i = 0; i < cnt; i++) {
498 fq_head_drop(fqs, fq);
499 }
500 fq_cl->fcl_stat.fcl_drop_overflow += cnt;
501
502 /*
503 			 * TCP and QUIC will react to the loss of those head-dropped pkts
504 			 * and adjust their send rate.
505 */
506 if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
507 (*pkt_flags & PKTF_FLOW_ADV) &&
508 (pkt_proto != IPPROTO_TCP) &&
509 (pkt_proto != IPPROTO_QUIC)) {
510 if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
511 fq->fq_flags |= FQF_FLOWCTL_ON;
512 FQ_SET_OVERWHELMING(fq);
513 fq_cl->fcl_stat.fcl_overwhelming++;
514 /* deliver flow control advisory error */
515 ret = CLASSQEQ_SUCCESS_FC;
516 }
517 }
518 } else {
519 if (fqs->fqs_large_flow == NULL) {
520 droptype = DTYPE_FORCED;
521 fq_cl->fcl_stat.fcl_drop_overflow += cnt;
522 ret = CLASSQEQ_DROP;
523
524 DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
525 fq_if_classq_t *, fq_cl, fq_t *, fq,
526 pktsched_pkt_t *, pkt, uint32_t, cnt);
527
528 /*
529 * if this fq was freshly created and there
530 * is nothing to enqueue, move it to empty list
531 */
532 if (fq_empty(fq, fqs->fqs_ptype) &&
533 !(fq->fq_flags & (FQF_NEW_FLOW |
534 FQF_OLD_FLOW))) {
535 fq_if_move_to_empty_flow(fqs, fq_cl,
536 fq, now);
537 fq = NULL;
538 }
539 } else {
540 DTRACE_IP5(different__large__flow,
541 fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
542 fq_t *, fq, pktsched_pkt_t *, pkt,
543 uint32_t, cnt);
544
545 for (i = 0; i < cnt; i++) {
546 fq_if_drop_packet(fqs, now);
547 }
548 }
549 }
550 }
551
552 fq_cl->fcl_flags &= ~FCL_PACED;
553
554 if (__probable(droptype == DTYPE_NODROP)) {
555 uint32_t chain_len = pktsched_get_pkt_len(pkt);
556 int ret_compress = 0;
557
558 /*
559 * We do not compress if we are enqueuing a chain.
560 * Traversing the chain to look for acks would defeat the
561 * purpose of batch enqueueing.
562 */
563 if (cnt == 1) {
564 ret_compress = fq_compressor(fqs, fq, fq_cl, pkt);
565 if (ret_compress == CLASSQEQ_COMPRESSED) {
566 fq_cl->fcl_stat.fcl_pkts_compressed++;
567 }
568 }
569 DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
570 fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
571 fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
572 pkt->pktsched_ptype);
573
574 fq->fq_bytes += chain_len;
575 fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
576 fq_cl->fcl_stat.fcl_pkt_cnt += cnt;
577
578 /*
579 * check if this queue will qualify to be the next
580 * victim queue
581 */
582 fq_if_is_flow_heavy(fqs, fq);
583 } else {
584 DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
585 return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
586 }
587
588 /*
589 * If the queue is not currently active, add it to the end of new
590 * flows list for that service class.
591 */
592 if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
593 VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
594 STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
595 fq->fq_flags |= FQF_NEW_FLOW;
596
597 fq_cl->fcl_stat.fcl_newflows_cnt++;
598
599 fq->fq_deficit = fq_cl->fcl_quantum;
600 }
601 return ret;
602 }
603
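/*
 * Dequeue a single packet from the flow queue and update the per-flow,
 * per-class, per-group and interface queue byte/packet accounting. The
 * cached next tx time is invalidated and fq_getqtime is reset when the
 * queue drains.
 */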
604 void
605 fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
606 {
607 classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
608 uint32_t plen;
609 fq_if_classq_t *fq_cl;
610 struct ifclassq *ifq = fqs->fqs_ifq;
611
612 fq_dequeue(fq, &p, fqs->fqs_ptype);
613 if (p.cp_ptype == QP_INVALID) {
614 VERIFY(p.cp_mbuf == NULL);
615 return;
616 }
617
618 fq->fq_next_tx_time = FQ_INVALID_TX_TS;
619
620 pktsched_pkt_encap(pkt, &p);
621 plen = pktsched_get_pkt_len(pkt);
622
623 VERIFY(fq->fq_bytes >= plen);
624 fq->fq_bytes -= plen;
625
626 fq_cl = &FQ_CLASSQ(fq);
627 fq_cl->fcl_stat.fcl_byte_cnt -= plen;
628 fq_cl->fcl_stat.fcl_pkt_cnt--;
629 fq_cl->fcl_flags &= ~FCL_PACED;
630
631 IFCQ_DEC_LEN(ifq);
632 IFCQ_DEC_BYTES(ifq, plen);
633
634 FQ_GRP_DEC_LEN(fq);
635 FQ_GRP_DEC_BYTES(fq, plen);
636
637 /* Reset getqtime so that we don't count idle times */
638 if (fq_empty(fq, fqs->fqs_ptype)) {
639 fq->fq_getqtime = 0;
640 }
641 }
642
643 /*
644 * fq_get_next_tx_time returns FQ_INVALID_TX_TS when there is no tx time in fq
645 */
646 static uint64_t
647 fq_get_next_tx_time(fq_if_t *fqs, fq_t *fq)
648 {
649 uint64_t tx_time = FQ_INVALID_TX_TS;
650
651 /*
652 * Check the cached value in fq
653 */
654 if (fq->fq_next_tx_time != FQ_INVALID_TX_TS) {
655 return fq->fq_next_tx_time;
656 }
657
658 switch (fqs->fqs_ptype) {
659 case QP_MBUF: {
660 struct mbuf *m;
661 if ((m = MBUFQ_FIRST(&fq->fq_mbufq)) != NULL) {
662 struct m_tag *tag;
663 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
664 KERNEL_TAG_TYPE_AQM);
665 if (tag != NULL) {
666 tx_time = *(uint64_t *)tag->m_tag_data;
667 }
668 }
669 break;
670 }
671 case QP_PACKET: {
672 struct __kern_packet *p = KPKTQ_FIRST(&fq->fq_kpktq);
673 if (__probable(p != NULL)) {
674 tx_time = __packet_get_tx_timestamp(SK_PKT2PH(p));
675 }
676 break;
677 }
678 default:
679 VERIFY(0);
680 /* NOTREACHED */
681 __builtin_unreachable();
682 }
683
684 /*
685 	 * Cache the tx time in fq. The cache is cleared after a dequeue or drop
686 	 * from the fq.
687 */
688 fq->fq_next_tx_time = tx_time;
689
690 return tx_time;
691 }
692
693 /*
694  * fq_tx_time_ready returns true if the fq is empty so that it doesn't
695  * affect the caller's logic for handling empty flows.
696 */
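/*
 * When pacing is enabled, a packet is considered ready for transmission if
 * its tx timestamp is invalid, falls within pkt_pacing_leeway of now, or is
 * more than max_pkt_pacing_interval in the future (in which case the tx
 * time is ignored).
 */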
697 boolean_t
698 fq_tx_time_ready(fq_if_t *fqs, fq_t *fq, uint64_t now, uint64_t *ready_time)
699 {
700 uint64_t pkt_tx_time;
701 fq_if_classq_t *fq_cl = &FQ_CLASSQ(fq);
702
703 if (!fq_codel_enable_pacing) {
704 return TRUE;
705 }
706
707 pkt_tx_time = fq_get_next_tx_time(fqs, fq);
708 if (ready_time != NULL) {
709 *ready_time = pkt_tx_time;
710 }
711
712 if (pkt_tx_time <= now + pkt_pacing_leeway ||
713 pkt_tx_time == FQ_INVALID_TX_TS) {
714 return TRUE;
715 }
716
717 /*
718 * Ignore the tx time if it's scheduled too far in the future
719 */
720 if (pkt_tx_time > max_pkt_pacing_interval + now) {
721 fq_cl->fcl_stat.fcl_ignore_tx_time++;
722 return TRUE;
723 }
724
725 ASSERT(pkt_tx_time != FQ_INVALID_TX_TS);
726 KDBG(AQM_KTRACE_TX_NOT_READY, fq->fq_flowhash,
727 AQM_KTRACE_FQ_GRP_SC_IDX(fq), now, pkt_tx_time);
728 return FALSE;
729 }
730
731 void
732 fq_codel_dq_legacy(void *fqs_p, void *fq_p, pktsched_pkt_t *pkt, uint64_t now)
733 {
734 fq_if_t *fqs = (fq_if_t *)fqs_p;
735 fq_t *fq = (fq_t *)fq_p;
736 fq_if_classq_t *fq_cl = &FQ_CLASSQ(fq);
737 int64_t qdelay = 0;
738 volatile uint32_t *__single pkt_flags;
739 uint64_t *__single pkt_timestamp, pkt_tx_time = 0, pacing_delay = 0;
740 uint64_t fq_min_delay_threshold = FQ_TARGET_DELAY(fq);
741 uint8_t pkt_flowsrc;
742 boolean_t l4s_pkt;
743
744 fq_getq_flow_internal(fqs, fq, pkt);
745 if (pkt->pktsched_ptype == QP_INVALID) {
746 VERIFY(pkt->pktsched_pkt_mbuf == NULL);
747 return;
748 }
749
750 pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, &pkt_flowsrc,
751 NULL, NULL, &pkt_tx_time);
752 l4s_pkt = pktsched_is_pkt_l4s(pkt);
753 if (fq_codel_enable_pacing && fq_codel_enable_l4s) {
754 if (pkt_tx_time > *pkt_timestamp) {
755 pacing_delay = pkt_tx_time - *pkt_timestamp;
756 fq_cl->fcl_stat.fcl_paced_pkts++;
757 DTRACE_SKYWALK3(aqm__pacing__delta, uint64_t, now - pkt_tx_time,
758 fq_if_t *, fqs, fq_t *, fq);
759 }
760 #if (DEVELOPMENT || DEBUG)
761 else if (pkt_tx_time != 0) {
762 DTRACE_SKYWALK5(aqm__miss__pacing__delay, uint64_t, *pkt_timestamp,
763 uint64_t, pkt_tx_time, uint64_t, now, fq_if_t *,
764 fqs, fq_t *, fq);
765 }
766 #endif // (DEVELOPMENT || DEBUG)
767 }
768
769 /* this will compute qdelay in nanoseconds */
770 if (now > *pkt_timestamp) {
771 qdelay = now - *pkt_timestamp;
772 }
773
774 /* Update min/max/avg qdelay for the respective class */
775 if (fq_cl->fcl_stat.fcl_min_qdelay == 0 ||
776 (qdelay > 0 && (u_int64_t)qdelay < fq_cl->fcl_stat.fcl_min_qdelay)) {
777 fq_cl->fcl_stat.fcl_min_qdelay = qdelay;
778 }
779
780 if (fq_cl->fcl_stat.fcl_max_qdelay == 0 ||
781 (qdelay > 0 && (u_int64_t)qdelay > fq_cl->fcl_stat.fcl_max_qdelay)) {
782 fq_cl->fcl_stat.fcl_max_qdelay = qdelay;
783 }
784
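	/*
	 * Maintain a running average of the queueing delay for this class:
	 *   avg_new = (avg_old * num_dequeues + qdelay) / (num_dequeues + 1)
	 * The arithmetic is overflow-checked; on overflow the dequeue counters
	 * are reset and the average restarts from the current sample.
	 */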
785 uint64_t num_dequeues = fq_cl->fcl_stat.fcl_dequeue;
786
787 if (num_dequeues == 0) {
788 fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
789 } else if (qdelay > 0) {
790 uint64_t res = 0;
791 if (os_add_overflow(num_dequeues, 1, &res)) {
792 /* Reset the dequeue num and dequeue bytes */
793 fq_cl->fcl_stat.fcl_dequeue = num_dequeues = 0;
794 fq_cl->fcl_stat.fcl_dequeue_bytes = 0;
795 fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
796 os_log_info(OS_LOG_DEFAULT, "%s: dequeue num overflow, "
797 "flow: 0x%x, iface: %s", __func__, fq->fq_flowhash,
798 if_name(fqs->fqs_ifq->ifcq_ifp));
799 } else {
800 uint64_t product = 0;
801 if (os_mul_overflow(fq_cl->fcl_stat.fcl_avg_qdelay,
802 num_dequeues, &product) || os_add_overflow(product, qdelay, &res)) {
803 fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
804 } else {
805 fq_cl->fcl_stat.fcl_avg_qdelay = res /
806 (num_dequeues + 1);
807 }
808 }
809 }
810
811 fq->fq_pkts_since_last_report++;
812 if (fq_codel_enable_l4s && l4s_pkt) {
813 /*
814 		 * A safeguard to make sure that L4S does not build a huge queue
815 		 * if we encounter unexpected problems (e.g., ACKs not arriving in
816 		 * a timely manner due to congestion on the reverse path).
817 */
818 fq_min_delay_threshold = l4s_min_delay_threshold;
819
820 if ((l4s_ce_threshold != 0 && qdelay > l4s_ce_threshold + pacing_delay) ||
821 (l4s_ce_threshold == 0 && qdelay > FQ_TARGET_DELAY(fq) + pacing_delay)) {
822 DTRACE_SKYWALK4(aqm__mark__ce, uint64_t, qdelay, uint64_t, pacing_delay,
823 fq_if_t *, fqs, fq_t *, fq);
824 KDBG(AQM_KTRACE_STATS_FLOW_REPORT_CE, fq->fq_flowhash,
825 AQM_KTRACE_FQ_GRP_SC_IDX(fq), qdelay, pacing_delay);
826 /*
827 * The packet buffer that pktsched_mark_ecn writes to can be pageable.
828 * Since it is not safe to write to pageable memory while preemption
829 			 * is disabled, convert the spin lock into a mutex.
830 */
831 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
832 if (__improbable(l4s_local_ce_report != 0) &&
833 (*pkt_flags & PKTF_FLOW_ADV) != 0 &&
834 fq_if_report_congestion(fqs, pkt, 0, 1, fq->fq_pkts_since_last_report)) {
835 fq->fq_pkts_since_last_report = 0;
836 fq_cl->fcl_stat.fcl_ce_reported++;
837 } else if (pktsched_mark_ecn(pkt) == 0) {
838 fq_cl->fcl_stat.fcl_ce_marked++;
839 } else {
840 fq_cl->fcl_stat.fcl_ce_mark_failures++;
841 }
842 }
843 }
844
845 ASSERT(pacing_delay <= INT64_MAX);
846 qdelay = MAX(0, qdelay - (int64_t)pacing_delay);
847 if (fq->fq_min_qdelay == 0 ||
848 (u_int64_t)qdelay < fq->fq_min_qdelay) {
849 fq->fq_min_qdelay = qdelay;
850 }
851
852 if (now >= fq->fq_updatetime) {
853 if (fq->fq_min_qdelay > fq_min_delay_threshold) {
854 if (!FQ_IS_DELAY_HIGH(fq)) {
855 FQ_SET_DELAY_HIGH(fq);
856 }
857 } else {
858 FQ_CLEAR_DELAY_HIGH(fq);
859 }
860 /* Reset measured queue delay and update time */
861 fq->fq_updatetime = now + FQ_UPDATE_INTERVAL(fq);
862 fq->fq_min_qdelay = 0;
863 }
864
865 if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs)) {
866 FQ_CLEAR_OVERWHELMING(fq);
867 }
868 if (!FQ_IS_DELAY_HIGH(fq) || fq_empty(fq, fqs->fqs_ptype)) {
869 FQ_CLEAR_DELAY_HIGH(fq);
870 }
871
872 if ((fq->fq_flags & FQF_FLOWCTL_ON) &&
873 !FQ_IS_DELAY_HIGH(fq) && !FQ_IS_OVERWHELMING(fq)) {
874 fq_if_flow_feedback(fqs, fq, fq_cl);
875 }
876
877 if (fq_empty(fq, fqs->fqs_ptype)) {
878 /* Reset getqtime so that we don't count idle times */
879 fq->fq_getqtime = 0;
880 } else {
881 fq->fq_getqtime = now;
882 }
883 fq_if_is_flow_heavy(fqs, fq);
884
885 *pkt_timestamp = 0;
886 switch (pkt->pktsched_ptype) {
887 case QP_MBUF:
888 *pkt_flags &= ~PKTF_PRIV_GUARDED;
889 break;
890 #if SKYWALK
891 case QP_PACKET:
892 /* sanity check */
893 ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
894 break;
895 #endif /* SKYWALK */
896 default:
897 VERIFY(0);
898 /* NOTREACHED */
899 __builtin_unreachable();
900 }
901 }
902
903 int
904 fq_codel_enq(void *fqs_p, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
905 fq_if_classq_t *fq_cl)
906 {
907 fq_if_t *fqs = (fq_if_t *)fqs_p;
908 int droptype = DTYPE_NODROP, ret = CLASSQEQ_SUCCESS;
909 u_int64_t now;
910 fq_t *fq = NULL;
911 uint64_t *__single pkt_timestamp;
912 volatile uint32_t *__single pkt_flags;
913 uint32_t pkt_flowid, cnt;
914 uint8_t pkt_proto, pkt_flowsrc;
915 fq_tfc_type_t tfc_type = FQ_TFC_C;
916
917 cnt = pkt->pktsched_pcnt;
918 pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
919 &pkt_flowsrc, &pkt_proto, NULL, NULL);
920
921 /*
922 * XXX Not walking the chain to set this flag on every packet.
923 * This flag is only used for debugging. Nothing is affected if it's
924 * not set.
925 */
926 switch (pkt->pktsched_ptype) {
927 case QP_MBUF:
928 /* See comments in <rdar://problem/14040693> */
929 VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
930 break;
931 #if SKYWALK
932 case QP_PACKET:
933 /* sanity check */
934 ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
935 break;
936 #endif /* SKYWALK */
937 default:
938 VERIFY(0);
939 /* NOTREACHED */
940 __builtin_unreachable();
941 }
942
943 if (fq_codel_enable_l4s) {
944 tfc_type = pktsched_is_pkt_l4s(pkt) ? FQ_TFC_L4S : FQ_TFC_C;
945 }
946
947 /*
948 * Timestamps for every packet must be set prior to entering this path.
949 */
950 now = *pkt_timestamp;
951 ASSERT(now > 0);
952
953 /* find the flowq for this packet */
954 fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
955 now, pkt_proto, pkt_flowsrc, tfc_type, true);
956 if (__improbable(fq == NULL)) {
957 DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
958 /* drop the packet if we could not allocate a flow queue */
959 fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
960 return CLASSQEQ_DROP;
961 }
962 VERIFY(fq->fq_group == fq_grp);
963 VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);
964
965 KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
966 AQM_KTRACE_FQ_GRP_SC_IDX(fq),
967 fq->fq_bytes, pktsched_get_pkt_len(pkt));
968
969 /*
970 	 * Skip the dropping part if it's L4S. The flow control or ECN marking
971 	 * decision will be made at dequeue time.
972 */
973 if (fq_codel_enable_l4s && tfc_type == FQ_TFC_L4S) {
974 fq_cl->fcl_stat.fcl_l4s_pkts += cnt;
975 droptype = DTYPE_NODROP;
976 }
977
978 /*
979 * If the queue length hits the queue limit, drop a chain with the
980 * same number of packets from the front of the queue for a flow with
981 * maximum number of bytes. This will penalize heavy and unresponsive
982 * flows. It will also avoid a tail drop.
983 */
984 if (__improbable(droptype == DTYPE_NODROP &&
985 fq_if_at_drop_limit(fqs))) {
986 uint32_t i;
987
988 if (fqs->fqs_large_flow == fq) {
989 /*
990 * Drop from the head of the current fq. Since a
991 * new packet will be added to the tail, it is ok
992 * to leave fq in place.
993 */
994 DTRACE_IP5(large__flow, fq_if_t *, fqs,
995 fq_if_classq_t *, fq_cl, fq_t *, fq,
996 pktsched_pkt_t *, pkt, uint32_t, cnt);
997
998 for (i = 0; i < cnt; i++) {
999 fq_head_drop(fqs, fq);
1000 }
1001 fq_cl->fcl_stat.fcl_drop_overflow += cnt;
1002 /*
1003 * For UDP, flow control it here so that we won't waste too much
1004 * CPU dropping packets.
1005 */
1006 if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
1007 (*pkt_flags & PKTF_FLOW_ADV) &&
1008 (pkt_proto != IPPROTO_TCP) &&
1009 (pkt_proto != IPPROTO_QUIC)) {
1010 if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
1011 fq->fq_flags |= FQF_FLOWCTL_ON;
1012 FQ_SET_OVERWHELMING(fq);
1013 fq_cl->fcl_stat.fcl_overwhelming++;
1014 /* deliver flow control advisory error */
1015 ret = CLASSQEQ_SUCCESS_FC;
1016 }
1017 }
1018 } else {
1019 if (fqs->fqs_large_flow == NULL) {
1020 droptype = DTYPE_FORCED;
1021 fq_cl->fcl_stat.fcl_drop_overflow += cnt;
1022 ret = CLASSQEQ_DROP;
1023
1024 DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
1025 fq_if_classq_t *, fq_cl, fq_t *, fq,
1026 pktsched_pkt_t *, pkt, uint32_t, cnt);
1027
1028 /*
1029 * if this fq was freshly created and there
1030 * is nothing to enqueue, move it to empty list
1031 */
1032 if (fq_empty(fq, fqs->fqs_ptype) &&
1033 !(fq->fq_flags & (FQF_NEW_FLOW |
1034 FQF_OLD_FLOW))) {
1035 fq_if_move_to_empty_flow(fqs, fq_cl,
1036 fq, now);
1037 fq = NULL;
1038 }
1039 } else {
1040 DTRACE_IP5(different__large__flow,
1041 fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
1042 fq_t *, fq, pktsched_pkt_t *, pkt,
1043 uint32_t, cnt);
1044
1045 for (i = 0; i < cnt; i++) {
1046 fq_if_drop_packet(fqs, now);
1047 }
1048 }
1049 }
1050 }
1051
1052 fq_cl->fcl_flags &= ~FCL_PACED;
1053
1054 if (__probable(droptype == DTYPE_NODROP)) {
1055 uint32_t chain_len = pktsched_get_pkt_len(pkt);
1056 int ret_compress = 0;
1057
1058 /*
1059 * We do not compress if we are enqueuing a chain.
1060 * Traversing the chain to look for acks would defeat the
1061 * purpose of batch enqueueing.
1062 */
1063 if (cnt == 1) {
1064 ret_compress = fq_compressor(fqs, fq, fq_cl, pkt);
1065 if (ret_compress == CLASSQEQ_COMPRESSED) {
1066 fq_cl->fcl_stat.fcl_pkts_compressed++;
1067 }
1068 }
1069 DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
1070 fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
1071 fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
1072 pkt->pktsched_ptype);
1073
1074 fq->fq_bytes += chain_len;
1075 fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
1076 fq_cl->fcl_stat.fcl_pkt_cnt += cnt;
1077
1078 /*
1079 * check if this queue will qualify to be the next
1080 * victim queue
1081 */
1082 fq_if_is_flow_heavy(fqs, fq);
1083
1084 if (FQ_CONGESTION_FEEDBACK_CAPABLE(fq) && fq->fq_flowsrc == FLOWSRC_INPCB &&
1085 fq->fq_congestion_cnt > fq->fq_last_congestion_cnt) {
1086 KDBG(AQM_KTRACE_CONGESTION_NOTIFIED,
1087 fq->fq_flowhash, AQM_KTRACE_FQ_GRP_SC_IDX(fq),
1088 fq->fq_bytes, fq->fq_congestion_cnt);
1089 fq_cl->fcl_stat.fcl_congestion_feedback++;
1090 ret = CLASSQEQ_CONGESTED;
1091 }
1092 } else {
1093 DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
1094 return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
1095 }
1096
1097 /*
1098 * If the queue is not currently active, add it to the end of new
1099 * flows list for that service class.
1100 */
1101 if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
1102 VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
1103 STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
1104 fq->fq_flags |= FQF_NEW_FLOW;
1105
1106 fq_cl->fcl_stat.fcl_newflows_cnt++;
1107
1108 fq->fq_deficit = fq_cl->fcl_quantum;
1109 }
1110 fq->fq_last_congestion_cnt = fq->fq_congestion_cnt;
1111
1112 return ret;
1113 }
1114
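/*
 * CoDel "ok to drop" check, analogous to the dodequeue() helper in the
 * RFC 8289 pseudocode: a packet becomes a drop/mark candidate only once the
 * sojourn time has stayed above FQ_TARGET_DELAY for at least one
 * FQ_UPDATE_INTERVAL, tracked via first_above_time.
 */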
1115 static boolean_t
1116 codel_ok_to_drop(pktsched_pkt_t *pkt, fq_t *fq,
1117 struct codel_status *codel_status, uint64_t now)
1118 {
1119 uint64_t *__single pkt_enq_time, sojourn_time;
1120 boolean_t ok_to_drop = false;
1121
1122 if (pkt->pktsched_pcnt == 0) {
1123 codel_status->first_above_time = 0;
1124 return false;
1125 }
1126
1127 pktsched_get_pkt_vars(pkt, NULL, &pkt_enq_time, NULL, NULL, NULL, NULL, NULL);
1128 sojourn_time = 0;
1129 // TODO: handle pacing
1130 VERIFY(now >= *pkt_enq_time);
1131 if (__probable(now > *pkt_enq_time)) {
1132 sojourn_time = now - *pkt_enq_time;
1133 }
1134
1135 if (sojourn_time <= FQ_TARGET_DELAY(fq)) {
1136 codel_status->first_above_time = 0;
1137 goto end;
1138 }
1139 if (codel_status->first_above_time == 0) {
1140 		codel_status->first_above_time = now + FQ_UPDATE_INTERVAL(fq);
1141 } else if (now >= codel_status->first_above_time) {
1142 ok_to_drop = true;
1143 KDBG(AQM_KTRACE_OK_TO_DROP, fq->fq_flowhash, sojourn_time,
1144 *pkt_enq_time, now);
1145 }
1146
1147 end:
1148 /* we shouldn't need to access pkt_enq_time again, clear it now */
1149 *pkt_enq_time = 0;
1150 return ok_to_drop;
1151 }
1152
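/*
 * Fast approximation of 1/sqrt(x): the classic bit-level initial guess
 * (0x5f3759df) followed by one Newton-Raphson refinement. Used by the
 * CoDel control law below to avoid a full floating-point sqrt on the
 * dequeue path.
 */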
1153 static float
1154 fast_inv_sqrt(float number)
1155 {
1156 union {
1157 float f;
1158 uint32_t i;
1159 } conv = { .f = number };
1160 conv.i = 0x5f3759df - (conv.i >> 1);
1161 conv.f *= 1.5F - (number * 0.5F * conv.f * conv.f);
1162 return conv.f;
1163 }
1164
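/*
 * CoDel control law (RFC 8289): the next drop time is
 *   drop_next = t + interval / sqrt(count)
 * so drops become more closely spaced as the consecutive drop count grows.
 */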
1165 static uint64_t
1166 codel_control_law(float inv_sqrt, uint64_t t, uint64_t interval)
1167 {
1168 	/* Drops become more frequent as the drop count increases */
1169 uint64_t val = (uint64_t)(inv_sqrt * interval);
1170 uint64_t result = t + val;
1171 VERIFY(val > 0 && val <= interval);
1172 return result;
1173 }
1174
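/*
 * Account a single packet drop against the interface queue and free the
 * packet, emitting a droptap record when verbose drop tracing is enabled.
 */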
1175 static void
1176 fq_drop_pkt(struct ifclassq *ifcq, struct ifnet *ifp, pktsched_pkt_t *pkt)
1177 {
1178 IFCQ_DROP_ADD(ifcq, 1, pktsched_get_pkt_len(pkt));
1179 if (__improbable(droptap_verbose > 0)) {
1180 pktsched_drop_pkt(pkt, ifp, DROP_REASON_AQM_HIGH_DELAY,
1181 __func__, __LINE__, 0);
1182 } else {
1183 pktsched_free_pkt(pkt);
1184 }
1185 }
1186
1187 void
1188 fq_codel_dq(void *fqs_p, void *fq_p, pktsched_pkt_t *pkt, uint64_t now)
1189 {
1190 fq_if_t *fqs = (fq_if_t *)fqs_p;
1191 fq_t *fq = (fq_t *)fq_p;
1192 fq_if_classq_t *fq_cl = &FQ_CLASSQ(fq);
1193 struct ifnet *ifp = fqs->fqs_ifq->ifcq_ifp;
1194 struct codel_status *status = &fq->codel_status;
1195 struct ifclassq *ifcq = fqs->fqs_ifq;
1196 volatile uint32_t *__single pkt_flags;
1197 uint64_t *__single pkt_timestamp, pkt_tx_time = 0, pacing_delay = 0, pkt_enq_time = 0;
1198 int64_t qdelay = 0;
1199 boolean_t l4s_pkt;
1200 boolean_t ok_to_drop = false;
1201 uint32_t delta = 0;
1202
1203 fq_getq_flow_internal(fqs, fq, pkt);
1204 if (pkt->pktsched_ptype == QP_INVALID) {
1205 VERIFY(pkt->pktsched_pkt_mbuf == NULL);
1206 return;
1207 }
1208
1209 pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
1210 NULL, NULL, &pkt_tx_time);
1211 l4s_pkt = pktsched_is_pkt_l4s(pkt);
1212 pkt_enq_time = *pkt_timestamp;
1213
1214 if (fq_codel_enable_pacing && fq_codel_enable_l4s) {
1215 if (pkt_tx_time > pkt_enq_time) {
1216 pacing_delay = pkt_tx_time - pkt_enq_time;
1217 fq_cl->fcl_stat.fcl_paced_pkts++;
1218 DTRACE_SKYWALK3(aqm__pacing__delta, uint64_t, now - pkt_tx_time,
1219 fq_if_t *, fqs, fq_t *, fq);
1220 }
1221 #if (DEVELOPMENT || DEBUG)
1222 else if (pkt_tx_time != 0) {
1223 DTRACE_SKYWALK5(aqm__miss__pacing__delay, uint64_t, pkt_enq_time,
1224 uint64_t, pkt_tx_time, uint64_t, now, fq_if_t *,
1225 fqs, fq_t *, fq);
1226 }
1227 #endif // (DEVELOPMENT || DEBUG)
1228 }
1229
1230 if (fq_codel_enable_l4s && l4s_pkt) {
1231 /* this will compute qdelay in nanoseconds */
1232 if (now > pkt_enq_time) {
1233 qdelay = now - pkt_enq_time;
1234 }
1235
1236 fq->fq_pkts_since_last_report++;
1237
1238 if ((l4s_ce_threshold != 0 && qdelay > l4s_ce_threshold + pacing_delay) ||
1239 (l4s_ce_threshold == 0 && qdelay > FQ_TARGET_DELAY(fq) + pacing_delay)) {
1240 DTRACE_SKYWALK4(aqm__mark__ce, uint64_t, qdelay, uint64_t, pacing_delay,
1241 fq_if_t *, fqs, fq_t *, fq);
1242 KDBG(AQM_KTRACE_STATS_FLOW_REPORT_CE, fq->fq_flowhash,
1243 AQM_KTRACE_FQ_GRP_SC_IDX(fq), qdelay, pacing_delay);
1244 /*
1245 * The packet buffer that pktsched_mark_ecn writes to can be pageable.
1246 * Since it is not safe to write to pageable memory while preemption
1247 			 * is disabled, convert the spin lock into a mutex.
1248 */
1249 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
1250 if (__improbable(l4s_local_ce_report != 0) &&
1251 (*pkt_flags & PKTF_FLOW_ADV) != 0 &&
1252 fq_if_report_congestion(fqs, pkt, 0, 1, fq->fq_pkts_since_last_report)) {
1253 fq->fq_pkts_since_last_report = 0;
1254 fq_cl->fcl_stat.fcl_ce_reported++;
1255 } else if (pktsched_mark_ecn(pkt) == 0) {
1256 fq_cl->fcl_stat.fcl_ce_marked++;
1257 } else {
1258 fq_cl->fcl_stat.fcl_ce_mark_failures++;
1259 }
1260 }
1261 *pkt_timestamp = 0;
1262 } else {
1263 ok_to_drop = codel_ok_to_drop(pkt, fq, status, now);
1264
1265 if (status->dropping) {
1266 if (!ok_to_drop) {
1267 status->dropping = false;
1268 }
1269 /*
1270 * Time for the next drop. Drop current packet and dequeue
1271 * next. If the dequeue doesn't take us out of dropping
1272 * state, schedule the next drop. A large backlog might
1273 * result in drop rates so high that the next drop should
1274 * happen now, hence the 'while' loop.
1275 */
1276 while (now >= status->drop_next && status->dropping) {
1277 status->count++;
1278 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
1279 KDBG(AQM_KTRACE_CONGESTION_INC, fq->fq_flowhash,
1280 AQM_KTRACE_FQ_GRP_SC_IDX(fq), fq->fq_bytes, 0);
1281 if (FQ_CONGESTION_FEEDBACK_CAPABLE(fq) && (*pkt_flags & PKTF_FLOW_ADV)) {
1282 fq->fq_congestion_cnt++;
1283 /* Like in the case of ECN, return now and dequeue again. */
1284 if (fq->fq_flowsrc == FLOWSRC_CHANNEL) {
1285 fq_if_report_congestion(fqs, pkt, 1, 0, fq->fq_pkts_since_last_report);
1286 fq_cl->fcl_stat.fcl_congestion_feedback++;
1287 }
1288 goto end;
1289 /*
1290 				 * If congestion feedback is not supported,
1291 				 * try ECN first; if that fails, drop the packet.
1292 */
1293 } else if (FQ_IS_ECN_CAPABLE(fq) && pktsched_mark_ecn(pkt) == 0) {
1294 fq_cl->fcl_stat.fcl_ce_marked++;
1295 status->drop_next =
1296 codel_control_law(fast_inv_sqrt(status->count), status->drop_next, FQ_UPDATE_INTERVAL(fq));
1297 KDBG(AQM_KTRACE_PKT_DROP, fq->fq_flowhash,
1298 status->count, status->drop_next, now);
1299 /*
1300 * Since we are not dropping, return now and let the
1301 * caller dequeue again
1302 */
1303 goto end;
1304 } else {
1305 /* Disable ECN marking for this flow if marking fails once */
1306 FQ_CLEAR_ECN_CAPABLE(fq);
1307 fq_drop_pkt(ifcq, ifp, pkt);
1308 fq_cl->fcl_stat.fcl_ce_mark_failures++;
1309 fq_cl->fcl_stat.fcl_high_delay_drop++;
1310
1311 fq_getq_flow_internal(fqs, fq, pkt);
1312 if (!codel_ok_to_drop(pkt, fq, status, now)) {
1313 /* exit dropping state when queuing delay falls below threshold */
1314 status->dropping = false;
1315 } else {
1316 status->drop_next =
1317 codel_control_law(fast_inv_sqrt(status->count), status->drop_next, FQ_UPDATE_INTERVAL(fq));
1318 }
1319 }
1320 KDBG(AQM_KTRACE_PKT_DROP, fq->fq_flowhash,
1321 status->count, status->drop_next, now);
1322 }
1323 /*
1324 * If we get here, we're not in drop state. The 'ok_to_drop'
1325 * return from dodequeue means that the sojourn time has been
1326 * above 'TARGET' for 'INTERVAL', so enter drop state.
1327 */
1328 } else if (ok_to_drop) {
1329 IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
1330 KDBG(AQM_KTRACE_CONGESTION_INC, fq->fq_flowhash,
1331 AQM_KTRACE_FQ_GRP_SC_IDX(fq), fq->fq_bytes, 0);
1332 if (FQ_CONGESTION_FEEDBACK_CAPABLE(fq) && (*pkt_flags & PKTF_FLOW_ADV)) {
1333 fq->fq_congestion_cnt++;
1334 if (fq->fq_flowsrc == FLOWSRC_CHANNEL) {
1335 fq_if_report_congestion(fqs, pkt, 1, 0, fq->fq_pkts_since_last_report);
1336 fq_cl->fcl_stat.fcl_congestion_feedback++;
1337 }
1338 } else if (FQ_IS_ECN_CAPABLE(fq) && pktsched_mark_ecn(pkt) == 0) {
1339 fq_cl->fcl_stat.fcl_ce_marked++;
1340 } else {
1341 FQ_CLEAR_ECN_CAPABLE(fq);
1342 fq_drop_pkt(ifcq, ifp, pkt);
1343 fq_cl->fcl_stat.fcl_ce_mark_failures++;
1344 fq_cl->fcl_stat.fcl_high_delay_drop++;
1345
1346 fq_getq_flow_internal(fqs, fq, pkt);
1347 ok_to_drop = codel_ok_to_drop(pkt, fq, status, now);
1348 }
1349 status->dropping = true;
1350
1351 /*
1352 * If min went above TARGET close to when it last went
1353 * below, assume that the drop rate that controlled the
1354 * queue on the last cycle is a good starting point to
1355 * control it now. ('drop_next' will be at most 'INTERVAL'
1356 * later than the time of the last drop, so 'now - drop_next'
1357 * is a good approximation of the time from the last drop
1358 * until now.)
1359 */
1360 delta = status->count - status->lastcnt;
1361 if (delta > 0 && now - status->drop_next <= 16 * FQ_UPDATE_INTERVAL(fq)) {
1362 status->count = MAX(status->count - 1, 1);
1363 } else {
1364 status->count = 1;
1365 }
1366
1367 status->drop_next =
1368 codel_control_law(fast_inv_sqrt(status->count), now, FQ_UPDATE_INTERVAL(fq));
1369 status->lastcnt = status->count;
1370
1371 KDBG(AQM_KTRACE_PKT_DROP, fq->fq_flowhash,
1372 status->count, status->drop_next, now);
1373 }
1374 }
1375
1376 end:
1377 if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs) ||
1378 fq_empty(fq, fqs->fqs_ptype)) {
1379 FQ_CLEAR_OVERWHELMING(fq);
1380 }
1381 if ((fq->fq_flags & FQF_FLOWCTL_ON) && !FQ_IS_OVERWHELMING(fq)) {
1382 fq_if_flow_feedback(fqs, fq, fq_cl);
1383 }
1384
1385 if (fq_empty(fq, fqs->fqs_ptype)) {
1386 /* Reset getqtime so that we don't count idle times */
1387 fq->fq_getqtime = 0;
1388 fq->codel_status.dropping = false;
1389 } else {
1390 fq->fq_getqtime = now;
1391 }
1392 fq_if_is_flow_heavy(fqs, fq);
1393 }
1394