/*
 * Copyright (c) 2016-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

/*
 * The migration of a flow queue between the different states is summarised
 * in the state diagram below. (RFC 8290)
 *
 * +-----------------+                +------------------+
 * |                 |     Empty      |                  |
 * |     Empty       |<---------------+       Old        +----+
 * |                 |                |                  |    |
 * +-------+---------+                +------------------+    |
 *         |                            ^      ^              |Credits
 *         |Arrival                     |      |              |Exhausted
 *         v                            |      |              |
 * +-----------------+                  |      |              |
 * |                 |    Empty or      |      |              |
 * |      New        +------------------+      +--------------+
 * |                 | Credits Exhausted
 * +-----------------+
 *
 * In this implementation of FQ-CODEL, a flow queue is a dynamically
 * allocated object. An active flow queue goes through the above cycle of
 * state transitions very often. To avoid the cost of frequent flow queue
 * object allocation/free, this implementation retains the flow queue object
 * in the [Empty] state on an Empty flow queue list, with an active reference
 * in the flow queue hash table. The flow queue objects on the Empty flow
 * queue list have an associated age and are purged accordingly.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/sockio.h>
#include <sys/systm.h>
#include <sys/syslog.h>
#include <sys/proc.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/sdt.h>
#include <kern/zalloc.h>
#include <netinet/in.h>

#include <net/classq/classq.h>
#include <net/classq/if_classq.h>
#include <net/pktsched/pktsched.h>
#include <net/pktsched/pktsched_fq_codel.h>
#include <net/classq/classq_fq_codel.h>

#include <netinet/tcp_var.h>

static uint32_t flowq_size;                     /* size of flowq */
static struct mcache *flowq_cache = NULL;       /* mcache for flowq */

#define FQ_ZONE_MAX     (32 * 1024)     /* across all interfaces */

#define DTYPE_NODROP    0       /* no drop */
#define DTYPE_FORCED    1       /* a "forced" drop */
#define DTYPE_EARLY     2       /* an "unforced" (early) drop */

static uint32_t pkt_compressor = 1;
static uint64_t l4s_ce_threshold = 0;
#if (DEBUG || DEVELOPMENT)
SYSCTL_NODE(_net_classq, OID_AUTO, flow_q, CTLFLAG_RW | CTLFLAG_LOCKED,
    0, "FQ-CODEL parameters");

SYSCTL_UINT(_net_classq_flow_q, OID_AUTO, pkt_compressor,
    CTLFLAG_RW | CTLFLAG_LOCKED, &pkt_compressor, 0, "enable pkt compression");

SYSCTL_QUAD(_net_classq, OID_AUTO, l4s_ce_threshold,
    CTLFLAG_RW | CTLFLAG_LOCKED, &l4s_ce_threshold,
    "L4S CE threshold");
#endif /* (DEBUG || DEVELOPMENT) */

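/*
 * One-time setup: create the mcache from which all flow queue objects are
 * allocated, and sanity-check the AQM ktrace event codes at compile time.
 */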
void
fq_codel_init(void)
{
	if (flowq_cache != NULL) {
		return;
	}

	flowq_size = sizeof(fq_t);
	flowq_cache = mcache_create("fq.flowq", flowq_size, sizeof(uint64_t),
	    0, MCR_SLEEP);
	if (flowq_cache == NULL) {
		panic("%s: failed to allocate flowq_cache", __func__);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	_CASSERT(AQM_KTRACE_AON_FLOW_HIGH_DELAY == 0x8300004);
	_CASSERT(AQM_KTRACE_AON_THROTTLE == 0x8300008);
	_CASSERT(AQM_KTRACE_AON_FLOW_OVERWHELMING == 0x830000c);
	_CASSERT(AQM_KTRACE_AON_FLOW_DQ_STALL == 0x8300010);

	_CASSERT(AQM_KTRACE_STATS_FLOW_ENQUEUE == 0x8310004);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DEQUEUE == 0x8310008);
	_CASSERT(AQM_KTRACE_STATS_FLOW_CTL == 0x831000c);
	_CASSERT(AQM_KTRACE_STATS_FLOW_ALLOC == 0x8310010);
	_CASSERT(AQM_KTRACE_STATS_FLOW_DESTROY == 0x8310014);
}

void
fq_codel_reap_caches(boolean_t purge)
{
	mcache_reap_now(flowq_cache, purge);
}

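/*
 * Allocate and zero-fill a flow queue object from the mcache, initializing
 * the embedded packet queue according to the packet type this scheduler
 * instance carries (mbuf or, with SKYWALK, kernel packet).
 */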
fq_t *
fq_alloc(classq_pkt_type_t ptype)
{
	fq_t *fq = NULL;
	fq = mcache_alloc(flowq_cache, MCR_SLEEP);
	if (fq == NULL) {
		log(LOG_ERR, "%s: unable to allocate from flowq_cache\n", __func__);
		return NULL;
	}

	bzero(fq, flowq_size);
	if (ptype == QP_MBUF) {
		MBUFQ_INIT(&fq->fq_mbufq);
	}
#if SKYWALK
	else {
		VERIFY(ptype == QP_PACKET);
		KPKTQ_INIT(&fq->fq_kpktq);
	}
#endif /* SKYWALK */
	CLASSQ_PKT_INIT(&fq->fq_dq_head);
	CLASSQ_PKT_INIT(&fq->fq_dq_tail);
	fq->fq_in_dqlist = false;
	return fq;
}

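/*
 * Return a flow queue object to the mcache. The caller must have already
 * drained the queue and taken it off all flow lists; the VERIFYs below
 * assert exactly that.
 */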
void
fq_destroy(fq_t *fq, classq_pkt_type_t ptype)
{
	VERIFY(!fq->fq_in_dqlist);
	VERIFY(fq_empty(fq, ptype));
	VERIFY(!(fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW |
	    FQF_EMPTY_FLOW)));
	VERIFY(fq->fq_bytes == 0);
	mcache_free(flowq_cache, fq);
}

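/*
 * Detect a dequeue stall: a flow with a backlog of at least
 * FQ_MIN_FC_THRESHOLD_BYTES that has not been serviced for a full update
 * interval is flagged as experiencing high delay, which feeds into the
 * flow control and drop decisions on the enqueue path.
 */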
static inline void
fq_detect_dequeue_stall(fq_if_t *fqs, fq_t *flowq, fq_if_classq_t *fq_cl,
    u_int64_t *now)
{
	u_int64_t maxgetqtime, update_interval;
	if (FQ_IS_DELAY_HIGH(flowq) || flowq->fq_getqtime == 0 ||
	    fq_empty(flowq, fqs->fqs_ptype) ||
	    flowq->fq_bytes < FQ_MIN_FC_THRESHOLD_BYTES) {
		return;
	}

	update_interval = FQ_UPDATE_INTERVAL(flowq);
	maxgetqtime = flowq->fq_getqtime + update_interval;
	if ((*now) > maxgetqtime) {
		/*
		 * There was no dequeue for an entire update interval's
		 * worth of time, which means the queue is stalled.
		 */
		FQ_SET_DELAY_HIGH(flowq);
		fq_cl->fcl_stat.fcl_dequeue_stall++;
		os_log_error(OS_LOG_DEFAULT, "%s: dequeue stall num: %d, "
		    "scidx: %d, flow: 0x%x, iface: %s", __func__,
		    fq_cl->fcl_stat.fcl_dequeue_stall, flowq->fq_sc_index,
		    flowq->fq_flowhash, if_name(fqs->fqs_ifq->ifcq_ifp));
		KDBG(AQM_KTRACE_AON_FLOW_DQ_STALL, flowq->fq_flowhash,
		    AQM_KTRACE_FQ_GRP_SC_IDX(flowq), flowq->fq_bytes,
		    (*now) - flowq->fq_getqtime);
	}
}

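/*
 * Drop one packet from the head of the given flow queue, charging the drop
 * to the interface queue statistics. Dropping from the head rather than the
 * tail lets loss-reactive transports notice the loss, and back off, sooner.
 */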
void
fq_head_drop(fq_if_t *fqs, fq_t *fq)
{
	pktsched_pkt_t pkt;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	struct ifclassq *ifq = fqs->fqs_ifq;

	_PKTSCHED_PKT_INIT(&pkt);
	fq_getq_flow_internal(fqs, fq, &pkt);
	if (pkt.pktsched_pkt_mbuf == NULL) {
		return;
	}

	pktsched_get_pkt_vars(&pkt, &pkt_flags, &pkt_timestamp, NULL, NULL,
	    NULL, NULL);

	*pkt_timestamp = 0;
	switch (pkt.pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	IFCQ_DROP_ADD(ifq, 1, pktsched_get_pkt_len(&pkt));
	IFCQ_CONVERT_LOCK(ifq);
	pktsched_free_pkt(&pkt);
}

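/*
 * Opportunistic packet compression: if the packet being enqueued carries a
 * compression generation count that matches the packet currently at the
 * tail of the flow queue, the tail packet has been superseded (typically a
 * pure ACK made obsolete by a newer one). The stale packet is freed and the
 * new packet inherits its timestamp. Returns CLASSQEQ_COMPRESSED when a
 * packet was replaced, 0 otherwise.
 */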
static int
fq_compressor(fq_if_t *fqs, fq_t *fq, fq_if_classq_t *fq_cl,
    pktsched_pkt_t *pkt)
{
	classq_pkt_type_t ptype = fqs->fqs_ptype;
	uint32_t comp_gencnt = 0;
	uint64_t *pkt_timestamp;
	uint64_t old_timestamp = 0;
	uint32_t old_pktlen = 0;
	struct ifclassq *ifq = fqs->fqs_ifq;

	if (__improbable(pkt_compressor == 0)) {
		return 0;
	}

	pktsched_get_pkt_vars(pkt, NULL, &pkt_timestamp, NULL, NULL, NULL,
	    &comp_gencnt);

	if (comp_gencnt == 0) {
		return 0;
	}

	fq_cl->fcl_stat.fcl_pkts_compressible++;

	if (fq_empty(fq, fqs->fqs_ptype)) {
		return 0;
	}

	if (ptype == QP_MBUF) {
		struct mbuf *m = MBUFQ_LAST(&fq->fq_mbufq);

		if (comp_gencnt != m->m_pkthdr.comp_gencnt) {
			return 0;
		}

		/* If we got this far, merge/replace the segment */
		MBUFQ_REMOVE(&fq->fq_mbufq, m);
		old_pktlen = m_pktlen(m);
		old_timestamp = m->m_pkthdr.pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		m_freem(m);
	}
#if SKYWALK
	else {
		struct __kern_packet *kpkt = KPKTQ_LAST(&fq->fq_kpktq);

		if (comp_gencnt != kpkt->pkt_comp_gencnt) {
			return 0;
		}

		/* If we got this far, merge/replace the segment */
		KPKTQ_REMOVE(&fq->fq_kpktq, kpkt);
		old_pktlen = kpkt->pkt_length;
		old_timestamp = kpkt->pkt_timestamp;

		IFCQ_CONVERT_LOCK(fqs->fqs_ifq);
		pp_free_packet(*(struct kern_pbufpool **)(uintptr_t)&
		    (((struct __kern_quantum *)kpkt)->qum_pp),
		    (uint64_t)kpkt);
	}
#endif /* SKYWALK */

	fq->fq_bytes -= old_pktlen;
	fq_cl->fcl_stat.fcl_byte_cnt -= old_pktlen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, old_pktlen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, old_pktlen);

	*pkt_timestamp = old_timestamp;

	return CLASSQEQ_COMPRESSED;
}

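/*
 * Enqueue a packet (or a chain of packets) onto the flow queue selected by
 * the packet's flow ID. This drives the enqueue side of the state machine
 * in the header comment: stall detection, flow control advisories, head
 * drops at the queue limit, optional compression, and finally insertion on
 * the new-flows list with a fresh deficit.
 */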
int
fq_addq(fq_if_t *fqs, fq_if_group_t *fq_grp, pktsched_pkt_t *pkt,
    fq_if_classq_t *fq_cl)
{
	int droptype = DTYPE_NODROP, fc_adv = 0, ret = CLASSQEQ_SUCCESS;
	u_int64_t now;
	fq_t *fq = NULL;
	uint64_t *pkt_timestamp;
	volatile uint32_t *pkt_flags;
	uint32_t pkt_flowid, cnt;
	uint8_t pkt_proto, pkt_flowsrc;
	fq_tfc_type_t tfc_type = FQ_TFC_C;

	cnt = pkt->pktsched_pcnt;
	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, &pkt_flowid,
	    &pkt_flowsrc, &pkt_proto, NULL);

	/*
	 * XXX Not walking the chain to set this flag on every packet.
	 * This flag is only used for debugging. Nothing is affected if it's
	 * not set.
	 */
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		/* See comments in <rdar://problem/14040693> */
		VERIFY(!(*pkt_flags & PKTF_PRIV_GUARDED));
		*pkt_flags |= PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}

	if (ifclassq_enable_l4s) {
		tfc_type = pktsched_is_pkt_l4s(pkt) ? FQ_TFC_L4S : FQ_TFC_C;
	}

	/*
	 * Timestamps for every packet must be set prior to entering this path.
	 */
	now = *pkt_timestamp;
	ASSERT(now > 0);

	/* find the flowq for this packet */
	fq = fq_if_hash_pkt(fqs, fq_grp, pkt_flowid, pktsched_get_pkt_svc(pkt),
	    now, true, tfc_type);
	if (__improbable(fq == NULL)) {
		DTRACE_IP1(memfail__drop, fq_if_t *, fqs);
		/* drop the packet if we could not allocate a flow queue */
		fq_cl->fcl_stat.fcl_drop_memfailure += cnt;
		return CLASSQEQ_DROP;
	}
	VERIFY(fq->fq_group == fq_grp);
	VERIFY(fqs->fqs_ptype == pkt->pktsched_ptype);

	KDBG(AQM_KTRACE_STATS_FLOW_ENQUEUE, fq->fq_flowhash,
	    AQM_KTRACE_FQ_GRP_SC_IDX(fq),
	    fq->fq_bytes, pktsched_get_pkt_len(pkt));

	fq_detect_dequeue_stall(fqs, fq, fq_cl, &now);

	/*
	 * Skip the dropping part if it's L4S. Flow control or ECN marking
	 * decision will be made at dequeue time.
	 */
	if (ifclassq_enable_l4s && tfc_type == FQ_TFC_L4S) {
		fq_cl->fcl_stat.fcl_l4s_pkts++;
		droptype = DTYPE_NODROP;
		goto no_drop;
	}

	/*
	 * If L4S is not enabled, a flow queue should always be classified
	 * as classic.
	 */
	VERIFY(fq->fq_tfc_type == FQ_TFC_C);

	if (__improbable(FQ_IS_DELAY_HIGH(fq) || FQ_IS_OVERWHELMING(fq))) {
		if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
		    (*pkt_flags & PKTF_FLOW_ADV)) {
			fc_adv = 1;
			/*
			 * If the flow is suspended or it is not
			 * TCP/QUIC, drop the chain.
			 */
			if ((pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				droptype = DTYPE_EARLY;
				fq_cl->fcl_stat.fcl_drop_early += cnt;
				IFCQ_DROP_ADD(fqs->fqs_ifq, cnt, pktsched_get_pkt_len(pkt));
			}
			DTRACE_IP6(flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		} else {
			/*
			 * Need to drop packets to make room for the new
			 * ones. Try to drop from the head of the queue
			 * instead of the latest packets.
			 */
			if (!fq_empty(fq, fqs->fqs_ptype)) {
				uint32_t i;

				for (i = 0; i < cnt; i++) {
					fq_head_drop(fqs, fq);
				}
				droptype = DTYPE_NODROP;
			} else {
				droptype = DTYPE_EARLY;
			}
			fq_cl->fcl_stat.fcl_drop_early += cnt;

			DTRACE_IP6(no__flow__adv, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    int, droptype, pktsched_pkt_t *, pkt,
			    uint32_t, cnt);
		}
	}

	/* Set the return code correctly */
	if (__improbable(fc_adv == 1 && droptype != DTYPE_FORCED)) {
		if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
			fq->fq_flags |= FQF_FLOWCTL_ON;
			/* deliver flow control advisory error */
			if (droptype == DTYPE_NODROP) {
				ret = CLASSQEQ_SUCCESS_FC;
			} else {
				/* dropped due to flow control */
				ret = CLASSQEQ_DROP_FC;
			}
		} else {
			/*
			 * if we could not flow control the flow, it is
			 * better to drop
			 */
			droptype = DTYPE_FORCED;
			ret = CLASSQEQ_DROP_FC;
			fq_cl->fcl_stat.fcl_flow_control_fail++;
		}
		DTRACE_IP3(fc__ret, fq_if_t *, fqs, int, droptype, int, ret);
	}

	/*
	 * If the queue length hits the queue limit, drop a chain with the
	 * same number of packets from the front of the queue for the flow
	 * with the maximum number of bytes. This will penalize heavy and
	 * unresponsive flows. It will also avoid a tail drop.
	 */
	if (__improbable(droptype == DTYPE_NODROP &&
	    fq_if_at_drop_limit(fqs))) {
		uint32_t i;

		if (fqs->fqs_large_flow == fq) {
			/*
			 * Drop from the head of the current fq. Since a
			 * new packet will be added to the tail, it is ok
			 * to leave fq in place.
			 */
			DTRACE_IP5(large__flow, fq_if_t *, fqs,
			    fq_if_classq_t *, fq_cl, fq_t *, fq,
			    pktsched_pkt_t *, pkt, uint32_t, cnt);

			for (i = 0; i < cnt; i++) {
				fq_head_drop(fqs, fq);
			}
			fq_cl->fcl_stat.fcl_drop_overflow += cnt;

			/*
			 * TCP and QUIC will react to the loss of the head
			 * dropped pkts and adjust their send rate, so an
			 * explicit flow control advisory is only needed
			 * for other protocols.
			 */
			if ((fq->fq_flags & FQF_FLOWCTL_CAPABLE) &&
			    (*pkt_flags & PKTF_FLOW_ADV) &&
			    (pkt_proto != IPPROTO_TCP) &&
			    (pkt_proto != IPPROTO_QUIC)) {
				if (fq_if_add_fcentry(fqs, pkt, pkt_flowsrc, fq, fq_cl)) {
					fq->fq_flags |= FQF_FLOWCTL_ON;
					FQ_SET_OVERWHELMING(fq);
					fq_cl->fcl_stat.fcl_overwhelming++;
					/* deliver flow control advisory error */
					ret = CLASSQEQ_SUCCESS_FC;
				}
			}
		} else {
			if (fqs->fqs_large_flow == NULL) {
				droptype = DTYPE_FORCED;
				fq_cl->fcl_stat.fcl_drop_overflow += cnt;
				ret = CLASSQEQ_DROP;

				DTRACE_IP5(no__large__flow, fq_if_t *, fqs,
				    fq_if_classq_t *, fq_cl, fq_t *, fq,
				    pktsched_pkt_t *, pkt, uint32_t, cnt);

				/*
				 * if this fq was freshly created and there
				 * is nothing to enqueue, move it to empty list
				 */
				if (fq_empty(fq, fqs->fqs_ptype) &&
				    !(fq->fq_flags & (FQF_NEW_FLOW |
				    FQF_OLD_FLOW))) {
					fq_if_move_to_empty_flow(fqs, fq_cl,
					    fq, now);
					fq = NULL;
				}
			} else {
				DTRACE_IP5(different__large__flow,
				    fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
				    fq_t *, fq, pktsched_pkt_t *, pkt,
				    uint32_t, cnt);

				for (i = 0; i < cnt; i++) {
					fq_if_drop_packet(fqs, now);
				}
			}
		}
	}

no_drop:
	if (__probable(droptype == DTYPE_NODROP)) {
		uint32_t chain_len = pktsched_get_pkt_len(pkt);
		int ret_compress = 0;

		/*
		 * We do not compress if we are enqueuing a chain.
		 * Traversing the chain to look for acks would defeat the
		 * purpose of batch enqueueing.
		 */
		if (cnt == 1) {
			ret_compress = fq_compressor(fqs, fq, fq_cl, pkt);
			if (ret_compress == CLASSQEQ_COMPRESSED) {
				fq_cl->fcl_stat.fcl_pkts_compressed++;
			}
		}
		DTRACE_IP5(fq_enqueue, fq_if_t *, fqs, fq_if_classq_t *, fq_cl,
		    fq_t *, fq, pktsched_pkt_t *, pkt, uint32_t, cnt);
		fq_enqueue(fq, pkt->pktsched_pkt, pkt->pktsched_tail, cnt,
		    pkt->pktsched_ptype);

		fq->fq_bytes += chain_len;
		fq_cl->fcl_stat.fcl_byte_cnt += chain_len;
		fq_cl->fcl_stat.fcl_pkt_cnt += cnt;

		/*
		 * check if this queue will qualify to be the next
		 * victim queue
		 */
		fq_if_is_flow_heavy(fqs, fq);
	} else {
		DTRACE_IP3(fq_drop, fq_if_t *, fqs, int, droptype, int, ret);
		return (ret != CLASSQEQ_SUCCESS) ? ret : CLASSQEQ_DROP;
	}

	/*
	 * If the queue is not currently active, add it to the end of new
	 * flows list for that service class.
	 */
	if ((fq->fq_flags & (FQF_NEW_FLOW | FQF_OLD_FLOW)) == 0) {
		VERIFY(STAILQ_NEXT(fq, fq_actlink) == NULL);
		STAILQ_INSERT_TAIL(&fq_cl->fcl_new_flows, fq, fq_actlink);
		fq->fq_flags |= FQF_NEW_FLOW;

		fq_cl->fcl_stat.fcl_newflows_cnt++;

		fq->fq_deficit = fq_cl->fcl_quantum;
	}
	return ret;
}

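/*
 * Dequeue a single packet from the flow queue and update the byte/packet
 * accounting on the flow, its service class, the interface queue, and the
 * group. This "internal" variant does no delay measurement; callers that
 * need the CoDel bookkeeping use fq_getq_flow() below.
 */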
void
fq_getq_flow_internal(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt)
{
	classq_pkt_t p = CLASSQ_PKT_INITIALIZER(p);
	uint32_t plen;
	fq_if_classq_t *fq_cl;
	struct ifclassq *ifq = fqs->fqs_ifq;

	fq_dequeue(fq, &p, fqs->fqs_ptype);
	if (p.cp_ptype == QP_INVALID) {
		VERIFY(p.cp_mbuf == NULL);
		return;
	}

	pktsched_pkt_encap(pkt, &p);
	plen = pktsched_get_pkt_len(pkt);

	VERIFY(fq->fq_bytes >= plen);
	fq->fq_bytes -= plen;

	fq_cl = &FQ_CLASSQ(fq);
	fq_cl->fcl_stat.fcl_byte_cnt -= plen;
	fq_cl->fcl_stat.fcl_pkt_cnt--;
	IFCQ_DEC_LEN(ifq);
	IFCQ_DEC_BYTES(ifq, plen);

	FQ_GRP_DEC_LEN(fq);
	FQ_GRP_DEC_BYTES(fq, plen);

	/* Reset getqtime so that we don't count idle times */
	if (fq_empty(fq, fqs->fqs_ptype)) {
		fq->fq_getqtime = 0;
	}
}

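/*
 * Dequeue a packet and run the delay-based AQM logic: compute the packet's
 * sojourn time from its enqueue timestamp, update the per-class delay
 * statistics, CE-mark L4S traffic that exceeds its delay threshold, track
 * the per-flow minimum delay against the CoDel target for classic traffic,
 * and clear flow control once the flow has drained back to a healthy state.
 */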
void
fq_getq_flow(fq_if_t *fqs, fq_t *fq, pktsched_pkt_t *pkt, uint64_t now)
{
	fq_if_classq_t *fq_cl;
	int64_t qdelay = 0;
	volatile uint32_t *pkt_flags;
	uint64_t *pkt_timestamp;
	uint8_t pkt_flowsrc;
	boolean_t l4s_pkt;

	fq_getq_flow_internal(fqs, fq, pkt);
	if (pkt->pktsched_ptype == QP_INVALID) {
		VERIFY(pkt->pktsched_pkt_mbuf == NULL);
		return;
	}

	pktsched_get_pkt_vars(pkt, &pkt_flags, &pkt_timestamp, NULL, &pkt_flowsrc,
	    NULL, NULL);
	l4s_pkt = pktsched_is_pkt_l4s(pkt);

	/* this will compute qdelay in nanoseconds */
	if (now > *pkt_timestamp) {
		qdelay = now - *pkt_timestamp;
	}
	fq_cl = &FQ_CLASSQ(fq);

	/* Update min/max/avg qdelay for the respective class */
	if (fq_cl->fcl_stat.fcl_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq_cl->fcl_stat.fcl_min_qdelay)) {
		fq_cl->fcl_stat.fcl_min_qdelay = qdelay;
	}

	if (fq_cl->fcl_stat.fcl_max_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay > fq_cl->fcl_stat.fcl_max_qdelay)) {
		fq_cl->fcl_stat.fcl_max_qdelay = qdelay;
	}

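	/*
	 * Maintain a cumulative moving average of the class queueing delay,
	 *
	 *	avg' = (avg * n + qdelay) / (n + 1)
	 *
	 * where n is the dequeue count so far. Both the incremented count
	 * and the intermediate product are checked for overflow; if either
	 * overflows, the statistics restart from the current sample.
	 */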
	uint64_t num_dequeues = fq_cl->fcl_stat.fcl_dequeue;

	if (num_dequeues == 0) {
		fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
	} else if (qdelay > 0) {
		uint64_t res = 0;
		if (os_add_overflow(num_dequeues, 1, &res)) {
			/* Reset the dequeue num and dequeue bytes */
			fq_cl->fcl_stat.fcl_dequeue = num_dequeues = 0;
			fq_cl->fcl_stat.fcl_dequeue_bytes = 0;
			fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			os_log_info(OS_LOG_DEFAULT, "%s: dequeue num overflow, "
			    "flow: 0x%x, iface: %s", __func__, fq->fq_flowhash,
			    if_name(fqs->fqs_ifq->ifcq_ifp));
		} else {
			uint64_t product = 0;
			if (os_mul_overflow(fq_cl->fcl_stat.fcl_avg_qdelay,
			    num_dequeues, &product) || os_add_overflow(product, qdelay, &res)) {
				fq_cl->fcl_stat.fcl_avg_qdelay = qdelay;
			} else {
				fq_cl->fcl_stat.fcl_avg_qdelay = res /
				    (num_dequeues + 1);
			}
		}
	}

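	/*
	 * For L4S traffic, CE-mark the packet once its queueing delay
	 * exceeds the threshold: the l4s_ce_threshold sysctl override if
	 * set, otherwise the flow's target delay. Marking takes the place
	 * of the classic drop/flow-control path that was skipped at
	 * enqueue time.
	 */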
	if (ifclassq_enable_l4s && l4s_pkt) {
		if ((l4s_ce_threshold != 0 && qdelay > l4s_ce_threshold) ||
		    (l4s_ce_threshold == 0 && qdelay > FQ_TARGET_DELAY(fq))) {
			if (pktsched_mark_ecn(pkt) == 0) {
				fq_cl->fcl_stat.fcl_ce_marked++;
			} else {
				fq_cl->fcl_stat.fcl_ce_mark_failures++;
			}
		}
		/* skip steps not needed for L4S traffic */
		goto out;
	}

	if (fq->fq_min_qdelay == 0 ||
	    (qdelay > 0 && (u_int64_t)qdelay < fq->fq_min_qdelay)) {
		fq->fq_min_qdelay = qdelay;
	}

	if (now >= fq->fq_updatetime) {
		if (fq->fq_min_qdelay > FQ_TARGET_DELAY(fq)) {
			if (!FQ_IS_DELAY_HIGH(fq)) {
				FQ_SET_DELAY_HIGH(fq);
				os_log_error(OS_LOG_DEFAULT,
				    "%s: high delay idx: %d, %llu, flow: 0x%x, "
				    "iface: %s", __func__, fq->fq_sc_index,
				    fq->fq_min_qdelay, fq->fq_flowhash,
				    if_name(fqs->fqs_ifq->ifcq_ifp));
			}
		} else {
			FQ_CLEAR_DELAY_HIGH(fq);
		}
		/* Reset measured queue delay and update time */
		fq->fq_updatetime = now + FQ_UPDATE_INTERVAL(fq);
		fq->fq_min_qdelay = 0;
	}

out:
	if (fqs->fqs_large_flow != fq || !fq_if_almost_at_drop_limit(fqs)) {
		FQ_CLEAR_OVERWHELMING(fq);
	}
	if (!FQ_IS_DELAY_HIGH(fq) || fq_empty(fq, fqs->fqs_ptype)) {
		FQ_CLEAR_DELAY_HIGH(fq);
	}

	if ((fq->fq_flags & FQF_FLOWCTL_ON) &&
	    !FQ_IS_DELAY_HIGH(fq) && !FQ_IS_OVERWHELMING(fq)) {
		fq_if_flow_feedback(fqs, fq, fq_cl);
	}

	if (fq_empty(fq, fqs->fqs_ptype)) {
		/* Reset getqtime so that we don't count idle times */
		fq->fq_getqtime = 0;
	} else {
		fq->fq_getqtime = now;
	}
	fq_if_is_flow_heavy(fqs, fq);

	*pkt_timestamp = 0;
	switch (pkt->pktsched_ptype) {
	case QP_MBUF:
		*pkt_flags &= ~PKTF_PRIV_GUARDED;
		break;
#if SKYWALK
	case QP_PACKET:
		/* sanity check */
		ASSERT((*pkt_flags & ~PKT_F_COMMON_MASK) == 0);
		break;
#endif /* SKYWALK */
	default:
		VERIFY(0);
		/* NOTREACHED */
		__builtin_unreachable();
	}
}