1 /*
2 * Copyright (c) 2010-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include "tcp_includes.h"
30
31 #include <sys/param.h>
32 #include <sys/kernel.h>
33 #include <sys/sysctl.h>
34
35 #include <net/route.h>
36 #include <netinet/in.h>
37 #include <netinet/in_systm.h>
38 #include <netinet/ip.h>
39 #include <netinet/ip6.h>
40 #include <netinet/ip_var.h>
41
42 /* This file implements an alternate TCP congestion control algorithm
43 * for background transport developed by LEDBAT working group at IETF and
44 * described in draft: draft-ietf-ledbat-congestion-02
45 *
46 * Currently, it also implements LEDBAT++ as described in draft
47 * draft-irtf-iccrg-ledbat-plus-plus-01.
48 */
49
50 #define GAIN_CONSTANT (16)
51 #define DEFER_SLOWDOWN_DURATION (30 * 1000) /* 30s */
52
53 int tcp_ledbat_init(struct tcpcb *tp);
54 int tcp_ledbat_cleanup(struct tcpcb *tp);
55 void tcp_ledbat_cwnd_init(struct tcpcb *tp);
56 void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
57 void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
58 static void ledbat_pp_ack_rcvd(struct tcpcb *tp, uint32_t bytes_acked);
59 void tcp_ledbat_pre_fr(struct tcpcb *tp);
60 void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
61 void tcp_ledbat_after_idle(struct tcpcb *tp);
62 void tcp_ledbat_after_timeout(struct tcpcb *tp);
63 static int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
64 void tcp_ledbat_switch_cc(struct tcpcb *tp);
65
/*
 * Callback table registering LEDBAT as a pluggable TCP congestion
 * control algorithm. The TCP stack invokes these hooks at connection
 * setup/teardown, on ACK receipt, around fast recovery, after idle
 * periods and timeouts, and when a connection switches to this
 * algorithm at runtime.
 */
struct tcp_cc_algo tcp_cc_ledbat = {
	.name = "ledbat",
	.init = tcp_ledbat_init,
	.cleanup = tcp_ledbat_cleanup,
	.cwnd_init = tcp_ledbat_cwnd_init,
	.congestion_avd = tcp_ledbat_congestion_avd,
	.ack_rcvd = tcp_ledbat_ack_rcvd,
	.pre_fr = tcp_ledbat_pre_fr,
	.post_fr = tcp_ledbat_post_fr,
	.after_idle = tcp_ledbat_after_idle,
	.after_timeout = tcp_ledbat_after_timeout,
	.delay_ack = tcp_ledbat_delay_ack,
	.switch_to = tcp_ledbat_switch_cc
};
80
/*
 * Apply a congestion-window change of `update` bytes while enforcing
 * LEDBAT's delay-based yield behavior.
 *
 * If the current RTT is within base_rtt + target_qdelay, the window
 * grows by `update`; otherwise the window shrinks (by `update` for
 * LEDBAT++, by 1/8 of cwnd for classic LEDBAT) so the flow yields to
 * competing traffic. The final window is clamped: classic LEDBAT
 * tethers cwnd to a factor of the flight size, LEDBAT++ caps it at
 * the maximum receive window.
 *
 * `is_incr` indicates the caller's intent; for LEDBAT++ a decrease is
 * only ever requested via this path when delay exceeded the target
 * (asserted below).
 */
static void
update_cwnd(struct tcpcb *tp, uint32_t update, bool is_incr)
{
	uint32_t max_allowed_cwnd = 0, flight_size = 0;
	uint32_t base_rtt = get_base_rtt(tp);
	/* Either the minimum RTT observed this window or the latest
	 * sample, depending on the tcp_use_min_curr_rtt sysctl. */
	uint32_t curr_rtt = tcp_use_min_curr_rtt ? tp->curr_rtt_min :
	    tp->t_rttcur;

	/* If we do not have a good RTT measurement yet, increment
	 * congestion window by the default value.
	 */
	if (base_rtt == 0 || curr_rtt == 0) {
		tp->snd_cwnd += update;
		goto check_max;
	}

	if (curr_rtt <= (base_rtt + target_qdelay)) {
		/*
		 * Delay decreased or remained the same, we can increase
		 * the congestion window according to RFC 3465.
		 *
		 * Move background slow-start threshold to current
		 * congestion window so that the next time (after some idle
		 * period), we can attempt to do slow-start till here if there
		 * is no increase in rtt
		 */
		if (tp->bg_ssthresh < tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
		tp->snd_cwnd += update;
		tp->snd_cwnd = tcp_round_to(tp->snd_cwnd, tp->t_maxseg);
	} else {
		if (tcp_ledbat_plus_plus) {
			/* LEDBAT++ computes its own multiplicative-decrease
			 * amount; this path is never an increase. */
			VERIFY(is_incr == false);
			tp->snd_cwnd -= update;
		} else {
			/* In response to an increase in rtt, reduce the congestion
			 * window by one-eighth. This will help to yield immediately
			 * to a competing stream.
			 */
			uint32_t redwin;

			redwin = tp->snd_cwnd >> 3;
			tp->snd_cwnd -= redwin;
		}

		/* Never shrink below the background slow-start floor. */
		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) {
			tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
		}

		tp->snd_cwnd = tcp_round_to(tp->snd_cwnd, tp->t_maxseg);
		/* Lower background slow-start threshold so that the connection
		 * will go into congestion avoidance phase
		 */
		if (tp->bg_ssthresh > tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
	}
check_max:
	if (!tcp_ledbat_plus_plus) {
		/* Calculate the outstanding flight size and restrict the
		 * congestion window to a factor of flight size.
		 */
		flight_size = tp->snd_max - tp->snd_una;

		max_allowed_cwnd = (tcp_ledbat_allowed_increase * tp->t_maxseg)
		    + (flight_size << tcp_ledbat_tether_shift);
		tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd);
	} else {
		/* LEDBAT++ only caps cwnd at the largest advertisable
		 * receive window. */
		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
	}
}
153
154 static inline void
tcp_ledbat_clear_state(struct tcpcb * tp)155 tcp_ledbat_clear_state(struct tcpcb *tp)
156 {
157 tp->t_ccstate->ledbat_slowdown_events = 0;
158 tp->t_ccstate->ledbat_slowdown_ts = 0;
159 tp->t_ccstate->ledbat_slowdown_begin = 0;
160 tp->t_ccstate->ledbat_md_bytes_acked = 0;
161 }
162
163 int
tcp_ledbat_init(struct tcpcb * tp)164 tcp_ledbat_init(struct tcpcb *tp)
165 {
166 os_atomic_inc(&tcp_cc_ledbat.num_sockets, relaxed);
167 tcp_ledbat_clear_state(tp);
168 return 0;
169 }
170
171 int
tcp_ledbat_cleanup(struct tcpcb * tp)172 tcp_ledbat_cleanup(struct tcpcb *tp)
173 {
174 #pragma unused(tp)
175 os_atomic_dec(&tcp_cc_ledbat.num_sockets, relaxed);
176 return 0;
177 }
178
179 /*
180 * Initialize the congestion window for a connection
181 */
182 void
tcp_ledbat_cwnd_init(struct tcpcb * tp)183 tcp_ledbat_cwnd_init(struct tcpcb *tp)
184 {
185 tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
186 tp->bg_ssthresh = tp->snd_ssthresh;
187 }
188
189 /* Function to handle an in-sequence ack which is fast-path processing
190 * of an in sequence ack in tcp_input function (called as header prediction).
191 * This gets called only during congestion avoidance phase.
192 */
193 void
tcp_ledbat_congestion_avd(struct tcpcb * tp,struct tcphdr * th)194 tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th)
195 {
196 int acked = 0;
197 uint32_t incr = 0;
198
199 acked = BYTES_ACKED(th, tp);
200
201 if (tcp_ledbat_plus_plus) {
202 ledbat_pp_ack_rcvd(tp, acked);
203 } else {
204 tp->t_bytes_acked += acked;
205 if (tp->t_bytes_acked > tp->snd_cwnd) {
206 tp->t_bytes_acked -= tp->snd_cwnd;
207 incr = tp->t_maxseg;
208 }
209
210 if (tp->snd_cwnd < tp->snd_wnd && incr > 0) {
211 update_cwnd(tp, incr, true);
212 }
213 }
214 }
215
216 /*
217 * Compute the denominator
218 * MIN(16, ceil(2 * TARGET / base))
219 */
220 static uint32_t
ledbat_gain(uint32_t base_rtt)221 ledbat_gain(uint32_t base_rtt)
222 {
223 return MIN(GAIN_CONSTANT, tcp_ceil(2 * target_qdelay /
224 (double)base_rtt));
225 }
226
227 /*
228 * Congestion avoidance for ledbat++
229 */
/*
 * LEDBAT++ congestion avoidance, invoked per ACK with `bytes_acked`
 * newly acknowledged bytes.
 *
 * Schedules the next periodic slowdown (9x the previous slowdown's
 * duration), then either additively increases cwnd (delay within
 * target) or multiplicatively decreases it (delay above target),
 * both paced by one adjustment per cwnd worth of acked bytes.
 *
 * `now` is the current time from tcp_globals_now(); the slowdown
 * fields appear to be in milliseconds given DEFER_SLOWDOWN_DURATION's
 * "30s" comment — confirm against tcp_globals_now() units.
 */
static void
ledbat_pp_congestion_avd(struct tcpcb *tp, uint32_t bytes_acked,
    uint32_t base_rtt, uint32_t curr_rtt, uint32_t now)
{
	uint32_t update = 0;
	/*
	 * Set the next slowdown time i.e. 9 times the duration
	 * of previous slowdown except the initial slowdown.
	 */
	if (tp->t_ccstate->ledbat_slowdown_ts == 0) {
		uint32_t slowdown_duration = 0;
		if (tp->t_ccstate->ledbat_slowdown_events > 0) {
			slowdown_duration = now -
			    tp->t_ccstate->ledbat_slowdown_begin;

			if (tp->bg_ssthresh > tp->snd_cwnd) {
				/*
				 * Special case for slowdowns (other than initial)
				 * where cwnd doesn't recover fully to previous
				 * ssthresh
				 */
				slowdown_duration *= 2;
			}
		}
		tp->t_ccstate->ledbat_slowdown_ts = now + (9 * slowdown_duration);
		if (slowdown_duration == 0) {
			/* First slowdown: schedule it 2 RTTs out instead. */
			tp->t_ccstate->ledbat_slowdown_ts += (2 * (tp->t_srtt >> TCP_RTT_SHIFT));
		}
		/* Reset the start */
		tp->t_ccstate->ledbat_slowdown_begin = 0;

		/* On exit slow start due to higher qdelay, cap the ssthresh */
		if (tp->bg_ssthresh > tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
	}

	if (curr_rtt <= base_rtt + target_qdelay) {
		/* Additive increase */
		tp->t_bytes_acked += bytes_acked;
		if (tp->t_bytes_acked >= tp->snd_cwnd) {
			update = tp->t_maxseg;
			tp->t_bytes_acked -= tp->snd_cwnd;
			update_cwnd(tp, update, true);
		}
	} else {
		/*
		 * Multiplicative decrease
		 * W -= min(W * (qdelay/target - 1), W/2) (per RTT)
		 * To calculate per bytes acked, it becomes
		 * W -= min((qdelay/target - 1), 1/2) * bytes_acked
		 */
		uint32_t qdelay = curr_rtt > base_rtt ?
		    (curr_rtt - base_rtt) : 0;

		tp->t_ccstate->ledbat_md_bytes_acked += bytes_acked;
		if (tp->t_ccstate->ledbat_md_bytes_acked >= tp->snd_cwnd) {
			/* Decrease is capped at half of cwnd by the MIN(.., 0.5),
			 * so the subtraction inside update_cwnd cannot underflow. */
			update = (uint32_t)(MIN(((double)qdelay / target_qdelay - 1), 0.5) *
			    (double)tp->snd_cwnd);
			tp->t_ccstate->ledbat_md_bytes_acked -= tp->snd_cwnd;
			update_cwnd(tp, update, false);

			if (tp->t_ccstate->ledbat_slowdown_ts != 0) {
				/* As the window has been reduced, defer the slowdown. */
				tp->t_ccstate->ledbat_slowdown_ts = now + DEFER_SLOWDOWN_DURATION;
			}
		}
	}
}
299
300 /*
301 * Different handling for ack received for ledbat++
302 */
/*
 * LEDBAT++ per-ACK processing.
 *
 * Three phases, checked in order:
 *  1. Slowdown period: once the scheduled slowdown time arrives, pin
 *     cwnd to 2 segments for 2 RTTs so the base RTT can be re-measured.
 *  2. Modified slow start: while cwnd is below bg_ssthresh and queuing
 *     delay is acceptable, grow cwnd by bytes_acked scaled down by the
 *     dynamic gain from ledbat_gain().
 *  3. Otherwise, congestion avoidance via ledbat_pp_congestion_avd().
 */
static void
ledbat_pp_ack_rcvd(struct tcpcb *tp, uint32_t bytes_acked)
{
	uint32_t update = 0;
	const uint32_t base_rtt = get_base_rtt(tp);
	const uint32_t curr_rtt = tcp_use_min_curr_rtt ? tp->curr_rtt_min :
	    tp->t_rttcur;
	/* Slow-start exits early at 3/4 of the target queuing delay. */
	const uint32_t ss_target = (uint32_t)(3 * target_qdelay / 4);
	struct tcp_globals *globals = tcp_get_globals(tp);

	/*
	 * Slowdown period - first slowdown
	 * is 2RTT after we exit initial slow start.
	 * Subsequent slowdowns are after 9 times the
	 * previous slow down durations.
	 */
	if (tp->t_ccstate->ledbat_slowdown_ts != 0 &&
	    tcp_globals_now(globals) >= tp->t_ccstate->ledbat_slowdown_ts) {
		if (tp->t_ccstate->ledbat_slowdown_begin == 0) {
			/* First ACK of this slowdown: record its start. */
			tp->t_ccstate->ledbat_slowdown_begin = tcp_globals_now(globals);
			tp->t_ccstate->ledbat_slowdown_events++;
		}
		if (tcp_globals_now(globals) < tp->t_ccstate->ledbat_slowdown_ts +
		    (2 * (tp->t_srtt >> TCP_RTT_SHIFT))) {
			// Set cwnd to 2 packets and return
			if (tp->snd_cwnd > bg_ss_fltsz * tp->t_maxseg) {
				/* Remember where to slow-start back to afterwards. */
				if (tp->bg_ssthresh < tp->snd_cwnd) {
					tp->bg_ssthresh = tp->snd_cwnd;
				}
				tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
				/* Reset total bytes acked */
				tp->t_bytes_acked = 0;
			}
			return;
		}
	}

	if (curr_rtt == 0 || base_rtt == 0) {
		/* No usable RTT samples yet: grow by the acked bytes,
		 * capped at the initial-window burst limit. */
		update = MIN(bytes_acked, TCP_CC_CWND_INIT_PKTS *
		    tp->t_maxseg);
		update_cwnd(tp, update, true);
	} else if (tp->snd_cwnd < tp->bg_ssthresh &&
	    ((tp->t_ccstate->ledbat_slowdown_events > 0 &&
	    curr_rtt <= (base_rtt + target_qdelay)) ||
	    curr_rtt <= (base_rtt + ss_target))) {
		/*
		 * Modified slow start with a dynamic GAIN
		 * If the queuing delay is larger than 3/4 of the target
		 * delay, exit slow start, iff, it is the initial slow start.
		 * After the initial slow start, during CA, window growth
		 * will be bound by ssthresh.
		 */
		tp->t_bytes_acked += bytes_acked;
		uint32_t gain_factor = ledbat_gain(base_rtt);
		if (tp->t_bytes_acked >= tp->t_maxseg * gain_factor) {
			update = MIN(tp->t_bytes_acked / gain_factor,
			    TCP_CC_CWND_INIT_PKTS * tp->t_maxseg);
			tp->t_bytes_acked = 0;
			update_cwnd(tp, update, true);
		}

		/* Reset the next slowdown timestamp */
		if (tp->t_ccstate->ledbat_slowdown_ts != 0) {
			tp->t_ccstate->ledbat_slowdown_ts = 0;
		}
	} else {
		/* Congestion avoidance */
		ledbat_pp_congestion_avd(tp, bytes_acked, base_rtt, curr_rtt, tcp_globals_now(globals));
	}
}
373
374 /* Function to process an ack.
375 */
/*
 * Slow-path ACK processing. For LEDBAT++ this delegates entirely to
 * ledbat_pp_ack_rcvd(); classic LEDBAT applies RFC 3465 Appropriate
 * Byte Counting against the background slow-start threshold.
 */
void
tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th)
{
	/*
	 * RFC 3465 - Appropriate Byte Counting.
	 *
	 * If the window is currently less than ssthresh,
	 * open the window by the number of bytes ACKed by
	 * the last ACK, however clamp the window increase
	 * to an upper limit "L".
	 *
	 * In congestion avoidance phase, open the window by
	 * one segment each time "bytes_acked" grows to be
	 * greater than or equal to the congestion window.
	 */

	uint32_t cw = tp->snd_cwnd;
	uint32_t incr = tp->t_maxseg;
	uint32_t acked = 0;

	acked = BYTES_ACKED(th, tp);

	if (tcp_ledbat_plus_plus) {
		ledbat_pp_ack_rcvd(tp, acked);
		return;
	}

	tp->t_bytes_acked += acked;

	if (cw >= tp->bg_ssthresh) {
		/* congestion-avoidance */
		if (tp->t_bytes_acked < cw) {
			/* No need to increase yet. */
			incr = 0;
		}
	} else {
		/*
		 * If the user explicitly enables RFC3465
		 * use 2*SMSS for the "L" param. Otherwise
		 * use the more conservative 1*SMSS.
		 *
		 * (See RFC 3465 2.3 Choosing the Limit)
		 */
		u_int abc_lim;

		/* snd_nxt == snd_max means no retransmitted data is
		 * outstanding, so the more aggressive limit is safe. */
		abc_lim = (tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;

		incr = ulmin(acked, abc_lim);
	}
	/* Consume one cwnd's worth of the byte counter regardless of
	 * which phase produced the increase. */
	if (tp->t_bytes_acked >= cw) {
		tp->t_bytes_acked -= cw;
	}
	if (incr > 0) {
		update_cwnd(tp, incr, true);
	}
}
432
433 void
tcp_ledbat_pre_fr(struct tcpcb * tp)434 tcp_ledbat_pre_fr(struct tcpcb *tp)
435 {
436 uint32_t win = min(tp->snd_wnd, tp->snd_cwnd);
437
438 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
439 tp->t_lossflightsize = tp->snd_max - tp->snd_una;
440 win = max(tp->t_pipeack, tp->t_lossflightsize);
441 } else {
442 tp->t_lossflightsize = 0;
443 }
444
445 win = win / 2;
446 win = tcp_round_to(win, tp->t_maxseg);
447 if (win < 2 * tp->t_maxseg) {
448 win = 2 * tp->t_maxseg;
449 }
450 tp->snd_ssthresh = win;
451 if (tp->bg_ssthresh > tp->snd_ssthresh) {
452 tp->bg_ssthresh = tp->snd_ssthresh;
453 }
454
455 tcp_cc_resize_sndbuf(tp);
456 }
457
458 void
tcp_ledbat_post_fr(struct tcpcb * tp,struct tcphdr * th)459 tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th)
460 {
461 int32_t ss;
462
463 if (th) {
464 ss = tp->snd_max - th->th_ack;
465 } else {
466 ss = tp->snd_max - tp->snd_una;
467 }
468
469 /*
470 * Complete ack. Inflate the congestion window to
471 * ssthresh and exit fast recovery.
472 *
473 * Window inflation should have left us with approx.
474 * snd_ssthresh outstanding data. But in case we
475 * would be inclined to send a burst, better to do
476 * it via the slow start mechanism.
477 *
478 * If the flight size is zero, then make congestion
479 * window to be worth at least 2 segments to avoid
480 * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05).
481 */
482 if (ss < (int32_t)tp->snd_ssthresh) {
483 tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg;
484 } else {
485 tp->snd_cwnd = tp->snd_ssthresh;
486 }
487 tp->t_bytes_acked = 0;
488 tp->t_ccstate->ledbat_md_bytes_acked = 0;
489 }
490
491 /*
492 * Function to handle connections that have been idle for
493 * some time. Slow start to get ack "clock" running again.
494 * Clear base history after idle time.
495 */
496 void
tcp_ledbat_after_idle(struct tcpcb * tp)497 tcp_ledbat_after_idle(struct tcpcb *tp)
498 {
499 tcp_ledbat_clear_state(tp);
500 /* Reset the congestion window */
501 tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
502 tp->t_bytes_acked = 0;
503 tp->t_ccstate->ledbat_md_bytes_acked = 0;
504 }
505
506 /* Function to change the congestion window when the retransmit
507 * timer fires. The behavior is the same as that for best-effort
508 * TCP, reduce congestion window to one segment and start probing
509 * the link using "slow start". The slow start threshold is set
510 * to half of the current window. Lower the background slow start
511 * threshold also.
512 */
513 void
tcp_ledbat_after_timeout(struct tcpcb * tp)514 tcp_ledbat_after_timeout(struct tcpcb *tp)
515 {
516 if (tp->t_state >= TCPS_ESTABLISHED) {
517 tcp_ledbat_clear_state(tp);
518 tcp_ledbat_pre_fr(tp);
519 tp->snd_cwnd = tp->t_maxseg;
520 }
521 }
522
523 /*
524 * Indicate whether this ack should be delayed.
525 * We can delay the ack if:
526 * - our last ack wasn't a 0-sized window.
527 * - the peer hasn't sent us a TH_PUSH data packet: if he did, take this
528 * as a clue that we need to ACK without any delay. This helps higher
529 * level protocols who won't send us more data even if the window is
530 * open because their last "segment" hasn't been ACKed
531 * Otherwise the receiver will ack every other full-sized segment or when the
532 * delayed ack timer fires. This will help to generate better rtt estimates for
533 * the other end if it is a ledbat sender.
534 *
535 */
536
537 static int
tcp_ledbat_delay_ack(struct tcpcb * tp,struct tcphdr * th)538 tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th)
539 {
540 if (tcp_ack_strategy == TCP_ACK_STRATEGY_MODERN) {
541 return tcp_cc_delay_ack(tp, th);
542 } else {
543 if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
544 (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) {
545 return 1;
546 }
547 return 0;
548 }
549 }
550
551 /* Change a connection to use ledbat. First, lower bg_ssthresh value
552 * if it needs to be.
553 */
554 void
tcp_ledbat_switch_cc(struct tcpcb * tp)555 tcp_ledbat_switch_cc(struct tcpcb *tp)
556 {
557 uint32_t cwnd;
558
559 tcp_ledbat_clear_state(tp);
560
561 if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) {
562 tp->bg_ssthresh = tp->snd_ssthresh;
563 }
564
565 cwnd = min(tp->snd_wnd, tp->snd_cwnd);
566
567 if (tp->snd_cwnd > tp->bg_ssthresh) {
568 cwnd = cwnd / tp->t_maxseg;
569 } else {
570 cwnd = cwnd / 2 / tp->t_maxseg;
571 }
572
573 if (cwnd < bg_ss_fltsz) {
574 cwnd = bg_ss_fltsz;
575 }
576
577 tp->snd_cwnd = cwnd * tp->t_maxseg;
578 tp->t_bytes_acked = 0;
579
580 os_atomic_inc(&tcp_cc_ledbat.num_sockets, relaxed);
581 }
582