1 /*
2 * Copyright (c) 2010-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/kernel.h>
31 #include <sys/protosw.h>
32 #include <sys/mcache.h>
33 #include <sys/sysctl.h>
34
35 #include <net/route.h>
36 #include <netinet/in.h>
37 #include <netinet/in_systm.h>
38 #include <netinet/ip.h>
39
40 #include <netinet/ip6.h>
41 #include <netinet/ip_var.h>
42 #include <netinet/tcp.h>
43 #include <netinet/tcp_fsm.h>
44 #include <netinet/tcp_timer.h>
45 #include <netinet/tcp_var.h>
46 #include <netinet/tcpip.h>
47 #include <netinet/tcp_cc.h>
48
49 #include <libkern/OSAtomic.h>
50
51 /* This file implements an alternate TCP congestion control algorithm
52 * for background transport developed by LEDBAT working group at IETF and
53 * described in draft: draft-ietf-ledbat-congestion-02
54 *
55 * Currently, it also implements LEDBAT++ as described in draft
56 * draft-irtf-iccrg-ledbat-plus-plus-01.
57 */
58
59 #define GAIN_CONSTANT (16)
60 #define DEFER_SLOWDOWN_DURATION (30 * 1000) /* 30s */
61
62 int tcp_ledbat_init(struct tcpcb *tp);
63 int tcp_ledbat_cleanup(struct tcpcb *tp);
64 void tcp_ledbat_cwnd_init(struct tcpcb *tp);
65 void tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th);
66 void tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th);
67 static void ledbat_pp_ack_rcvd(struct tcpcb *tp, uint32_t bytes_acked);
68 void tcp_ledbat_pre_fr(struct tcpcb *tp);
69 void tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th);
70 void tcp_ledbat_after_idle(struct tcpcb *tp);
71 void tcp_ledbat_after_timeout(struct tcpcb *tp);
72 static int tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th);
73 void tcp_ledbat_switch_cc(struct tcpcb *tp);
74
/*
 * Callback table that plugs the LEDBAT algorithm into the TCP
 * congestion-control framework. Each entry is invoked by the core
 * TCP code at the corresponding event (init/cleanup on socket
 * attach/detach, ack_rcvd per ACK, pre_fr/post_fr around fast
 * recovery, etc.).
 */
struct tcp_cc_algo tcp_cc_ledbat = {
	.name = "ledbat",
	.init = tcp_ledbat_init,
	.cleanup = tcp_ledbat_cleanup,
	.cwnd_init = tcp_ledbat_cwnd_init,
	.congestion_avd = tcp_ledbat_congestion_avd,
	.ack_rcvd = tcp_ledbat_ack_rcvd,
	.pre_fr = tcp_ledbat_pre_fr,
	.post_fr = tcp_ledbat_post_fr,
	.after_idle = tcp_ledbat_after_idle,
	.after_timeout = tcp_ledbat_after_timeout,
	.delay_ack = tcp_ledbat_delay_ack,
	.switch_to = tcp_ledbat_switch_cc
};
89
90 /* Target queuing delay in milliseconds. This includes the processing
91 * and scheduling delay on both of the end-hosts. A LEDBAT sender tries
92 * to keep queuing delay below this limit. When the queuing delay
93 * goes above this limit, a LEDBAT sender will start reducing the
94 * congestion window.
95 *
 * The LEDBAT draft says that target queue delay MUST be 100 ms for
 * inter-operability.
 * As we are enabling LEDBAT++ by default, we use a lower target. The
 * LEDBAT++ draft recommends 60 ms; the default here is a more
 * aggressive 40 ms (tunable via the bg_target_qdelay sysctl below).
100 */
101 SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_target_qdelay, CTLFLAG_RW | CTLFLAG_LOCKED,
102 int, target_qdelay, 40, "Target queuing delay");
103
104 /* Allowed increase and tether are used to place an upper bound on
105 * congestion window based on the amount of data that is outstanding.
106 * This will limit the congestion window when the amount of data in
107 * flight is little because the application is writing to the socket
 * intermittently and is preventing the connection from becoming idle.
109 *
110 * max_allowed_cwnd = allowed_increase + (tether * flight_size)
111 * cwnd = min(cwnd, max_allowed_cwnd)
112 *
113 * 'Allowed_increase' parameter is set to 8. If the flight size is zero, then
114 * we want the congestion window to be at least 8 packets to reduce the
115 * delay induced by delayed ack. This helps when the receiver is acking
116 * more than 2 packets at a time (stretching acks for better performance).
117 *
 * 'Tether' is set to 2 (a tether_shift of 1). We do not want this to
 * limit the growth of cwnd during slow-start.
120 */
121 SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_allowed_increase, CTLFLAG_RW | CTLFLAG_LOCKED,
122 int, allowed_increase, 8,
123 "Additive constant used to calculate max allowed congestion window");
124
125 /* Left shift for cwnd to get tether value of 2 */
126 SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_tether_shift, CTLFLAG_RW | CTLFLAG_LOCKED,
127 int, tether_shift, 1, "Tether shift for max allowed congestion window");
128
129 /* Start with an initial window of 2. This will help to get more accurate
130 * minimum RTT measurement in the beginning. It will help to probe
131 * the path slowly and will not add to the existing delay if the path is
132 * already congested. Using 2 packets will reduce the delay induced by delayed-ack.
133 */
134 SYSCTL_SKMEM_TCP_INT(OID_AUTO, bg_ss_fltsz, CTLFLAG_RW | CTLFLAG_LOCKED,
135 uint32_t, bg_ss_fltsz, 2, "Initial congestion window for background transport");
136
/* Master switch: when non-zero (the default), use the LEDBAT++ variant
 * (modified slow start, multiplicative decrease, periodic slowdowns)
 * instead of classic LEDBAT throughout this file.
 */
SYSCTL_SKMEM_TCP_INT(OID_AUTO, ledbat_plus_plus, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_ledbat_plus_plus, 1, "Use LEDBAT++");
139
140 extern int rtt_samples_per_slot;
141
/*
 * Apply a congestion-window change of "update" bytes.
 *
 * If the current queuing delay (curr_rtt - base_rtt) is within the
 * target, the window grows by "update" bytes; otherwise it shrinks
 * (by "update" for LEDBAT++, by cwnd/8 for classic LEDBAT) but never
 * below bg_ss_fltsz segments. Finally the window is capped: classic
 * LEDBAT ties it to the outstanding flight size, LEDBAT++ to the
 * maximum receive window. "is_incr" states the caller's intent and is
 * only consumed by the assert on DEVELOPMENT/DEBUG kernels.
 */
#if DEVELOPMENT || DEBUG
static void
update_cwnd(struct tcpcb *tp, uint32_t update, bool is_incr)
#else
static void
update_cwnd(struct tcpcb *tp, uint32_t update, __unused bool is_incr)
#endif
{
	uint32_t max_allowed_cwnd = 0, flight_size = 0;
	uint32_t base_rtt = get_base_rtt(tp);
	/* Current RTT sample: either the minimum seen recently or the
	 * latest measurement, depending on the tcp_use_min_curr_rtt knob. */
	uint32_t curr_rtt = tcp_use_min_curr_rtt ? tp->curr_rtt_min :
	    tp->t_rttcur;

	/* If we do not have a good RTT measurement yet, increment
	 * congestion window by the default value.
	 */
	if (base_rtt == 0 || curr_rtt == 0) {
		tp->snd_cwnd += update;
		goto check_max;
	}

	if (curr_rtt <= (base_rtt + target_qdelay)) {
		/*
		 * Delay decreased or remained the same, we can increase
		 * the congestion window according to RFC 3465.
		 *
		 * Move background slow-start threshold to current
		 * congestion window so that the next time (after some idle
		 * period), we can attempt to do slow-start till here if there
		 * is no increase in rtt
		 */
		if (tp->bg_ssthresh < tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
		tp->snd_cwnd += update;
		tp->snd_cwnd = tcp_round_to(tp->snd_cwnd, tp->t_maxseg);
	} else {
		if (tcp_ledbat_plus_plus) {
			/* LEDBAT++ callers compute the decrease amount
			 * themselves and must not ask for an increase here. */
			assert(is_incr == false);
			tp->snd_cwnd -= update;
		} else {
			/* In response to an increase in rtt, reduce the congestion
			 * window by one-eighth. This will help to yield immediately
			 * to a competing stream.
			 */
			uint32_t redwin;

			redwin = tp->snd_cwnd >> 3;
			tp->snd_cwnd -= redwin;
		}

		/* Never shrink below the background initial window. */
		if (tp->snd_cwnd < bg_ss_fltsz * tp->t_maxseg) {
			tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
		}

		tp->snd_cwnd = tcp_round_to(tp->snd_cwnd, tp->t_maxseg);
		/* Lower background slow-start threshold so that the connection
		 * will go into congestion avoidance phase
		 */
		if (tp->bg_ssthresh > tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
	}
check_max:
	if (!tcp_ledbat_plus_plus) {
		/* Calculate the outstanding flight size and restrict the
		 * congestion window to a factor of flight size.
		 */
		flight_size = tp->snd_max - tp->snd_una;

		max_allowed_cwnd = (allowed_increase * tp->t_maxseg)
		    + (flight_size << tether_shift);
		tp->snd_cwnd = min(tp->snd_cwnd, max_allowed_cwnd);
	} else {
		/* LEDBAT++: only bound by the largest advertisable window. */
		tp->snd_cwnd = min(tp->snd_cwnd, TCP_MAXWIN << tp->snd_scale);
	}
}
219
220 static inline void
tcp_ledbat_clear_state(struct tcpcb * tp)221 tcp_ledbat_clear_state(struct tcpcb *tp)
222 {
223 tp->t_ccstate->ledbat_slowdown_events = 0;
224 tp->t_ccstate->ledbat_slowdown_ts = 0;
225 tp->t_ccstate->ledbat_slowdown_begin = 0;
226 tp->t_ccstate->ledbat_md_bytes_acked = 0;
227 }
228
229 int
tcp_ledbat_init(struct tcpcb * tp)230 tcp_ledbat_init(struct tcpcb *tp)
231 {
232 OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
233 tcp_ledbat_clear_state(tp);
234 return 0;
235 }
236
/*
 * Detach LEDBAT from a connection: drop the global socket count.
 * The per-connection state needs no teardown. Always returns 0.
 */
int
tcp_ledbat_cleanup(struct tcpcb *tp)
{
#pragma unused(tp)
	OSDecrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
	return 0;
}
244
245 /*
246 * Initialize the congestion window for a connection
247 */
248 void
tcp_ledbat_cwnd_init(struct tcpcb * tp)249 tcp_ledbat_cwnd_init(struct tcpcb *tp)
250 {
251 tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
252 tp->bg_ssthresh = tp->snd_ssthresh;
253 }
254
255 /* Function to handle an in-sequence ack which is fast-path processing
256 * of an in sequence ack in tcp_input function (called as header prediction).
257 * This gets called only during congestion avoidance phase.
258 */
259 void
tcp_ledbat_congestion_avd(struct tcpcb * tp,struct tcphdr * th)260 tcp_ledbat_congestion_avd(struct tcpcb *tp, struct tcphdr *th)
261 {
262 int acked = 0;
263 u_int32_t incr = 0;
264
265 acked = BYTES_ACKED(th, tp);
266
267 if (tcp_ledbat_plus_plus) {
268 ledbat_pp_ack_rcvd(tp, acked);
269 } else {
270 tp->t_bytes_acked += acked;
271 if (tp->t_bytes_acked > tp->snd_cwnd) {
272 tp->t_bytes_acked -= tp->snd_cwnd;
273 incr = tp->t_maxseg;
274 }
275
276 if (tp->snd_cwnd < tp->snd_wnd && incr > 0) {
277 update_cwnd(tp, incr, true);
278 }
279 }
280 }
281
282 /*
283 * Compute the denominator
284 * MIN(16, ceil(2 * TARGET / base))
285 */
286 static uint32_t
ledbat_gain(uint32_t base_rtt)287 ledbat_gain(uint32_t base_rtt)
288 {
289 return MIN(GAIN_CONSTANT, tcp_ceil(2 * target_qdelay /
290 (double)base_rtt));
291 }
292
/*
 * Congestion avoidance for ledbat++.
 *
 * Called per ACK while in congestion avoidance (both non-zero
 * base_rtt and curr_rtt are guaranteed by the caller). First, if no
 * slowdown is scheduled, schedule the next one; then either
 * additively increase cwnd (one segment per cwnd of acked bytes) when
 * queuing delay is within target, or multiplicatively decrease it in
 * proportion to how far the delay exceeds the target.
 */
static void
ledbat_pp_congestion_avd(struct tcpcb *tp, uint32_t bytes_acked,
    uint32_t base_rtt, uint32_t curr_rtt)
{
	uint32_t update = 0;
	/*
	 * Set the next slowdown time i.e. 9 times the duration
	 * of previous slowdown except the initial slowdown.
	 */
	if (tp->t_ccstate->ledbat_slowdown_ts == 0) {
		uint32_t slowdown_duration = 0;
		if (tp->t_ccstate->ledbat_slowdown_events > 0) {
			slowdown_duration = tcp_now -
			    tp->t_ccstate->ledbat_slowdown_begin;

			if (tp->bg_ssthresh > tp->snd_cwnd) {
				/*
				 * Special case for slowdowns (other than initial)
				 * where cwnd doesn't recover fully to previous
				 * ssthresh
				 */
				slowdown_duration *= 2;
			}
		}
		tp->t_ccstate->ledbat_slowdown_ts = tcp_now +
		    (9 * slowdown_duration);
		if (slowdown_duration == 0) {
			/* First slowdown: schedule it 2 RTTs out. */
			tp->t_ccstate->ledbat_slowdown_ts += (2 * (tp->t_srtt >> TCP_RTT_SHIFT));
		}
		/* Reset the start */
		tp->t_ccstate->ledbat_slowdown_begin = 0;

		/* On exit slow start due to higher qdelay, cap the ssthresh */
		if (tp->bg_ssthresh > tp->snd_cwnd) {
			tp->bg_ssthresh = tp->snd_cwnd;
		}
	}

	if (curr_rtt <= base_rtt + target_qdelay) {
		/* Additive increase */
		tp->t_bytes_acked += bytes_acked;
		if (tp->t_bytes_acked >= tp->snd_cwnd) {
			update = tp->t_maxseg;
			tp->t_bytes_acked -= tp->snd_cwnd;
			update_cwnd(tp, update, true);
		}
	} else {
		/*
		 * Multiplicative decrease
		 * W -= min(W * (qdelay/target - 1), W/2) (per RTT)
		 * To calculate per bytes acked, it becomes
		 * W -= min((qdelay/target - 1), 1/2) * bytes_acked
		 */
		uint32_t qdelay = curr_rtt > base_rtt ?
		    (curr_rtt - base_rtt) : 0;

		/* Decrease at most once per cwnd of acked data, tracked
		 * separately from the additive-increase accounting. */
		tp->t_ccstate->ledbat_md_bytes_acked += bytes_acked;
		if (tp->t_ccstate->ledbat_md_bytes_acked >= tp->snd_cwnd) {
			/* qdelay > target here, so the factor is positive and
			 * capped at 1/2, i.e. update <= cwnd/2. */
			update = (uint32_t)(MIN(((double)qdelay / target_qdelay
			    - 1), 0.5) * (double)tp->snd_cwnd);
			tp->t_ccstate->ledbat_md_bytes_acked -= tp->snd_cwnd;
			update_cwnd(tp, update, false);

			if (tp->t_ccstate->ledbat_slowdown_ts != 0) {
				/*
				 * As the window has been reduced,
				 * defer the slowdown.
				 */
				tp->t_ccstate->ledbat_slowdown_ts = tcp_now +
				    DEFER_SLOWDOWN_DURATION;
			}
		}
	}
}
370
/*
 * Different handling for ack received for ledbat++.
 *
 * Dispatches each ACK to one of: (1) the periodic slowdown (collapse
 * cwnd to the initial window for ~2 RTTs), (2) a default increase
 * when no usable RTT measurements exist yet, (3) modified slow start
 * with a delay-dependent gain, or (4) congestion avoidance.
 */
static void
ledbat_pp_ack_rcvd(struct tcpcb *tp, uint32_t bytes_acked)
{
	uint32_t update = 0;
	const uint32_t base_rtt = get_base_rtt(tp);
	const uint32_t curr_rtt = tcp_use_min_curr_rtt ? tp->curr_rtt_min :
	    tp->t_rttcur;
	/* Slow-start exits early once qdelay exceeds 3/4 of the target. */
	const uint32_t ss_target = (uint32_t)(3 * target_qdelay / 4);

	/*
	 * Slowdown period - first slowdown
	 * is 2RTT after we exit initial slow start.
	 * Subsequent slowdowns are after 9 times the
	 * previous slow down durations.
	 */
	if (tp->t_ccstate->ledbat_slowdown_ts != 0 &&
	    tcp_now >= tp->t_ccstate->ledbat_slowdown_ts) {
		if (tp->t_ccstate->ledbat_slowdown_begin == 0) {
			/* First ACK inside the slowdown window: record it. */
			tp->t_ccstate->ledbat_slowdown_begin = tcp_now;
			tp->t_ccstate->ledbat_slowdown_events++;
		}
		if (tcp_now < tp->t_ccstate->ledbat_slowdown_ts +
		    (2 * (tp->t_srtt >> TCP_RTT_SHIFT))) {
			// Set cwnd to 2 packets and return
			if (tp->snd_cwnd > bg_ss_fltsz * tp->t_maxseg) {
				/* Remember the window we will try to slow-start
				 * back to after the slowdown ends. */
				if (tp->bg_ssthresh < tp->snd_cwnd) {
					tp->bg_ssthresh = tp->snd_cwnd;
				}
				tp->snd_cwnd = bg_ss_fltsz * tp->t_maxseg;
				/* Reset total bytes acked */
				tp->t_bytes_acked = 0;
			}
			return;
		}
	}
	if (curr_rtt == 0 || base_rtt == 0) {
		/* No RTT measurement yet: grow conservatively, at most by
		 * the standard initial window per ACK. */
		update = MIN(bytes_acked, TCP_CC_CWND_INIT_PKTS *
		    tp->t_maxseg);
		update_cwnd(tp, update, true);
	} else if (tp->snd_cwnd < tp->bg_ssthresh &&
	    ((tp->t_ccstate->ledbat_slowdown_events > 0 &&
	    curr_rtt <= (base_rtt + target_qdelay)) ||
	    curr_rtt <= (base_rtt + ss_target))) {
		/*
		 * Modified slow start with a dynamic GAIN
		 * If the queuing delay is larger than 3/4 of the target
		 * delay, exit slow start, iff, it is the initial slow start.
		 * After the initial slow start, during CA, window growth
		 * will be bound by ssthresh.
		 */
		tp->t_bytes_acked += bytes_acked;
		uint32_t gain_factor = ledbat_gain(base_rtt);
		if (tp->t_bytes_acked >= tp->t_maxseg * gain_factor) {
			update = MIN(tp->t_bytes_acked / gain_factor,
			    TCP_CC_CWND_INIT_PKTS * tp->t_maxseg);
			tp->t_bytes_acked = 0;
			update_cwnd(tp, update, true);
		}

		/* Reset the next slowdown timestamp */
		if (tp->t_ccstate->ledbat_slowdown_ts != 0) {
			tp->t_ccstate->ledbat_slowdown_ts = 0;
		}
	} else {
		/* Congestion avoidance */
		ledbat_pp_congestion_avd(tp, bytes_acked, base_rtt, curr_rtt);
	}
}
442
/* Function to process an ack (the slow path; the fast path is
 * tcp_ledbat_congestion_avd above). LEDBAT++ has its own handler;
 * classic LEDBAT applies Appropriate Byte Counting below.
 */
void
tcp_ledbat_ack_rcvd(struct tcpcb *tp, struct tcphdr *th)
{
	/*
	 * RFC 3465 - Appropriate Byte Counting.
	 *
	 * If the window is currently less than ssthresh,
	 * open the window by the number of bytes ACKed by
	 * the last ACK, however clamp the window increase
	 * to an upper limit "L".
	 *
	 * In congestion avoidance phase, open the window by
	 * one segment each time "bytes_acked" grows to be
	 * greater than or equal to the congestion window.
	 */

	uint32_t cw = tp->snd_cwnd;
	uint32_t incr = tp->t_maxseg;
	uint32_t acked = 0;

	acked = BYTES_ACKED(th, tp);

	if (tcp_ledbat_plus_plus) {
		ledbat_pp_ack_rcvd(tp, acked);
		return;
	}

	tp->t_bytes_acked += acked;

	if (cw >= tp->bg_ssthresh) {
		/* congestion-avoidance */
		if (tp->t_bytes_acked < cw) {
			/* No need to increase yet. */
			incr = 0;
		}
	} else {
		/*
		 * If the user explicitly enables RFC3465
		 * use 2*SMSS for the "L" param. Otherwise
		 * use the more conservative 1*SMSS.
		 *
		 * (See RFC 3465 2.3 Choosing the Limit)
		 */
		u_int abc_lim;

		/* 2*SMSS only while not retransmitting (snd_nxt == snd_max). */
		abc_lim = (tp->snd_nxt == tp->snd_max) ? incr * 2 : incr;

		incr = ulmin(acked, abc_lim);
	}
	/* Carry the remainder of byte counting into the next window. */
	if (tp->t_bytes_acked >= cw) {
		tp->t_bytes_acked -= cw;
	}
	if (incr > 0) {
		update_cwnd(tp, incr, true);
	}
}
501
502 void
tcp_ledbat_pre_fr(struct tcpcb * tp)503 tcp_ledbat_pre_fr(struct tcpcb *tp)
504 {
505 uint32_t win;
506
507 win = min(tp->snd_wnd, tp->snd_cwnd);
508
509 if (tp->t_flagsext & TF_CWND_NONVALIDATED) {
510 tp->t_lossflightsize = tp->snd_max - tp->snd_una;
511 win = max(tp->t_pipeack, tp->t_lossflightsize);
512 } else {
513 tp->t_lossflightsize = 0;
514 }
515
516 win = win / 2;
517 win = tcp_round_to(win, tp->t_maxseg);
518 if (win < 2 * tp->t_maxseg) {
519 win = 2 * tp->t_maxseg;
520 }
521 tp->snd_ssthresh = win;
522 if (tp->bg_ssthresh > tp->snd_ssthresh) {
523 tp->bg_ssthresh = tp->snd_ssthresh;
524 }
525
526 tcp_cc_resize_sndbuf(tp);
527 }
528
529 void
tcp_ledbat_post_fr(struct tcpcb * tp,struct tcphdr * th)530 tcp_ledbat_post_fr(struct tcpcb *tp, struct tcphdr *th)
531 {
532 int32_t ss;
533
534 if (th) {
535 ss = tp->snd_max - th->th_ack;
536 } else {
537 ss = tp->snd_max - tp->snd_una;
538 }
539
540 /*
541 * Complete ack. Inflate the congestion window to
542 * ssthresh and exit fast recovery.
543 *
544 * Window inflation should have left us with approx.
545 * snd_ssthresh outstanding data. But in case we
546 * would be inclined to send a burst, better to do
547 * it via the slow start mechanism.
548 *
549 * If the flight size is zero, then make congestion
550 * window to be worth at least 2 segments to avoid
551 * delayed acknowledgement (draft-ietf-tcpm-rfc3782-bis-05).
552 */
553 if (ss < (int32_t)tp->snd_ssthresh) {
554 tp->snd_cwnd = max(ss, tp->t_maxseg) + tp->t_maxseg;
555 } else {
556 tp->snd_cwnd = tp->snd_ssthresh;
557 }
558 tp->t_bytes_acked = 0;
559 tp->t_ccstate->ledbat_md_bytes_acked = 0;
560 }
561
562 /*
563 * Function to handle connections that have been idle for
564 * some time. Slow start to get ack "clock" running again.
565 * Clear base history after idle time.
566 */
567 void
tcp_ledbat_after_idle(struct tcpcb * tp)568 tcp_ledbat_after_idle(struct tcpcb *tp)
569 {
570 tcp_ledbat_clear_state(tp);
571 /* Reset the congestion window */
572 tp->snd_cwnd = tp->t_maxseg * bg_ss_fltsz;
573 tp->t_bytes_acked = 0;
574 tp->t_ccstate->ledbat_md_bytes_acked = 0;
575 }
576
577 /* Function to change the congestion window when the retransmit
578 * timer fires. The behavior is the same as that for best-effort
579 * TCP, reduce congestion window to one segment and start probing
580 * the link using "slow start". The slow start threshold is set
581 * to half of the current window. Lower the background slow start
582 * threshold also.
583 */
584 void
tcp_ledbat_after_timeout(struct tcpcb * tp)585 tcp_ledbat_after_timeout(struct tcpcb *tp)
586 {
587 if (tp->t_state >= TCPS_ESTABLISHED) {
588 tcp_ledbat_clear_state(tp);
589 tcp_ledbat_pre_fr(tp);
590 tp->snd_cwnd = tp->t_maxseg;
591 }
592 }
593
594 /*
595 * Indicate whether this ack should be delayed.
596 * We can delay the ack if:
597 * - our last ack wasn't a 0-sized window.
598 * - the peer hasn't sent us a TH_PUSH data packet: if he did, take this
599 * as a clue that we need to ACK without any delay. This helps higher
600 * level protocols who won't send us more data even if the window is
601 * open because their last "segment" hasn't been ACKed
602 * Otherwise the receiver will ack every other full-sized segment or when the
603 * delayed ack timer fires. This will help to generate better rtt estimates for
604 * the other end if it is a ledbat sender.
605 *
606 */
607
608 static int
tcp_ledbat_delay_ack(struct tcpcb * tp,struct tcphdr * th)609 tcp_ledbat_delay_ack(struct tcpcb *tp, struct tcphdr *th)
610 {
611 if (tcp_ack_strategy == TCP_ACK_STRATEGY_MODERN) {
612 return tcp_cc_delay_ack(tp, th);
613 } else {
614 if ((tp->t_flags & TF_RXWIN0SENT) == 0 &&
615 (th->th_flags & TH_PUSH) == 0 && (tp->t_unacksegs == 1)) {
616 return 1;
617 }
618 return 0;
619 }
620 }
621
622 /* Change a connection to use ledbat. First, lower bg_ssthresh value
623 * if it needs to be.
624 */
625 void
tcp_ledbat_switch_cc(struct tcpcb * tp)626 tcp_ledbat_switch_cc(struct tcpcb *tp)
627 {
628 uint32_t cwnd;
629 tcp_ledbat_clear_state(tp);
630
631 if (tp->bg_ssthresh == 0 || tp->bg_ssthresh > tp->snd_ssthresh) {
632 tp->bg_ssthresh = tp->snd_ssthresh;
633 }
634
635 cwnd = min(tp->snd_wnd, tp->snd_cwnd);
636
637 if (tp->snd_cwnd > tp->bg_ssthresh) {
638 cwnd = cwnd / tp->t_maxseg;
639 } else {
640 cwnd = cwnd / 2 / tp->t_maxseg;
641 }
642
643 if (cwnd < bg_ss_fltsz) {
644 cwnd = bg_ss_fltsz;
645 }
646
647 tp->snd_cwnd = cwnd * tp->t_maxseg;
648 tp->t_bytes_acked = 0;
649
650 OSIncrementAtomic((volatile SInt32 *)&tcp_cc_ledbat.num_sockets);
651 }
652