xref: /xnu-11215.1.10/bsd/netinet/tcp_sack.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
/*
 * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *	notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *	notice, this list of conditions and the following disclaimer in the
 *	documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *	must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *	may be used to endorse or promote products derived from this software
 *	without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#define _IP_VHL


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>

#include <kern/zalloc.h>

#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet6/in6_pcb.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_fsm.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/tcpip.h>
#include <netinet/tcp_cache.h>
#include <sys/kdebug.h>

#if IPSEC
#include <netinet6/ipsec.h>
#endif /*IPSEC*/

#include <libkern/OSAtomic.h>

SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_LOCKED,
    int, tcp_do_sack, 1, "Enable/Disable TCP SACK support");
SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_maxholes, CTLFLAG_RW | CTLFLAG_LOCKED,
    static int, tcp_sack_maxholes, 128,
    "Maximum number of TCP SACK holes allowed per connection");

SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_globalmaxholes,
    CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536,
    "Global maximum number of TCP SACK holes");

static SInt32 tcp_sack_globalholes = 0;
SYSCTL_INT(_net_inet_tcp, OID_AUTO, sack_globalholes, CTLFLAG_RD | CTLFLAG_LOCKED,
    &tcp_sack_globalholes, 0,
    "Global number of TCP SACK holes currently allocated");

static KALLOC_TYPE_DEFINE(sack_hole_zone, struct sackhole, NET_KT_DEFAULT);

#define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \
    (SEQ_GT((_sb_)->end, (_sb_)->start) && \
    SEQ_GT((_sb_)->start, (_tp_)->snd_una) && \
    SEQ_GT((_sb_)->start, (_ack_)) && \
    SEQ_LT((_sb_)->start, (_tp_)->snd_max) && \
    SEQ_GT((_sb_)->end, (_tp_)->snd_una) && \
    SEQ_LEQ((_sb_)->end, (_tp_)->snd_max))

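/*
 * Worked example (illustrative values, not from the original source): with
 * snd_una = 1000, snd_max = 9000 and a cumulative ack of 1000, a received
 * block [3000, 5000) passes all six checks above. [500, 1500) fails
 * SEQ_GT(start, snd_una) and [8000, 10000) fails SEQ_LEQ(end, snd_max), so
 * both would be dropped instead of being applied to the scoreboard.
 */
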
/*
 * This function is called upon receipt of new valid data (while not in header
 * prediction mode), and it updates the ordered list of sacks.
 */
void
tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
{
	/*
	 * First reported block MUST be the most recent one.  Subsequent
	 * blocks SHOULD be in the order in which they arrived at the
	 * receiver.  These two conditions make the implementation fully
	 * compliant with RFC 2018.
	 */
	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
	int num_head, num_saved, i;

	/* SACK block for the received segment. */
	head_blk.start = rcv_start;
	head_blk.end = rcv_end;

	/*
	 * Merge updated SACK blocks into head_blk, and
	 * save unchanged SACK blocks into saved_blks[].
	 * num_saved will have the number of the saved SACK blocks.
	 */
	num_saved = 0;
	for (i = 0; i < tp->rcv_numsacks; i++) {
		tcp_seq start = tp->sackblks[i].start;
		tcp_seq end = tp->sackblks[i].end;
		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
			/*
			 * Discard this SACK block.
			 */
		} else if (SEQ_LEQ(head_blk.start, end) &&
		    SEQ_GEQ(head_blk.end, start)) {
			/*
			 * Merge this SACK block into head_blk.
			 * This SACK block itself will be discarded.
			 */
			if (SEQ_GT(head_blk.start, start)) {
				head_blk.start = start;
			}
			if (SEQ_LT(head_blk.end, end)) {
				head_blk.end = end;
			}
		} else {
			/*
			 * Save this SACK block.
			 */
			saved_blks[num_saved].start = start;
			saved_blks[num_saved].end = end;
			num_saved++;
		}
	}

	/*
	 * Update SACK list in tp->sackblks[].
	 */
	num_head = 0;
	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
		/*
		 * The received data segment is an out-of-order segment.
		 * Put head_blk at the top of SACK list.
		 */
		tp->sackblks[0] = head_blk;
		num_head = 1;
		/*
		 * If the number of saved SACK blocks exceeds its limit,
		 * discard the last SACK block.
		 */
		if (num_saved >= MAX_SACK_BLKS) {
			num_saved--;
		}
	}
	if (num_saved > 0) {
		/*
		 * Copy the saved SACK blocks back.
		 */
		bcopy(saved_blks, &tp->sackblks[num_head], sizeof(struct sackblk) * num_saved);
	}

	/* Save the number of SACK blocks. */
	tp->rcv_numsacks = num_head + num_saved;

	/*
	 * If we are requesting SACK recovery, reset the stretch-ack state
	 * so that the connection will generate more ACKs after recovery and
	 * the sender's cwnd will open.
	 */
	if ((tp->t_flags & TF_STRETCHACK) != 0 && tp->rcv_numsacks > 0) {
		tcp_reset_stretch_ack(tp);
	}
	if (tp->rcv_numsacks > 0) {
		tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
	}

#if TRAFFIC_MGT
	if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) {
		reset_acc_iaj(tp);
	}
#endif /* TRAFFIC_MGT */
}

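/*
 * Worked example (illustrative sequence numbers): with rcv_nxt = 100 and
 * sackblks[] = {[200, 300), [500, 600)}, an out-of-order segment [300, 400)
 * yields head_blk = [300, 400), which absorbs the adjacent [200, 300) to
 * become [200, 400); [500, 600) is kept in saved_blks[]. The list ends up
 * as {[200, 400), [500, 600)}, with the most recent block first as RFC 2018
 * requires.
 */
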
/*
 * Delete all receiver-side SACK information.
 */
void
tcp_clean_sackreport(struct tcpcb *tp)
{
	tp->rcv_numsacks = 0;
	bzero(&tp->sackblks[0], sizeof(struct sackblk) * MAX_SACK_BLKS);
}

/*
 * Allocate struct sackhole.
 */
static struct sackhole *
tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
{
	struct sackhole *hole;

	if (tp->snd_numholes >= tcp_sack_maxholes ||
	    tcp_sack_globalholes >= tcp_sack_globalmaxholes) {
		tcpstat.tcps_sack_sboverflow++;
		return NULL;
	}

	hole = zalloc_flags(sack_hole_zone, Z_WAITOK | Z_NOFAIL);

	hole->start = start;
	hole->end = end;
	hole->rxmit = start;

	tp->snd_numholes++;
	OSIncrementAtomic(&tcp_sack_globalholes);

	return hole;
}

/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{
	zfree(sack_hole_zone, hole);

	tp->snd_numholes--;
	OSDecrementAtomic(&tcp_sack_globalholes);
}

/*
 * Insert new SACK hole into scoreboard.
 */
static struct sackhole *
tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
    struct sackhole *after)
{
	struct sackhole *hole;

	/* Allocate a new SACK hole. */
	hole = tcp_sackhole_alloc(tp, start, end);
	if (hole == NULL) {
		return NULL;
	}
	hole->rxmit_start = tcp_now;
	/* Insert the new SACK hole into scoreboard */
	if (after != NULL) {
		TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
	} else {
		TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);
	}

	/* Update SACK hint. */
	if (tp->sackhint.nexthole == NULL) {
		tp->sackhint.nexthole = hole;
	}

	return hole;
}

/*
 * Remove SACK hole from scoreboard.
 */
static void
tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
{
	/* Update SACK hint. */
	if (tp->sackhint.nexthole == hole) {
		tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
	}

	/* Remove this SACK hole. */
	TAILQ_REMOVE(&tp->snd_holes, hole, scblink);

	/* Free this SACK hole. */
	tcp_sackhole_free(tp, hole);
}

/*
 * When a new ack with SACK is received, check if it indicates packet
 * reordering. If there is packet reordering, the socket is marked and
 * the time offset by which the packet was reordered with respect to its
 * closest neighboring packets is computed.
 */
static void
tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s,
    tcp_seq sacked_seq, tcp_seq snd_fack)
{
	int32_t rext = 0, reordered = 0;

	/*
	 * If the SACK hole is past snd_fack, this is from new SACK
	 * information, so we can ignore it.
	 */
	if (SEQ_GT(s->end, snd_fack)) {
		return;
	}
	/*
	 * If there has been a retransmit timeout, then the timestamp on
	 * the SACK segment will be newer. This might lead to a
	 * false-positive. Avoid re-ordering detection in this case.
	 */
	if (tp->t_rxtshift > 0) {
		return;
	}

	/*
	 * Detect reordering from SACK information by checking
	 * if recently sacked data was never retransmitted from this hole.
	 *
	 * First, we look for the byte in the list of retransmitted segments.
	 * That list also contains the segments retransmitted due to RTO/TLP.
	 *
	 * Then, we check the sackhole, whose rxmit field indicates whether
	 * the hole itself was subject to retransmission.
	 */
	if (SEQ_LT(s->rxmit, sacked_seq) &&
	    (tcp_rxtseg_find(tp, sacked_seq - 1, sacked_seq - 1) == NULL)) {
		reordered = 1;
		tcpstat.tcps_avoid_rxmt++;
	}

	if (reordered) {
		if (!(tp->t_flagsext & TF_PKTS_REORDERED)) {
			tp->t_flagsext |= TF_PKTS_REORDERED;
			tcpstat.tcps_detect_reordering++;
		}

		tcpstat.tcps_reordered_pkts++;
		tp->t_reordered_pkts++;

		/*
		 * If reordering is seen on a connection with ECN enabled,
		 * increment the heuristic
		 */
		if (TCP_ECN_ENABLED(tp)) {
			INP_INC_IFNET_STAT(tp->t_inpcb, ecn_fallback_reorder);
			tcpstat.tcps_ecn_fallback_reorder++;
			tcp_heuristic_ecn_aggressive(tp);
		}

		VERIFY(SEQ_GEQ(snd_fack, s->rxmit));

		if (s->rxmit_start > 0) {
			rext = timer_diff(tcp_now, 0, s->rxmit_start, 0);
			if (rext < 0) {
				return;
			}

			/*
			 * We take the maximum reorder window to schedule
			 * DELAYFR timer as that will take care of jitter
			 * on the network path.
			 *
			 * Computing average and standard deviation seems
			 * to cause unnecessary retransmissions when there
			 * is high jitter.
			 *
			 * We set a maximum of SRTT/2 and a minimum of
			 * 10 ms on the reorder window.
			 */
			tp->t_reorderwin = max(tp->t_reorderwin, rext);
			tp->t_reorderwin = min(tp->t_reorderwin,
			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1)));
			tp->t_reorderwin = max(tp->t_reorderwin, 10);
		}
	}
}

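/*
 * Worked example (illustrative values, assuming tcp_now counts
 * milliseconds): if the hole first retransmitted at rxmit_start = 1000 and
 * reordering is detected at tcp_now = 1030, rext = 30. With a smoothed RTT
 * of 100 ms, the clamps above bound the reorder window to [10, 50] ms, so
 * t_reorderwin would end up at least 30 ms.
 */
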
static void
tcp_sack_update_byte_counter(uint32_t start, uint32_t end, uint32_t *newbytes_acked)
{
	*newbytes_acked += (end - start);
}

/*
 * Process cumulative ACK and the TCP SACK option to update the scoreboard.
 * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
 * the sequence space).
 */
void
tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
    u_int32_t *newbytes_acked, uint32_t *highest_sacked_seq)
{
	struct sackhole *cur, *temp;
	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
	int i, j, num_sack_blks;
	tcp_seq old_snd_fack = 0, th_ack = th->th_ack;
	uint32_t tsecr = 0;

	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
		tsecr = to->to_tsecr;
	}

	num_sack_blks = 0;
	/*
	 * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
	 * treat [SND.UNA, SEG.ACK) as if it is a SACK block. We don't need
	 * this for RACK.
	 */
	if (!TCP_RACK_ENABLED(tp) && SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
		sack_blocks[num_sack_blks].start = tp->snd_una;
		sack_blocks[num_sack_blks++].end = th_ack;
	}
	/*
	 * Append received valid SACK blocks to sack_blocks[].
	 * Check that the SACK block range is valid.
	 */
	for (i = 0; i < to->to_nsacks; i++) {
		bcopy((to->to_sacks + i * TCPOLEN_SACK),
		    &sack, sizeof(sack));
		sack.start = ntohl(sack.start);
		sack.end = ntohl(sack.end);
		if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack)) {
			sack_blocks[num_sack_blks++] = sack;
		}
	}

	/*
	 * Return if SND.UNA is not advanced and no valid SACK block
	 * is received.
	 */
	if (num_sack_blks == 0) {
		return;
	}

	VERIFY(num_sack_blks <= (TCP_MAX_SACK + 1));
	/*
	 * Sort the SACK blocks so we can update the scoreboard
	 * with just one pass. The overhead of sorting up to 4+1 elements
	 * is less than making up to 4+1 passes over the scoreboard.
	 */
	for (i = 0; i < num_sack_blks; i++) {
		for (j = i + 1; j < num_sack_blks; j++) {
			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
				sack = sack_blocks[i];
				sack_blocks[i] = sack_blocks[j];
				sack_blocks[j] = sack;
			}
		}
	}

	if (TCP_RACK_ENABLED(tp)) {
		sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */

		*highest_sacked_seq = sblkp->end;

		while (sblkp >= sack_blocks) {
			/*
			 * Mark SACKed segments which allows us to skip through such
			 * segments during RACK loss detection
			 */
			tcp_segs_dosack(tp, sblkp->start, sblkp->end, tsecr, newbytes_acked);
			sblkp--;
		}

		return;
	}

	/* Process holes only when RACK is not used */
	if (TAILQ_EMPTY(&tp->snd_holes)) {
		/*
		 * Empty scoreboard. Need to initialize snd_fack (it may be
		 * uninitialized or have a bogus value). Scoreboard holes
		 * (from the sack blocks received) are created later below (in
		 * the logic that adds holes to the tail of the scoreboard).
		 */
		tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
	}

	old_snd_fack = tp->snd_fack;
	/*
	 * In the while-loop below, incoming SACK blocks (sack_blocks[])
	 * and SACK holes (snd_holes) are traversed from their tails with
	 * just one pass in order to reduce the number of compares especially
	 * when the bandwidth-delay product is large.
	 * Note: Typically, in the first RTT of SACK recovery, the highest
	 * three or four SACK blocks with the same ack number are received.
	 * In the second RTT, if retransmitted data segments are not lost,
	 * the highest three or four SACK blocks with ack number advancing
	 * are received.
	 */
	sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */
	if (SEQ_LT(tp->snd_fack, sblkp->start)) {
		/*
		 * The highest SACK block is beyond fack.
		 * Append new SACK hole at the tail.
		 * If the second or later highest SACK blocks are also
		 * beyond the current fack, they will be inserted by
		 * way of hole splitting in the while-loop below.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL);
		if (temp != NULL) {
			tp->snd_fack = sblkp->end;
			tcp_sack_update_byte_counter(sblkp->start, sblkp->end, newbytes_acked);

			/* Go to the previous sack block. */
			sblkp--;
		} else {
			/*
			 * We failed to add a new hole based on the current
			 * sack block.  Skip over all the sack blocks that
			 * fall completely to the right of snd_fack and proceed
			 * to trim the scoreboard based on the remaining sack
			 * blocks. This also trims the scoreboard for th_ack
			 * (which is sack_blocks[0]).
			 */
			while (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->start)) {
				sblkp--;
			}
			if (sblkp >= sack_blocks &&
			    SEQ_LT(tp->snd_fack, sblkp->end)) {
				tcp_sack_update_byte_counter(tp->snd_fack, sblkp->end, newbytes_acked);
				tp->snd_fack = sblkp->end;
			}
		}
	} else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
		/* fack is advanced. */
		tcp_sack_update_byte_counter(tp->snd_fack, sblkp->end, newbytes_acked);
		tp->snd_fack = sblkp->end;
	}
	/* We must have at least one SACK hole in scoreboard */
	cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole */
	/*
	 * Since the incoming sack blocks are sorted, we can process them
	 * making one sweep of the scoreboard.
	 */
	while (sblkp >= sack_blocks && cur != NULL) {
		if (SEQ_GEQ(sblkp->start, cur->end)) {
			/*
			 * SACKs data beyond the current hole.
			 * Go to the previous sack block.
			 */
			sblkp--;
			continue;
		}
		if (SEQ_LEQ(sblkp->end, cur->start)) {
			/*
			 * SACKs data before the current hole.
			 * Go to the previous hole.
			 */
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
			continue;
		}
		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
		if (tp->sackhint.sack_bytes_rexmit < 0) {
			tp->sackhint.sack_bytes_rexmit = 0;
		}

		if (SEQ_LEQ(sblkp->start, cur->start)) {
			/* Data acks at least the beginning of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Acks entire hole, so delete hole */
				tcp_sack_update_byte_counter(cur->start, cur->end, newbytes_acked);

				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				temp = cur;
				cur = TAILQ_PREV(cur, sackhole_head, scblink);
				tcp_sackhole_remove(tp, temp);
				/*
				 * The sack block may ack all or part of the next
				 * hole too, so continue onto the next hole.
				 */
				continue;
			} else {
				/* Move start of hole forward */
				tcp_sack_update_byte_counter(cur->start, sblkp->end, newbytes_acked);
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				cur->start = sblkp->end;
				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
			}
		} else {
			/* Data acks at least the end of hole */
			if (SEQ_GEQ(sblkp->end, cur->end)) {
				/* Move end of hole backward */
				tcp_sack_update_byte_counter(sblkp->start, cur->end, newbytes_acked);
				tcp_sack_detect_reordering(tp, cur,
				    cur->end, old_snd_fack);
				cur->end = sblkp->start;
				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
			} else {
				/*
				 * ACKs some data in the middle of a hole;
				 * need to split current hole
				 */
				tcp_sack_detect_reordering(tp, cur,
				    sblkp->end, old_snd_fack);
				temp = tcp_sackhole_insert(tp, sblkp->end,
				    cur->end, cur);
				if (temp != NULL) {
					tcp_sack_update_byte_counter(sblkp->start, sblkp->end, newbytes_acked);
					if (SEQ_GT(cur->rxmit, temp->rxmit)) {
						temp->rxmit = cur->rxmit;
						tp->sackhint.sack_bytes_rexmit +=
						    (temp->rxmit - temp->start);
					}
					cur->end = sblkp->start;
					cur->rxmit = SEQ_MIN(cur->rxmit,
					    cur->end);
					/*
					 * Reset the rxmit_start to that of
					 * the current hole as that will
					 * help to compute the reorder
					 * window correctly
					 */
					temp->rxmit_start = cur->rxmit_start;
				}
			}
		}
		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
		/*
		 * Testing sblkp->start against cur->start tells us whether
		 * we're done with the sack block or the sack hole.
		 * Accordingly, we advance one or the other.
		 */
		if (SEQ_LEQ(sblkp->start, cur->start)) {
			cur = TAILQ_PREV(cur, sackhole_head, scblink);
		} else {
			sblkp--;
		}
	}
}

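/*
 * Worked example (illustrative sequence numbers): with snd_fack = 4000 and
 * one hole [1000, 3000), sorted incoming blocks [1500, 2000) and
 * [4000, 5000) are processed from the highest down. [4000, 5000) only
 * advances snd_fack to 5000; [1500, 2000) lands in the middle of the hole,
 * which is split into [1000, 1500) and [2000, 3000). newbytes_acked grows
 * by 1500 in total.
 */
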
/*
 * Free all SACK holes to clear the scoreboard.
 */
void
tcp_free_sackholes(struct tcpcb *tp)
{
	struct sackhole *q;

	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) {
		tcp_sackhole_remove(tp, q);
	}
	tp->sackhint.sack_bytes_rexmit = 0;
	tp->sackhint.sack_bytes_acked = 0;
	tp->sackhint.nexthole = NULL;
}

/*
 * Partial ack handling within a sack recovery episode.
 * Keeping this very simple for now. When a partial ack
 * is received, force snd_cwnd to a value that will allow
 * the sender to transmit no more than 2 segments.
 * If necessary, a better scheme can be adopted at a
 * later point, but for now, the goal is to prevent the
 * sender from bursting a large amount of data in the midst
 * of sack recovery.
 */
void
tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
{
	int num_segs = 1;

	tcp_seq onxt = tp->snd_nxt;
	tp->t_timer[TCPT_REXMT] = 0;
	tp->t_rtttime = 0;
	/*
	 * Avoid retransmitting what was already cumulatively ACKed by
	 * a partial ACK when snd_nxt was set to snd_una after RTO.
	 * For example, snd_nxt = 10001 after one retransmit following the
	 * RTO, while the partial ACK cumulatively acknowledges up to 13001.
	 */
	if (SEQ_LT(tp->snd_nxt, th->th_ack)) {
		tp->snd_nxt = th->th_ack;
	}
	/* send one or 2 segments based on how much new data was acked */
	if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) {
		num_segs = 2;
	}

	tp->snd_cwnd = tcp_flight_size(tp) + num_segs * tp->t_maxseg;
	if (tp->snd_cwnd > tp->snd_ssthresh) {
		tp->snd_cwnd = tp->snd_ssthresh;
	}
	if (SEQ_LT(tp->snd_fack, tp->snd_recover) &&
	    tp->snd_fack == th->th_ack && TAILQ_EMPTY(&tp->snd_holes)) {
		struct sackhole *temp;
		/*
		 * we received a partial ack but there is no sack_hole
		 * that will cover the remaining seq space. In this case,
		 * create a hole from snd_fack to snd_recover so that
		 * the sack recovery will continue.
		 */
		temp = tcp_sackhole_insert(tp, tp->snd_fack,
		    tp->snd_recover, NULL);
		if (temp != NULL) {
			tp->snd_fack = tp->snd_recover;
		}
	}
	(void) tcp_output(tp);
	/*
	 * When we send a FIN, we increment snd_nxt by 1 and on subsequent
	 * transmission of FIN, we reduce snd_nxt by 1 if it is equal to snd_max.
	 * If snd_nxt was reduced previously, we want to preserve that value.
	 * Otherwise, restore snd_nxt to its previous value.
	 */
	if (!((tp->t_flags & TF_SENTFIN) && tp->snd_nxt == tp->snd_max - 1) &&
	    SEQ_GT(onxt, tp->snd_nxt)) {
		tp->snd_nxt = onxt;
	}
}

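/*
 * Worked example (illustrative values): with t_maxseg = 1000, a partial ACK
 * that newly acknowledges 3500 bytes gives num_segs = 2 (3500 / 1000 = 3,
 * which is > 2), so snd_cwnd is pinned to the flight size plus 2000 bytes,
 * releasing at most two new segments while recovery continues.
 */
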
/*
 * Debug version of tcp_sack_output() that walks the scoreboard. Used for
 * now to sanity check the hint.
 */
static struct sackhole *
tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *p;

	*sack_bytes_rexmt = 0;
	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
		if (SEQ_LT(p->rxmit, p->end)) {
			if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
				continue;
			}
			*sack_bytes_rexmt += (p->rxmit - p->start);
			break;
		}
		*sack_bytes_rexmt += (p->rxmit - p->start);
	}
	return p;
}

/*
 * Returns the next hole to retransmit and the number of retransmitted bytes
 * from the scoreboard. We store both the next hole and the number of
 * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
 * reception). This avoids scoreboard traversals completely.
 *
 * The loop here will traverse *at most* one link. Here's the argument.
 * For the loop to traverse more than 1 link before finding the next hole to
 * retransmit, we would need to have at least 1 node following the current hint
 * with (rxmit == end). But, for all holes following the current hint,
 * (start == rxmit), since we have not yet retransmitted from them. Therefore,
 * in order to traverse more than 1 link in the loop below, we need to have at
 * least one node following the current hint with (start == rxmit == end).
 * But that can't happen: (start == end) means that all the data in that hole
 * has been sacked, in which case the hole would have been removed from the
 * scoreboard.
 */
struct sackhole *
tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
{
	struct sackhole *hole = NULL, *dbg_hole = NULL;
	int dbg_bytes_rexmt;

	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
	hole = tp->sackhint.nexthole;
	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) {
		goto out;
	}
	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
		if (SEQ_LT(hole->rxmit, hole->end)) {
			tp->sackhint.nexthole = hole;
			break;
		}
	}
out:
	if (dbg_hole != hole) {
		printf("%s: Computed sack hole not the same as cached value\n", __func__);
		hole = dbg_hole;
	}
	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
		printf("%s: Computed sack_bytes_retransmitted (%d) not "
		    "the same as cached value (%d)\n",
		    __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
		*sack_bytes_rexmt = dbg_bytes_rexmt;
	}
	return hole;
}

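/*
 * Worked example (illustrative scoreboard): if the hint points at a hole
 * [1000, 2000) with rxmit = 1500, that hole still has bytes to resend and
 * is returned right away; only when rxmit has reached end does the loop
 * step to the next hole, and the argument above guarantees it steps at
 * most once.
 */
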
/*
 * After a timeout, the SACK list may be rebuilt.  This SACK information
 * should be used to avoid retransmitting SACKed data.  This function
 * traverses the SACK list to see if snd_nxt should be moved forward.
 */
uint32_t
tcp_sack_adjust(struct tcpcb *tp)
{
	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);

	if (cur == NULL) {
		return 0; /* No holes */
	}
	if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) {
		return 0; /* We're already beyond any SACKed blocks */
	}
	/*
	 * Two cases for which we want to advance snd_nxt:
	 * i) snd_nxt lies between end of one hole and beginning of another
	 * ii) snd_nxt lies between end of last hole and snd_fack
	 */
	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
		if (SEQ_LT(tp->snd_nxt, cur->end)) {
			return cur->end - tp->snd_nxt;
		}
		if (SEQ_GEQ(tp->snd_nxt, p->start)) {
			cur = p;
		} else {
			tp->snd_nxt = p->start;
			return p->end - tp->snd_nxt;
		}
	}
	if (SEQ_LT(tp->snd_nxt, cur->end)) {
		return cur->end - tp->snd_nxt;
	}
	tp->snd_nxt = tp->snd_fack;
	return 0;
}

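/*
 * Worked example (illustrative sequence numbers): with holes [1000, 2000)
 * and [3000, 4000), snd_fack = 5000 and snd_nxt = 2500, snd_nxt sits
 * between the two holes, so it is advanced to 3000 and the function
 * returns 1000, the length of the hole that still needs retransmission.
 */
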
/*
 * This function returns TRUE if more than (tcprexmtthresh - 1) * SMSS
 * bytes with sequence numbers greater than snd_una have been SACKed.
 */
boolean_t
tcp_sack_byte_islost(struct tcpcb *tp)
{
	u_int32_t unacked_bytes, sndhole_bytes = 0;
	struct sackhole *sndhole;
	if (!SACK_ENABLED(tp) || IN_FASTRECOVERY(tp) ||
	    TAILQ_EMPTY(&tp->snd_holes) ||
	    (tp->t_flagsext & TF_PKTS_REORDERED)) {
		return FALSE;
	}

	unacked_bytes = tp->snd_max - tp->snd_una;

	TAILQ_FOREACH(sndhole, &tp->snd_holes, scblink) {
		sndhole_bytes += (sndhole->end - sndhole->start);
	}

	VERIFY(unacked_bytes >= sndhole_bytes);
	return (unacked_bytes - sndhole_bytes) >
	       ((tcprexmtthresh - 1) * tp->t_maxseg);
}

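/*
 * Worked example (illustrative values): with snd_una = 1000,
 * snd_max = 11000 (10000 bytes outstanding), holes totalling 6000 bytes,
 * t_maxseg = 1000 and tcprexmtthresh = 3, the outstanding data not covered
 * by any hole is 10000 - 6000 = 4000 bytes, which exceeds
 * (3 - 1) * 1000 = 2000, so the function returns TRUE.
 */
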
/*
 * Process any DSACK options that might be present on an input packet
 */

boolean_t
tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to,
    struct tcphdr *th, boolean_t *dsack_tlp)
{
	struct sackblk first_sack, second_sack;

	bcopy(to->to_sacks, &first_sack, sizeof(first_sack));
	first_sack.start = ntohl(first_sack.start);
	first_sack.end = ntohl(first_sack.end);

	if (to->to_nsacks > 1) {
		bcopy((to->to_sacks + TCPOLEN_SACK), &second_sack,
		    sizeof(second_sack));
		second_sack.start = ntohl(second_sack.start);
		second_sack.end = ntohl(second_sack.end);
	}

	if (SEQ_LT(first_sack.start, th->th_ack) &&
	    SEQ_LEQ(first_sack.end, th->th_ack)) {
		/*
		 * There is a dsack option reporting a duplicate segment
		 * also covered by cumulative acknowledgement.
		 *
		 * Validate the sequence numbers before looking at dsack
		 * option. The duplicate notification can come after
		 * snd_una moves forward. In order to set a window of valid
		 * sequence numbers to look for, we set a maximum send
		 * window within which the DSACK option will be processed.
		 */
		if (!(TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.start, th->th_ack) &&
		    TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.end, th->th_ack))) {
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;

			/*
			 * Return TRUE here so that the ack will not be
			 * treated as a duplicate ack.
			 */
			return TRUE;
	} else if (to->to_nsacks > 1 &&
	    SEQ_LEQ(second_sack.start, first_sack.start) &&
	    SEQ_GEQ(second_sack.end, first_sack.end)) {
		/*
		 * There is a dsack option in the first block that is not
		 * covered by the cumulative acknowledgement but is covered
		 * by the second sack block.
		 *
		 * Verify the sequence numbers on the second sack block
		 * before processing the DSACK option. Returning false
		 * here will treat the ack as a duplicate ack.
		 */
		if (!TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &second_sack,
		    th->th_ack)) {
			to->to_nsacks--;
			to->to_sacks += TCPOLEN_SACK;
			tcpstat.tcps_dsack_recvd_old++;
			return TRUE;
		}
	} else {
		/* no dsack options, proceed with processing the sack */
		return FALSE;
	}

	/* Update the tcpopt pointer to exclude dsack block */
	to->to_nsacks--;
	to->to_sacks += TCPOLEN_SACK;
	tcpstat.tcps_dsack_recvd++;
	tp->t_dsack_recvd++;

	/* DSACK was due to TLP */
	if (tp->t_tlphightrxt_persist && tp->t_tlphightrxt_persist == first_sack.end) {
		*dsack_tlp = true;
		tp->t_tlphightrxt_persist = 0;
	}
	if (TCP_RACK_ENABLED(tp) && *dsack_tlp == false) {
		tcp_rack_detect_reordering_dsack(tp, first_sack.start, first_sack.end);
	}

	/* Update the sender's retransmit segment state */
	if (((tp->t_rxtshift == 1 && first_sack.start == tp->snd_una) ||
	    ((tp->t_flagsext & TF_SENT_TLPROBE) &&
	    first_sack.end == tp->t_tlphighrxt)) &&
	    TAILQ_EMPTY(&tp->snd_holes) &&
	    SEQ_GT(th->th_ack, tp->snd_una)) {
		/*
		 * If the dsack is for a retransmitted packet and one of
		 * the two cases is true, it indicates ack loss:
		 * - retransmit timeout and first_sack.start == snd_una
		 * - TLP probe and first_sack.end == tlphighrxt
		 *
		 * Ignore dsack and do not update state when there is
		 * ack loss
		 */
		tcpstat.tcps_dsack_ackloss++;

		return TRUE;
	} else {
		tcp_rxtseg_set_spurious(tp, first_sack.start, (first_sack.end - 1));
	}
	return TRUE;
}
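
/*
 * Worked example (illustrative values): with th_ack = 5000, a first block
 * [3000, 4000) lies entirely below the cumulative ACK and is treated as a
 * DSACK report of a duplicate segment; the option pointer is advanced past
 * it and, unless the ack-loss cases above apply, the covered retransmit is
 * marked spurious. A first block [6000, 7000) repeated inside a second
 * block [6000, 8000) is the not-yet-acked DSACK form handled by the second
 * branch.
 */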
997