xref: /xnu-12377.61.12/bsd/netinet/tcp_sack.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2004-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
30  *	The Regents of the University of California.  All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions
34  * are met:
35  * 1. Redistributions of source code must retain the above copyright
36  *	notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce the above copyright
38  *	notice, this list of conditions and the following disclaimer in the
39  *	documentation and/or other materials provided with the distribution.
40  * 3. All advertising materials mentioning features or use of this software
41  *	must display the following acknowledgement:
42  *	This product includes software developed by the University of
43  *	California, Berkeley and its contributors.
44  * 4. Neither the name of the University nor the names of its contributors
45  *	may be used to endorse or promote products derived from this software
46  *	without specific prior written permission.
47  *
48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58  * SUCH DAMAGE.
59  *
60  */
61 
62 #define _IP_VHL
63 
64 
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/kernel.h>
68 #include <sys/sysctl.h>
69 #include <sys/mbuf.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/socketvar.h>
74 
75 #include <kern/zalloc.h>
76 
77 #include <net/route.h>
78 
79 #include <netinet/in.h>
80 #include <netinet/in_systm.h>
81 #include <netinet/ip.h>
82 #include <netinet/in_pcb.h>
83 #include <netinet/ip_var.h>
84 #include <netinet6/in6_pcb.h>
85 #include <netinet/ip6.h>
86 #include <netinet6/ip6_var.h>
87 #include <netinet/tcp.h>
88 #include <netinet/tcp_fsm.h>
89 #include <netinet/tcp_seq.h>
90 #include <netinet/tcp_timer.h>
91 #include <netinet/tcp_var.h>
92 #include <netinet/tcpip.h>
93 #include <netinet/tcp_cache.h>
94 #include <sys/kdebug.h>
95 
96 #include "tcp_includes.h"
97 
98 #if IPSEC
99 #include <netinet6/ipsec.h>
100 #endif /*IPSEC*/
101 
102 #include <libkern/OSAtomic.h>
103 
104 SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack, CTLFLAG_RW | CTLFLAG_LOCKED,
105     int, tcp_do_sack, 1, "Enable/Disable TCP SACK support");
106 SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_maxholes, CTLFLAG_RW | CTLFLAG_LOCKED,
107     static int, tcp_sack_maxholes, 128,
108     "Maximum number of TCP SACK holes allowed per connection");
109 
110 /* ToDo - remove when uTCP stops using it */
111 SYSCTL_SKMEM_TCP_INT(OID_AUTO, sack_globalmaxholes,
112     CTLFLAG_RW | CTLFLAG_LOCKED, static int, tcp_sack_globalmaxholes, 65536,
113     "Global maximum number of TCP SACK holes");
114 
115 static KALLOC_TYPE_DEFINE(sack_hole_zone, struct sackhole, NET_KT_DEFAULT);
116 
117 #define TCP_VALIDATE_SACK_SEQ_NUMBERS(_tp_, _sb_, _ack_) \
118     (SEQ_GT((_sb_)->end, (_sb_)->start) && \
119     SEQ_GT((_sb_)->start, (_tp_)->snd_una) && \
120     SEQ_GT((_sb_)->start, (_ack_)) && \
121     SEQ_LT((_sb_)->start, (_tp_)->snd_max) && \
122     SEQ_GT((_sb_)->end, (_tp_)->snd_una) && \
123     SEQ_LEQ((_sb_)->end, (_tp_)->snd_max))
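/*
 * For illustration (hypothetical values, not from the original source):
 * with snd_una = 1000, snd_max = 5000 and an incoming ACK of 1000, the
 * block [2000, 3000) passes every check above, whereas [500, 1500) fails
 * the "start > snd_una" test and [4000, 6000) fails "end <= snd_max".
 */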
124 
125 /*
126  * This function is called upon receipt of new valid data (while not in header
127  * prediction mode), and it updates the ordered list of sacks.
128  */
129 void
130 tcp_update_sack_list(struct tcpcb *tp, tcp_seq rcv_start, tcp_seq rcv_end)
131 {
132 	/*
133 	 * First reported block MUST be the most recent one.  Subsequent
134 	 * blocks SHOULD be in the order in which they arrived at the
135 	 * receiver.  These two conditions make the implementation fully
136 	 * compliant with RFC 2018.
137 	 */
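	/*
	 * Worked example (illustrative values only): with rcv_nxt = 100 and an
	 * existing list { [200,300), [400,500) }, an out-of-order segment
	 * [300,400) arrives.  Both existing blocks touch the new data, so they
	 * are merged into head_blk = [200,500); the resulting list is the
	 * single block [200,500), reported first as required by RFC 2018.
	 */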
138 	struct sackblk head_blk, saved_blks[MAX_SACK_BLKS];
139 	int num_head, num_saved, i;
140 
141 	/* SACK block for the received segment. */
142 	head_blk.start = rcv_start;
143 	head_blk.end = rcv_end;
144 
145 	/*
146 	 * Merge updated SACK blocks into head_blk, and
147 	 * save unchanged SACK blocks into saved_blks[].
148 	 * num_saved will have the number of the saved SACK blocks.
149 	 */
150 	num_saved = 0;
151 	for (i = 0; i < tp->rcv_numsacks; i++) {
152 		tcp_seq start = tp->sackblks[i].start;
153 		tcp_seq end = tp->sackblks[i].end;
154 		if (SEQ_GEQ(start, end) || SEQ_LEQ(start, tp->rcv_nxt)) {
155 			/*
156 			 * Discard this SACK block.
157 			 */
158 		} else if (SEQ_LEQ(head_blk.start, end) &&
159 		    SEQ_GEQ(head_blk.end, start)) {
160 			/*
161 			 * Merge this SACK block into head_blk.
162 			 * This SACK block itself will be discarded.
163 			 */
164 			if (SEQ_GT(head_blk.start, start)) {
165 				head_blk.start = start;
166 			}
167 			if (SEQ_LT(head_blk.end, end)) {
168 				head_blk.end = end;
169 			}
170 		} else {
171 			/*
172 			 * Save this SACK block.
173 			 */
174 			saved_blks[num_saved].start = start;
175 			saved_blks[num_saved].end = end;
176 			num_saved++;
177 		}
178 	}
179 
180 	/*
181 	 * Update SACK list in tp->sackblks[].
182 	 */
183 	num_head = 0;
184 	if (SEQ_GT(head_blk.start, tp->rcv_nxt)) {
185 		/*
186 		 * The received data segment is an out-of-order segment.
187 		 * Put head_blk at the top of SACK list.
188 		 */
189 		tp->sackblks[0] = head_blk;
190 		num_head = 1;
191 		/*
192 		 * If the number of saved SACK blocks exceeds its limit,
193 		 * discard the last SACK block.
194 		 */
195 		if (num_saved >= MAX_SACK_BLKS) {
196 			num_saved--;
197 		}
198 	}
199 	if (num_saved > 0) {
200 		/*
201 		 * Copy the saved SACK blocks back.
202 		 */
203 		bcopy(saved_blks, &tp->sackblks[num_head], sizeof(struct sackblk) * num_saved);
204 	}
205 
206 	/* Save the number of SACK blocks. */
207 	tp->rcv_numsacks = num_head + num_saved;
208 
209 	/* If we are requesting SACK recovery, reset the force-ACK counter
210 	 * so that the connection will generate more ACKs after recovery and
211 	 * the sender's cwnd will open.
212 	 */
213 	if (tp->rcv_numsacks > 0) {
214 		tp->t_forced_acks = TCP_FORCED_ACKS_COUNT;
215 	}
216 
217 #if TRAFFIC_MGT
218 	if (tp->acc_iaj > 0 && tp->rcv_numsacks > 0) {
219 		reset_acc_iaj(tp);
220 	}
221 #endif /* TRAFFIC_MGT */
222 }
223 
224 /*
225  * Delete all receiver-side SACK information.
226  */
227 void
228 tcp_clean_sackreport(struct tcpcb *tp)
229 {
230 	tp->rcv_numsacks = 0;
231 	bzero(&tp->sackblks[0], sizeof(struct sackblk) * MAX_SACK_BLKS);
232 }
233 
234 /*
235  * Allocate struct sackhole.
236  */
237 static struct sackhole *
238 tcp_sackhole_alloc(struct tcpcb *tp, tcp_seq start, tcp_seq end)
239 {
240 	struct sackhole *hole;
241 
242 	if (tp->snd_numholes >= tcp_sack_maxholes ||
243 	    tcp_memacct_hardlimit()) {
244 		/*
245 		 * We only check for hardlimit, because properly handling SACK
246 		 * We only check for the hard limit, because properly handling SACK
247 		 */
248 		tcpstat.tcps_sack_sboverflow++;
249 		return NULL;
250 	}
251 
252 	hole = zalloc_flags(sack_hole_zone, Z_WAITOK | Z_NOFAIL);
253 	tcp_memacct_add(kalloc_type_size(sack_hole_zone));
254 
255 	hole->start = start;
256 	hole->end = end;
257 	hole->rxmit = start;
258 
259 	tp->snd_numholes++;
260 
261 	return hole;
262 }
263 
264 /*
265  * Free struct sackhole.
266  */
267 static void
268 tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
269 {
270 	zfree(sack_hole_zone, hole);
271 	tcp_memacct_sub(kalloc_type_size(sack_hole_zone));
272 
273 	tp->snd_numholes--;
274 }
275 
276 /*
277  * Insert new SACK hole into scoreboard.
278  */
279 static struct sackhole *
280 tcp_sackhole_insert(struct tcpcb *tp, tcp_seq start, tcp_seq end,
281     struct sackhole *after)
282 {
283 	struct sackhole *hole;
284 
285 	/* Allocate a new SACK hole. */
286 	hole = tcp_sackhole_alloc(tp, start, end);
287 	if (hole == NULL) {
288 		return NULL;
289 	}
290 	hole->rxmit_start = tcp_now;
291 	/* Insert the new SACK hole into scoreboard */
292 	if (after != NULL) {
293 		TAILQ_INSERT_AFTER(&tp->snd_holes, after, hole, scblink);
294 	} else {
295 		TAILQ_INSERT_TAIL(&tp->snd_holes, hole, scblink);
296 	}
297 
298 	/* Update SACK hint. */
299 	if (tp->sackhint.nexthole == NULL) {
300 		tp->sackhint.nexthole = hole;
301 	}
302 
303 	return hole;
304 }
305 
306 /*
307  * Remove SACK hole from scoreboard.
308  */
309 static void
310 tcp_sackhole_remove(struct tcpcb *tp, struct sackhole *hole)
311 {
312 	/* Update SACK hint. */
313 	if (tp->sackhint.nexthole == hole) {
314 		tp->sackhint.nexthole = TAILQ_NEXT(hole, scblink);
315 	}
316 
317 	/* Remove this SACK hole. */
318 	TAILQ_REMOVE(&tp->snd_holes, hole, scblink);
319 
320 	/* Free this SACK hole. */
321 	tcp_sackhole_free(tp, hole);
322 }
323 /*
324  * When a new ack with SACK is received, check if it indicates packet
325  * reordering. If there is packet reordering, the socket is marked and
326  * the time offset by which the packet arrived late relative to its
327  * closest neighboring packets is computed.
328  */
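/*
 * Illustration (hypothetical sequence numbers): suppose hole [5000,6000)
 * was never retransmitted (rxmit == 5000), there has been no recent RTO,
 * and a SACK now covers it while snd_fack is already at 8000.  The data
 * was not lost, only delayed past its neighbors, so the checks below flag
 * reordering instead of charging a loss.
 */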
329 static void
330 tcp_sack_detect_reordering(struct tcpcb *tp, struct sackhole *s,
331     tcp_seq sacked_seq, tcp_seq snd_fack)
332 {
333 	int32_t rext = 0, reordered = 0;
334 
335 	/*
336 	 * If the SACK hole is past snd_fack, this is from new SACK
337 	 * information, so we can ignore it.
338 	 */
339 	if (SEQ_GT(s->end, snd_fack)) {
340 		return;
341 	}
342 	/*
343 	 * If there has been a retransmit timeout, then the timestamp on
344 	 * the SACK segment will be newer. This might lead to a
345 	 * false-positive. Avoid re-ordering detection in this case.
346 	 */
347 	if (tp->t_rxtshift > 0) {
348 		return;
349 	}
350 
351 	/*
352 	 * Detect reordering from SACK information by checking
353 	 * if recently sacked data was never retransmitted from this hole.
354 	 *
355 	 * First, we look for the byte in the list of retransmitted segments, which
356 	 * also contains the segments that were retransmitted due to RTO/TLP.
357 	 *
358 	 * Then, we check the SACK hole itself, which indicates whether or not it
359 	 * was subject to retransmission.
360 	 */
361 	if (SEQ_LT(s->rxmit, sacked_seq) &&
362 	    (tcp_rxtseg_find(tp, sacked_seq - 1, sacked_seq - 1) == NULL)) {
363 		reordered = 1;
364 		tcpstat.tcps_avoid_rxmt++;
365 	}
366 
367 	if (reordered) {
368 		if (!(tp->t_flagsext & TF_PKTS_REORDERED)) {
369 			tp->t_flagsext |= TF_PKTS_REORDERED;
370 			tcpstat.tcps_detect_reordering++;
371 		}
372 
373 		tcpstat.tcps_reordered_pkts++;
374 		tp->t_reordered_pkts++;
375 
376 		VERIFY(SEQ_GEQ(snd_fack, s->rxmit));
377 
378 		if (s->rxmit_start > 0) {
379 			rext = timer_diff(tcp_now, 0, s->rxmit_start, 0);
380 			if (rext < 0) {
381 				return;
382 			}
383 
384 			/*
385 			 * We take the maximum reorder window to schedule
386 			 * DELAYFR timer as that will take care of jitter
387 			 * on the network path.
388 			 *
389 			 * Computing average and standard deviation seems
390 			 * to cause unnecessary retransmissions when there
391 			 * is high jitter.
392 			 *
393 			 * We set a maximum of SRTT/2 and a minimum of
394 			 * 10 ms on the reorder window.
395 			 */
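			/*
			 * In other words, t_reorderwin becomes
			 * max(min(max(t_reorderwin, rext), SRTT/2), 10);
			 * t_srtt is kept scaled by TCP_RTT_SHIFT, so the shift
			 * by TCP_RTT_SHIFT + 1 below corresponds to the SRTT/2
			 * cap described above.
			 */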
396 			tp->t_reorderwin = max(tp->t_reorderwin, rext);
397 			tp->t_reorderwin = min(tp->t_reorderwin,
398 			    (tp->t_srtt >> (TCP_RTT_SHIFT + 1)));
399 			tp->t_reorderwin = max(tp->t_reorderwin, 10);
400 		}
401 	}
402 }
403 
404 static void
405 tcp_sack_update_byte_counter(uint32_t start, uint32_t end, uint32_t *newbytes_acked)
406 {
407 	*newbytes_acked += (end - start);
408 }
409 
410 /*
411  * Process cumulative ACK and the TCP SACK option to update the scoreboard.
412  * tp->snd_holes is an ordered list of holes (oldest to newest, in terms of
413  * the sequence space).
414  */
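/*
 * Worked example (illustrative numbers, non-RACK path): with
 * snd_una = 1000, snd_max = snd_fack = 3000 and a single hole [1000,3000)
 * on the scoreboard, a duplicate ACK of 1000 carrying the SACK block
 * [2000,2500) splits that hole into [1000,2000) and [2500,3000) and adds
 * 500 to *newbytes_acked.
 */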
415 void
416 tcp_sack_doack(struct tcpcb *tp, struct tcpopt *to, struct tcphdr *th,
417     u_int32_t *newbytes_acked, uint32_t *highest_sacked_seq)
418 {
419 	struct sackhole *cur, *temp;
420 	struct sackblk sack, sack_blocks[TCP_MAX_SACK + 1], *sblkp;
421 	int i, j, num_sack_blks;
422 	tcp_seq old_snd_fack = 0, th_ack = th->th_ack;
423 	uint32_t tsecr = 0;
424 
425 	if (((to->to_flags & TOF_TS) != 0) && (to->to_tsecr != 0)) {
426 		tsecr = to->to_tsecr;
427 	}
428 
429 	num_sack_blks = 0;
430 	/*
431 	 * If SND.UNA will be advanced by SEG.ACK, and if SACK holes exist,
432 	 * treat [SND.UNA, SEG.ACK) as if it is a SACK block. We don't need
433 	 * this for RACK.
434 	 */
435 	if (!TCP_RACK_ENABLED(tp) && SEQ_LT(tp->snd_una, th_ack) && !TAILQ_EMPTY(&tp->snd_holes)) {
436 		sack_blocks[num_sack_blks].start = tp->snd_una;
437 		sack_blocks[num_sack_blks++].end = th_ack;
438 	}
439 	/*
440 	 * Append received valid SACK blocks to sack_blocks[].
441 	 * Check that the SACK block range is valid.
442 	 */
443 	for (i = 0; i < to->to_nsacks; i++) {
444 		bcopy((to->to_sacks + i * TCPOLEN_SACK),
445 		    &sack, sizeof(sack));
446 		sack.start = ntohl(sack.start);
447 		sack.end = ntohl(sack.end);
448 		if (TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &sack, th_ack)) {
449 			sack_blocks[num_sack_blks++] = sack;
450 		}
451 	}
452 
453 	/*
454 	 * Return if SND.UNA is not advanced and no valid SACK block
455 	 * is received.
456 	 */
457 	if (num_sack_blks == 0) {
458 		return;
459 	}
460 
461 	VERIFY(num_sack_blks <= (TCP_MAX_SACK + 1));
462 	/*
463 	 * Sort the SACK blocks so we can update the scoreboard
464 	 * with just one pass. The overhead of sorting up to 4+1 elements
465 	 * is less than making up to 4+1 passes over the scoreboard.
466 	 */
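	/*
	 * This is a simple exchange sort into ascending order of the blocks'
	 * end sequence numbers; with at most TCP_MAX_SACK + 1 entries the
	 * quadratic cost is negligible.
	 */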
467 	for (i = 0; i < num_sack_blks; i++) {
468 		for (j = i + 1; j < num_sack_blks; j++) {
469 			if (SEQ_GT(sack_blocks[i].end, sack_blocks[j].end)) {
470 				sack = sack_blocks[i];
471 				sack_blocks[i] = sack_blocks[j];
472 				sack_blocks[j] = sack;
473 			}
474 		}
475 	}
476 
477 	if (TCP_RACK_ENABLED(tp)) {
478 		sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */
479 
480 		*highest_sacked_seq = sblkp->end;
481 
482 		/* RACK can get disabled if segment allocation fails */
483 		while (sblkp >= sack_blocks && TCP_RACK_ENABLED(tp)) {
484 			/*
485 			 * Mark SACKed segments, which allows us to skip over such
486 			 * segments during RACK loss detection.
487 			 */
488 			tcp_segs_dosack(tp, sblkp->start, sblkp->end, tsecr, newbytes_acked);
489 			sblkp--;
490 		}
491 
492 		return;
493 	}
494 
495 	/* Process holes only when RACK is not used */
496 	if (TAILQ_EMPTY(&tp->snd_holes)) {
497 		/*
498 		 * Empty scoreboard. Need to initialize snd_fack (it may be
499 		 * uninitialized or have a bogus value). Scoreboard holes
500 		 * (from the sack blocks received) are created later below (in
501 		 * the logic that adds holes to the tail of the scoreboard).
502 		 */
503 		tp->snd_fack = SEQ_MAX(tp->snd_una, th_ack);
504 	}
505 
506 	old_snd_fack = tp->snd_fack;
507 	/*
508 	 * In the while-loop below, incoming SACK blocks (sack_blocks[])
509 	 * and SACK holes (snd_holes) are traversed from their tails with
510 	 * just one pass in order to reduce the number of compares especially
511 	 * when the bandwidth-delay product is large.
512 	 * Note: Typically, in the first RTT of SACK recovery, the highest
513 	 * three or four SACK blocks with the same ack number are received.
514 	 * In the second RTT, if retransmitted data segments are not lost,
515 	 * the highest three or four SACK blocks with ack number advancing
516 	 * are received.
517 	 */
518 	sblkp = &sack_blocks[num_sack_blks - 1];        /* Last SACK block */
519 	if (SEQ_LT(tp->snd_fack, sblkp->start)) {
520 		/*
521 		 * The highest SACK block is beyond fack.
522 		 * Append new SACK hole at the tail.
523 		 * If the second or later highest SACK blocks are also
524 		 * beyond the current fack, they will be inserted by
525 		 * way of hole splitting in the while-loop below.
526 		 */
527 		temp = tcp_sackhole_insert(tp, tp->snd_fack, sblkp->start, NULL);
528 		if (temp != NULL) {
529 			tp->snd_fack = sblkp->end;
530 			tcp_sack_update_byte_counter(sblkp->start, sblkp->end, newbytes_acked);
531 
532 			/* Go to the previous sack block. */
533 			sblkp--;
534 		} else {
535 			/*
536 			 * We failed to add a new hole based on the current
537 			 * sack block.  Skip over all the sack blocks that
538 			 * fall completely to the right of snd_fack and proceed
539 			 * to trim the scoreboard based on the remaining sack
540 			 * blocks. This also trims the scoreboard for th_ack
541 			 * (which is sack_blocks[0]).
542 			 */
543 			while (sblkp >= sack_blocks &&
544 			    SEQ_LT(tp->snd_fack, sblkp->start)) {
545 				sblkp--;
546 			}
547 			if (sblkp >= sack_blocks &&
548 			    SEQ_LT(tp->snd_fack, sblkp->end)) {
549 				tcp_sack_update_byte_counter(tp->snd_fack, sblkp->end, newbytes_acked);
550 				tp->snd_fack = sblkp->end;
551 			}
552 		}
553 	} else if (SEQ_LT(tp->snd_fack, sblkp->end)) {
554 		/* fack is advanced. */
555 		tcp_sack_update_byte_counter(tp->snd_fack, sblkp->end, newbytes_acked);
556 		tp->snd_fack = sblkp->end;
557 	}
558 	/* We must have at least one SACK hole in scoreboard */
559 	cur = TAILQ_LAST(&tp->snd_holes, sackhole_head); /* Last SACK hole */
560 	/*
561 	 * Since the incoming sack blocks are sorted, we can process them
562 	 * making one sweep of the scoreboard.
563 	 */
564 	while (sblkp >= sack_blocks && cur != NULL) {
565 		if (SEQ_GEQ(sblkp->start, cur->end)) {
566 			/*
567 			 * SACKs data beyond the current hole.
568 			 * Go to the previous sack block.
569 			 */
570 			sblkp--;
571 			continue;
572 		}
573 		if (SEQ_LEQ(sblkp->end, cur->start)) {
574 			/*
575 			 * SACKs data before the current hole.
576 			 * Go to the previous hole.
577 			 */
578 			cur = TAILQ_PREV(cur, sackhole_head, scblink);
579 			continue;
580 		}
581 		tp->sackhint.sack_bytes_rexmit -= (cur->rxmit - cur->start);
582 		if (tp->sackhint.sack_bytes_rexmit < 0) {
583 			tp->sackhint.sack_bytes_rexmit = 0;
584 		}
585 
586 		if (SEQ_LEQ(sblkp->start, cur->start)) {
587 			/* Data acks at least the beginning of hole */
588 			if (SEQ_GEQ(sblkp->end, cur->end)) {
589 				/* Acks entire hole, so delete hole */
590 				tcp_sack_update_byte_counter(cur->start, cur->end, newbytes_acked);
591 
592 				tcp_sack_detect_reordering(tp, cur,
593 				    cur->end, old_snd_fack);
594 				temp = cur;
595 				cur = TAILQ_PREV(cur, sackhole_head, scblink);
596 				tcp_sackhole_remove(tp, temp);
597 				/*
598 				 * The sack block may ack all or part of the next
599 				 * hole too, so continue onto the next hole.
600 				 */
601 				continue;
602 			} else {
603 				/* Move start of hole forward */
604 				tcp_sack_update_byte_counter(cur->start, sblkp->end, newbytes_acked);
605 				tcp_sack_detect_reordering(tp, cur,
606 				    sblkp->end, old_snd_fack);
607 				cur->start = sblkp->end;
608 				cur->rxmit = SEQ_MAX(cur->rxmit, cur->start);
609 			}
610 		} else {
611 			/* Data acks at least the end of hole */
612 			if (SEQ_GEQ(sblkp->end, cur->end)) {
613 				/* Move end of hole backward */
614 				tcp_sack_update_byte_counter(sblkp->start, cur->end, newbytes_acked);
615 				tcp_sack_detect_reordering(tp, cur,
616 				    cur->end, old_snd_fack);
617 				cur->end = sblkp->start;
618 				cur->rxmit = SEQ_MIN(cur->rxmit, cur->end);
619 			} else {
620 				/*
621 				 * ACKs some data in the middle of a hole;
622 				 * need to split current hole
623 				 */
624 				tcp_sack_detect_reordering(tp, cur,
625 				    sblkp->end, old_snd_fack);
626 				temp = tcp_sackhole_insert(tp, sblkp->end,
627 				    cur->end, cur);
628 				if (temp != NULL) {
629 					tcp_sack_update_byte_counter(sblkp->start, sblkp->end, newbytes_acked);
630 					if (SEQ_GT(cur->rxmit, temp->rxmit)) {
631 						temp->rxmit = cur->rxmit;
632 						tp->sackhint.sack_bytes_rexmit
633 						        += (temp->rxmit
634 						    - temp->start);
635 					}
636 					cur->end = sblkp->start;
637 					cur->rxmit = SEQ_MIN(cur->rxmit,
638 					    cur->end);
639 					/*
640 					 * Reset the rxmit_start to that of
641 					 * the current hole as that will
642 					 * help to compute the reorder
643 					 * window correctly
644 					 */
645 					temp->rxmit_start = cur->rxmit_start;
646 				}
647 			}
648 		}
649 		tp->sackhint.sack_bytes_rexmit += (cur->rxmit - cur->start);
650 		/*
651 		 * Testing sblkp->start against cur->start tells us whether
652 		 * we're done with the sack block or the sack hole.
653 		 * Accordingly, we advance one or the other.
654 		 */
655 		if (SEQ_LEQ(sblkp->start, cur->start)) {
656 			cur = TAILQ_PREV(cur, sackhole_head, scblink);
657 		} else {
658 			sblkp--;
659 		}
660 	}
661 }
662 
663 /*
664  * Free all SACK holes to clear the scoreboard.
665  */
666 void
667 tcp_free_sackholes(struct tcpcb *tp)
668 {
669 	struct sackhole *q;
670 
671 	while ((q = TAILQ_FIRST(&tp->snd_holes)) != NULL) {
672 		tcp_sackhole_remove(tp, q);
673 	}
674 	tp->sackhint.sack_bytes_rexmit = 0;
675 	tp->sackhint.sack_bytes_acked = 0;
676 	tp->sackhint.nexthole = NULL;
677 }
678 
679 /*
680  * Partial ack handling within a sack recovery episode.
681  * Keeping this very simple for now. When a partial ack
682  * is received, force snd_cwnd to a value that will allow
683  * the sender to transmit no more than 2 segments.
684  * If necessary, a better scheme can be adopted at a
685  * later point, but for now, the goal is to prevent the
686  * sender from bursting a large amount of data in the midst
687  * of sack recovery.
688  */
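/*
 * Rough illustration (made-up values): if the partial ACK newly covers
 * three full-sized segments, num_segs below becomes 2 and cwnd is pinned
 * to the flight size plus 2 * t_maxseg (but no more than ssthresh), so at
 * most about two new segments can leave during this step of recovery.
 */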
689 void
690 tcp_sack_partialack(struct tcpcb *tp, struct tcphdr *th)
691 {
692 	int num_segs = 1;
693 
694 	tcp_seq onxt = tp->snd_nxt;
695 	tp->t_timer[TCPT_REXMT] = 0;
696 	tp->t_rtttime = 0;
697 	/*
698 	 * Avoid retransmitting what was already cumulatively ACKed by
699 	 * a partial ACK when snd_nxt was set to snd_una after RTO.
700 	 * For example, snd_nxt=10001 (after one retransmission following RTO)
701 	 * while the partial ACK cumulatively acknowledges 13001.
702 	 */
703 	if (SEQ_LT(tp->snd_nxt, th->th_ack)) {
704 		tp->snd_nxt = th->th_ack;
705 	}
706 	/* send one or 2 segments based on how much new data was acked */
707 	if (((BYTES_ACKED(th, tp)) / tp->t_maxseg) > 2) {
708 		num_segs = 2;
709 	}
710 
711 	tp->snd_cwnd = tcp_flight_size(tp) + num_segs * tp->t_maxseg;
712 	if (tp->snd_cwnd > tp->snd_ssthresh) {
713 		tp->snd_cwnd = tp->snd_ssthresh;
714 	}
715 	if (SEQ_LT(tp->snd_fack, tp->snd_recover) &&
716 	    tp->snd_fack == th->th_ack && TAILQ_EMPTY(&tp->snd_holes)) {
717 		struct sackhole *temp;
718 		/*
719 		 * we received a partial ack but there is no sack_hole
720 		 * that will cover the remaining seq space. In this case,
721 		 * create a hole from snd_fack to snd_recover so that
722 		 * the sack recovery will continue.
723 		 */
724 		temp = tcp_sackhole_insert(tp, tp->snd_fack,
725 		    tp->snd_recover, NULL);
726 		if (temp != NULL) {
727 			tp->snd_fack = tp->snd_recover;
728 		}
729 	}
730 	(void) tcp_output(tp);
731 	/*
732 	 * When we send a FIN, we increment snd_nxt by 1 and on subsequent
733 	 * transmission of FIN, we reduce snd_nxt by 1 if it is equal to snd_max.
734 	 * If snd_nxt was reduced previously, we want to preserve that value.
735 	 * Otherwise, restore snd_nxt to its previous value.
736 	 */
737 	if (!((tp->t_flags & TF_SENTFIN) && tp->snd_nxt == tp->snd_max - 1) &&
738 	    SEQ_GT(onxt, tp->snd_nxt)) {
739 		tp->snd_nxt = onxt;
740 	}
741 }
742 
743 /*
744  * Debug version of tcp_sack_output() that walks the scoreboard. Used for
745  * now to sanity check the hint.
746  */
747 static struct sackhole *
748 tcp_sack_output_debug(struct tcpcb *tp, int *sack_bytes_rexmt)
749 {
750 	struct sackhole *p;
751 
752 	*sack_bytes_rexmt = 0;
753 	TAILQ_FOREACH(p, &tp->snd_holes, scblink) {
754 		if (SEQ_LT(p->rxmit, p->end)) {
755 			if (SEQ_LT(p->rxmit, tp->snd_una)) {/* old SACK hole */
756 				continue;
757 			}
758 			*sack_bytes_rexmt += (p->rxmit - p->start);
759 			break;
760 		}
761 		*sack_bytes_rexmt += (p->rxmit - p->start);
762 	}
763 	return p;
764 }
765 
766 /*
767  * Returns the next hole to retransmit and the number of retransmitted bytes
768  * from the scoreboard. We store both the next hole and the number of
769  * retransmitted bytes as hints (and recompute these on the fly upon SACK/ACK
770  * reception). This avoids scoreboard traversals completely.
771  *
772  * The loop here will traverse *at most* one link. Here's the argument.
773  * For the loop to traverse more than 1 link before finding the next hole to
774  * retransmit, we would need to have at least 1 node following the current hint
775  * with (rxmit == end). But, for all holes following the current hint,
776  * (start == rxmit), since we have not yet retransmitted from them. Therefore,
777  * in order to traverse more than 1 link in the loop below, we need to have
778  * at least one node following the current hint with (start == rxmit == end).
779  * But that can't happen: (start == end) means that all the data in that hole
780  * has been sacked, in which case, the hole would have been removed from the
781  * scoreboard.
782  */
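/*
 * Illustration: if the hint points at hole [1000,2000) with rxmit = 1500,
 * that hole still has bytes left to retransmit and is returned directly;
 * only when rxmit has caught up with end does the loop below step to the
 * following hole and update the hint.
 */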
783 struct sackhole *
784 tcp_sack_output(struct tcpcb *tp, int *sack_bytes_rexmt)
785 {
786 	struct sackhole *hole = NULL, *dbg_hole = NULL;
787 	int dbg_bytes_rexmt;
788 
789 	dbg_hole = tcp_sack_output_debug(tp, &dbg_bytes_rexmt);
790 	*sack_bytes_rexmt = tp->sackhint.sack_bytes_rexmit;
791 	hole = tp->sackhint.nexthole;
792 	if (hole == NULL || SEQ_LT(hole->rxmit, hole->end)) {
793 		goto out;
794 	}
795 	while ((hole = TAILQ_NEXT(hole, scblink)) != NULL) {
796 		if (SEQ_LT(hole->rxmit, hole->end)) {
797 			tp->sackhint.nexthole = hole;
798 			break;
799 		}
800 	}
801 out:
802 	if (dbg_hole != hole) {
803 		printf("%s: Computed sack hole not the same as cached value\n", __func__);
804 		hole = dbg_hole;
805 	}
806 	if (*sack_bytes_rexmt != dbg_bytes_rexmt) {
807 		printf("%s: Computed sack_bytes_retransmitted (%d) not "
808 		    "the same as cached value (%d)\n",
809 		    __func__, dbg_bytes_rexmt, *sack_bytes_rexmt);
810 		*sack_bytes_rexmt = dbg_bytes_rexmt;
811 	}
812 	return hole;
813 }
814 
815 /*
816  * After a timeout, the SACK list may be rebuilt.  This SACK information
817  * should be used to avoid retransmitting SACKed data.  This function
818  * traverses the SACK list to see if snd_nxt should be moved forward.
819  */
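/*
 * Example (illustrative): holes { [1000,2000), [4000,5000) } with
 * snd_fack = 6000.  If snd_nxt is 3000, it lies between the two holes, so
 * it is advanced to 4000 and 1000 (the remainder of that hole) is
 * returned; if snd_nxt is 5500, it is advanced to snd_fack and 0 is
 * returned.
 */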
820 uint32_t
821 tcp_sack_adjust(struct tcpcb *tp)
822 {
823 	struct sackhole *p, *cur = TAILQ_FIRST(&tp->snd_holes);
824 
825 	if (cur == NULL) {
826 		return 0; /* No holes */
827 	}
828 	if (SEQ_GEQ(tp->snd_nxt, tp->snd_fack)) {
829 		return 0; /* We're already beyond any SACKed blocks */
830 	}
831 	/*
832 	 * Two cases for which we want to advance snd_nxt:
833 	 * i) snd_nxt lies between end of one hole and beginning of another
834 	 * ii) snd_nxt lies between end of last hole and snd_fack
835 	 */
836 	while ((p = TAILQ_NEXT(cur, scblink)) != NULL) {
837 		if (SEQ_LT(tp->snd_nxt, cur->end)) {
838 			return cur->end - tp->snd_nxt;
839 		}
840 		if (SEQ_GEQ(tp->snd_nxt, p->start)) {
841 			cur = p;
842 		} else {
843 			tp->snd_nxt = p->start;
844 			return p->end - tp->snd_nxt;
845 		}
846 	}
847 	if (SEQ_LT(tp->snd_nxt, cur->end)) {
848 		return cur->end - tp->snd_nxt;
849 	}
850 	tp->snd_nxt = tp->snd_fack;
851 	return 0;
852 }
853 
854 /*
855  * This function returns TRUE if more than (tcprexmtthresh - 1) * SMSS
856  * bytes with sequence numbers greater than snd_una have been SACKed.
857  */
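/*
 * For example (hypothetical numbers, assuming none of the early-return
 * conditions below apply): with t_maxseg = 1000, tcprexmtthresh = 3,
 * snd_max - snd_una = 10000 and holes totalling 7000 bytes, 3000 bytes
 * have been SACKed, which exceeds (3 - 1) * 1000, so TRUE is returned.
 */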
858 boolean_t
859 tcp_sack_byte_islost(struct tcpcb *tp)
860 {
861 	u_int32_t unacked_bytes, sndhole_bytes = 0;
862 	struct sackhole *sndhole;
863 	if (!SACK_ENABLED(tp) || IN_FASTRECOVERY(tp) ||
864 	    TAILQ_EMPTY(&tp->snd_holes) ||
865 	    (tp->t_flagsext & TF_PKTS_REORDERED)) {
866 		return FALSE;
867 	}
868 
869 	unacked_bytes = tp->snd_max - tp->snd_una;
870 
871 	TAILQ_FOREACH(sndhole, &tp->snd_holes, scblink) {
872 		sndhole_bytes += (sndhole->end - sndhole->start);
873 	}
874 
875 	VERIFY(unacked_bytes >= sndhole_bytes);
876 	return (unacked_bytes - sndhole_bytes) >
877 	       ((tcprexmtthresh - 1) * tp->t_maxseg);
878 }
879 
880 /*
881  * Process any DSACK options that might be present on an input packet
882  */
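/*
 * Per RFC 2883, a DSACK is carried in the first SACK block and reports a
 * range the receiver already had: either entirely below the cumulative
 * ACK, or else contained within the second SACK block.  For example
 * (illustrative), an ACK of 5000 whose first block is [3000,3500)
 * reports a spurious retransmission.
 */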
883 
884 boolean_t
885 tcp_sack_process_dsack(struct tcpcb *tp, struct tcpopt *to,
886     struct tcphdr *th, boolean_t *dsack_tlp)
887 {
888 	struct sackblk first_sack, second_sack;
889 
890 	bcopy(to->to_sacks, &first_sack, sizeof(first_sack));
891 	first_sack.start = ntohl(first_sack.start);
892 	first_sack.end = ntohl(first_sack.end);
893 
894 	if (to->to_nsacks > 1) {
895 		bcopy((to->to_sacks + TCPOLEN_SACK), &second_sack,
896 		    sizeof(second_sack));
897 		second_sack.start = ntohl(second_sack.start);
898 		second_sack.end = ntohl(second_sack.end);
899 	}
900 
901 	if (SEQ_LT(first_sack.start, th->th_ack) &&
902 	    SEQ_LEQ(first_sack.end, th->th_ack)) {
903 		/*
904 		 * There is a dsack option reporting a duplicate segment
905 		 * also covered by cumulative acknowledgement.
906 		 *
907 		 * Validate the sequence numbers before looking at dsack
908 		 * option. The duplicate notification can come after
909 		 * snd_una moves forward. In order to set a window of valid
910 		 * sequence numbers to look for, we set a maximum send
911 		 * window within which the DSACK option will be processed.
912 		 */
913 		if (!(TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.start, th->th_ack) &&
914 		    TCP_DSACK_SEQ_IN_WINDOW(tp, first_sack.end, th->th_ack))) {
915 			to->to_nsacks--;
916 			to->to_sacks += TCPOLEN_SACK;
917 			to->to_sacks_size -= TCPOLEN_SACK;
918 			tcpstat.tcps_dsack_recvd_old++;
919 
920 			/*
921 			 * Return TRUE here so that the ACK will not be
922 			 * treated as a duplicate ACK.
923 			 */
924 			return TRUE;
925 		}
926 	} else if (to->to_nsacks > 1 &&
927 	    SEQ_LEQ(second_sack.start, first_sack.start) &&
928 	    SEQ_GEQ(second_sack.end, first_sack.end)) {
929 		/*
930 		 * there is a dsack option in the first block not
931 		 * covered by the cumulative acknowledgement but covered
932 		 * by the second sack block.
933 		 *
934 		 * verify the sequence numbers on the second sack block
935 		 * before processing the DSACK option. Returning false
936 		 * here will treat the ack as a duplicate ack.
937 		 */
938 		if (!TCP_VALIDATE_SACK_SEQ_NUMBERS(tp, &second_sack,
939 		    th->th_ack)) {
940 			to->to_nsacks--;
941 			to->to_sacks += TCPOLEN_SACK;
942 			to->to_sacks_size -= TCPOLEN_SACK;
943 			tcpstat.tcps_dsack_recvd_old++;
944 			return TRUE;
945 		}
946 	} else {
947 		/* no dsack options, proceed with processing the sack */
948 		return FALSE;
949 	}
950 
951 	/* Update the tcpopt pointer to exclude dsack block */
952 	to->to_nsacks--;
953 	to->to_sacks += TCPOLEN_SACK;
954 	to->to_sacks_size -= TCPOLEN_SACK;
955 	tcpstat.tcps_dsack_recvd++;
956 	tp->t_dsack_recvd++;
957 
958 	/* DSACK was due to TLP */
959 	if (tp->t_tlphightrxt_persist && tp->t_tlphightrxt_persist == first_sack.end) {
960 		*dsack_tlp = true;
961 		tp->t_tlphightrxt_persist = 0;
962 	}
963 	if (TCP_RACK_ENABLED(tp) && *dsack_tlp == false) {
964 		tcp_rack_detect_reordering_dsack(tp, first_sack.start, first_sack.end);
965 	}
966 
967 	/* Update the sender's retransmit segment state */
968 	if (((tp->t_rxtshift == 1 && first_sack.start == tp->snd_una) ||
969 	    (tcp_sent_tlp_retrans(tp) &&
970 	    first_sack.end == tp->t_tlphighrxt)) &&
971 	    TAILQ_EMPTY(&tp->snd_holes) &&
972 	    SEQ_GT(th->th_ack, tp->snd_una)) {
973 		/*
974 		 * If the dsack is for a retransmitted packet and one of
975 		 * the two cases is true, it indicates ack loss:
976 		 * - retransmit timeout and first_sack.start == snd_una
977 		 * - TLP retransmission and first_sack.end == tlphighrxt
978 		 *
979 		 * Ignore dsack and do not update state when there is
980 		 * ack loss
981 		 */
982 		tcpstat.tcps_dsack_ackloss++;
983 
984 		return TRUE;
985 	} else {
986 		tcp_rxtseg_set_spurious(tp, first_sack.start, (first_sack.end - 1));
987 	}
988 	return TRUE;
989 }
990