cregit-Linux: how code gets into the kernel

Release 4.8 net/ipv4/tcp_input.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 */

/*
 * Changes:
 *              Pedro Roque     :       Fast Retransmit/Recovery.
 *                                      Two receive queues.
 *                                      Retransmit queue handled by TCP.
 *                                      Better retransmit timer handling.
 *                                      New congestion avoidance.
 *                                      Header prediction.
 *                                      Variable renaming.
 *
 *              Eric            :       Fast Retransmit.
 *              Randy Scott     :       MSS option defines.
 *              Eric Schenk     :       Fixes to slow start algorithm.
 *              Eric Schenk     :       Yet another double ACK bug.
 *              Eric Schenk     :       Delayed ACK bug fixes.
 *              Eric Schenk     :       Floyd style fast retrans war avoidance.
 *              David S. Miller :       Don't allow zero congestion window.
 *              Eric Schenk     :       Fix retransmitter so that it sends
 *                                      next packet on ack of previous packet.
 *              Andi Kleen      :       Moved open_request checking here
 *                                      and process RSTs for open_requests.
 *              Andi Kleen      :       Better prune_queue, and other fixes.
 *              Andrey Savochkin:       Fix RTT measurements in the presence of
 *                                      timestamps.
 *              Andrey Savochkin:       Check sequence numbers correctly when
 *                                      removing SACKs due to in sequence incoming
 *                                      data segments.
 *              Andi Kleen:             Make sure we never ack data there is not
 *                                      enough room for. Also make this condition
 *                                      a fatal error if it might still happen.
 *              Andi Kleen:             Add tcp_measure_rcv_mss to make
 *                                      connections with MSS<min(MTU,ann. MSS)
 *                                      work without delayed acks.
 *              Andi Kleen:             Process packets with PSH set in the
 *                                      fast path.
 *              J Hadi Salim:           ECN support
 *              Andrei Gurtov,
 *              Pasi Sarolahti,
 *              Panu Kuhlberg:          Experimental audit of TCP (re)transmission
 *                                      engine. Lots of bugs are found.
 *              Pasi Sarolahti:         F-RTO for dealing with spurious RTOs
 */


#define pr_fmt(fmt) "TCP: " fmt

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/kernel.h>
#include <linux/prefetch.h>
#include <net/dst.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <linux/ipsec.h>
#include <asm/unaligned.h>
#include <linux/errqueue.h>


int sysctl_tcp_timestamps __read_mostly = 1;

int sysctl_tcp_window_scaling __read_mostly = 1;

int sysctl_tcp_sack __read_mostly = 1;

int sysctl_tcp_fack __read_mostly = 1;

int sysctl_tcp_max_reordering __read_mostly = 300;

int sysctl_tcp_dsack __read_mostly = 1;

int sysctl_tcp_app_win __read_mostly = 31;

int sysctl_tcp_adv_win_scale __read_mostly = 1;

EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);

/* rfc5961 challenge ack rate limiting */

int sysctl_tcp_challenge_ack_limit = 1000;


int sysctl_tcp_stdurg __read_mostly;

int sysctl_tcp_rfc1337 __read_mostly;

int sysctl_tcp_max_orphans __read_mostly = NR_FILE;

int sysctl_tcp_frto __read_mostly = 2;

int sysctl_tcp_min_rtt_wlen __read_mostly = 300;


int sysctl_tcp_thin_dupack __read_mostly;


int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;

int sysctl_tcp_early_retrans __read_mostly = 3;

int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;


#define FLAG_DATA		0x01 /* Incoming frame contained data.		*/
#define FLAG_WIN_UPDATE		0x02 /* Incoming ACK was a window update.	*/
#define FLAG_DATA_ACKED		0x04 /* This ACK acknowledged new data.		*/
#define FLAG_RETRANS_DATA_ACKED	0x08 /* "" "" some of which was retransmitted.	*/
#define FLAG_SYN_ACKED		0x10 /* This ACK acknowledged SYN.		*/
#define FLAG_DATA_SACKED	0x20 /* New SACK.				*/
#define FLAG_ECE		0x40 /* ECE in this ACK				*/
#define FLAG_LOST_RETRANS	0x80 /* This ACK marks some retransmission lost */
#define FLAG_SLOWPATH		0x100 /* Do not skip RFC checks for window update.*/
#define FLAG_ORIG_SACK_ACKED	0x200 /* Never retransmitted data are (s)acked	*/
#define FLAG_SND_UNA_ADVANCED	0x400 /* Snd_una was changed (!= FLAG_DATA_ACKED) */
#define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */
#define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */
#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */


#define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)

#define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED)

#define FLAG_CA_ALERT		(FLAG_DATA_SACKED|FLAG_ECE)

#define FLAG_FORWARD_PROGRESS	(FLAG_ACKED|FLAG_DATA_SACKED)


#define TCP_REMNANT (TCP_FLAG_FIN|TCP_FLAG_URG|TCP_FLAG_SYN|TCP_FLAG_PSH)

#define TCP_HP_BITS (~(TCP_RESERVED_BITS|TCP_FLAG_PSH))


#define REXMIT_NONE	0 /* no loss recovery to do */
#define REXMIT_LOST	1 /* retransmit packets marked lost */
#define REXMIT_NEW	2 /* FRTO-style transmit of unsent/new packets */

/* Adapt the MSS value used to make delayed ack decision to the
 * real world.
 */

static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb) { struct inet_connection_sock *icsk = inet_csk(sk); const unsigned int lss = icsk->icsk_ack.last_seg_size; unsigned int len; icsk->icsk_ack.last_seg_size = 0; /* skb->len may jitter because of SACKs, even if peer * sends good full-sized frames. */ len = skb_shinfo(skb)->gso_size ? : skb->len; if (len >= icsk->icsk_ack.rcv_mss) { icsk->icsk_ack.rcv_mss = len; } else { /* Otherwise, we make more careful check taking into account, * that SACKs block is variable. * * "len" is invariant segment length, including TCP header. */ len += skb->data - skb_transport_header(skb); if (len >= TCP_MSS_DEFAULT + sizeof(struct tcphdr) || /* If PSH is not set, packet should be * full sized, provided peer TCP is not badly broken. * This observation (if it is correct 8)) allows * to handle super-low mtu links fairly. */ (len >= TCP_MIN_MSS + sizeof(struct tcphdr) && !(tcp_flag_word(tcp_hdr(skb)) & TCP_REMNANT))) { /* Subtract also invariant (if peer is RFC compliant), * tcp header plus fixed timestamp option length. * Resulting "len" is MSS free of SACK jitter. */ len -= tcp_sk(sk)->tcp_header_len; icsk->icsk_ack.last_seg_size = len; if (len == lss) { icsk->icsk_ack.rcv_mss = len; return; } } if (icsk->icsk_ack.pending & ICSK_ACK_PUSHED) icsk->icsk_ack.pending |= ICSK_ACK_PUSHED2; icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; } }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     127      63.18%    9         56.25%
arnaldo carvalho de melo    45       22.39%    3         18.75%
alexey kuznetsov            18       8.96%     1         6.25%
herbert xu                  8        3.98%     1         6.25%
linus torvalds              2        1.00%     1         6.25%
william allen simpson       1        0.50%     1         6.25%
Total                       201      100.00%   16        100.00%


static void tcp_incr_quickack(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); unsigned int quickacks = tcp_sk(sk)->rcv_wnd / (2 * icsk->icsk_ack.rcv_mss); if (quickacks == 0) quickacks = 2; if (quickacks > icsk->icsk_ack.quick) icsk->icsk_ack.quick = min(quickacks, TCP_MAX_QUICKACKS); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     49       65.33%    5         62.50%
arnaldo carvalho de melo    22       29.33%    1         12.50%
linus torvalds              3        4.00%     1         12.50%
eric dumazet                1        1.33%     1         12.50%
Total                       75       100.00%   8         100.00%
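
For illustration, the quick-ACK budget computed above is just the number of full-sized segments that fit in half the receive window, clamped to at least 2 and at most TCP_MAX_QUICKACKS. The following is a minimal userspace sketch of that arithmetic; the window and MSS values and the clamp constant are illustrative assumptions, not taken from this file.

#include <stdio.h>

#define TCP_MAX_QUICKACKS 16	/* illustrative clamp; the real value lives in include/net/tcp.h */

/* Mirror of the computation in tcp_incr_quickack() above:
 * quickacks = rcv_wnd / (2 * rcv_mss), clamped to [2, TCP_MAX_QUICKACKS].
 * The kernel only ever raises icsk_ack.quick to this budget, never lowers it.
 */
static unsigned int quickack_budget(unsigned int rcv_wnd, unsigned int rcv_mss)
{
	unsigned int quickacks = rcv_wnd / (2 * rcv_mss);

	if (quickacks == 0)
		quickacks = 2;
	if (quickacks > TCP_MAX_QUICKACKS)
		quickacks = TCP_MAX_QUICKACKS;
	return quickacks;
}

int main(void)
{
	/* 64 KB window, 1460-byte MSS: 65536 / 2920 = 22, clamped to 16 */
	printf("budget = %u\n", quickack_budget(64 * 1024, 1460));
	return 0;
}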


static void tcp_enter_quickack_mode(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); tcp_incr_quickack(sk); icsk->icsk_ack.pingpong = 0; icsk->icsk_ack.ato = TCP_ATO_MIN; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     24       57.14%    1         33.33%
arnaldo carvalho de melo    17       40.48%    1         33.33%
stephen hemminger           1        2.38%     1         33.33%
Total                       42       100.00%   3         100.00%

/* Send ACKs quickly, if "quick" count is not exhausted * and the session is not interactive. */
static bool tcp_in_quickack_mode(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); const struct dst_entry *dst = __sk_dst_get(sk); return (dst && dst_metric(dst, RTAX_QUICKACK)) || (icsk->icsk_ack.quick && !icsk->icsk_ack.pingpong); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
jon maxwell                 24       40.00%    1         25.00%
pre-git                     18       30.00%    1         25.00%
arnaldo carvalho de melo    17       28.33%    1         25.00%
eric dumazet                1        1.67%     1         25.00%
Total                       60       100.00%   4         100.00%


static void tcp_ecn_queue_cwr(struct tcp_sock *tp) { if (tp->ecn_flags & TCP_ECN_OK) tp->ecn_flags |= TCP_ECN_QUEUE_CWR; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               24       96.00%    1         50.00%
florian westphal            1        4.00%     1         50.00%
Total                       25       100.00%   2         100.00%


static void tcp_ecn_accept_cwr(struct tcp_sock *tp, const struct sk_buff *skb) { if (tcp_hdr(skb)->cwr) tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               31       93.94%    1         33.33%
florian westphal            1        3.03%     1         33.33%
eric dumazet                1        3.03%     1         33.33%
Total                       33       100.00%   3         100.00%


static void tcp_ecn_withdraw_cwr(struct tcp_sock *tp) { tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               17       94.44%    1         50.00%
florian westphal            1        5.56%     1         50.00%
Total                       18       100.00%   2         100.00%


static void __tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { switch (TCP_SKB_CB(skb)->ip_dsfield & INET_ECN_MASK) { case INET_ECN_NOT_ECT: /* Funny extension: if ECT is not set on a segment, * and we already seen ECT on a previous segment, * it is probably a retransmit. */ if (tp->ecn_flags & TCP_ECN_SEEN) tcp_enter_quickack_mode((struct sock *)tp); break; case INET_ECN_CE: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_IS_CE); if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) { /* Better not delay acks, sender can have a very low cwnd */ tcp_enter_quickack_mode((struct sock *)tp); tp->ecn_flags |= TCP_ECN_DEMAND_CWR; } tp->ecn_flags |= TCP_ECN_SEEN; break; default: if (tcp_ca_needs_ecn((struct sock *)tp)) tcp_ca_event((struct sock *)tp, CA_EVENT_ECN_NO_CE); tp->ecn_flags |= TCP_ECN_SEEN; break; } }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
florian westphal            57       38.26%    2         33.33%
eric dumazet                55       36.91%    3         50.00%
ilpo jarvinen               37       24.83%    1         16.67%
Total                       149      100.00%   6         100.00%


static void tcp_ecn_check_ce(struct tcp_sock *tp, const struct sk_buff *skb) { if (tp->ecn_flags & TCP_ECN_OK) __tcp_ecn_check_ce(tp, skb); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
florian westphal            32       100.00%   1         100.00%
Total                       32       100.00%   1         100.00%


static void tcp_ecn_rcv_synack(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               43       95.56%    1         33.33%
eric dumazet                1        2.22%     1         33.33%
florian westphal            1        2.22%     1         33.33%
Total                       45       100.00%   3         100.00%


static void tcp_ecn_rcv_syn(struct tcp_sock *tp, const struct tcphdr *th) { if ((tp->ecn_flags & TCP_ECN_OK) && (!th->ece || !th->cwr)) tp->ecn_flags &= ~TCP_ECN_OK; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               44       95.65%    1         33.33%
florian westphal            1        2.17%     1         33.33%
eric dumazet                1        2.17%     1         33.33%
Total                       46       100.00%   3         100.00%


static bool tcp_ecn_rcv_ecn_echo(const struct tcp_sock *tp, const struct tcphdr *th) { if (th->ece && !th->syn && (tp->ecn_flags & TCP_ECN_OK)) return true; return false; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               37       86.05%    1         25.00%
eric dumazet                5        11.63%    2         50.00%
florian westphal            1        2.33%     1         25.00%
Total                       43       100.00%   4         100.00%

/* Buffer size and advertised window tuning. * * 1. Tuning sk->sk_sndbuf, when connection enters established state. */
static void tcp_sndbuf_expand(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); int sndmem, per_mss; u32 nr_segs; /* Worst case is non GSO/TSO : each frame consumes one skb * and skb->head is kmalloced using power of two area of memory */ per_mss = max_t(u32, tp->rx_opt.mss_clamp, tp->mss_cache) + MAX_TCP_HEADER + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); per_mss = roundup_pow_of_two(per_mss) + SKB_DATA_ALIGN(sizeof(struct sk_buff)); nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd); nr_segs = max_t(u32, nr_segs, tp->reordering + 1); /* Fast Recovery (RFC 5681 3.2) : * Cubic needs 1.7 factor, rounded to 2 to include * extra cushion (application might react slowly to POLLOUT) */ sndmem = 2 * nr_segs * per_mss; if (sk->sk_sndbuf < sndmem) sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
eric dumazet                94       70.15%    2         22.22%
pre-git                     31       23.13%    3         33.33%
arnaldo carvalho de melo    5        3.73%     3         33.33%
david s. miller             4        2.99%     1         11.11%
Total                       134      100.00%   9         100.00%

/* 2. Tuning advertised window (window_clamp, rcv_ssthresh) * * All tcp_full_space() is split to two parts: "network" buffer, allocated * forward and advertised in receiver window (tp->rcv_wnd) and * "application buffer", required to isolate scheduling/application * latencies from network. * window_clamp is maximal advertised window. It can be less than * tcp_full_space(), in this case tcp_full_space() - window_clamp * is reserved for "application" buffer. The less window_clamp is * the smoother our behaviour from viewpoint of network, but the lower * throughput and the higher sensitivity of the connection to losses. 8) * * rcv_ssthresh is more strict window_clamp used at "slow start" * phase to predict further behaviour of this connection. * It is used for two goals: * - to enforce header prediction at sender, even when application * requires some significant "application buffer". It is check #1. * - to prevent pruning of receive queue because of misprediction * of receiver window. Check #2. * * The scheme does not work when sender sends good segments opening * window and then starts to feed us spaghetti. But it should work * in common situations. Otherwise, we have to rely on queue collapsing. */ /* Slow part of check#2. */
static int __tcp_grow_window(const struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Optimize this! */ int truesize = tcp_win_from_space(skb->truesize) >> 1; int window = tcp_win_from_space(sysctl_tcp_rmem[2]) >> 1; while (tp->rcv_ssthresh <= window) { if (truesize <= skb->len) return 2 * inet_csk(sk)->icsk_ack.rcv_mss; truesize >>= 1; window >>= 1; } return 0; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     69       72.63%    4         50.00%
ilpo jarvinen               10       10.53%    1         12.50%
arnaldo carvalho de melo    7        7.37%     1         12.50%
john heffner                5        5.26%     1         12.50%
eric dumazet                4        4.21%     1         12.50%
Total                       95       100.00%   8         100.00%


static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); /* Check #1 */ if (tp->rcv_ssthresh < tp->window_clamp && (int)tp->rcv_ssthresh < tcp_space(sk) && !tcp_under_memory_pressure(sk)) { int incr; /* Check #2. Increase window, if skb with such overhead * will fit to rcvbuf in future. */ if (tcp_win_from_space(skb->truesize) <= skb->len) incr = 2 * tp->advmss; else incr = __tcp_grow_window(sk, skb); if (incr) { incr = max_t(int, incr, 2 * skb->len); tp->rcv_ssthresh = min(tp->rcv_ssthresh + incr, tp->window_clamp); inet_csk(sk)->icsk_ack.quick |= 1; } } }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     106      74.65%    6         46.15%
eric dumazet                17       11.97%    3         23.08%
ilpo jarvinen               10       7.04%     1         7.69%
arnaldo carvalho de melo    5        3.52%     1         7.69%
glauber costa               3        2.11%     1         7.69%
linus torvalds              1        0.70%     1         7.69%
Total                       142      100.00%   13        100.00%

/* 3. Tuning rcvbuf, when connection enters established state. */
static void tcp_fixup_rcvbuf(struct sock *sk) { u32 mss = tcp_sk(sk)->advmss; int rcvmem; rcvmem = 2 * SKB_TRUESIZE(mss + MAX_TCP_HEADER) * tcp_default_init_rwnd(mss); /* Dynamic Right Sizing (DRS) has 2 to 3 RTT latency * Allow enough cushion so that sender is not limited by our window */ if (sysctl_tcp_moderate_rcvbuf) rcvmem <<= 2; if (sk->sk_rcvbuf < rcvmem) sk->sk_rcvbuf = min(rcvmem, sysctl_tcp_rmem[2]); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     36       50.70%    4         30.77%
eric dumazet                23       32.39%    4         30.77%
yuchung cheng               5        7.04%     1         7.69%
david s. miller             3        4.23%     1         7.69%
arnaldo carvalho de melo    3        4.23%     2         15.38%
linus torvalds              1        1.41%     1         7.69%
Total                       71       100.00%   13        100.00%

/* 4. Try to fixup all. It is made immediately after connection enters * established state. */
void tcp_init_buffer_space(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int maxwin; if (!(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) tcp_fixup_rcvbuf(sk); if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK)) tcp_sndbuf_expand(sk); tp->rcvq_space.space = tp->rcv_wnd; tp->rcvq_space.time = tcp_time_stamp; tp->rcvq_space.seq = tp->copied_seq; maxwin = tcp_full_space(sk); if (tp->window_clamp >= maxwin) { tp->window_clamp = maxwin; if (sysctl_tcp_app_win && maxwin > 4 * tp->advmss) tp->window_clamp = max(maxwin - (maxwin >> sysctl_tcp_app_win), 4 * tp->advmss); } /* Force reservation of one segment. */ if (sysctl_tcp_app_win && tp->window_clamp > 2 * tp->advmss && tp->window_clamp + tp->advmss > maxwin) tp->window_clamp = max(2 * tp->advmss, maxwin - tp->advmss); tp->rcv_ssthresh = min(tp->rcv_ssthresh, tp->window_clamp); tp->snd_cwnd_stamp = tcp_time_stamp; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     166      81.37%    8         53.33%
eric dumazet                19       9.31%     2         13.33%
david s. miller             13       6.37%     2         13.33%
arnaldo carvalho de melo    3        1.47%     2         13.33%
linus torvalds              3        1.47%     1         6.67%
Total                       204      100.00%   15        100.00%

/* 5. Recalculate window clamp after socket hit its memory bounds. */
static void tcp_clamp_window(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ack.quick = 0; if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) && !tcp_under_memory_pressure(sk) && sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) { sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc), sysctl_tcp_rmem[2]); } if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) tp->rcv_ssthresh = min(tp->window_clamp, 2U * tp->advmss); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     87       66.92%    3         23.08%
arnaldo carvalho de melo    20       15.38%    5         38.46%
glauber costa               10       7.69%     1         7.69%
ilpo jarvinen               8        6.15%     1         7.69%
linus torvalds              3        2.31%     1         7.69%
john heffner                1        0.77%     1         7.69%
eric dumazet                1        0.77%     1         7.69%
Total                       130      100.00%   13        100.00%

/* Initialize RCV_MSS value. * RCV_MSS is an our guess about MSS used by the peer. * We haven't any direct information about the MSS. * It's better to underestimate the RCV_MSS rather than overestimate. * Overestimations make us ACKing less frequently than needed. * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). */
void tcp_initialize_rcv_mss(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); unsigned int hint = min_t(unsigned int, tp->advmss, tp->mss_cache); hint = min(hint, tp->rcv_wnd / 2); hint = min(hint, TCP_MSS_DEFAULT); hint = max(hint, TCP_MIN_MSS); inet_csk(sk)->icsk_ack.rcv_mss = hint; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
stephen hemminger           74       97.37%    1         33.33%
william allen simpson       1        1.32%     1         33.33%
eric dumazet                1        1.32%     1         33.33%
Total                       76       100.00%   3         100.00%

EXPORT_SYMBOL(tcp_initialize_rcv_mss);

/* Receiver "autotuning" code.
 *
 * The algorithm for RTT estimation w/o timestamps is based on
 * Dynamic Right-Sizing (DRS) by Wu Feng and Mike Fisk of LANL.
 * <http://public.lanl.gov/radiant/pubs.html#DRS>
 *
 * More detail on this code can be found at
 * <http://staff.psc.edu/jheffner/>,
 * though this reference is out of date.  A new paper
 * is pending.
 */
static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep) { u32 new_sample = tp->rcv_rtt_est.rtt; long m = sample; if (m == 0) m = 1; if (new_sample != 0) { /* If we sample in larger samples in the non-timestamp * case, we could grossly overestimate the RTT especially * with chatty applications or bulk transfer apps which * are stalled on filesystem I/O. * * Also, since we are only going for a minimum in the * non-timestamp case, we do not smooth things out * else with timestamps disabled convergence takes too * long. */ if (!win_dep) { m -= (new_sample >> 3); new_sample += m; } else { m <<= 3; if (m < new_sample) new_sample = m; } } else { /* No previous measure. */ new_sample = m << 3; } if (tp->rcv_rtt_est.rtt != new_sample) tp->rcv_rtt_est.rtt = new_sample; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             104      91.23%    1         20.00%
neal cardwell               7        6.14%     1         20.00%
stephen hemminger           2        1.75%     2         40.00%
arnaldo carvalho de melo    1        0.88%     1         20.00%
Total                       114      100.00%   5         100.00%


static inline void tcp_rcv_rtt_measure(struct tcp_sock *tp) { if (tp->rcv_rtt_est.time == 0) goto new_measure; if (before(tp->rcv_nxt, tp->rcv_rtt_est.seq)) return; tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rcv_rtt_est.time, 1); new_measure: tp->rcv_rtt_est.seq = tp->rcv_nxt + tp->rcv_wnd; tp->rcv_rtt_est.time = tcp_time_stamp; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             78       97.50%    1         33.33%
arnaldo carvalho de melo    1        1.25%     1         33.33%
neal cardwell               1        1.25%     1         33.33%
Total                       80       100.00%   3         100.00%


static inline void tcp_rcv_rtt_measure_ts(struct sock *sk, const struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (tp->rx_opt.rcv_tsecr && (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq >= inet_csk(sk)->icsk_ack.rcv_mss)) tcp_rcv_rtt_update(tp, tcp_time_stamp - tp->rx_opt.rcv_tsecr, 0); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             54       71.05%    1         33.33%
arnaldo carvalho de melo    22       28.95%    2         66.67%
Total                       76       100.00%   3         100.00%

/* * This function should be called every time data is copied to user space. * It calculates the appropriate TCP receive buffer space. */
void tcp_rcv_space_adjust(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int time; int copied; time = tcp_time_stamp - tp->rcvq_space.time; if (time < (tp->rcv_rtt_est.rtt >> 3) || tp->rcv_rtt_est.rtt == 0) return; /* Number of bytes copied to user in last RTT */ copied = tp->copied_seq - tp->rcvq_space.seq; if (copied <= tp->rcvq_space.space) goto new_measure; /* A bit of theory : * copied = bytes received in previous RTT, our base window * To cope with packet losses, we need a 2x factor * To cope with slow start, and sender growing its cwin by 100 % * every RTT, we need a 4x factor, because the ACK we are sending * now is for the next RTT, not the current one : * <prev RTT . ><current RTT .. ><next RTT .... > */ if (sysctl_tcp_moderate_rcvbuf && !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) { int rcvwin, rcvmem, rcvbuf; /* minimal window to cope with packet losses, assuming * steady state. Add some cushion because of small variations. */ rcvwin = (copied << 1) + 16 * tp->advmss; /* If rate increased by 25%, * assume slow start, rcvwin = 3 * copied * If rate increased by 50%, * assume sender can use 2x growth, rcvwin = 4 * copied */ if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 2)) { if (copied >= tp->rcvq_space.space + (tp->rcvq_space.space >> 1)) rcvwin <<= 1; else rcvwin += (rcvwin >> 1); } rcvmem = SKB_TRUESIZE(tp->advmss + MAX_TCP_HEADER); while (tcp_win_from_space(rcvmem) < tp->advmss) rcvmem += 128; rcvbuf = min(rcvwin / tp->advmss * rcvmem, sysctl_tcp_rmem[2]); if (rcvbuf > sk->sk_rcvbuf) { sk->sk_rcvbuf = rcvbuf; /* Make the window clamp follow along. */ tp->window_clamp = rcvwin; } } tp->rcvq_space.space = copied; new_measure: tp->rcvq_space.seq = tp->copied_seq; tp->rcvq_space.time = tcp_time_stamp; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             168      61.31%    3         37.50%
eric dumazet                95       34.67%    2         25.00%
john heffner                9        3.28%     1         12.50%
arnaldo carvalho de melo    2        0.73%     2         25.00%
Total                       274      100.00%   8         100.00%
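
As a rough illustration of the DRS sizing in tcp_rcv_space_adjust() above: the code aims for a window of at least twice what was copied to the user in the last RTT (plus a 16*advmss cushion), growing to 3x or 4x if the delivery rate is still increasing, and then converts that window into a buffer size using the per-segment truesize overhead. A userspace sketch with made-up values follows; advmss, copied, prev_space and the per-segment truesize (rcvmem) are illustrative assumptions, and the kernel derives rcvmem from SKB_TRUESIZE() instead.

#include <stdio.h>

/* Userspace sketch of the rcvbuf target computed in tcp_rcv_space_adjust(). */
int main(void)
{
	unsigned int advmss = 1460;		/* advertised MSS (example) */
	unsigned int copied = 100000;		/* bytes copied to user in last RTT */
	unsigned int prev_space = 40000;	/* tp->rcvq_space.space from previous RTT */
	unsigned int rcvmem = 2304;		/* assumed truesize of one advmss segment */
	unsigned int rcvwin, rcvbuf;

	/* base target: 2x what was copied, plus a small cushion */
	rcvwin = (copied << 1) + 16 * advmss;

	/* rate grew by more than 25% / 50% -> allow 3x / 4x the copied amount */
	if (copied >= prev_space + (prev_space >> 2)) {
		if (copied >= prev_space + (prev_space >> 1))
			rcvwin <<= 1;
		else
			rcvwin += (rcvwin >> 1);
	}

	rcvbuf = rcvwin / advmss * rcvmem;	/* window -> buffer, with truesize overhead */
	printf("rcvwin=%u bytes -> rcvbuf=%u bytes\n", rcvwin, rcvbuf);
	return 0;
}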

/* There is something which you must keep in mind when you analyze the * behavior of the tp->ato delayed ack timeout interval. When a * connection starts up, we want to ack as quickly as possible. The * problem is that "good" TCP's do slow start at the beginning of data * transmission. The means that until we send the first few ACK's the * sender will sit on his end and only queue most of his data, because * he can only send snd_cwnd unacked packets at any given time. For * each ACK we send, he increments snd_cwnd and transmits more of his * queue. -DaveM */
static void tcp_event_data_recv(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); u32 now; inet_csk_schedule_ack(sk); tcp_measure_rcv_mss(sk, skb); tcp_rcv_rtt_measure(tp); now = tcp_time_stamp; if (!icsk->icsk_ack.ato) { /* The _first_ data packet received, initialize * delayed ACK engine. */ tcp_incr_quickack(sk); icsk->icsk_ack.ato = TCP_ATO_MIN; } else { int m = now - icsk->icsk_ack.lrcvtime; if (m <= TCP_ATO_MIN / 2) { /* The fastest case is the first. */ icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + TCP_ATO_MIN / 2; } else if (m < icsk->icsk_ack.ato) { icsk->icsk_ack.ato = (icsk->icsk_ack.ato >> 1) + m; if (icsk->icsk_ack.ato > icsk->icsk_rto) icsk->icsk_ack.ato = icsk->icsk_rto; } else if (m > icsk->icsk_rto) { /* Too long gap. Apparently sender failed to * restart window, so that we send ACKs quickly. */ tcp_incr_quickack(sk); sk_mem_reclaim(sk); } } icsk->icsk_ack.lrcvtime = now; tcp_ecn_check_ce(tp, skb); if (skb->len >= 128) tcp_grow_window(sk, skb); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     167      71.06%    11        61.11%
arnaldo carvalho de melo    43       18.30%    1         5.56%
ilpo jarvinen               10       4.26%     1         5.56%
linus torvalds              7        2.98%     1         5.56%
david s. miller             5        2.13%     1         5.56%
stephen hemminger           1        0.43%     1         5.56%
florian westphal            1        0.43%     1         5.56%
hideo aoki                  1        0.43%     1         5.56%
Total                       235      100.00%   18        100.00%

/* Called to compute a smoothed rtt estimate. The data fed to this * routine either comes from timestamps, or from segments that were * known _not_ to have been retransmitted [see Karn/Partridge * Proceedings SIGCOMM 87]. The algorithm is from the SIGCOMM 88 * piece by Van Jacobson. * NOTE: the next three routines used to be one big routine. * To save cycles in the RFC 1323 implementation it was better to break * it up into three procedures. -- erics */
static void tcp_rtt_estimator(struct sock *sk, long mrtt_us) { struct tcp_sock *tp = tcp_sk(sk); long m = mrtt_us; /* RTT */ u32 srtt = tp->srtt_us; /* The following amusing code comes from Jacobson's * article in SIGCOMM '88. Note that rtt and mdev * are scaled versions of rtt and mean deviation. * This is designed to be as fast as possible * m stands for "measurement". * * On a 1990 paper the rto value is changed to: * RTO = rtt + 4 * mdev * * Funny. This algorithm seems to be very broken. * These formulae increase RTO, when it should be decreased, increase * too slowly, when it should be increased quickly, decrease too quickly * etc. I guess in BSD RTO takes ONE value, so that it is absolutely * does not matter how to _calculate_ it. Seems, it was trap * that VJ failed to avoid. 8) */ if (srtt != 0) { m -= (srtt >> 3); /* m is now error in rtt est */ srtt += m; /* rtt = 7/8 rtt + 1/8 new */ if (m < 0) { m = -m; /* m is now abs(error) */ m -= (tp->mdev_us >> 2); /* similar update on mdev */ /* This is similar to one of Eifel findings. * Eifel blocks mdev updates when rtt decreases. * This solution is a bit different: we use finer gain * for mdev in this case (alpha*beta). * Like Eifel it also prevents growth of rto, * but also it limits too fast rto decreases, * happening in pure Eifel. */ if (m > 0) m >>= 3; } else { m -= (tp->mdev_us >> 2); /* similar update on mdev */ } tp->mdev_us += m; /* mdev = 3/4 mdev + 1/4 new */ if (tp->mdev_us > tp->mdev_max_us) { tp->mdev_max_us = tp->mdev_us; if (tp->mdev_max_us > tp->rttvar_us) tp->rttvar_us = tp->mdev_max_us; } if (after(tp->snd_una, tp->rtt_seq)) { if (tp->mdev_max_us < tp->rttvar_us) tp->rttvar_us -= (tp->rttvar_us - tp->mdev_max_us) >> 2; tp->rtt_seq = tp->snd_nxt; tp->mdev_max_us = tcp_rto_min_us(sk); } } else { /* no previous measure. */ srtt = m << 3; /* take the measured time to be rtt */ tp->mdev_us = m << 1; /* make sure rto = 3*rtt */ tp->rttvar_us = max(tp->mdev_us, tcp_rto_min_us(sk)); tp->mdev_max_us = tp->rttvar_us; tp->rtt_seq = tp->snd_nxt; } tp->srtt_us = max(1U, srtt); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     203      73.55%    6         42.86%
eric dumazet                51       18.48%    2         14.29%
arnaldo carvalho de melo    12       4.35%     1         7.14%
david s. miller             6        2.17%     1         7.14%
alexey kuznetsov            2        0.72%     2         14.29%
hideaki yoshifuji           1        0.36%     1         7.14%
linus torvalds              1        0.36%     1         7.14%
Total                       276      100.00%   14        100.00%
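
The smoothing above is easier to follow with concrete numbers. Below is a minimal userspace sketch of the same fixed-point arithmetic: srtt is kept scaled by 8 and mdev by 4, so the shifts implement srtt = 7/8 srtt + 1/8 sample and mdev = 3/4 mdev + 1/4 |err| without divisions. The sample values are made up, and the sketch deliberately omits the finer mdev gain the kernel applies when the sample falls below the current estimate, as well as the rto_min clamp.

#include <stdio.h>

static long srtt;	/* 8 * smoothed RTT, usec */
static long mdev;	/* 4 * mean deviation, usec */

static void rtt_sample(long m)
{
	if (srtt) {
		m -= (srtt >> 3);		/* error against current estimate */
		srtt += m;			/* srtt = 7/8 srtt + 1/8 new */
		if (m < 0)
			m = -m;
		m -= (mdev >> 2);
		mdev += m;			/* mdev = 3/4 mdev + 1/4 |err| */
	} else {
		srtt = m << 3;			/* first sample seeds the estimator */
		mdev = m << 1;
	}
}

int main(void)
{
	long samples[] = { 100000, 120000, 80000, 300000 };	/* usec */
	int i;

	for (i = 0; i < 4; i++) {
		rtt_sample(samples[i]);
		/* cf. __tcp_set_rto(): rto = srtt/8 + rttvar, rttvar tracking mdev */
		printf("sample=%ldus srtt=%ldus rto~%ldus\n",
		       samples[i], srtt >> 3, (srtt >> 3) + mdev);
	}
	return 0;
}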

/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
 * Note: TCP stack does not yet implement pacing.
 * FQ packet scheduler can be used to implement cheap but effective
 * TCP pacing, to smooth the burst on large writes when packets
 * in flight is significantly lower than cwnd (or rwin)
 */
int sysctl_tcp_pacing_ss_ratio __read_mostly = 200;
int sysctl_tcp_pacing_ca_ratio __read_mostly = 120;
static void tcp_update_pacing_rate(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); u64 rate; /* set sk_pacing_rate to 200 % of current rate (mss * cwnd / srtt) */ rate = (u64)tp->mss_cache * ((USEC_PER_SEC / 100) << 3); /* current rate is (cwnd * mss) / srtt * In Slow Start [1], set sk_pacing_rate to 200 % the current rate. * In Congestion Avoidance phase, set it to 120 % the current rate. * * [1] : Normal Slow Start condition is (tp->snd_cwnd < tp->snd_ssthresh) * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching * end of slow start and should slow down. */ if (tp->snd_cwnd < tp->snd_ssthresh / 2) rate *= sysctl_tcp_pacing_ss_ratio; else rate *= sysctl_tcp_pacing_ca_ratio; rate *= max(tp->snd_cwnd, tp->packets_out); if (likely(tp->srtt_us)) do_div(rate, tp->srtt_us); /* ACCESS_ONCE() is needed because sch_fq fetches sk_pacing_rate * without any lock. We want to make sure compiler wont store * intermediate values in this location. */ ACCESS_ONCE(sk->sk_pacing_rate) = min_t(u64, rate, sk->sk_max_pacing_rate); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
eric dumazet                117      100.00%   5         100.00%
Total                       117      100.00%   5         100.00%
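
The pacing computation above reduces to "ratio percent of mss * cwnd / srtt". A userspace sketch of the same fixed-point sequence, with made-up mss, cwnd and srtt values (srtt_us stored left-shifted by 3, exactly as tp->srtt_us is):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t mss = 1448;			/* bytes per segment (example) */
	uint64_t cwnd = 10;			/* segments in flight (example) */
	uint64_t srtt_us = 100000 << 3;		/* 100 ms RTT, scaled by 8 */
	uint64_t ratio = 200;			/* sysctl_tcp_pacing_ss_ratio, percent */
	uint64_t rate;

	rate = mss * ((1000000ULL / 100) << 3);	/* mss * (USEC_PER_SEC / 100) << 3 */
	rate *= ratio;				/* 200% in slow start, 120% otherwise */
	rate *= cwnd;
	rate /= srtt_us;			/* do_div() in the kernel */

	/* 1448 B * 10 segs / 0.1 s = 144800 B/s; 200% of that is 289600 B/s */
	printf("sk_pacing_rate = %llu bytes/sec\n", (unsigned long long)rate);
	return 0;
}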

/* Calculate rto without backoff. This is the second half of Van Jacobson's * routine referred to above. */
static void tcp_set_rto(struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); /* Old crap is replaced with new one. 8) * * More seriously: * 1. If rtt variance happened to be less 50msec, it is hallucination. * It cannot be less due to utterly erratic ACK generation made * at least by solaris and freebsd. "Erratic ACKs" has _nothing_ * to do with delayed acks, because at cwnd>2 true delack timeout * is invisible. Actually, Linux-2.4 also generates erratic * ACKs in some circumstances. */ inet_csk(sk)->icsk_rto = __tcp_set_rto(tp); /* 2. Fixups made earlier cannot be right. * If we do not estimate RTO correctly without them, * all the algo is pure shit and should be replaced * with correct one. It is exactly, which we pretend to do. */ /* NOTE: clamping at TCP_RTO_MIN is not required, current algo * guarantees that rto is higher. */ tcp_bound_rto(sk); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
arnaldo carvalho de melo    20       47.62%    1         11.11%
pre-git                     14       33.33%    4         44.44%
damian lukowski             4        9.52%     1         11.11%
stephen hemminger           3        7.14%     2         22.22%
ilpo jarvinen               1        2.38%     1         11.11%
Total                       42       100.00%   9         100.00%


__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst) { __u32 cwnd = (dst ? dst_metric(dst, RTAX_INITCWND) : 0); if (!cwnd) cwnd = TCP_INIT_CWND; return min_t(__u32, cwnd, tp->snd_cwnd_clamp); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             54       100.00%   1         100.00%
Total                       54       100.00%   1         100.00%

/* * Packet counting of FACK is based on in-order assumptions, therefore TCP * disables it when reordering is detected */
void tcp_disable_fack(struct tcp_sock *tp) { /* RFC3517 uses different metric in lost marker => reset on change */ if (tcp_is_fack(tp)) tp->lost_skb_hint = NULL; tp->rx_opt.sack_ok &= ~TCP_FACK_ENABLED; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             20       60.61%    1         20.00%
pre-git                     11       33.33%    3         60.00%
alexey kuznetsov            2        6.06%     1         20.00%
Total                       33       100.00%   5         100.00%

/* Take a notice that peer is sending D-SACKs */
static void tcp_dsack_seen(struct tcp_sock *tp) { tp->rx_opt.sack_ok |= TCP_DSACK_SEEN; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             13       68.42%    1         25.00%
jiri kosina                 3        15.79%    1         25.00%
pre-git                     3        15.79%    2         50.00%
Total                       19       100.00%   4         100.00%


static void tcp_update_reordering(struct sock *sk, const int metric, const int ts) { struct tcp_sock *tp = tcp_sk(sk); if (metric > tp->reordering) { int mib_idx; tp->reordering = min(sysctl_tcp_max_reordering, metric); /* This exciting event is worth to be remembered. 8) */ if (ts) mib_idx = LINUX_MIB_TCPTSREORDER; else if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENOREORDER; else if (tcp_is_fack(tp)) mib_idx = LINUX_MIB_TCPFACKREORDER; else mib_idx = LINUX_MIB_TCPSACKREORDER; NET_INC_STATS(sock_net(sk), mib_idx); #if FASTRETRANS_DEBUG > 1 pr_debug("Disorder%d %d %u f%u s%u rr%d\n", tp->rx_opt.sack_ok, inet_csk(sk)->icsk_ca_state, tp->reordering, tp->fackets_out, tp->sacked_out, tp->undo_marker ? tp->undo_retrans : 0); #endif tcp_disable_fack(tp); } if (metric > 0) tcp_disable_early_retrans(tp); tp->rack.reord = 1; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
pre-git                     99       57.56%    8         38.10%
arnaldo carvalho de melo    21       12.21%    2         9.52%
pavel emelianov             19       11.05%    2         9.52%
yuchung cheng               19       11.05%    2         9.52%
ilpo jarvinen               5        2.91%     1         4.76%
linus torvalds              3        1.74%     2         9.52%
hideaki yoshifuji           3        1.74%     1         4.76%
eric dumazet                2        1.16%     2         9.52%
joe perches                 1        0.58%     1         4.76%
Total                       172      100.00%   21        100.00%

/* This must be called before lost_out is incremented */
static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb) { if (!tp->retransmit_skb_hint || before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) tp->retransmit_skb_hint = skb; if (!tp->lost_out || after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high)) tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               79       98.75%    2         66.67%
ian morris                  1        1.25%     1         33.33%
Total                       80       100.00%   3         100.00%


static void tcp_skb_mark_lost(struct tcp_sock *tp, struct sk_buff *skb) { if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tcp_verify_retransmit_hint(tp, skb); tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               61       100.00%   1         100.00%
Total                       61       100.00%   1         100.00%


void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb) { tcp_verify_retransmit_hint(tp, skb); if (!(TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_ACKED))) { tp->lost_out += tcp_skb_pcount(skb); TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; } }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               60       100.00%   1         100.00%
Total                       60       100.00%   1         100.00%

/* This procedure tags the retransmission queue when SACKs arrive. * * We have three tag bits: SACKED(S), RETRANS(R) and LOST(L). * Packets in queue with these bits set are counted in variables * sacked_out, retrans_out and lost_out, correspondingly. * * Valid combinations are: * Tag InFlight Description * 0 1 - orig segment is in flight. * S 0 - nothing flies, orig reached receiver. * L 0 - nothing flies, orig lost by net. * R 2 - both orig and retransmit are in flight. * L|R 1 - orig is lost, retransmit is in flight. * S|R 1 - orig reached receiver, retrans is still in flight. * (L|S|R is logically valid, it could occur when L|R is sacked, * but it is equivalent to plain S and code short-curcuits it to S. * L|S is logically invalid, it would mean -1 packet in flight 8)) * * These 6 states form finite state machine, controlled by the following events: * 1. New ACK (+SACK) arrives. (tcp_sacktag_write_queue()) * 2. Retransmission. (tcp_retransmit_skb(), tcp_xmit_retransmit_queue()) * 3. Loss detection event of two flavors: * A. Scoreboard estimator decided the packet is lost. * A'. Reno "three dupacks" marks head of queue lost. * A''. Its FACK modification, head until snd.fack is lost. * B. SACK arrives sacking SND.NXT at the moment, when the * segment was retransmitted. * 4. D-SACK added new rule: D-SACK changes any tag to S. * * It is pleasant to note, that state diagram turns out to be commutative, * so that we are allowed not to be bothered by order of our actions, * when multiple events arrive simultaneously. (see the function below). * * Reordering detection. * -------------------- * Reordering metric is maximal distance, which a packet can be displaced * in packet stream. With SACKs we can estimate it: * * 1. SACK fills old hole and the corresponding segment was not * ever retransmitted -> reordering. Alas, we cannot use it * when segment was retransmitted. * 2. The last flaw is solved with D-SACK. D-SACK arrives * for retransmitted and already SACKed segment -> reordering.. * Both of these heuristics are not used in Loss state, when we cannot * account for retransmits accurately. * * SACK block validation. * ---------------------- * * SACK block range validation checks that the received SACK block fits to * the expected sequence limits, i.e., it is between SND.UNA and SND.NXT. * Note that SND.UNA is not included to the range though being valid because * it means that the receiver is rather inconsistent with itself reporting * SACK reneging when it should advance SND.UNA. Such SACK block this is * perfectly valid, however, in light of RFC2018 which explicitly states * that "SACK block MUST reflect the newest segment. Even if the newest * segment is going to be discarded ...", not that it looks very clever * in case of head skb. Due to potentional receiver driven attacks, we * choose to avoid immediate execution of a walk in write queue due to * reneging and defer head skb's loss recovery to standard loss recovery * procedure that will eventually trigger (nothing forbids us doing this). * * Implements also blockage to start_seq wrap-around. Problem lies in the * fact that though start_seq (s) is before end_seq (i.e., not reversed), * there's no guarantee that it will be before snd_nxt (n). The problem * happens when start_seq resides between end_seq wrap (e_w) and snd_nxt * wrap (s_w): * * <- outs wnd -> <- wrapzone -> * u e n u_w e_w s n_w * | | | | | | | * |<------------+------+----- TCP seqno space --------------+---------->| * ...-- <2^31 ->| |<--------... 
* ...---- >2^31 ------>| |<--------... * * Current code wouldn't be vulnerable but it's better still to discard such * crazy SACK blocks. Doing this check for start_seq alone closes somewhat * similar case (end_seq after snd_nxt wrap) as earlier reversed check in * snd_nxt wrap -> snd_una region will then become "well defined", i.e., * equal to the ideal case (infinite seqno space without wrap caused issues). * * With D-SACK the lower bound is extended to cover sequence space below * SND.UNA down to undo_marker, which is the last point of interest. Yet * again, D-SACK block must not to go across snd_una (for the same reason as * for the normal SACK blocks, explained above). But there all simplicity * ends, TCP might receive valid D-SACKs below that. As long as they reside * fully below undo_marker they do not affect behavior in anyway and can * therefore be safely ignored. In rare cases (which are more or less * theoretical ones), the D-SACK will nicely cross that boundary due to skb * fragmentation and packet reordering past skb's retransmission. To consider * them correctly, the acceptable range must be extended even more though * the exact amount is rather hard to quantify. However, tp->max_window can * be used as an exaggerated estimate. */
static bool tcp_is_sackblock_valid(struct tcp_sock *tp, bool is_dsack, u32 start_seq, u32 end_seq) { /* Too far in future, or reversed (interpretation is ambiguous) */ if (after(end_seq, tp->snd_nxt) || !before(start_seq, end_seq)) return false; /* Nasty start_seq wrap-around check (see comments above) */ if (!before(start_seq, tp->snd_nxt)) return false; /* In outstanding window? ...This is valid exit for D-SACKs too. * start_seq == snd_una is non-sensical (see comments above) */ if (after(start_seq, tp->snd_una)) return true; if (!is_dsack || !tp->undo_marker) return false; /* ...Then it's D-SACK, and must reside below snd_una completely */ if (after(end_seq, tp->snd_una)) return false; if (!before(start_seq, tp->undo_marker)) return true; /* Too old */ if (!after(end_seq, tp->undo_marker)) return false; /* Undo_marker boundary crossing (overestimates a lot). Known already: * start_seq < undo_marker and end_seq >= undo_marker. */ return !before(start_seq, end_seq - tp->max_window); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               137      93.20%    1         33.33%
eric dumazet                9        6.12%     1         33.33%
ryousei takano              1        0.68%     1         33.33%
Total                       147      100.00%   3         100.00%
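
The validity checks in tcp_is_sackblock_valid() rely on the usual modular 32-bit sequence-number comparisons, which keep working across a sequence-space wrap. The sketch below shows that behaviour with local stand-ins for the kernel's before()/after() helpers (the seq_before/seq_after names and the example sequence numbers are assumptions made for illustration):

#include <stdio.h>
#include <stdint.h>

/* Signed 32-bit difference makes the comparison immune to seqno wrap-around,
 * same trick as the kernel's before()/after().
 */
static int seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}
#define seq_after(seq2, seq1)	seq_before(seq1, seq2)

int main(void)
{
	uint32_t snd_una = 0xfffff000u;	/* just below the 2^32 wrap */
	uint32_t snd_nxt = 0x00001000u;	/* already wrapped past it */
	uint32_t start_seq = 0xfffff800u, end_seq = 0x00000800u;

	/* A SACK block may straddle the wrap and still be in window:
	 * start_seq is after snd_una, and end_seq is not after snd_nxt.
	 */
	printf("start after snd_una: %d\n", seq_after(start_seq, snd_una));	/* 1 */
	printf("end   after snd_nxt: %d\n", seq_after(end_seq, snd_nxt));	/* 0 */
	return 0;
}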


static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb, struct tcp_sack_block_wire *sp, int num_sacks, u32 prior_snd_una) { struct tcp_sock *tp = tcp_sk(sk); u32 start_seq_0 = get_unaligned_be32(&sp[0].start_seq); u32 end_seq_0 = get_unaligned_be32(&sp[0].end_seq); bool dup_sack = false; if (before(start_seq_0, TCP_SKB_CB(ack_skb)->ack_seq)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKRECV); } else if (num_sacks > 1) { u32 end_seq_1 = get_unaligned_be32(&sp[1].end_seq); u32 start_seq_1 = get_unaligned_be32(&sp[1].start_seq); if (!after(end_seq_0, end_seq_1) && !before(start_seq_0, start_seq_1)) { dup_sack = true; tcp_dsack_seen(tp); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKOFORECV); } } /* D-SACK for already forgotten data... Do dumb counting. */ if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 && !after(end_seq_0, prior_snd_una) && after(end_seq_0, tp->undo_marker)) tp->undo_retrans--; return dup_sack; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
david s. miller             172      77.48%    1         10.00%
pavel emelianov             22       9.91%     2         20.00%
yuchung cheng               10       4.50%     2         20.00%
eric dumazet                8        3.60%     3         30.00%
ilpo jarvinen               6        2.70%     1         10.00%
harvey harrison             4        1.80%     1         10.00%
Total                       222      100.00%   10        100.00%

struct tcp_sacktag_state {
	int	reord;
	int	fack_count;
	/* Timestamps for earliest and latest never-retransmitted segment
	 * that was SACKed. RTO needs the earliest RTT to stay conservative,
	 * but congestion control should still get an accurate delay signal.
	 */
	struct skb_mstamp first_sackt;
	struct skb_mstamp last_sackt;
	int	flag;
};

/* Check if skb is fully within the SACK block. In presence of GSO skbs,
 * the incoming SACK may not exactly match but we can find smaller MSS
 * aligned portion of it that matches. Therefore we might need to fragment
 * which may fail and creates some hassle (caller must handle error case
 * returns).
 *
 * FIXME: this could be merged to shift decision code
 */
static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, u32 start_seq, u32 end_seq) { int err; bool in_sack; unsigned int pkt_len; unsigned int mss; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (tcp_skb_pcount(skb) > 1 && !in_sack && after(TCP_SKB_CB(skb)->end_seq, start_seq)) { mss = tcp_skb_mss(skb); in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { pkt_len = start_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) pkt_len = mss; } else { pkt_len = end_seq - TCP_SKB_CB(skb)->seq; if (pkt_len < mss) return -EINVAL; } /* Round if necessary so that SACKs cover only full MSSes * and/or the remaining small portion (if present) */ if (pkt_len > mss) { unsigned int new_len = (pkt_len / mss) * mss; if (!in_sack && new_len < pkt_len) { new_len += mss; if (new_len >= skb->len) return 0; } pkt_len = new_len; } err = tcp_fragment(sk, skb, pkt_len, mss, GFP_ATOMIC); if (err < 0) return err; } return in_sack; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               235      97.11%    2         33.33%
eric dumazet                3        1.24%     1         16.67%
octavian purdila            2        0.83%     1         16.67%
neal cardwell               1        0.41%     1         16.67%
adrian bunk                 1        0.41%     1         16.67%
Total                       242      100.00%   6         100.00%

/* Mark the given newly-SACKed range as such, adjusting counters and hints. */
static u8 tcp_sacktag_one(struct sock *sk, struct tcp_sacktag_state *state, u8 sacked, u32 start_seq, u32 end_seq, int dup_sack, int pcount, const struct skb_mstamp *xmit_time) { struct tcp_sock *tp = tcp_sk(sk); int fack_count = state->fack_count; /* Account D-SACK for retransmitted packet. */ if (dup_sack && (sacked & TCPCB_RETRANS)) { if (tp->undo_marker && tp->undo_retrans > 0 && after(end_seq, tp->undo_marker)) tp->undo_retrans--; if (sacked & TCPCB_SACKED_ACKED) state->reord = min(fack_count, state->reord); } /* Nothing to do; acked frame is about to be dropped (was ACKed). */ if (!after(end_seq, tp->snd_una)) return sacked; if (!(sacked & TCPCB_SACKED_ACKED)) { tcp_rack_advance(tp, xmit_time, sacked); if (sacked & TCPCB_SACKED_RETRANS) { /* If the segment is not tagged as lost, * we do not clear RETRANS, believing * that retransmission is still in flight. */ if (sacked & TCPCB_LOST) { sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS); tp->lost_out -= pcount; tp->retrans_out -= pcount; } } else { if (!(sacked & TCPCB_RETRANS)) { /* New sack for not retransmitted frame, * which was in hole. It is reordering. */ if (before(start_seq, tcp_highest_sack_seq(tp))) state->reord = min(fack_count, state->reord); if (!after(end_seq, tp->high_seq)) state->flag |= FLAG_ORIG_SACK_ACKED; if (state->first_sackt.v64 == 0) state->first_sackt = *xmit_time; state->last_sackt = *xmit_time; } if (sacked & TCPCB_LOST) { sacked &= ~TCPCB_LOST; tp->lost_out -= pcount; } } sacked |= TCPCB_SACKED_ACKED; state->flag |= FLAG_DATA_SACKED; tp->sacked_out += pcount; tp->delivered += pcount; /* Out-of-order packets delivered */ fack_count += pcount; /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ if (!tcp_is_fack(tp) && tp->lost_skb_hint && before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) tp->lost_cnt_hint += pcount; if (fack_count > tp->fackets_out) tp->fackets_out = fack_count; } /* D-SACK. We can detect redundant retransmission in S|R and plain R * frames and clear it. undo_retrans is decreased above, L|R frames * are accounted above as well. */ if (dup_sack && (sacked & TCPCB_SACKED_RETRANS)) { sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= pcount; } return sacked; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               239      61.60%    8         28.57%
yuchung cheng               58       14.95%    6         21.43%
baruch even                 25       6.44%     3         10.71%
pre-git                     20       5.15%     6         21.43%
stephen hemminger           17       4.38%     1         3.57%
kenneth klette jonassen     12       3.09%     1         3.57%
neal cardwell               11       2.84%     1         3.57%
eric dumazet                5        1.29%     1         3.57%
david s. miller             1        0.26%     1         3.57%
Total                       388      100.00%   28        100.00%

/* Shift newly-SACKed bytes from this skb to the immediately previous * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. */
static bool tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, unsigned int pcount, int shifted, int mss, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev = tcp_write_queue_prev(sk, skb); u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ BUG_ON(!pcount); /* Adjust counters and hints for the newly sacked sequence * range but discard the return value since prev is already * marked. We must tag the range first because the seq * advancement below implicitly advances * tcp_highest_sack_seq() when skb is highest_sack. */ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, start_seq, end_seq, dup_sack, pcount, &skb->skb_mstamp); if (skb == tp->lost_skb_hint) tp->lost_cnt_hint += pcount; TCP_SKB_CB(prev)->end_seq += shifted; TCP_SKB_CB(skb)->seq += shifted; tcp_skb_pcount_add(prev, pcount); BUG_ON(tcp_skb_pcount(skb) < pcount); tcp_skb_pcount_add(skb, -pcount); /* When we're adding to gso_segs == 1, gso_size will be zero, * in theory this shouldn't be necessary but as long as DSACK * code can come after this skb later on it's better to keep * setting gso_size to something. */ if (!TCP_SKB_CB(prev)->tcp_gso_size) TCP_SKB_CB(prev)->tcp_gso_size = mss; /* CHECKME: To clear or not to clear? Mimics normal skb currently */ if (tcp_skb_pcount(skb) <= 1) TCP_SKB_CB(skb)->tcp_gso_size = 0; /* Difference in this won't matter, both ACKed by the same cumul. ACK */ TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); if (skb->len > 0) { BUG_ON(!tcp_skb_pcount(skb)); NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTED); return false; } /* Whole SKB was eaten :-) */ if (skb == tp->retransmit_skb_hint) tp->retransmit_skb_hint = prev; if (skb == tp->lost_skb_hint) { tp->lost_skb_hint = prev; tp->lost_cnt_hint -= tcp_skb_pcount(prev); } TCP_SKB_CB(prev)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; TCP_SKB_CB(prev)->eor = TCP_SKB_CB(skb)->eor; if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) TCP_SKB_CB(prev)->end_seq++; if (skb == tcp_highest_sack(sk)) tcp_advance_highest_sack(sk, skb); tcp_skb_collapse_tstamp(prev, skb); tcp_unlink_write_queue(skb, sk); sk_wmem_free_skb(sk, skb); NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKMERGED); return true; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               270      68.70%    6         31.58%
neal cardwell               56       14.25%    3         15.79%
eric dumazet                44       11.20%    7         36.84%
martin kafai lau            20       5.09%     2         10.53%
yuchung cheng               3        0.76%     1         5.26%
Total                       393      100.00%   19        100.00%

/* I wish gso_size would have a bit more sane initialization than * something-or-zero which complicates things */
static int tcp_skb_seglen(const struct sk_buff *skb) { return tcp_skb_pcount(skb) == 1 ? skb->len : tcp_skb_mss(skb); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               28       96.55%    2         66.67%
eric dumazet                1        3.45%     1         33.33%
Total                       29       100.00%   3         100.00%

/* Shifting pages past head area doesn't work */
static int skb_can_shift(const struct sk_buff *skb) { return !skb_headlen(skb) && skb_is_nonlinear(skb); }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               23       95.83%    1         50.00%
eric dumazet                1        4.17%     1         50.00%
Total                       24       100.00%   2         100.00%

/* Try collapsing SACK blocks spanning across multiple skbs to a single * skb. */
static struct sk_buff *tcp_shift_skb_data(struct sock *sk, struct sk_buff *skb, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *prev; int mss; int pcount = 0; int len; int in_sack; if (!sk_can_gso(sk)) goto fallback; /* Normally R but no L won't result in plain S */ if (!dup_sack && (TCP_SKB_CB(skb)->sacked & (TCPCB_LOST|TCPCB_SACKED_RETRANS)) == TCPCB_SACKED_RETRANS) goto fallback; if (!skb_can_shift(skb)) goto fallback; /* This frame is about to be dropped (was ACKed). */ if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) goto fallback; /* Can only happen with delayed DSACK + discard craziness */ if (unlikely(skb == tcp_write_queue_head(sk))) goto fallback; prev = tcp_write_queue_prev(sk, skb); if ((TCP_SKB_CB(prev)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) goto fallback; if (!tcp_skb_can_collapse_to(prev)) goto fallback; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq) && !before(end_seq, TCP_SKB_CB(skb)->end_seq); if (in_sack) { len = skb->len; pcount = tcp_skb_pcount(skb); mss = tcp_skb_seglen(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; } else { if (!after(TCP_SKB_CB(skb)->end_seq, start_seq)) goto noop; /* CHECKME: This is non-MSS split case only?, this will * cause skipped skbs due to advancing loop btw, original * has that feature too */ if (tcp_skb_pcount(skb) <= 1) goto noop; in_sack = !after(start_seq, TCP_SKB_CB(skb)->seq); if (!in_sack) { /* TODO: head merge to next could be attempted here * if (!after(TCP_SKB_CB(skb)->end_seq, end_seq)), * though it might not be worth of the additional hassle * * ...we can probably just fallback to what was done * previously. We could try merging non-SACKed ones * as well but it probably isn't going to buy off * because later SACKs might again split them, and * it would make skb timestamp tracking considerably * harder problem. */ goto fallback; } len = end_seq - TCP_SKB_CB(skb)->seq; BUG_ON(len < 0); BUG_ON(len > skb->len); /* MSS boundaries should be honoured or else pcount will * severely break even though it makes things bit trickier. * Optimize common case to avoid most of the divides */ mss = tcp_skb_mss(skb); /* TODO: Fix DSACKs to not fragment already SACKed and we can * drop this restriction as unnecessary */ if (mss != tcp_skb_seglen(prev)) goto fallback; if (len == mss) { pcount = 1; } else if (len < mss) { goto noop; } else { pcount = len / mss; len = pcount * mss; } } /* tcp_sacktag_one() won't SACK-tag ranges below snd_una */ if (!after(TCP_SKB_CB(skb)->seq + len, tp->snd_una)) goto fallback; if (!skb_shift(prev, skb, len)) goto fallback; if (!tcp_shifted_skb(sk, skb, state, pcount, len, mss, dup_sack)) goto out; /* Hole filled allows collapsing with the next as well, this is very * useful when hole on every nth skb pattern happens */ if (prev == tcp_write_queue_tail(sk)) goto out; skb = tcp_write_queue_next(sk, prev); if (!skb_can_shift(skb) || (skb == tcp_send_head(sk)) || ((TCP_SKB_CB(skb)->sacked & TCPCB_TAGBITS) != TCPCB_SACKED_ACKED) || (mss != tcp_skb_seglen(skb))) goto out; len = skb->len; if (skb_shift(prev, skb, len)) { pcount += tcp_skb_pcount(skb); tcp_shifted_skb(sk, skb, state, tcp_skb_pcount(skb), len, mss, 0); } out: state->fack_count += pcount; return prev; noop: return skb; fallback: NET_INC_STATS(sock_net(sk), LINUX_MIB_SACKSHIFTFALLBACK); return NULL; }

Contributors

Person                      Tokens   Prop      Commits   CommitProp
ilpo jarvinen               553      91.86%    10        62.50%
neal cardwell               23       3.82%     1         6.25%
martin kafai lau            11       1.83%     1         6.25%
stephen hemminger           10       1.66%     1         6.25%
baruch even                 3        0.50%     1         6.25%
eric dumazet                2        0.33%     2         12.50%
Total                       602      100.00%   16        100.00%


static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 start_seq, u32 end_seq, bool dup_sack_in) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *tmp; tcp_for_write_queue_from(skb, sk) { int in_sack = 0; bool dup_sack = dup_sack_in; if (skb == tcp_send_head(sk)) break; /* queue is in-order => we can short-circuit the walk early */ if (!before(TCP_SKB_CB(skb)->seq, end_seq)) break; if (next_dup && before(TCP_SKB_CB(skb)->seq, next_dup->end_seq)) { in_sack = tcp_match_skb_to_sack(sk, skb, next_dup->start_seq, next_dup->end_seq); if (in_sack > 0) dup_sack = true; } /* skb reference here is a bit tricky to get right, since * shifting can eat and free both this skb and the next, * so not even _safe variant of the loop is enough. */ if (in_sack <= 0) { tmp = tcp_shift_skb_data(sk, skb, state, start_seq, end_seq, dup_sack); if (tmp) { if (tmp != skb) { skb = tmp; continue; } in_sack = 0; } else { in_sack = tcp_match_skb_to_sack(sk, skb, start_seq, end_seq); } } if (unlikely(in_sack < 0)) break; if (in_sack) { TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq, dup_sack, tcp_skb_pcount(skb), &skb->skb_mstamp); if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) tcp_advance_highest_sack(sk, skb); } state->fack_count += tcp_skb_pcount(skb); } return skb; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen28090.61%555.56%
neal cardwellneal cardwell216.80%111.11%
eric dumazeteric dumazet51.62%222.22%
yuchung chengyuchung cheng30.97%111.11%
Total309100.00%9100.00%

/* Avoid all extra work that is being done by sacktag while walking in * a normal way */
static struct sk_buff *tcp_sacktag_skip(struct sk_buff *skb, struct sock *sk, struct tcp_sacktag_state *state, u32 skip_to_seq) { tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; if (after(TCP_SKB_CB(skb)->end_seq, skip_to_seq)) break; state->fack_count += tcp_skb_pcount(skb); } return skb; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen71100.00%4100.00%
Total71100.00%4100.00%


static struct sk_buff *tcp_maybe_skipping_dsack(struct sk_buff *skb, struct sock *sk, struct tcp_sack_block *next_dup, struct tcp_sacktag_state *state, u32 skip_to_seq) { if (!next_dup) return skb; if (before(next_dup->start_seq, skip_to_seq)) { skb = tcp_sacktag_skip(skb, sk, state, next_dup->start_seq); skb = tcp_sacktag_walk(skb, sk, NULL, state, next_dup->start_seq, next_dup->end_seq, 1); } return skb; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen9298.92%375.00%
ian morrisian morris11.08%125.00%
Total93100.00%4100.00%


static int tcp_sack_cache_ok(const struct tcp_sock *tp, const struct tcp_sack_block *cache) { return cache < tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen3093.75%150.00%
eric dumazeteric dumazet26.25%150.00%
Total32100.00%2100.00%


static int tcp_sacktag_write_queue(struct sock *sk, const struct sk_buff *ack_skb, u32 prior_snd_una, struct tcp_sacktag_state *state) { struct tcp_sock *tp = tcp_sk(sk); const unsigned char *ptr = (skb_transport_header(ack_skb) + TCP_SKB_CB(ack_skb)->sacked); struct tcp_sack_block_wire *sp_wire = (struct tcp_sack_block_wire *)(ptr+2); struct tcp_sack_block sp[TCP_NUM_SACKS]; struct tcp_sack_block *cache; struct sk_buff *skb; int num_sacks = min(TCP_NUM_SACKS, (ptr[1] - TCPOLEN_SACK_BASE) >> 3); int used_sacks; bool found_dup_sack = false; int i, j; int first_sack_index; state->flag = 0; state->reord = tp->packets_out; if (!tp->sacked_out) { if (WARN_ON(tp->fackets_out)) tp->fackets_out = 0; tcp_highest_sack_reset(sk); } found_dup_sack = tcp_check_dsack(sk, ack_skb, sp_wire, num_sacks, prior_snd_una); if (found_dup_sack) state->flag |= FLAG_DSACKING_ACK; /* Eliminate too old ACKs, but take into * account more or less fresh ones, they can * contain valid SACK info. */ if (before(TCP_SKB_CB(ack_skb)->ack_seq, prior_snd_una - tp->max_window)) return 0; if (!tp->packets_out) goto out; used_sacks = 0; first_sack_index = 0; for (i = 0; i < num_sacks; i++) { bool dup_sack = !i && found_dup_sack; sp[used_sacks].start_seq = get_unaligned_be32(&sp_wire[i].start_seq); sp[used_sacks].end_seq = get_unaligned_be32(&sp_wire[i].end_seq); if (!tcp_is_sackblock_valid(tp, dup_sack, sp[used_sacks].start_seq, sp[used_sacks].end_seq)) { int mib_idx; if (dup_sack) { if (!tp->undo_marker) mib_idx = LINUX_MIB_TCPDSACKIGNOREDNOUNDO; else mib_idx = LINUX_MIB_TCPDSACKIGNOREDOLD; } else { /* Don't count olds caused by ACK reordering */ if ((TCP_SKB_CB(ack_skb)->ack_seq != tp->snd_una) && !after(sp[used_sacks].end_seq, tp->snd_una)) continue; mib_idx = LINUX_MIB_TCPSACKDISCARD; } NET_INC_STATS(sock_net(sk), mib_idx); if (i == 0) first_sack_index = -1; continue; } /* Ignore very old stuff early */ if (!after(sp[used_sacks].end_seq, prior_snd_una)) continue; used_sacks++; } /* order SACK blocks to allow in order walk of the retrans queue */ for (i = used_sacks - 1; i > 0; i--) { for (j = 0; j < i; j++) { if (after(sp[j].start_seq, sp[j + 1].start_seq)) { swap(sp[j], sp[j + 1]); /* Track where the first SACK block goes to */ if (j == first_sack_index) first_sack_index = j + 1; } } } skb = tcp_write_queue_head(sk); state->fack_count = 0; i = 0; if (!tp->sacked_out) { /* It's already past, so skip checking against it */ cache = tp->recv_sack_cache + ARRAY_SIZE(tp->recv_sack_cache); } else { cache = tp->recv_sack_cache; /* Skip empty blocks in at head of the cache */ while (tcp_sack_cache_ok(tp, cache) && !cache->start_seq && !cache->end_seq) cache++; } while (i < used_sacks) { u32 start_seq = sp[i].start_seq; u32 end_seq = sp[i].end_seq; bool dup_sack = (found_dup_sack && (i == first_sack_index)); struct tcp_sack_block *next_dup = NULL; if (found_dup_sack && ((i + 1) == first_sack_index)) next_dup = &sp[i + 1]; /* Skip too early cached blocks */ while (tcp_sack_cache_ok(tp, cache) && !before(start_seq, cache->end_seq)) cache++; /* Can skip some work by looking recv_sack_cache? */ if (tcp_sack_cache_ok(tp, cache) && !dup_sack && after(end_seq, cache->start_seq)) { /* Head todo? */ if (before(start_seq, cache->start_seq)) { skb = tcp_sacktag_skip(skb, sk, state, start_seq); skb = tcp_sacktag_walk(skb, sk, next_dup, state, start_seq, cache->start_seq, dup_sack); } /* Rest of the block already fully processed? 
*/ if (!after(end_seq, cache->end_seq)) goto advance_sp; skb = tcp_maybe_skipping_dsack(skb, sk, next_dup, state, cache->end_seq); /* ...tail remains todo... */ if (tcp_highest_sack_seq(tp) == cache->end_seq) { /* ...but better entrypoint exists! */ skb = tcp_highest_sack(sk); if (!skb) break; state->fack_count = tp->fackets_out; cache++; goto walk; } skb = tcp_sacktag_skip(skb, sk, state, cache->end_seq); /* Check overlap against next cached too (past this one already) */ cache++; continue; } if (!before(start_seq, tcp_highest_sack_seq(tp))) { skb = tcp_highest_sack(sk); if (!skb) break; state->fack_count = tp->fackets_out; } skb = tcp_sacktag_skip(skb, sk, state, start_seq); walk: skb = tcp_sacktag_walk(skb, sk, next_dup, state, start_seq, end_seq, dup_sack); advance_sp: i++; } /* Clear the head of the cache sack blocks so we can skip it next time */ for (i = 0; i < ARRAY_SIZE(tp->recv_sack_cache) - used_sacks; i++) { tp->recv_sack_cache[i].start_seq = 0; tp->recv_sack_cache[i].end_seq = 0; } for (j = 0; j < used_sacks; j++) tp->recv_sack_cache[i++] = sp[j]; if ((state->reord < tp->fackets_out) && ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker)) tcp_update_reordering(sk, tp->fackets_out - state->reord, 0); tcp_verify_left_out(tp); out: #if FASTRETRANS_DEBUG > 0 WARN_ON((int)tp->sacked_out < 0); WARN_ON((int)tp->lost_out < 0); WARN_ON((int)tp->retrans_out < 0); WARN_ON((int)tcp_packets_in_flight(tp) < 0); #endif return state->flag; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen91182.97%1639.02%
pre-gitpre-git988.93%614.63%
pavel emelianovpavel emelianov191.73%37.32%
baruch evenbaruch even151.37%12.44%
kenneth klette jonassenkenneth klette jonassen121.09%12.44%
yuchung chengyuchung cheng111.00%37.32%
david s. millerdavid s. miller90.82%37.32%
eric dumazeteric dumazet70.64%37.32%
adam langleyadam langley60.55%12.44%
stephen hemmingerstephen hemminger40.36%12.44%
arnaldo carvalho de meloarnaldo carvalho de melo20.18%12.44%
harvey harrisonharvey harrison20.18%12.44%
ian morrisian morris20.18%12.44%
Total1098100.00%41100.00%

/* Limits sacked_out so that sum with lost_out isn't ever larger than * packets_out. Returns false if sacked_out adjustment wasn't necessary. */
static bool tcp_limit_reno_sacked(struct tcp_sock *tp) { u32 holes; holes = max(tp->lost_out, 1U); holes = min(holes, tp->packets_out); if ((tp->sacked_out + holes) > tp->packets_out) { tp->sacked_out = tp->packets_out - holes; return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen6595.59%375.00%
eric dumazeteric dumazet34.41%125.00%
Total68100.00%4100.00%
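A small userspace sketch of the clamping performed by tcp_limit_reno_sacked(), using hypothetical counter values:

/* Userspace sketch (hypothetical counters): clamping the emulated Reno
 * sacked_out so that sacked_out + holes never exceeds packets_out. */
#include <stdio.h>

int main(void)
{
        unsigned int packets_out = 10, lost_out = 0, sacked_out = 12;
        unsigned int holes;

        holes = lost_out > 1 ? lost_out : 1;               /* max(lost_out, 1U)       */
        holes = holes < packets_out ? holes : packets_out; /* min(holes, packets_out) */
        if (sacked_out + holes > packets_out)
                sacked_out = packets_out - holes;          /* clamped to 9 here       */
        printf("sacked_out clamped to %u\n", sacked_out);
        return 0;
}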

/* If we receive more dupacks than we expected while counting segments * under the assumption of absent reordering, interpret this as reordering. * The only other reason could be a bug in the receiver TCP. */
static void tcp_check_reno_reordering(struct sock *sk, const int addend) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_limit_reno_sacked(tp)) tcp_update_reordering(sk, tp->packets_out + addend, 0); }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen45100.00%2100.00%
Total45100.00%2100.00%

/* Emulate SACKs for SACKless connection: account for a new dupack. */
static void tcp_add_reno_sack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); u32 prior_sacked = tp->sacked_out; tp->sacked_out++; tcp_check_reno_reordering(sk, 0); if (tp->sacked_out > prior_sacked) tp->delivered++; /* Some out-of-order packet is delivered */ tcp_verify_left_out(tp); }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen3864.41%266.67%
yuchung chengyuchung cheng2135.59%133.33%
Total59100.00%3100.00%

/* Account for ACK, ACKing some data in Reno Recovery phase. */
static void tcp_remove_reno_sacks(struct sock *sk, int acked) { struct tcp_sock *tp = tcp_sk(sk); if (acked > 0) { /* One ACK acked hole. The rest eat duplicate ACKs. */ tp->delivered += max_t(int, acked - tp->sacked_out, 1); if (acked - 1 >= tp->sacked_out) tp->sacked_out = 0; else tp->sacked_out -= acked - 1; } tcp_check_reno_reordering(sk, acked); tcp_verify_left_out(tp); }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen7080.46%266.67%
yuchung chengyuchung cheng1719.54%133.33%
Total87100.00%3100.00%
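The delivery accounting in tcp_remove_reno_sacks() can be illustrated with a short userspace sketch (made-up values, not kernel code):

/* Userspace sketch (made-up values): how a cumulative ACK in Reno recovery
 * credits newly delivered packets and consumes the emulated dupACK count. */
#include <stdio.h>

int main(void)
{
        int acked = 3;          /* packets newly ACKed cumulatively */
        int sacked_out = 1;     /* emulated dupACK count            */
        int delivered = 0;

        if (acked > 0) {
                int newly = acked - sacked_out;
                delivered += newly > 1 ? newly : 1;   /* max_t(int, ..., 1) */
                if (acked - 1 >= sacked_out)
                        sacked_out = 0;
                else
                        sacked_out -= acked - 1;
        }
        printf("delivered += %d, sacked_out = %d\n", delivered, sacked_out);
        return 0;
}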


static inline void tcp_reset_reno_sack(struct tcp_sock *tp) { tp->sacked_out = 0; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen18100.00%1100.00%
Total18100.00%1100.00%


void tcp_clear_retrans(struct tcp_sock *tp) { tp->retrans_out = 0; tp->lost_out = 0; tp->undo_marker = 0; tp->undo_retrans = -1; tp->fackets_out = 0; tp->sacked_out = 0; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng4085.11%375.00%
ilpo jarvinenilpo jarvinen714.89%125.00%
Total47100.00%4100.00%


static inline void tcp_init_undo(struct tcp_sock *tp) { tp->undo_marker = tp->snd_una; /* Retransmission still in flight may cause DSACKs later. */ tp->undo_retrans = tp->retrans_out ? : -1; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3296.97%266.67%
ilpo jarvinenilpo jarvinen13.03%133.33%
Total33100.00%3100.00%

/* Enter Loss state. If we detect SACK reneging, forget all SACK information * and reset tags completely, otherwise preserve SACKs. If receiver * dropped its ofo queue, we will know this due to reneging detection. */
void tcp_enter_loss(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct net *net = sock_net(sk); struct sk_buff *skb; bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery; bool is_reneg; /* is receiver reneging on SACKs? */ /* Reduce ssthresh if it has not yet been made inside this window. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder || !after(tp->high_seq, tp->snd_una) || (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); tcp_ca_event(sk, CA_EVENT_LOSS); tcp_init_undo(tp); } tp->snd_cwnd = 1; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->retrans_out = 0; tp->lost_out = 0; if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); skb = tcp_write_queue_head(sk); is_reneg = skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED); if (is_reneg) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSACKRENEGING); tp->sacked_out = 0; tp->fackets_out = 0; } tcp_clear_all_retrans_hints(tp); tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED; if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || is_reneg) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED; TCP_SKB_CB(skb)->sacked |= TCPCB_LOST; tp->lost_out += tcp_skb_pcount(skb); tp->retransmit_high = TCP_SKB_CB(skb)->end_seq; } } tcp_verify_left_out(tp); /* Timeout in disordered state after receiving substantial DUPACKs * suggests that the degree of reordering is over-estimated. */ if (icsk->icsk_ca_state <= TCP_CA_Disorder && tp->sacked_out >= net->ipv4.sysctl_tcp_reordering) tp->reordering = min_t(unsigned int, tp->reordering, net->ipv4.sysctl_tcp_reordering); tcp_set_ca_state(sk, TCP_CA_Loss); tp->high_seq = tp->snd_nxt; tcp_ecn_queue_cwr(tp); /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous * loss recovery is underway except recurring timeout(s) on * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing */ tp->frto = sysctl_tcp_frto && (new_recovery || icsk->icsk_retransmits) && !inet_csk(sk)->icsk_mtup.probe_size; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng20351.13%520.00%
ilpo jarvinenilpo jarvinen8922.42%1144.00%
pasi sarolahtipasi sarolahti379.32%14.00%
neal cardwellneal cardwell379.32%14.00%
nikolay borisovnikolay borisov153.78%14.00%
david s. millerdavid s. miller143.53%416.00%
eric dumazeteric dumazet10.25%14.00%
florian westphalflorian westphal10.25%14.00%
Total397100.00%25100.00%

/* If ACK arrived pointing to a remembered SACK, it means that our * remembered SACKs do not reflect real state of receiver i.e. * receiver _host_ is heavily congested (or buggy). * * To avoid big spurious retransmission bursts due to transient SACK * scoreboard oddities that look like reneging, we give the receiver a * little time (max(RTT/2, 10ms)) to send us some more ACKs that will * restore sanity to the SACK scoreboard. If the apparent reneging * persists until this RTO then we'll clear the SACK scoreboard. */
static bool tcp_check_sack_reneging(struct sock *sk, int flag) { if (flag & FLAG_SACK_RENEGING) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay = max(usecs_to_jiffies(tp->srtt_us >> 4), msecs_to_jiffies(10)); inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, delay, TCP_RTO_MAX); return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng2941.43%112.50%
neal cardwellneal cardwell1825.71%112.50%
pasi sarolahtipasi sarolahti1521.43%112.50%
ilpo jarvinenilpo jarvinen710.00%450.00%
stephen hemmingerstephen hemminger11.43%112.50%
Total70100.00%8100.00%
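As a worked example of the grace period computed in tcp_check_sack_reneging(): tp->srtt_us stores eight times the smoothed RTT in microseconds, so srtt_us >> 4 is SRTT/2. A minimal sketch with a hypothetical 40 ms SRTT:

/* Userspace sketch (hypothetical SRTT): tp->srtt_us holds 8 * SRTT in
 * microseconds, so srtt_us >> 4 is SRTT/2, the reneging grace period. */
#include <stdio.h>

int main(void)
{
        unsigned int srtt_us = 8 * 40000;            /* SRTT = 40 ms        */
        unsigned int half_rtt_us = srtt_us >> 4;     /* 20000 us            */
        unsigned int delay_us = half_rtt_us > 10000 ? half_rtt_us : 10000;

        printf("reneging grace period: %u us\n", delay_us);   /* 20 ms */
        return 0;
}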


static inline int tcp_fackets_out(const struct tcp_sock *tp) { return tcp_is_reno(tp) ? tp->sacked_out + 1 : tp->fackets_out; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng2172.41%125.00%
ilpo jarvinenilpo jarvinen724.14%250.00%
pasi sarolahtipasi sarolahti13.45%125.00%
Total29100.00%4100.00%

/* Heuristics to calculate the number of duplicate ACKs. There's no dupACK * counter when SACK is enabled (without SACK, sacked_out is used for * that purpose). * * Instead, with FACK TCP uses fackets_out that includes both SACKed * segments up to the highest received SACK block so far and holes in * between them. * * With reordering, holes may still be in flight, so RFC3517 recovery * uses pure sacked_out (total number of SACKed segments) even though * it violates the RFC that uses duplicate ACKs, often these are equal * but when e.g. out-of-window ACKs or packet duplication occurs, * they differ. Since neither occurs due to loss, TCP should really * ignore them. */
static inline int tcp_dupack_heuristics(const struct tcp_sock *tp) { return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1; }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen1344.83%120.00%
yuchung chengyuchung cheng1137.93%120.00%
pre-gitpre-git517.24%360.00%
Total29100.00%5100.00%
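A tiny sketch of the dupACK estimate described above, using a hypothetical scoreboard (three segments SACKed, two holes below the highest SACK):

/* Userspace sketch (hypothetical scoreboard): the dupACK estimate with and
 * without FACK, as described in the comment above. */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
        bool fack_enabled = true;
        unsigned int sacked_out = 3;    /* segments actually SACKed            */
        unsigned int fackets_out = 5;   /* SACKed segments plus holes below
                                         * the highest SACK                    */
        unsigned int dupacks = fack_enabled ? fackets_out : sacked_out + 1;

        printf("dupack heuristic: %u\n", dupacks);
        return 0;
}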


static bool tcp_pause_early_retransmit(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); unsigned long delay; /* Delay early retransmit and entering fast recovery for * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples * available, or RTO is scheduled to fire first. */ if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 || (flag & FLAG_ECE) || !tp->srtt_us) return false; delay = max(usecs_to_jiffies(tp->srtt_us >> 5), msecs_to_jiffies(2)); if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay))) return false; inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay, TCP_RTO_MAX); return true; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng5550.93%110.00%
pre-gitpre-git3229.63%440.00%
eric dumazeteric dumazet1211.11%110.00%
david s. millerdavid s. miller32.78%110.00%
stephen hemmingerstephen hemminger32.78%110.00%
arnaldo carvalho de meloarnaldo carvalho de melo32.78%220.00%
Total108100.00%10100.00%
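Likewise, the early-retransmit pause in tcp_pause_early_retransmit() is max(SRTT/4, 2 ms); with the same 8x fixed-point scaling of srtt_us, SRTT/4 is srtt_us >> 5. A minimal sketch:

/* Userspace sketch (hypothetical SRTT): the early-retransmit pause is
 * max(SRTT/4, 2 ms); with srtt_us = 8 * SRTT, SRTT/4 is srtt_us >> 5. */
#include <stdio.h>

int main(void)
{
        unsigned int srtt_us = 8 * 40000;              /* SRTT = 40 ms      */
        unsigned int quarter_rtt_us = srtt_us >> 5;    /* 10000 us          */
        unsigned int delay_us = quarter_rtt_us > 2000 ? quarter_rtt_us : 2000;

        printf("early-retransmit pause: %u us\n", delay_us);
        return 0;
}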

/* Linux NewReno/SACK/FACK/ECN state machine. * -------------------------------------- * * "Open" Normal state, no dubious events, fast path. * "Disorder" In all the respects it is "Open", * but requires a bit more attention. It is entered when * we see some SACKs or dupacks. It is split of "Open" * mainly to move some processing from fast path to slow one. * "CWR" CWND was reduced due to some Congestion Notification event. * It can be ECN, ICMP source quench, local device congestion. * "Recovery" CWND was reduced, we are fast-retransmitting. * "Loss" CWND was reduced due to RTO timeout or SACK reneging. * * tcp_fastretrans_alert() is entered: * - each incoming ACK, if state is not "Open" * - when arrived ACK is unusual, namely: * * SACK * * Duplicate ACK. * * ECN ECE. * * Counting packets in flight is pretty simple. * * in_flight = packets_out - left_out + retrans_out * * packets_out is SND.NXT-SND.UNA counted in packets. * * retrans_out is number of retransmitted segments. * * left_out is number of segments left network, but not ACKed yet. * * left_out = sacked_out + lost_out * * sacked_out: Packets, which arrived to receiver out of order * and hence not ACKed. With SACKs this number is simply * amount of SACKed data. Even without SACKs * it is easy to give pretty reliable estimate of this number, * counting duplicate ACKs. * * lost_out: Packets lost by network. TCP has no explicit * "loss notification" feedback from network (for now). * It means that this number can be only _guessed_. * Actually, it is the heuristics to predict lossage that * distinguishes different algorithms. * * F.e. after RTO, when all the queue is considered as lost, * lost_out = packets_out and in_flight = retrans_out. * * Essentially, we have now two algorithms counting * lost packets. * * FACK: It is the simplest heuristics. As soon as we decided * that something is lost, we decide that _all_ not SACKed * packets until the most forward SACK are lost. I.e. * lost_out = fackets_out - sacked_out and left_out = fackets_out. * It is absolutely correct estimate, if network does not reorder * packets. And it loses any connection to reality when reordering * takes place. We use FACK by default until reordering * is suspected on the path to this destination. * * NewReno: when Recovery is entered, we assume that one segment * is lost (classic Reno). While we are in Recovery and * a partial ACK arrives, we assume that one more packet * is lost (NewReno). This heuristics are the same in NewReno * and SACK. * * Imagine, that's all! Forget about all this shamanism about CWND inflation * deflation etc. CWND is real congestion window, never inflated, changes * only according to classic VJ rules. * * Really tricky (and requiring careful tuning) part of algorithm * is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue(). * The first determines the moment _when_ we should reduce CWND and, * hence, slow down forward transmission. In fact, it determines the moment * when we decide that hole is caused by loss, rather than by a reorder. * * tcp_xmit_retransmit_queue() decides, _what_ we should retransmit to fill * holes, caused by lost packets. * * And the most logically complicated part of algorithm is undo * heuristics. We detect false retransmits due to both too early * fast retransmit (reordering) and underestimated RTO, analyzing * timestamps and D-SACKs. When we detect that some segments were * retransmitted by mistake and CWND reduction was wrong, we undo * window reduction and abort recovery phase. 
This logic is hidden * inside several functions named tcp_try_undo_<something>. */ /* This function decides, when we should leave Disordered state * and enter Recovery phase, reducing congestion window. * * Main question: may we further continue forward transmission * with the same cwnd? */
static bool tcp_time_to_recover(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); __u32 packets_out; int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering; /* Trick#1: The loss is proven. */ if (tp->lost_out) return true; /* Not-A-Trick#2 : Classic rule... */ if (tcp_dupack_heuristics(tp) > tp->reordering) return true; /* Trick#4: It is still not OK... But will it be useful to delay * recovery more? */ packets_out = tp->packets_out; if (packets_out <= tp->reordering && tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) && !tcp_may_send_now(sk)) { /* We have nothing to send. This connection is limited * either by receiver window or by application. */ return true; } /* If a thin stream is detected, retransmit after first * received dupack. Employ only if SACK is supported in order * to avoid possible corner-case series of spurious retransmissions * Use only if there are no unsent data. */ if ((tp->thin_dupack || sysctl_tcp_thin_dupack) && tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 && tcp_is_sack(tp) && !tcp_send_head(sk)) return true; /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious * retransmissions due to small network reorderings, we implement * Mitigation A.3 in the RFC and delay the retransmission for a short * interval if appropriate. */ if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out && (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) && !tcp_may_send_now(sk)) return !tcp_pause_early_retransmit(sk, flag); return false; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng13267.01%110.00%
pre-gitpre-git3216.24%330.00%
ilpo jarvinenilpo jarvinen136.60%220.00%
nikolay borisovnikolay borisov136.60%110.00%
david s. millerdavid s. miller63.05%220.00%
arnaldo carvalho de meloarnaldo carvalho de melo10.51%110.00%
Total197100.00%10100.00%
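The packet-accounting identity from the state-machine comment above, worked through with made-up counters in a small userspace sketch:

/* Userspace sketch (made-up counters): the accounting identity from the
 * state-machine comment, in_flight = packets_out - left_out + retrans_out
 * with left_out = sacked_out + lost_out. */
#include <stdio.h>

int main(void)
{
        unsigned int packets_out = 20;   /* SND.NXT - SND.UNA, in packets    */
        unsigned int sacked_out = 4;     /* received out of order            */
        unsigned int lost_out = 2;       /* estimated lost by the network    */
        unsigned int retrans_out = 1;    /* retransmitted, still outstanding */
        unsigned int left_out = sacked_out + lost_out;
        unsigned int in_flight = packets_out - left_out + retrans_out;

        printf("left_out=%u in_flight=%u\n", left_out, in_flight);   /* 6, 15 */
        return 0;
}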

/* Detect loss in event "A" above by marking head of queue up as lost. * For FACK or non-SACK(Reno) senders, the first "packets" number of segments * are considered lost. For RFC3517 SACK, a segment is considered lost if it * has at least tp->reordering SACKed segments above it; "packets" refers to * the maximum SACKed segments to pass before reaching this limit. */
static void tcp_mark_head_lost(struct sock *sk, int packets, int mark_head) { struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; int cnt, oldcnt, lost; unsigned int mss; /* Use SACK to deduce losses of new sequences sent during recovery */ const u32 loss_high = tcp_is_sack(tp) ? tp->snd_nxt : tp->high_seq; WARN_ON(packets > tp->packets_out); if (tp->lost_skb_hint) { skb = tp->lost_skb_hint; cnt = tp->lost_cnt_hint; /* Head already handled? */ if (mark_head && skb != tcp_write_queue_head(sk)) return; } else { skb = tcp_write_queue_head(sk); cnt = 0; } tcp_for_write_queue_from(skb, sk) { if (skb == tcp_send_head(sk)) break; /* TODO: do this better */ /* this is not the most efficient way to do this... */ tp->lost_skb_hint = skb; tp->lost_cnt_hint = cnt; if (after(TCP_SKB_CB(skb)->end_seq, loss_high)) break; oldcnt = cnt; if (tcp_is_fack(tp) || tcp_is_reno(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) cnt += tcp_skb_pcount(skb); if (cnt > packets) { if ((tcp_is_sack(tp) && !tcp_is_fack(tp)) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) || (oldcnt >= packets)) break; mss = tcp_skb_mss(skb); /* If needed, chop off the prefix to mark as lost. */ lost = (packets - oldcnt) * mss; if (lost < skb->len && tcp_fragment(sk, skb, lost, mss, GFP_ATOMIC) < 0) break; cnt = packets; } tcp_skb_mark_lost(tp, skb); if (mark_head) break; } tcp_verify_left_out(tp); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng27390.40%228.57%
neal cardwellneal cardwell206.62%114.29%
linus torvaldslinus torvalds41.32%114.29%
arnaldo carvalho de meloarnaldo carvalho de melo20.66%114.29%
octavian purdilaoctavian purdila20.66%114.29%
eric dumazeteric dumazet10.33%114.29%
Total302100.00%7100.00%
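When a multi-segment skb straddles the "packets" limit, tcp_mark_head_lost() splits off just the prefix needed to reach the limit before marking it lost. A sketch of that arithmetic with hypothetical values:

/* Userspace sketch (hypothetical values): when one skb carries several
 * segments and crosses the "packets" limit, only the prefix needed to reach
 * the limit is split off and marked lost. */
#include <stdio.h>

int main(void)
{
        unsigned int packets = 3;        /* segments we may mark lost       */
        unsigned int oldcnt = 2;         /* segments counted so far         */
        unsigned int mss = 1448;
        unsigned int skb_len = 4 * 1448; /* this skb carries four segments  */
        unsigned int lost = (packets - oldcnt) * mss;    /* 1448 bytes      */

        if (lost < skb_len)
                printf("fragment at %u bytes and mark the prefix lost\n", lost);
        return 0;
}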

/* Account newly detected lost packet(s) */
static void tcp_update_scoreboard(struct sock *sk, int fast_rexmit) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_is_reno(tp)) { tcp_mark_head_lost(sk, 1, 1); } else if (tcp_is_fack(tp)) { int lost = tp->fackets_out - tp->reordering; if (lost <= 0) lost = 1; tcp_mark_head_lost(sk, lost, 0); } else { int sacked_upto = tp->sacked_out - tp->reordering; if (sacked_upto >= 0) tcp_mark_head_lost(sk, sacked_upto, 0); else if (fast_rexmit) tcp_mark_head_lost(sk, 1, 1); } }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng7560.00%19.09%
linus torvaldslinus torvalds2016.00%218.18%
pre-gitpre-git1310.40%327.27%
ilpo jarvinenilpo jarvinen129.60%218.18%
david s. millerdavid s. miller32.40%19.09%
arnaldo carvalho de meloarnaldo carvalho de melo21.60%218.18%
Total125100.00%11100.00%


static bool tcp_tsopt_ecr_before(const struct tcp_sock *tp, u32 when) { return tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && before(tp->rx_opt.rcv_tsecr, when); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng39100.00%4100.00%
Total39100.00%4100.00%

/* skb is spurious retransmitted if the returned timestamp echo * reply is prior to the skb transmission time */
static bool tcp_skb_spurious_retrans(const struct tcp_sock *tp, const struct sk_buff *skb) { return (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS) && tcp_tsopt_ecr_before(tp, tcp_skb_timestamp(skb)); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng40100.00%1100.00%
Total40100.00%1100.00%

/* Nothing was retransmitted or returned timestamp is less * than timestamp of the first retransmission. */
static inline bool tcp_packet_delayed(const struct tcp_sock *tp) { return !tp->retrans_stamp || tcp_tsopt_ecr_before(tp, tp->retrans_stamp); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng2692.86%375.00%
pre-gitpre-git27.14%125.00%
Total28100.00%4100.00%

/* Undo procedures. */ /* We can clear retrans_stamp when there are no retransmissions in the * window. It would seem that it is trivially available for us in * tp->retrans_out, however, that kind of assumption doesn't consider * what will happen if errors occur when sending a retransmission for the * second time. ...It could be that such a segment has only * TCPCB_EVER_RETRANS set at the present time. It seems that checking * the head skb is enough except for some reneging corner cases that * are not worth the effort. * * Main reason for all this complexity is the fact that connection dying * time now depends on the validity of the retrans_stamp, in particular, * that successive retransmissions of a segment must not advance * retrans_stamp under any conditions. */
static bool tcp_any_retrans_done(const struct sock *sk) { const struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; if (tp->retrans_out) return true; skb = tcp_write_queue_head(sk); if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)) return true; return false; }

Contributors

PersonTokensPropCommitsCommitProp
marcelo ricardo leitnermarcelo ricardo leitner66100.00%1100.00%
Total66100.00%1100.00%

#if FASTRETRANS_DEBUG > 1
static void DBGUNDO(struct sock *sk, const char *msg) { struct tcp_sock *tp = tcp_sk(sk); struct inet_sock *inet = inet_sk(sk); if (sk->sk_family == AF_INET) { pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n", msg, &inet->inet_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #if IS_ENABLED(CONFIG_IPV6) else if (sk->sk_family == AF_INET6) { pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n", msg, &sk->sk_v6_daddr, ntohs(inet->inet_dport), tp->snd_cwnd, tcp_left_out(tp), tp->snd_ssthresh, tp->prior_ssthresh, tp->packets_out); } #endif }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng9263.45%133.33%
ilpo jarvinenilpo jarvinen5135.17%133.33%
eric dumazeteric dumazet21.38%133.33%
Total145100.00%3100.00%

#else #define DBGUNDO(x...) do { } while (0) #endif
static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss) { struct tcp_sock *tp = tcp_sk(sk); if (unmark_loss) { struct sk_buff *skb; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST; } tp->lost_out = 0; tcp_clear_all_retrans_hints(tp); } if (tp->prior_ssthresh) { const struct inet_connection_sock *icsk = inet_csk(sk); if (icsk->icsk_ca_ops->undo_cwnd) tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk); else tp->snd_cwnd = max(tp->snd_cwnd, tp->snd_ssthresh << 1); if (tp->prior_ssthresh > tp->snd_ssthresh) { tp->snd_ssthresh = tp->prior_ssthresh; tcp_ecn_withdraw_cwr(tp); } } tp->snd_cwnd_stamp = tcp_time_stamp; tp->undo_marker = 0; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng12372.78%330.00%
pre-gitpre-git1710.06%330.00%
stephen hemmingerstephen hemminger169.47%110.00%
ilpo jarvinenilpo jarvinen127.10%220.00%
florian westphalflorian westphal10.59%110.00%
Total169100.00%10100.00%


static inline bool tcp_may_undo(const struct tcp_sock *tp) { return tp->undo_marker && (!tp->undo_retrans || tcp_packet_delayed(tp)); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng2893.33%150.00%
pre-gitpre-git26.67%150.00%
Total30100.00%2100.00%

/* People celebrate: "We love our President!" */
static bool tcp_try_undo_recovery(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tcp_may_undo(tp)) { int mib_idx; /* Happy end! We did not retransmit anything * or our original transmission succeeded. */ DBGUNDO(sk, inet_csk(sk)->icsk_ca_state == TCP_CA_Loss ? "loss" : "retrans"); tcp_undo_cwnd_reduction(sk, false); if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss) mib_idx = LINUX_MIB_TCPLOSSUNDO; else mib_idx = LINUX_MIB_TCPFULLUNDO; NET_INC_STATS(sock_net(sk), mib_idx); } if (tp->snd_una == tp->high_seq && tcp_is_reno(tp)) { /* Hold old state until something *above* high_seq * is ACKed. For Reno it is MUST to prevent false * fast retransmits (RFC2582). SACK TCP is safe. */ if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; return true; } tcp_set_ca_state(sk, TCP_CA_Open); return false; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng9167.91%325.00%
marcelo ricardo leitnermarcelo ricardo leitner1410.45%18.33%
ilpo jarvinenilpo jarvinen107.46%216.67%
david s. millerdavid s. miller64.48%18.33%
stephen hemmingerstephen hemminger64.48%18.33%
pre-gitpre-git42.99%216.67%
linus torvaldslinus torvalds21.49%18.33%
eric dumazeteric dumazet10.75%18.33%
Total134100.00%12100.00%

/* Try to undo cwnd reduction, because D-SACKs acked all retransmitted data */
static bool tcp_try_undo_dsack(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && !tp->undo_retrans) { DBGUNDO(sk, "D-SACK"); tcp_undo_cwnd_reduction(sk, false); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDSACKUNDO); return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git2437.50%433.33%
yuchung chengyuchung cheng2335.94%325.00%
ilpo jarvinenilpo jarvinen1421.88%216.67%
arnaldo carvalho de meloarnaldo carvalho de melo11.56%18.33%
linus torvaldslinus torvalds11.56%18.33%
eric dumazeteric dumazet11.56%18.33%
Total64100.00%12100.00%

/* Undo during loss recovery after partial ACK or using F-RTO. */
static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) { struct tcp_sock *tp = tcp_sk(sk); if (frto_undo || tcp_may_undo(tp)) { tcp_undo_cwnd_reduction(sk, true); DBGUNDO(sk, "partial loss"); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSSUNDO); if (frto_undo) NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSPURIOUSRTOS); inet_csk(sk)->icsk_retransmits = 0; if (frto_undo || tcp_is_sack(tp)) tcp_set_ca_state(sk, TCP_CA_Open); return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng6663.46%330.00%
hideaki yoshifujihideaki yoshifuji1514.42%110.00%
ilpo jarvinenilpo jarvinen109.62%220.00%
pre-gitpre-git109.62%220.00%
eric dumazeteric dumazet21.92%110.00%
david s. millerdavid s. miller10.96%110.00%
Total104100.00%10100.00%

/* The cwnd reduction in CWR and Recovery uses the PRR algorithm in RFC 6937. * It computes the number of packets to send (sndcnt) based on packets newly * delivered: * 1) If the packets in flight is larger than ssthresh, PRR spreads the * cwnd reductions across a full RTT. * 2) Otherwise PRR uses packet conservation to send as much as delivered. * But when the retransmits are acked without further losses, PRR * slow starts cwnd up to ssthresh to speed up the recovery. */
static void tcp_init_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->high_seq = tp->snd_nxt; tp->tlp_high_seq = 0; tp->snd_cwnd_cnt = 0; tp->prior_cwnd = tp->snd_cwnd; tp->prr_delivered = 0; tp->prr_out = 0; tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk); tcp_ecn_queue_cwr(tp); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3036.59%18.33%
pre-gitpre-git2834.15%650.00%
arnaldo carvalho de meloarnaldo carvalho de melo1214.63%18.33%
linus torvaldslinus torvalds67.32%18.33%
stephen hemmingerstephen hemminger56.10%216.67%
florian westphalflorian westphal11.22%18.33%
Total82100.00%12100.00%


static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag) { struct tcp_sock *tp = tcp_sk(sk); int sndcnt = 0; int delta = tp->snd_ssthresh - tcp_packets_in_flight(tp); if (newly_acked_sacked <= 0 || WARN_ON_ONCE(!tp->prior_cwnd)) return; tp->prr_delivered += newly_acked_sacked; if (delta < 0) { u64 dividend = (u64)tp->snd_ssthresh * tp->prr_delivered + tp->prior_cwnd - 1; sndcnt = div_u64(dividend, tp->prior_cwnd) - tp->prr_out; } else if ((flag & FLAG_RETRANS_DATA_ACKED) && !(flag & FLAG_LOST_RETRANS)) { sndcnt = min_t(int, delta, max_t(int, tp->prr_delivered - tp->prr_out, newly_acked_sacked) + 1); } else { sndcnt = min(delta, newly_acked_sacked); } /* Force a fast retransmit upon entering fast recovery */ sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1)); tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng14575.13%433.33%
pre-gitpre-git3116.06%325.00%
ilpo jarvinenilpo jarvinen84.15%18.33%
pavel emelianovpavel emelianov52.59%216.67%
david s. millerdavid s. miller31.55%18.33%
arnaldo carvalho de meloarnaldo carvalho de melo10.52%18.33%
Total193100.00%12100.00%
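A worked example of the PRR step in tcp_cwnd_reduction() while packets in flight still exceed ssthresh (made-up values; the floor mirrors the "force a fast retransmit" clamp):

/* Userspace sketch (made-up values): the PRR send quota while packets in
 * flight still exceed ssthresh, mirroring the dividend/floor arithmetic
 * in tcp_cwnd_reduction() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int prior_cwnd = 10;     /* cwnd when recovery started       */
        unsigned int ssthresh = 7;        /* target cwnd after the reduction  */
        unsigned int prr_delivered = 4;   /* delivered since recovery started */
        unsigned int prr_out = 2;         /* sent since recovery started      */
        uint64_t dividend = (uint64_t)ssthresh * prr_delivered + prior_cwnd - 1;
        int sndcnt = (int)(dividend / prior_cwnd) - (int)prr_out;    /* 3 - 2 */
        int floor = prr_out ? 0 : 1;      /* force a fast retransmit on entry */

        if (sndcnt < floor)
                sndcnt = floor;
        printf("PRR allows %d new segment(s) this ACK\n", sndcnt);   /* 1 */
        return 0;
}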


static inline void tcp_end_cwnd_reduction(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); /* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */ if (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || (tp->undo_marker && tp->snd_ssthresh < TCP_INFINITE_SSTHRESH)) { tp->snd_cwnd = tp->snd_ssthresh; tp->snd_cwnd_stamp = tcp_time_stamp; } tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3043.48%111.11%
pre-gitpre-git2739.13%555.56%
ilpo jarvinenilpo jarvinen811.59%111.11%
pavel emelianovpavel emelianov34.35%111.11%
arnaldo carvalho de meloarnaldo carvalho de melo11.45%111.11%
Total69100.00%9100.00%

/* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
void tcp_enter_cwr(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tp->prior_ssthresh = 0; if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { tp->undo_marker = 0; tcp_init_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_CWR); } }

Contributors

PersonTokensPropCommitsCommitProp
ilpo jarvinenilpo jarvinen2950.88%150.00%
yuchung chengyuchung cheng2849.12%150.00%
Total57100.00%2100.00%

EXPORT_SYMBOL(tcp_enter_cwr);
static void tcp_try_keep_open(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); int state = TCP_CA_Open; if (tcp_left_out(tp) || tcp_any_retrans_done(sk)) state = TCP_CA_Disorder; if (inet_csk(sk)->icsk_ca_state != state) { tcp_set_ca_state(sk, state); tp->high_seq = tp->snd_nxt; } }

Contributors

PersonTokensPropCommitsCommitProp
pre-gitpre-git2738.57%444.44%
yuchung chengyuchung cheng2535.71%111.11%
ilpo jarvinenilpo jarvinen1420.00%222.22%
pavel emelianovpavel emelianov34.29%111.11%
arnaldo carvalho de meloarnaldo carvalho de melo11.43%111.11%
Total70100.00%9100.00%


static void tcp_try_to_open(struct sock *sk, int flag) { struct tcp_sock *tp = tcp_sk(sk); tcp_verify_left_out(tp); if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; if (flag & FLAG_ECE) tcp_enter_cwr(sk); if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) { tcp_try_keep_open(sk); } }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3852.78%114.29%
pre-gitpre-git1723.61%228.57%
ilpo jarvinenilpo jarvinen811.11%114.29%
david s. millerdavid s. miller56.94%114.29%
stephen hemmingerstephen hemminger34.17%114.29%
arnaldo carvalho de meloarnaldo carvalho de melo11.39%114.29%
Total72100.00%7100.00%


static void tcp_mtup_probe_failed(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_mtup.search_high = icsk->icsk_mtup.probe_size - 1; icsk->icsk_mtup.probe_size = 0; NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPFAIL); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3158.49%114.29%
rick jonesrick jones916.98%114.29%
pre-gitpre-git815.09%342.86%
arnaldo carvalho de meloarnaldo carvalho de melo47.55%114.29%
eric dumazeteric dumazet11.89%114.29%
Total53100.00%7100.00%


static void tcp_mtup_probe_success(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct inet_connection_sock *icsk = inet_csk(sk); /* FIXME: breaks with very large cwnd */ tp->prior_ssthresh = tcp_current_ssthresh(sk); tp->snd_cwnd = tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache) / icsk->icsk_mtup.probe_size; tp->snd_cwnd_cnt = 0; tp->snd_cwnd_stamp = tcp_time_stamp; tp->snd_ssthresh = tcp_current_ssthresh(sk); icsk->icsk_mtup.search_low = icsk->icsk_mtup.probe_size; icsk->icsk_mtup.probe_size = 0; tcp_sync_mss(sk, icsk->icsk_pmtu_cookie); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMTUPSUCCESS); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng11189.52%350.00%
rick jonesrick jones97.26%116.67%
nandita dukkipatinandita dukkipati32.42%116.67%
eric dumazeteric dumazet10.81%116.67%
Total124100.00%6100.00%

/* Do a simple retransmit without using the backoff mechanisms in * tcp_timer. This is used for path mtu discovery. * The socket is already locked here. */
void tcp_simple_retransmit(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); struct sk_buff *skb; unsigned int mss = tcp_current_mss(sk); u32 prior_lost = tp->lost_out; tcp_for_write_queue(skb, sk) { if (skb == tcp_send_head(sk)) break; if (tcp_skb_seglen(skb) > mss && !(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) { if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS; tp->retrans_out -= tcp_skb_pcount(skb); } tcp_skb_mark_lost_uncond_verify(tp, skb); } } tcp_clear_retrans_hints_partial(tp); if (prior_lost == tp->lost_out) return; if (tcp_is_reno(tp)) tcp_limit_reno_sacked(tp); tcp_verify_left_out(tp); /* Don't muck with the congestion window here. * Reason is that we do not increase amount of _data_ * in network, but units changed and effective * cwnd/ssthresh really reduced now. */ if (icsk->icsk_ca_state != TCP_CA_Loss) { tp->high_seq = tp->snd_nxt; tp->snd_ssthresh = tcp_current_ssthresh(sk); tp->prior_ssthresh = 0; tp->undo_marker = 0; tcp_set_ca_state(sk, TCP_CA_Loss); } tcp_xmit_retransmit_queue(sk); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng19389.77%545.45%
pre-gitpre-git104.65%327.27%
nandita dukkipatinandita dukkipati41.86%19.09%
stephen hemmingerstephen hemminger41.86%19.09%
arnaldo carvalho de meloarnaldo carvalho de melo41.86%19.09%
Total215100.00%11100.00%

EXPORT_SYMBOL(tcp_simple_retransmit);
static void tcp_enter_recovery(struct sock *sk, bool ece_ack) { struct tcp_sock *tp = tcp_sk(sk); int mib_idx; if (tcp_is_reno(tp)) mib_idx = LINUX_MIB_TCPRENORECOVERY; else mib_idx = LINUX_MIB_TCPSACKRECOVERY; NET_INC_STATS(sock_net(sk), mib_idx); tp->prior_ssthresh = 0; tcp_init_undo(tp); if (!tcp_in_cwnd_reduction(sk)) { if (!ece_ack) tp->prior_ssthresh = tcp_current_ssthresh(sk); tcp_init_cwnd_reduction(sk); } tcp_set_ca_state(sk, TCP_CA_Recovery); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng9999.00%583.33%
eric dumazeteric dumazet11.00%116.67%
Total100100.00%6100.00%

/* Process an ACK in CA_Loss state. Move to CA_Open if lost data are * recovered or spurious. Otherwise retransmits more on partial ACKs. */
static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack, int *rexmit) { struct tcp_sock *tp = tcp_sk(sk); bool recovered = !before(tp->snd_una, tp->high_seq); if ((flag & FLAG_SND_UNA_ADVANCED) && tcp_try_undo_loss(sk, false)) return; if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */ /* Step 3.b. A timeout is spurious if not all data are * lost, i.e., never-retransmitted data are (s)acked. */ if ((flag & FLAG_ORIG_SACK_ACKED) && tcp_try_undo_loss(sk, true)) return; if (after(tp->snd_nxt, tp->high_seq)) { if (flag & FLAG_DATA_SACKED || is_dupack) tp->frto = 0; /* Step 3.a. loss was real */ } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) { tp->high_seq = tp->snd_nxt; /* Step 2.b. Try send new data (but deferred until cwnd * is updated in tcp_ack()). Otherwise fall back to * the conventional recovery. */ if (tcp_send_head(sk) && after(tcp_wnd_end(tp), tp->snd_nxt)) { *rexmit = REXMIT_NEW; return; } tp->frto = 0; } } if (recovered) { /* F-RTO RFC5682 sec 3.1 step 2.a and 1st part of step 3.a */ tcp_try_undo_recovery(sk); return; } if (tcp_is_reno(tp)) { /* A Reno DUPACK means new data in F-RTO step 2.b above are * delivered. Lower inflight to clock out (re)tranmissions. */ if (after(tp->snd_nxt, tp->high_seq) && is_dupack) tcp_add_reno_sack(sk); else if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); } *rexmit = REXMIT_LOST; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng232100.00%6100.00%
Total232100.00%6100.00%

/* Undo during fast recovery after partial ACK. */
static bool tcp_try_undo_partial(struct sock *sk, const int acked) { struct tcp_sock *tp = tcp_sk(sk); if (tp->undo_marker && tcp_packet_delayed(tp)) { /* Plain luck! Hole is filled with delayed * packet, rather than with a retransmit. */ tcp_update_reordering(sk, tcp_fackets_out(tp) + acked, 1); /* We are getting evidence that the reordering degree is higher * than we realized. If there are no retransmits out then we * can undo. Otherwise we clock out new packets but do not * mark more packets lost or retransmit more. */ if (tp->retrans_out) return true; if (!tcp_any_retrans_done(sk)) tp->retrans_stamp = 0; DBGUNDO(sk, "partial recovery"); tcp_undo_cwnd_reduction(sk, true); NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPPARTIALUNDO); tcp_try_keep_open(sk); return true; } return false; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng11199.11%266.67%
eric dumazeteric dumazet10.89%133.33%
Total112100.00%3100.00%

/* Process an event, which can update packets-in-flight not trivially. * Main goal of this function is to calculate new estimate for left_out, * taking into account both packets sitting in receiver's buffer and * packets lost by network. * * Besides that it updates the congestion state when packet loss or ECN * is detected. But it does not reduce the cwnd, it is done by the * congestion control later. * * It does _not_ decide what to send, it is made in function * tcp_xmit_retransmit_queue(). */
static void tcp_fastretrans_alert(struct sock *sk, const int acked, bool is_dupack, int *ack_flag, int *rexmit) { struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); int fast_rexmit = 0, flag = *ack_flag; bool do_lost = is_dupack || ((flag & FLAG_DATA_SACKED) && (tcp_fackets_out(tp) > tp->reordering)); if (WARN_ON(!tp->packets_out && tp->sacked_out)) tp->sacked_out = 0; if (WARN_ON(!tp->sacked_out && tp->fackets_out)) tp->fackets_out = 0; /* Now state machine starts. * A. ECE, hence prohibit cwnd undoing, the reduction is required. */ if (flag & FLAG_ECE) tp->prior_ssthresh = 0; /* B. In all the states check for reneging SACKs. */ if (tcp_check_sack_reneging(sk, flag)) return; /* C. Check consistency of the current state. */ tcp_verify_left_out(tp); /* D. Check state exit conditions. State can be terminated * when high_seq is ACKed. */ if (icsk->icsk_ca_state == TCP_CA_Open) { WARN_ON(tp->retrans_out != 0); tp->retrans_stamp = 0; } else if (!before(tp->snd_una, tp->high_seq)) { switch (icsk->icsk_ca_state) { case TCP_CA_CWR: /* CWR is to be held something *above* high_seq * is ACKed for CWR bit to reach receiver. */ if (tp->snd_una != tp->high_seq) { tcp_end_cwnd_reduction(sk); tcp_set_ca_state(sk, TCP_CA_Open); } break; case TCP_CA_Recovery: if (tcp_is_reno(tp)) tcp_reset_reno_sack(tp); if (tcp_try_undo_recovery(sk)) return; tcp_end_cwnd_reduction(sk); break; } } /* Use RACK to detect loss */ if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS && tcp_rack_mark_lost(sk)) { flag |= FLAG_LOST_RETRANS; *ack_flag |= FLAG_LOST_RETRANS; } /* E. Process state. */ switch (icsk->icsk_ca_state) { case TCP_CA_Recovery: if (!(flag & FLAG_SND_UNA_ADVANCED)) { if (tcp_is_reno(tp) && is_dupack) tcp_add_reno_sack(sk); } else { if (tcp_try_undo_partial(sk, acked)) return; /* Partial ACK arrived. Force fast retransmit. */ do_lost = tcp_is_reno(tp) || tcp_fackets_out(tp) > tp->reordering; } if (tcp_try_undo_dsack(sk)) { tcp_try_keep_open(sk); return; } break; case TCP_CA_Loss: tcp_process_loss(sk, flag, is_dupack, rexmit); if (icsk->icsk_ca_state != TCP_CA_Open && !(flag & FLAG_LOST_RETRANS)) return; /* Change state if cwnd is undone or retransmits are lost */ default: if (tcp_is_reno(tp)) { if (flag & FLAG_SND_UNA_ADVANCED) tcp_reset_reno_sack(tp); if (is_dupack) tcp_add_reno_sack(sk); } if (icsk->icsk_ca_state <= TCP_CA_Disorder) tcp_try_undo_dsack(sk); if (!tcp_time_to_recover(sk, flag)) { tcp_try_to_open(sk, flag); return; } /* MTU probe failure: don't reduce cwnd */ if (icsk->icsk_ca_state < TCP_CA_CWR && icsk->icsk_mtup.probe_size && tp->snd_una == tp->mtu_probe.probe_seq_start) { tcp_mtup_probe_failed(sk); /* Restores the reduction we did in tcp_mtup_probe() */ tp->snd_cwnd++; tcp_simple_retransmit(sk); return; } /* Otherwise enter Recovery state */ tcp_enter_recovery(sk, (flag & FLAG_ECE)); fast_rexmit = 1; } if (do_lost) tcp_update_scoreboard(sk, fast_rexmit); *rexmit = REXMIT_LOST; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng36868.79%1240.00%
ilpo jarvinenilpo jarvinen10319.25%620.00%
john heffnerjohn heffner285.23%13.33%
pre-gitpre-git234.30%723.33%
neal cardwellneal cardwell61.12%13.33%
arnaldo carvalho de meloarnaldo carvalho de melo30.56%13.33%
david s. millerdavid s. miller20.37%13.33%
eric dumazeteric dumazet20.37%13.33%
Total535100.00%30100.00%

/* Kathleen Nichols' algorithm for tracking the minimum value of * a data stream over some fixed time interval. (E.g., the minimum * RTT over the past five minutes.) It uses constant space and constant * time per update yet almost always delivers the same minimum as an * implementation that has to keep all the data in the window. * * The algorithm keeps track of the best, 2nd best & 3rd best min * values, maintaining an invariant that the measurement time of the * n'th best >= n-1'th best. It also makes sure that the three values * are widely separated in the time window since that bounds the worse * case error when that data is monotonically increasing over the window. * * Upon getting a new min, we can forget everything earlier because it * has no value - the new min is <= everything else in the window by * definition and it's the most recent. So we restart fresh on every new min * and overwrites 2nd & 3rd choices. The same property holds for 2nd & 3rd * best. */
static void tcp_update_rtt_min(struct sock *sk, u32 rtt_us) { const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ; struct rtt_meas *m = tcp_sk(sk)->rtt_min; struct rtt_meas rttm = { .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1), .ts = now, }; u32 elapsed; /* Check if the new measurement updates the 1st, 2nd, or 3rd choices */ if (unlikely(rttm.rtt <= m[0].rtt)) m[0] = m[1] = m[2] = rttm; else if (rttm.rtt <= m[1].rtt) m[1] = m[2] = rttm; else if (rttm.rtt <= m[2].rtt) m[2] = rttm; elapsed = now - m[0].ts; if (unlikely(elapsed > wlen)) { /* Passed entire window without a new min so make 2nd choice * the new min & 3rd choice the new 2nd. So forth and so on. */ m[0] = m[1]; m[1] = m[2]; m[2] = rttm; if (now - m[0].ts > wlen) { m[0] = m[1]; m[1] = rttm; if (now - m[0].ts > wlen) m[0] = rttm; } } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) { /* Passed a quarter of the window without a new min so * take 2nd choice from the 2nd quarter of the window. */ m[2] = m[1] = rttm; } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) { /* Passed half the window without a new min so take the 3rd * choice from the last half of the window. */ m[2] = rttm; } }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng31397.51%150.00%
eric dumazeteric dumazet82.49%150.00%
Total321100.00%2100.00%
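The three-choice windowed minimum described above can be sketched in userspace as follows; rtt_min_update() is a hypothetical helper, the types are simplified, and the nested promotion the kernel performs when several sub-windows have expired is omitted:

/* Userspace sketch (hypothetical helper and simplified types): the
 * three-choice windowed minimum, omitting the nested promotion done when
 * more than one sub-window has expired. */
#include <stdio.h>

struct meas { unsigned int rtt, ts; };

static struct meas m[3];

static void rtt_min_update(unsigned int rtt, unsigned int now, unsigned int wlen)
{
        struct meas cur = { rtt, now };
        unsigned int elapsed;

        if (cur.rtt <= m[0].rtt)            /* new global min: forget the rest */
                m[0] = m[1] = m[2] = cur;
        else if (cur.rtt <= m[1].rtt)
                m[1] = m[2] = cur;
        else if (cur.rtt <= m[2].rtt)
                m[2] = cur;

        elapsed = now - m[0].ts;
        if (elapsed > wlen) {               /* whole window without a new min   */
                m[0] = m[1];
                m[1] = m[2];
                m[2] = cur;
        } else if (m[1].ts == m[0].ts && elapsed > wlen / 4) {
                m[2] = m[1] = cur;          /* quarter window without a new min */
        } else if (m[2].ts == m[1].ts && elapsed > wlen / 2) {
                m[2] = cur;                 /* half window without a new min    */
        }
}

int main(void)
{
        m[0] = m[1] = m[2] = (struct meas){ 100, 0 };
        rtt_min_update(80, 1, 300);         /* lower sample replaces all choices */
        printf("windowed min rtt = %u\n", m[0].rtt);    /* 80 */
        return 0;
}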


static inline bool tcp_ack_update_rtt(struct sock *sk, const int flag, long seq_rtt_us, long sack_rtt_us, long ca_rtt_us) { const struct tcp_sock *tp = tcp_sk(sk); /* Prefer RTT measured from ACK's timing to TS-ECR. This is because * broken middle-boxes or peers may corrupt TS-ECR fields. But * Karn's algorithm forbids taking RTT if some retransmitted data * is acked (RFC6298). */ if (seq_rtt_us < 0) seq_rtt_us = sack_rtt_us; /* RTTM Rule: A TSecr value received in a segment is used to * update the averaged RTT measurement only if the segment * acknowledges some new data, i.e., only if it advances the * left edge of the send window. * See draft-ietf-tcplw-high-performance-00, section 3.3. */ if (seq_rtt_us < 0 && tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && flag & FLAG_ACKED) seq_rtt_us = ca_rtt_us = jiffies_to_usecs(tcp_time_stamp - tp->rx_opt.rcv_tsecr); if (seq_rtt_us < 0) return false; /* ca_rtt_us >= 0 is counting on the invariant that ca_rtt_us is * always taken together with ACK, SACK, or TS-opts. Any negative * values will be skipped with the seq_rtt_us < 0 check above. */ tcp_update_rtt_min(sk, ca_rtt_us); tcp_rtt_estimator(sk, seq_rtt_us); tcp_set_rto(sk); /* RFC6298: only reset backoff on valid RTT measurement. */ inet_csk(sk)->icsk_backoff = 0; return true; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng7659.84%527.78%
pre-gitpre-git2721.26%633.33%
eric dumazeteric dumazet1411.02%15.56%
ilpo jarvinenilpo jarvinen43.15%211.11%
david s. millerdavid s. miller32.36%15.56%
arnaldo carvalho de meloarnaldo carvalho de melo21.57%211.11%
nandita dukkipatinandita dukkipati10.79%15.56%
Total127100.00%18100.00%
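A small sketch of the TS-ECR fallback above, assuming HZ = 1000 so one jiffy is one millisecond (hypothetical clock values):

/* Userspace sketch (assumes HZ = 1000, hypothetical clock values): deriving
 * an RTT sample from the echoed timestamp when no ACK timing is available. */
#include <stdio.h>

int main(void)
{
        unsigned int hz = 1000;                 /* jiffies per second      */
        unsigned int tcp_time_stamp = 105000;   /* "now", in jiffies       */
        unsigned int rcv_tsecr = 104960;        /* echoed timestamp        */
        unsigned int rtt_jiffies = tcp_time_stamp - rcv_tsecr;
        unsigned long rtt_us = (unsigned long)rtt_jiffies * (1000000 / hz);

        printf("seq_rtt_us = %lu\n", rtt_us);   /* 40000 us = 40 ms        */
        return 0;
}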

/* Compute time elapsed between (last) SYNACK and the ACK completing 3WHS. */
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req) { long rtt_us = -1L; if (req && !req->num_retrans && tcp_rsk(req)->snt_synack.v64) { struct skb_mstamp now; skb_mstamp_get(&now); rtt_us = skb_mstamp_us_delta(&now, &tcp_rsk(req)->snt_synack); } tcp_ack_update_rtt(sk, FLAG_SYN_ACKED, rtt_us, -1L, rtt_us); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng7693.83%583.33%
eric dumazeteric dumazet56.17%116.67%
Total81100.00%6100.00%


static void tcp_cong_avoid(struct sock *sk, u32 ack, u32 acked) { const struct inet_connection_sock *icsk = inet_csk(sk); icsk->icsk_ca_ops->cong_avoid(sk, ack, acked); tcp_sk(sk)->snd_cwnd_stamp = tcp_time_stamp; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng3978.00%225.00%
pre-gitpre-git816.00%450.00%
linus torvaldslinus torvalds24.00%112.50%
arnaldo carvalho de meloarnaldo carvalho de melo12.00%112.50%
Total50100.00%8100.00%

/* Restart timer after forward progress on connection. * RFC2988 recommends to restart timer to now+rto. */
void tcp_rearm_rto(struct sock *sk) { const struct inet_connection_sock *icsk = inet_csk(sk); struct tcp_sock *tp = tcp_sk(sk); /* If the retrans timer is currently being used by Fast Open * for SYN-ACK retrans purpose, stay put. */ if (tp->fastopen_rsk) return; if (!tp->packets_out) { inet_csk_clear_xmit_timer(sk, ICSK_TIME_RETRANS); } else { u32 rto = inet_csk(sk)->icsk_rto; /* Offset the time elapsed after installing regular RTO */ if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) { struct sk_buff *skb = tcp_write_queue_head(sk); const u32 rto_time_stamp = tcp_skb_timestamp(skb) + rto; s32 delta = (s32)(rto_time_stamp - tcp_time_stamp); /* delta may not be positive if the socket is locked * when the retrans timer fires and is rescheduled. */ if (delta > 0) rto = delta; } inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, rto, TCP_RTO_MAX); } }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng10474.29%111.11%
pre-gitpre-git2920.71%555.56%
arnaldo carvalho de meloarnaldo carvalho de melo42.86%111.11%
david s. millerdavid s. miller21.43%111.11%
eric dumazeteric dumazet10.71%111.11%
Total140100.00%9100.00%
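A sketch of how tcp_rearm_rto() re-arms the regular RTO for only the remainder when an early-retransmit or loss-probe timer was pending (hypothetical jiffies values):

/* Userspace sketch (hypothetical jiffies values): re-arming the regular RTO
 * for only the remainder, measured from the head skb's transmit time. */
#include <stdio.h>

int main(void)
{
        unsigned int rto = 200;           /* full RTO, in jiffies            */
        unsigned int skb_tstamp = 1000;   /* head skb was sent at this jiffy */
        unsigned int now = 1060;          /* 60 jiffies have since elapsed   */
        int delta = (int)(skb_tstamp + rto - now);

        if (delta > 0)
                rto = delta;              /* 140 jiffies remain              */
        printf("re-arm RTO for %u jiffies\n", rto);
        return 0;
}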

/* This function is called when the delayed ER timer fires. TCP enters * fast recovery and performs fast-retransmit. */
void tcp_resume_early_retransmit(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); tcp_rearm_rto(sk); /* Stop if ER is disabled after the delayed ER timer is scheduled */ if (!tp->do_early_retrans) return; tcp_enter_recovery(sk, false); tcp_update_scoreboard(sk, 1); tcp_xmit_retransmit_queue(sk); }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng2750.94%114.29%
pre-gitpre-git2037.74%457.14%
ilpo jarvinenilpo jarvinen59.43%114.29%
arnaldo carvalho de meloarnaldo carvalho de melo11.89%114.29%
Total53100.00%7100.00%

/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); u32 packets_acked; BUG_ON(!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)); packets_acked = tcp_skb_pcount(skb); if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) return 0; packets_acked -= tcp_skb_pcount(skb); if (packets_acked) { BUG_ON(tcp_skb_pcount(skb) == 0); BUG_ON(!before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq)); } return packets_acked; }

Contributors

PersonTokensPropCommitsCommitProp
yuchung chengyuchung cheng8266.13%18.33%
pre-gitpre-git1915.32%433.33%
ilpo jarvinenilpo jarvinen118.87%216.67%
john heffnerjohn heffner75.65%18.33%
linus torvaldslinus torvalds21.61%18.33%
pavel emelianovpavel emelianov10.81%18.33%
david s. millerdavid s. miller10.81%18.33%
arnaldo carvalho de meloarnaldo carvalho de melo10.81%18.33%
Total124100.00%12100.00%


static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb, u32 prior_snd_una) { const struct skb_shared_info *shinfo; /* Avoid cache line misses to get skb_shinfo() and shinfo->tx_flags */ if (likely(!TCP_SKB_CB(skb)->txstamp_ack)) return; shinfo = skb_shinfo(skb); if (!before(shinfo->tskey, prior_snd_una) && before(shinfo->tskey, tcp_sk(sk)->snd_una)) __skb_tstamp_tx(skb, NULL, sk, SCM_TSTAMP_ACK); }

Contributors

PersonTokensPropCommitsCommitProp
eric dumazeteric dumazet7083.33%133.33%
martin kafai laumartin kafai lau910.71%133.33%
soheil hassas yeganehsoheil hassas yeganeh55.95%133.33%
Total84100.00%3100.00%

/* Remove acknowledged frames from the retransmission queue. If our packet * is before the ack sequence we can discard it as it's confirmed to have * arrived at the other end. */
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
			       u32 prior_snd_una, int *acked,
			       struct tcp_sacktag_state *sack)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct skb_mstamp first_ackt, last_ackt, now;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 prior_sacked = tp->sacked_out;
	u32 reord = tp->packets_out;
	bool fully_acked = true;
	long sack_rtt_us = -1L;
	long seq_rtt_us = -1L;
	long ca_rtt_us = -1L;
	struct sk_buff *skb;
	u32 pkts_acked = 0;
	u32 last_in_flight = 0;
	bool rtt_update;
	int flag = 0;

	first_ackt.v64 = 0;

	while ((skb = tcp_write_queue_head(sk)) && skb != tcp_send_head(sk)) {
		struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
		u8 sacked = scb->sacked;
		u32 acked_pcount;

		tcp_ack_tstamp(sk, skb, prior_snd_una);

		/* Determine how many packets and what bytes were acked, tso and else */
		if (after(scb->end_seq, tp->snd_una)) {
			if (tcp_skb_pcount(skb) == 1 ||
			    !after(tp->snd_una, scb->seq))
				break;

			acked_pcount = tcp_tso_acked(sk, skb);
			if (!acked_pcount)
				break;

			fully_acked = false;
		} else {
			/* Speedup tcp_unlink_write_queue() and next loop */
			prefetchw(skb->next);
			acked_pcount = tcp_skb_pcount(skb);
		}

		if (unlikely(sacked & TCPCB_RETRANS)) {
			if (sacked & TCPCB_SACKED_RETRANS)
				tp->retrans_out -= acked_pcount;
			flag |= FLAG_RETRANS_DATA_ACKED;
		} else if (!(sacked & TCPCB_SACKED_ACKED)) {
			last_ackt = skb->skb_mstamp;
			WARN_ON_ONCE(last_ackt.v64 == 0);
			if (!first_ackt.v64)
				first_ackt = last_ackt;

			last_in_flight = TCP_SKB_CB(skb)->tx.in_flight;
			reord = min(pkts_acked, reord);
			if (!after(scb->end_seq, tp->high_seq))
				flag |= FLAG_ORIG_SACK_ACKED;
		}

		if (sacked & TCPCB_SACKED_ACKED) {
			tp->sacked_out -= acked_pcount;
		} else if (tcp_is_sack(tp)) {
			tp->delivered += acked_pcount;
			if (!tcp_skb_spurious_retrans(tp, skb))
				tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
		}
		if (sacked & TCPCB_LOST)
			tp->lost_out -= acked_pcount;

		tp->packets_out -= acked_pcount;
		pkts_acked += acked_pcount;

		/* Initial outgoing SYN's get put onto the write_queue
		 * just like anything else we transmit. It is not
		 * true data, and if we misinform our callers that
		 * this ACK acks real data, we will erroneously exit
		 * connection startup slow start one packet too
		 * quickly. This is severely frowned upon behavior.
		 */
		if (likely(!(scb->tcp_flags & TCPHDR_SYN))) {
			flag |= FLAG_DATA_ACKED;
		} else {
			flag |= FLAG_SYN_ACKED;
			tp->retrans_stamp = 0;
		}

		if (!fully_acked)
			break;

		tcp_unlink_write_queue(skb, sk);
		sk_wmem_free_skb(sk, skb);
		if (unlikely(skb == tp->retransmit_skb_hint))
			tp->retransmit_skb_hint = NULL;
		if (unlikely(skb == tp->lost_skb_hint))
			tp->lost_skb_hint = NULL;
	}

	if (likely(between(tp->snd_up, prior_snd_una, tp->snd_una)))
		tp->snd_up = tp->snd_una;

	if (skb && (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))
		flag |= FLAG_SACK_RENEGING;

	skb_mstamp_get(&now);
	if (likely(first_ackt.v64) && !(flag & FLAG_RETRANS_DATA_ACKED)) {
		seq_rtt_us = skb_mstamp_us_delta(&now, &first_ackt);
		ca_rtt_us = skb_mstamp_us_delta(&now, &last_ackt);
	}
	if (sack->first_sackt.v64) {
		sack_rtt_us = skb_mstamp_us_delta(&now, &sack->first_sackt);
		ca_rtt_us = skb_mstamp_us_delta(&now, &sack->last_sackt);
	}

	rtt_update = tcp_ack_update_rtt(sk, flag, seq_rtt_us, sack_rtt_us,
					ca_rtt_us);

	if (flag & FLAG_ACKED) {
		tcp_rearm_rto(sk);
		if (unlikely(icsk->icsk_mtup.probe_size &&
			     !after(tp->mtu_probe.probe_seq_end, tp->snd_una))) {
			tcp_mtup_probe_success(sk);
		}

		if (tcp_is_reno(tp)) {
			tcp_remove_reno_sacks(sk, pkts_acked);
		} else {
			int delta;

			/* Non-retransmitted hole got filled? That's reordering */
			if (reord < prior_fackets)
				tcp_update_reordering(sk, tp->fackets_out - reord, 0);

			delta = tcp_is_fack(tp) ? pkts_acked :
						  prior_sacked - tp->sacked_out;
			tp->lost_cnt_hint -= min(tp->lost_cnt_hint, delta);
		}

		tp->fackets_out -= min(pkts_acked, tp->fackets_out);

	} else if (skb && rtt_update && sack_rtt_us >= 0 &&
		   sack_rtt_us > skb_mstamp_us_delta(&now, &skb->skb_mstamp)) {
		/* Do not re-arm RTO if the sack RTT is measured from data sent
		 * after when the head was last (re)transmitted. Otherwise the
		 * timeout may continue to extend in loss recovery.
		 */
		tcp_rearm_rto(sk);
	}

	if (icsk->icsk_ca_ops->pkts_acked) {
		struct ack_sample sample = { .pkts_acked = pkts_acked,
					     .rtt_us = ca_rtt_us,
					     .in_flight = last_in_flight };

		icsk->icsk_ca_ops->pkts_acked(sk, &sample);
	}

#if FASTRETRANS_DEBUG > 0
	WARN_ON((int)tp->sacked_out < 0);
	WARN_ON((int)tp->lost_out < 0);
	WARN_ON((int)tp->retrans_out < 0);
	if (!tp->packets_out && tcp_is_sack(tp)) {
		icsk = inet_csk(sk);
		if (tp->lost_out) {
			pr_debug("Leak l=%u %d\n",
				 tp->lost_out, icsk->icsk_ca_state);
			tp->lost_out = 0;
		}
		if (tp->sacked_out) {
			pr_debug("Leak s=%u %d\n",
				 tp->sacked_out, icsk->icsk_ca_state);
			tp->sacked_out = 0;
		}
		if (tp->retrans_out) {
			pr_debug("Leak r=%u %d\n",
				 tp->retrans_out, icsk->icsk_ca_state);
			tp->retrans_out = 0;
		}
	}
#endif
	*acked = pkts_acked;
	return flag;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                  595   59.03%       10      22.22%
eric dumazet                   133   13.19%        3       6.67%
stephen hemminger               95    9.42%        5      11.11%
kenneth klette jonassen         76    7.54%        3       6.67%
lawrence brakmo                 41    4.07%        2       4.44%
pre-git                         18    1.79%        8      17.78%
ilpo jarvinen                   14    1.39%        5      11.11%
arnaldo carvalho de melo        12    1.19%        4       8.89%
willem de bruijn                 6    0.60%        1       2.22%
nandita dukkipati                6    0.60%        1       2.22%
jerry chu                        5    0.50%        1       2.22%
herbert xu                       5    0.50%        1       2.22%
pasi sarolahti                   2    0.20%        1       2.22%
Total                         1008  100.00%       45     100.00%


static void tcp_ack_probe(struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);

	/* Was it a usable window open? */
	if (!after(TCP_SKB_CB(tcp_send_head(sk))->end_seq, tcp_wnd_end(tp))) {
		icsk->icsk_backoff = 0;
		inet_csk_clear_xmit_timer(sk, ICSK_TIME_PROBE0);
		/* Socket must be waked up by subsequent tcp_data_snd_check().
		 * This function is not for random using!
		 */
	} else {
		unsigned long when = tcp_probe0_when(sk, TCP_RTO_MAX);

		inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
					  when, TCP_RTO_MAX);
	}
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                   55   57.89%        1      16.67%
ilpo jarvinen                   28   29.47%        3      50.00%
eric dumazet                    12   12.63%        2      33.33%
Total                           95  100.00%        6     100.00%


static inline bool tcp_ack_is_dubious(const struct sock *sk, const int flag)
{
	return !(flag & FLAG_NOT_DUP) || (flag & FLAG_CA_ALERT) ||
		inet_csk(sk)->icsk_ca_state != TCP_CA_Open;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                   23   57.50%        1      33.33%
ilpo jarvinen                   17   42.50%        2      66.67%
Total                           40  100.00%        3     100.00%

/* Decide whether to run the increase function of congestion control. */
static inline bool tcp_may_raise_cwnd(const struct sock *sk, const int flag)
{
	/* If reordering is high then always grow cwnd whenever data is
	 * delivered regardless of its ordering. Otherwise stay conservative
	 * and only grow cwnd on in-order delivery (RFC5681). A stretched ACK w/
	 * new SACK or ECE mark may first advance cwnd here and later reduce
	 * cwnd in tcp_fastretrans_alert() based on more states.
	 */
	if (tcp_sk(sk)->reordering > sock_net(sk)->ipv4.sysctl_tcp_reordering)
		return flag & FLAG_FORWARD_PROGRESS;

	return flag & FLAG_DATA_ACKED;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                   21   45.65%        3      33.33%
stephen hemminger               12   26.09%        1      11.11%
nikolay borisov                  7   15.22%        1      11.11%
ilpo jarvinen                    4    8.70%        2      22.22%
adrian bunk                      1    2.17%        1      11.11%
eric dumazet                     1    2.17%        1      11.11%
Total                           46  100.00%        9     100.00%

/* The "ultimate" congestion control function that aims to replace the rigid * cwnd increase and decrease control (tcp_cong_avoid,tcp_*cwnd_reduction). * It's called toward the end of processing an ACK with precise rate * information. All transmission or retransmission are delayed afterwards. */
static void tcp_cong_control(struct sock *sk, u32 ack, u32 acked_sacked,
			     int flag)
{
	if (tcp_in_cwnd_reduction(sk)) {
		/* Reduce cwnd if state mandates */
		tcp_cwnd_reduction(sk, acked_sacked, flag);
	} else if (tcp_may_raise_cwnd(sk, flag)) {
		/* Advance cwnd if state allows */
		tcp_cong_avoid(sk, ack, acked_sacked);
	}
	tcp_update_pacing_rate(sk);
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                   66  100.00%        1     100.00%
Total                           66  100.00%        1     100.00%

/* Check that window update is acceptable.
 * The function assumes that snd_una<=ack<=snd_next.
 */
static inline bool tcp_may_update_window(const struct tcp_sock *tp,
					 const u32 ack, const u32 ack_seq,
					 const u32 nwin)
{
	return	after(ack, tp->snd_una) ||
		after(ack_seq, tp->snd_wl1) ||
		(ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                   42   72.41%        1      16.67%
ilpo jarvinen                   15   25.86%        4      66.67%
stephen hemminger                1    1.72%        1      16.67%
Total                           58  100.00%        6     100.00%
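
The window-update test above relies on wraparound-safe sequence arithmetic: after() and before() compare 32-bit sequence numbers through a signed difference, so ordering stays correct even across the 2^32 wrap. Below is a minimal standalone sketch of that comparison; seq_before() and seq_after() are illustrative stand-ins for the kernel helpers defined in include/net/tcp.h.

#include <stdbool.h>
#include <stdint.h>

/* "seq1 is before seq2" holds when the signed 32-bit difference is
 * negative; this remains correct when sequence numbers wrap around.
 */
static bool seq_before(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) < 0;
}

static bool seq_after(uint32_t seq2, uint32_t seq1)
{
	return seq_before(seq1, seq2);
}

int main(void)
{
	/* 0x00000010 comes after 0xfffffff0 even though it is numerically smaller. */
	return seq_after(0x00000010u, 0xfffffff0u) ? 0 : 1;
}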

/* If we update tp->snd_una, also update tp->bytes_acked */
static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
{
	u32 delta = ack - tp->snd_una;

	sock_owned_by_me((struct sock *)tp);
	u64_stats_update_begin_raw(&tp->syncp);
	tp->bytes_acked += delta;
	u64_stats_update_end_raw(&tp->syncp);
	tp->snd_una = ack;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
eric dumazet                    61  100.00%        3     100.00%
Total                           61  100.00%        3     100.00%
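
Because tp->bytes_acked is a 64-bit counter fed from 32-bit sequence numbers, the delta ack - tp->snd_una depends on the same unsigned wraparound behaviour as the sequence comparisons above. A small standalone illustration with made-up values:

#include <stdint.h>
#include <stdio.h>

/* The unsigned subtraction yields the correct advance even when the
 * acknowledged sequence number wraps past 2^32 (hypothetical values).
 */
int main(void)
{
	uint64_t bytes_acked = 0;
	uint32_t snd_una = 0xfffffc00u;	/* 1024 bytes below the wrap point */
	uint32_t ack = 0x00000200u;	/* 512 bytes past the wrap point */

	bytes_acked += (uint32_t)(ack - snd_una);
	snd_una = ack;

	printf("advance = %llu bytes\n", (unsigned long long)bytes_acked); /* 1536 */
	return 0;
}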

/* If we update tp->rcv_nxt, also update tp->bytes_received */
static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
{
	u32 delta = seq - tp->rcv_nxt;

	sock_owned_by_me((struct sock *)tp);
	u64_stats_update_begin_raw(&tp->syncp);
	tp->bytes_received += delta;
	u64_stats_update_end_raw(&tp->syncp);
	tp->rcv_nxt = seq;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
eric dumazet                    61  100.00%        3     100.00%
Total                           61  100.00%        3     100.00%

/* Update our send window.
 *
 * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
 * and in FreeBSD. NetBSD's one is even worse.) is wrong.
 */
static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb,
				 u32 ack, u32 ack_seq)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int flag = 0;
	u32 nwin = ntohs(tcp_hdr(skb)->window);

	if (likely(!tcp_hdr(skb)->syn))
		nwin <<= tp->rx_opt.snd_wscale;

	if (tcp_may_update_window(tp, ack, ack_seq, nwin)) {
		flag |= FLAG_WIN_UPDATE;
		tcp_update_wl(tp, ack_seq);

		if (tp->snd_wnd != nwin) {
			tp->snd_wnd = nwin;

			/* Note, it is the only place, where
			 * fast path is recovered for sending TCP.
			 */
			tp->pred_flags = 0;
			tcp_fast_path_check(sk);

			if (tcp_send_head(sk))
				tcp_slow_start_after_idle_check(sk);

			if (nwin > tp->max_window) {
				tp->max_window = nwin;
				tcp_sync_mss(sk, inet_csk(sk)->icsk_pmtu_cookie);
			}
		}
	}

	tcp_snd_una_update(tp, ack);

	return flag;
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
yuchung cheng                  104   59.09%        1       7.14%
ilpo jarvinen                   44   25.00%        9      64.29%
eric dumazet                    16    9.09%        2      14.29%
stephen hemminger               12    6.82%        2      14.29%
Total                          176  100.00%       14     100.00%


static bool __tcp_oow_rate_limited(struct net *net, int mib_idx,
				   u32 *last_oow_ack_time)
{
	if (*last_oow_ack_time) {
		s32 elapsed = (s32)(tcp_time_stamp - *last_oow_ack_time);

		if (0 <= elapsed && elapsed < sysctl_tcp_invalid_ratelimit) {
			NET_INC_STATS(net, mib_idx);
			return true;	/* rate-limited: don't send yet! */
		}
	}

	*last_oow_ack_time = tcp_time_stamp;

	return false;	/* not rate-limited: go ahead, send dupack now! */
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
eric dumazet                    68   97.14%        2      66.67%
jason baron                      2    2.86%        1      33.33%
Total                           70  100.00%        3     100.00%
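
__tcp_oow_rate_limited() admits at most one out-of-window ACK per sysctl_tcp_invalid_ratelimit interval per socket, and all it keeps is the timestamp of the last decision. A hedged userspace sketch of the same gate follows; fake_jiffies, ratelimit_ticks and oow_rate_limited are illustrative stand-ins, not kernel symbols.

#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_jiffies;			/* stand-in for the jiffies clock */
static const int32_t ratelimit_ticks = 500;	/* stand-in for the sysctl interval */

/* Allow one event per interval: if the previous event is recent, report
 * rate-limited; otherwise record the new timestamp and let it through.
 */
static bool oow_rate_limited(uint32_t *last_event_time)
{
	if (*last_event_time) {
		int32_t elapsed = (int32_t)(fake_jiffies - *last_event_time);

		if (elapsed >= 0 && elapsed < ratelimit_ticks)
			return true;	/* still inside the window: suppress */
	}
	*last_event_time = fake_jiffies;
	return false;			/* allowed: caller may send the dupack */
}

int main(void)
{
	uint32_t last = 0;

	fake_jiffies = 1000;
	bool first = oow_rate_limited(&last);	/* false: first event passes */
	fake_jiffies = 1100;
	bool second = oow_rate_limited(&last);	/* true: only 100 ticks later */
	fake_jiffies = 1600;
	bool third = oow_rate_limited(&last);	/* false: window has elapsed */

	return (!first && second && !third) ? 0 : 1;
}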

/* Return true if we're currently rate-limiting out-of-window ACKs and
 * thus shouldn't send a dupack right now. We rate-limit dupacks in
 * response to out-of-window SYNs or ACKs to mitigate ACK loops or DoS
 * attacks that send repeated SYNs or ACKs for the same connection. To
 * do this, we do not send a duplicate SYNACK or ACK if the remote
 * endpoint is sending out-of-window SYNs or pure ACKs at a high rate.
 */
bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
			  int mib_idx, u32 *last_oow_ack_time)
{
	/* Data packets without SYNs are not likely part of an ACK loop. */
	if ((TCP_SKB_CB(skb)->seq != TCP_SKB_CB(skb)->end_seq) &&
	    !tcp_hdr(skb)->syn)
		return false;

	return __tcp_oow_rate_limited(net, mib_idx, last_oow_ack_time);
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
jason baron                     63  100.00%        1     100.00%
Total                           63  100.00%        1     100.00%

/* RFC 5961 7 [ACK Throttling] */
static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
{
	/* unprotected vars, we dont care of overwrites */
	static u32 challenge_timestamp;
	static unsigned int challenge_count;
	struct tcp_sock *tp = tcp_sk(sk);
	u32 count, now;

	/* First check our per-socket dupack rate limit. */
	if (__tcp_oow_rate_limited(sock_net(sk),
				   LINUX_MIB_TCPACKSKIPPEDCHALLENGE,
				   &tp->last_oow_ack_time))
		return;

	/* Then check host-wide RFC 5961 rate limit. */
	now = jiffies / HZ;
	if (now != challenge_timestamp) {
		u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;

		challenge_timestamp = now;
		WRITE_ONCE(challenge_count, half +
			   prandom_u32_max(sysctl_tcp_challenge_ack_limit));
	}
	count = READ_ONCE(challenge_count);
	if (count > 0) {
		WRITE_ONCE(challenge_count, count - 1);
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
		tcp_send_ack(sk);
	}
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
eric dumazet                   103   72.54%        3      60.00%
neal cardwell                   38   26.76%        1      20.00%
jason baron                      1    0.70%        1      20.00%
Total                          142  100.00%        5     100.00%
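
The host-wide half of the RFC 5961 throttle refills once per second to a randomized budget of roughly half sysctl_tcp_challenge_ack_limit plus a random amount, and each challenge ACK spends one token. A simplified standalone sketch of that budget logic follows; challenge_ack_limit and may_send_challenge_ack are hypothetical names, not kernel symbols.

#include <stdbool.h>
#include <stdint.h>
#include <stdlib.h>

static const uint32_t challenge_ack_limit = 1000;	/* stand-in for the sysctl */

/* Refill the per-second budget to half the limit plus a random amount,
 * then spend one token per challenge ACK until the budget runs out.
 */
static bool may_send_challenge_ack(uint32_t now_sec)
{
	static uint32_t window_start;
	static uint32_t budget;

	if (now_sec != window_start) {	/* new one-second window: refill */
		window_start = now_sec;
		budget = (challenge_ack_limit + 1) / 2 +
			 (uint32_t)(rand() % challenge_ack_limit);
	}
	if (budget > 0) {
		budget--;
		return true;		/* send the challenge ACK */
	}
	return false;			/* budget exhausted for this second */
}

int main(void)
{
	/* First call in a fresh second always succeeds. */
	return may_send_challenge_ack(42) ? 0 : 1;
}

Randomizing the per-second count makes the remaining budget hard for an off-path attacker to probe, which is generally understood to be the reason a fixed global count was replaced here.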


static void tcp_store_ts_recent(struct tcp_sock *tp)
{
	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval;
	tp->rx_opt.ts_recent_stamp = get_seconds();
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
eric dumazet                    32  100.00%        1     100.00%
Total                           32  100.00%        1     100.00%


static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq)
{
	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) {
		/* PAWS bug workaround wrt. ACK frames, the PAWS discard
		 * extra check below makes sure this can only happen
		 * for pure ACK frames. -DaveM
		 *
		 * Not only, also it occurs for expired timestamps.
		 */
		if (tcp_paws_check(&tp->rx_opt, 0))
			tcp_store_ts_recent(tp);
	}
}

Contributors

Person                      Tokens     Prop  Commits  CommitProp
david s. miller                 28   53.85%        1      50.00%
eric dumazet                    24   46.15%        1      50.00%
Total                           52  100.00%        2     100.00%
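
tcp_replace_ts_recent() only stores a new ts_recent when tcp_paws_check() deems the incoming timestamp fresh. As a rough illustration of the PAWS idea (not the kernel's exact implementation), the sketch below accepts a timestamp that has not moved backwards in signed 32-bit terms, and otherwise only accepts when the stored value is very old (roughly 24 days in the real stack); ts_state and paws_ok are hypothetical names.

#include <stdbool.h>
#include <stdint.h>

struct ts_state {
	uint32_t ts_recent;		/* last validated peer timestamp */
	uint32_t ts_recent_stamp;	/* local seconds when it was stored */
};

static bool paws_ok(const struct ts_state *ts, uint32_t rcv_tsval,
		    uint32_t now_sec, uint32_t max_age_sec)
{
	if ((int32_t)(ts->ts_recent - rcv_tsval) <= 0)
		return true;	/* timestamp did not go backwards */
	if (now_sec - ts->ts_recent_stamp >= max_age_sec)
		return true;	/* ts_recent is stale; accept and resync */
	return false;		/* old duplicate: reject per PAWS */
}

int main(void)
{
	struct ts_state ts = { .ts_recent = 1000, .ts_recent_stamp = 50 };

	/* A smaller timestamp arriving shortly afterwards fails PAWS. */
	return paws_ok(&ts, 900, 60, 24 * 24 * 60 * 60) ? 1 : 0;
}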

/* This routine deals with acks during a TLP episode.
 * We mark the end of a TLP episode on receiving TLP dupack or when
 * ack is after tlp_high_seq.
 * Ref: loss detection algorithm in draft-dukkipati-tcpm-tcp-loss-probe.
 */
static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (before(ack, tp->tlp_high_seq))
		return;

	if (flag & FLAG_DSACKING_ACK) {
		/* This DSACK means original and TLP probe arrived; no loss */
		tp->tlp_high_seq = 0;
	} else if (after