cregit-Linux how code gets into the kernel

Release 4.11 net/ipv4/tcp.c

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 *              Florian La Roche, <flla@stud.uni-sb.de>
 *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Matthew Dillon, <dillon@apollo.west.oic.com>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Fixes:
 *              Alan Cox        :       Numerous verify_area() calls
 *              Alan Cox        :       Set the ACK bit on a reset
 *              Alan Cox        :       Stopped it crashing if it closed while
 *                                      sk->inuse=1 and was trying to connect
 *                                      (tcp_err()).
 *              Alan Cox        :       All icmp error handling was broken
 *                                      pointers passed were wrong and the
 *                                      socket was looked up backwards. Nobody
 *                                      tested any icmp error code obviously.
 *              Alan Cox        :       tcp_err() now handled properly. It
 *                                      wakes people on errors. poll
 *                                      behaves and the icmp error race
 *                                      has gone by moving it into sock.c
 *              Alan Cox        :       tcp_send_reset() fixed to work for
 *                                      everything not just packets for
 *                                      unknown sockets.
 *              Alan Cox        :       tcp option processing.
 *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 *                                      syn rule wrong]
 *              Herp Rosmanith  :       More reset fixes
 *              Alan Cox        :       No longer acks invalid rst frames.
 *                                      Acking any kind of RST is right out.
 *              Alan Cox        :       Sets an ignore me flag on an rst
 *                                      receive otherwise odd bits of prattle
 *                                      escape still
 *              Alan Cox        :       Fixed another acking RST frame bug.
 *                                      Should stop LAN workplace lockups.
 *              Alan Cox        :       Some tidyups using the new skb list
 *                                      facilities
 *              Alan Cox        :       sk->keepopen now seems to work
 *              Alan Cox        :       Pulls options out correctly on accepts
 *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 *                                      bit to skb ops.
 *              Alan Cox        :       Tidied tcp_data to avoid a potential
 *                                      nasty.
 *              Alan Cox        :       Added some better commenting, as the
 *                                      tcp is hard to follow
 *              Alan Cox        :       Removed incorrect check for 20 * psh
 *      Michael O'Reilly        :       ack < copied bug fix.
 *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 *              Alan Cox        :       FIN with no memory -> CRASH
 *              Alan Cox        :       Added socket option proto entries.
 *                                      Also added awareness of them to accept.
 *              Alan Cox        :       Added TCP options (SOL_TCP)
 *              Alan Cox        :       Switched wakeup calls to callbacks,
 *                                      so the kernel can layer network
 *                                      sockets.
 *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 *              Alan Cox        :       Handle FIN (more) properly (we hope).
 *              Alan Cox        :       RST frames sent on unsynchronised
 *                                      state ack error.
 *              Alan Cox        :       Put in missing check for SYN bit.
 *              Alan Cox        :       Added tcp_select_window() aka NET2E
 *                                      window non shrink trick.
 *              Alan Cox        :       Added a couple of small NET2E timer
 *                                      fixes
 *              Charles Hedrick :       TCP fixes
 *              Toomas Tamm     :       TCP window fixes
 *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 *              Charles Hedrick :       Rewrote most of it to actually work
 *              Linus           :       Rewrote tcp_read() and URG handling
 *                                      completely
 *              Gerhard Koerting:       Fixed some missing timer handling
 *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 *              Gerhard Koerting:       PC/TCP workarounds
 *              Adam Caldwell   :       Assorted timer/timing errors
 *              Matthew Dillon  :       Fixed another RST bug
 *              Alan Cox        :       Move to kernel side addressing changes.
 *              Alan Cox        :       Beginning work on TCP fastpathing
 *                                      (not yet usable)
 *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 *              Alan Cox        :       TCP fast path debugging
 *              Alan Cox        :       Window clamping
 *              Michael Riepe   :       Bug in tcp_check()
 *              Matt Dillon     :       More TCP improvements and RST bug fixes
 *              Matt Dillon     :       Yet more small nasties removed from the
 *                                      TCP code (Be very nice to this man if
 *                                      tcp finally works 100%) 8)
 *              Alan Cox        :       BSD accept semantics.
 *              Alan Cox        :       Reset on closedown bug.
 *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
 *              Michael Pall    :       Handle poll() after URG properly in
 *                                      all cases.
 *              Michael Pall    :       Undo the last fix in tcp_read_urg()
 *                                      (multi URG PUSH broke rlogin).
 *              Michael Pall    :       Fix the multi URG PUSH problem in
 *                                      tcp_readable(), poll() after URG
 *                                      works now.
 *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
 *                                      BSD api.
 *              Alan Cox        :       Changed the semantics of sk->socket to
 *                                      fix a race and a signal problem with
 *                                      accept() and async I/O.
 *              Alan Cox        :       Relaxed the rules on tcp_sendto().
 *              Yury Shevchuk   :       Really fixed accept() blocking problem.
 *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
 *                                      clients/servers which listen in on
 *                                      fixed ports.
 *              Alan Cox        :       Cleaned the above up and shrank it to
 *                                      a sensible code size.
 *              Alan Cox        :       Self connect lockup fix.
 *              Alan Cox        :       No connect to multicast.
 *              Ross Biro       :       Close unaccepted children on master
 *                                      socket close.
 *              Alan Cox        :       Reset tracing code.
 *              Alan Cox        :       Spurious resets on shutdown.
 *              Alan Cox        :       Giant 15 minute/60 second timer error
 *              Alan Cox        :       Small whoops in polling before an
 *                                      accept.
 *              Alan Cox        :       Kept the state trace facility since
 *                                      it's handy for debugging.
 *              Alan Cox        :       More reset handler fixes.
 *              Alan Cox        :       Started rewriting the code based on
 *                                      the RFC's for other useful protocol
 *                                      references see: Comer, KA9Q NOS, and
 *                                      for a reference on the difference
 *                                      between specifications and how BSD
 *                                      works see the 4.4lite source.
 *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
 *                                      close.
 *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
 *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
 *              Alan Cox        :       Reimplemented timers as per the RFC
 *                                      and using multiple timers for sanity.
 *              Alan Cox        :       Small bug fixes, and a lot of new
 *                                      comments.
 *              Alan Cox        :       Fixed dual reader crash by locking
 *                                      the buffers (much like datagram.c)
 *              Alan Cox        :       Fixed stuck sockets in probe. A probe
 *                                      now gets fed up of retrying without
 *                                      (even a no space) answer.
 *              Alan Cox        :       Extracted closing code better
 *              Alan Cox        :       Fixed the closing state machine to
 *                                      resemble the RFC.
 *              Alan Cox        :       More 'per spec' fixes.
 *              Jorge Cwik      :       Even faster checksumming.
 *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
 *                                      only frames. At least one pc tcp stack
 *                                      generates them.
 *              Alan Cox        :       Cache last socket.
 *              Alan Cox        :       Per route irtt.
 *              Matt Day        :       poll()->select() match BSD precisely on error
 *              Alan Cox        :       New buffers
 *              Marc Tamsky     :       Various sk->prot->retransmits and
 *                                      sk->retransmits misupdating fixed.
 *                                      Fixed tcp_write_timeout: stuck close,
 *                                      and TCP syn retries gets used now.
 *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
 *                                      ack if state is TCP_CLOSED.
 *              Alan Cox        :       Look up device on a retransmit - routes may
 *                                      change. Doesn't yet cope with MSS shrink right
 *                                      but it's a start!
 *              Marc Tamsky     :       Closing in closing fixes.
 *              Mike Shaver     :       RFC1122 verifications.
 *              Alan Cox        :       rcv_saddr errors.
 *              Alan Cox        :       Block double connect().
 *              Alan Cox        :       Small hooks for enSKIP.
 *              Alexey Kuznetsov:       Path MTU discovery.
 *              Alan Cox        :       Support soft errors.
 *              Alan Cox        :       Fix MTU discovery pathological case
 *                                      when the remote claims no mtu!
 *              Marc Tamsky     :       TCP_CLOSE fix.
 *              Colin (G3TNE)   :       Send a reset on syn ack replies in
 *                                      window but wrong (fixes NT lpd problems)
 *              Pedro Roque     :       Better TCP window handling, delayed ack.
 *              Joerg Reuter    :       No modification of locked buffers in
 *                                      tcp_do_retransmit()
 *              Eric Schenk     :       Changed receiver side silly window
 *                                      avoidance algorithm to BSD style
 *                                      algorithm. This doubles throughput
 *                                      against machines running Solaris,
 *                                      and seems to result in general
 *                                      improvement.
 *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
 *      Willy Konynenberg       :       Transparent proxying support.
 *      Mike McLagan            :       Routing by source
 *              Keith Owens     :       Do proper merging with partial SKB's in
 *                                      tcp_do_sendmsg to avoid burstiness.
 *              Eric Schenk     :       Fix fast close down bug with
 *                                      shutdown() followed by close().
 *              Andi Kleen      :       Make poll agree with SIGIO
 *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
 *                                      lingertime == 0 (RFC 793 ABORT Call)
 *      Hirokazu Takahashi      :       Use copy_from_user() instead of
 *                                      csum_and_copy_from_user() if possible.
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or(at your option) any later version.
 *
 * Description of States:
 *
 *      TCP_SYN_SENT            sent a connection request, waiting for ack
 *
 *      TCP_SYN_RECV            received a connection request, sent ack,
 *                              waiting for final ack in three-way handshake.
 *
 *      TCP_ESTABLISHED         connection established
 *
 *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
 *                              transmission of remaining buffered data
 *
 *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
 *                              to shutdown
 *
 *      TCP_CLOSING             both sides have shutdown but we still have
 *                              data we have to finish sending
 *
 *      TCP_TIME_WAIT           timeout to catch resent junk before entering
 *                              closed, can only be entered from FIN_WAIT2
 *                              or CLOSING.  Required because the other end
 *                              may not have gotten our last ACK causing it
 *                              to retransmit the data packet (which we ignore)
 *
 *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
 *                              us to finish writing our data and to shutdown
 *                              (we have to close() to move on to LAST_ACK)
 *
 *      TCP_LAST_ACK            our side has shutdown after remote has
 *                              shutdown.  There may still be data in our
 *                              buffer that we have to finish sending
 *
 *      TCP_CLOSE               socket is finished
 */
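
The states above are the TCP_* values defined in include/net/tcp_states.h. Code in
this file tests whether a socket is in any one of a set of states with the matching
TCPF_* bitmasks (TCPF_FOO == 1 << TCP_FOO), as tcp_ioctl() does further down. A
minimal sketch of that idiom, assuming those standard masks; the helper name
tcp_sk_in_handshake() is hypothetical and only illustrates the pattern:

/* Illustrative sketch only: the TCPF_* masks come from include/net/tcp_states.h. */
static inline bool tcp_sk_in_handshake(const struct sock *sk)
{
        /* True while the three-way handshake has not yet completed. */
        return (1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV);
}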


#define pr_fmt(fmt) "TCP: " fmt

#include <crypto/hash.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/inet_diag.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/inet_common.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <net/busy_poll.h>


int sysctl_tcp_min_tso_segs __read_mostly = 2;


int sysctl_tcp_autocorking __read_mostly = 1;


struct percpu_counter tcp_orphan_count;

EXPORT_SYMBOL_GPL(tcp_orphan_count);


long sysctl_tcp_mem[3] __read_mostly;

int sysctl_tcp_wmem[3] __read_mostly;

int sysctl_tcp_rmem[3] __read_mostly;


EXPORT_SYMBOL(sysctl_tcp_mem);

EXPORT_SYMBOL(sysctl_tcp_rmem);

EXPORT_SYMBOL(sysctl_tcp_wmem);


atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */

EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */

struct percpu_counter tcp_sockets_allocated;

EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */

struct tcp_splice_state {
        struct pipe_inode_info *pipe;
        size_t len;
        unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */

int tcp_memory_pressure __read_mostly;

EXPORT_SYMBOL(tcp_memory_pressure);


void tcp_enter_memory_pressure(struct sock *sk)
{
        if (!tcp_memory_pressure) {
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
                tcp_memory_pressure = 1;
        }
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds (pre-git)1548.39%116.67%
Pavel Emelyanov929.03%233.33%
Arnaldo Carvalho de Melo619.35%233.33%
Hideaki Yoshifuji / 吉藤英明13.23%116.67%
Total31100.00%6100.00%

EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
        u8 res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Julian Anastasov73100.00%1100.00%
Total73100.00%1100.00%

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Julian Anastasov60100.00%1100.00%
Total60100.00%1100.00%
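
To make the two helpers above concrete, here is a worked example as a stand-alone
user-space program (illustrative only, not part of the kernel build; the demo_*
functions simply repeat the logic shown above). With a 1 second initial timeout
capped at 120 seconds, a 10 second budget converts to 4 retransmits, and 4
retransmits convert back to a worst-case period of 15 seconds (1 + 2 + 4 + 8):

#include <stdio.h>

/* User-space copies of secs_to_retrans()/retrans_to_secs() above, for illustration. */
static unsigned char demo_secs_to_retrans(int seconds, int timeout, int rto_max)
{
        unsigned char res = 0;

        if (seconds > 0) {
                int period = timeout;

                res = 1;
                while (seconds > period && res < 255) {
                        res++;
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return res;
}

static int demo_retrans_to_secs(unsigned char retrans, int timeout, int rto_max)
{
        int period = 0;

        if (retrans > 0) {
                period = timeout;
                while (--retrans) {
                        timeout <<= 1;
                        if (timeout > rto_max)
                                timeout = rto_max;
                        period += timeout;
                }
        }
        return period;
}

int main(void)
{
        printf("%u\n", demo_secs_to_retrans(10, 1, 120));      /* prints 4 */
        printf("%d\n", demo_retrans_to_secs(4, 1, 120));       /* prints 15 */
        return 0;
}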

/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        struct tcp_sock *tp = tcp_sk(sk);

        tp->out_of_order_queue = RB_ROOT;
        tcp_init_xmit_timers(sk);
        tcp_prequeue_init(tp);
        INIT_LIST_HEAD(&tp->tsq_node);

        icsk->icsk_rto = TCP_TIMEOUT_INIT;
        tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
        minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);

        /* So many TCP implementations out there (incorrectly) count the
         * initial SYN frame in their delayed-ACK and congestion control
         * algorithms that we must have the following bandaid to talk
         * efficiently to them.  -DaveM
         */
        tp->snd_cwnd = TCP_INIT_CWND;

        /* There's a bubble in the pipe until at least the first ACK. */
        tp->app_limited = ~0U;

        /* See draft-stevens-tcpca-spec-01 for discussion of the
         * initialization of these values.
         */
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
        tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
        tcp_assign_congestion_control(sk);

        tp->tsoffset = 0;

        sk->sk_state = TCP_CLOSE;

        sk->sk_write_space = sk_stream_write_space;
        sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

        icsk->icsk_sync_mss = tcp_sync_mss;

        sk->sk_sndbuf = sysctl_tcp_wmem[1];
        sk->sk_rcvbuf = sysctl_tcp_rmem[1];

        sk_sockets_allocated_inc(sk);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Neal Cardwell14476.19%220.00%
Eric Dumazet126.35%220.00%
Soheil Hassas Yeganeh84.23%110.00%
Nikolay Borisov73.70%110.00%
Yuchung Cheng63.17%110.00%
Andrey Vagin63.17%110.00%
Florian Westphal42.12%110.00%
Yaogong Wang21.06%110.00%
Total189100.00%10100.00%

EXPORT_SYMBOL(tcp_init_sock);
static void tcp_tx_timestamp(struct sock *sk, u16 tsflags, struct sk_buff *skb)
{
        if (tsflags && skb) {
                struct skb_shared_info *shinfo = skb_shinfo(skb);
                struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

                sock_tx_timestamp(sk, tsflags, &shinfo->tx_flags);
                if (tsflags & SOF_TIMESTAMPING_TX_ACK)
                        tcb->txstamp_ack = 1;
                if (tsflags & SOF_TIMESTAMPING_TX_RECORD_MASK)
                        shinfo->tskey = TCP_SKB_CB(skb)->seq + skb->len - 1;
        }
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Willem de Bruijn6367.02%233.33%
Soheil Hassas Yeganeh3132.98%466.67%
Total94100.00%6100.00%
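
tcp_tx_timestamp() acts on the SOF_TIMESTAMPING_* flags that an application requests
through the SO_TIMESTAMPING socket option (or per sendmsg()). A hedged user-space
sketch of asking for software transmit timestamps plus an ACK timestamp, the
SOF_TIMESTAMPING_TX_ACK case tested above; the constants are the standard ones from
linux/net_tstamp.h, enable_tx_ack_timestamps() is an illustrative name, and error
handling is omitted:

#include <sys/socket.h>
#include <linux/net_tstamp.h>

/* Request software TX timestamps, including one when the peer ACKs the last
 * byte of each write (the SOF_TIMESTAMPING_TX_ACK flag checked above).
 */
static int enable_tx_ack_timestamps(int fd)
{
        unsigned int val = SOF_TIMESTAMPING_SOFTWARE |
                           SOF_TIMESTAMPING_TX_ACK |
                           SOF_TIMESTAMPING_OPT_ID;

        return setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
}

The generated timestamps are then delivered on the socket's error queue and read
back with recvmsg() and MSG_ERRQUEUE.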

/*
 * Wait for a TCP event.
 *
 * Note that we don't need to lock the socket, as the upper poll layers
 * take care of normal races (between the test and the event) and we don't
 * go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
        unsigned int mask;
        struct sock *sk = sock->sk;
        const struct tcp_sock *tp = tcp_sk(sk);
        int state;

        sock_rps_record_flow(sk);

        sock_poll_wait(file, sk_sleep(sk), wait);

        state = sk_state_load(sk);
        if (state == TCP_LISTEN)
                return inet_csk_listen_poll(sk);

        /* Socket is not locked. We are protected from async events
         * by poll logic and correct handling of state changes
         * made by other threads is impossible in any case.
         */
        mask = 0;

        /*
         * POLLHUP is certainly not done right. But poll() doesn't
         * have a notion of HUP in just one direction, and for a
         * socket the read side is more interesting.
         *
         * Some poll() documentation says that POLLHUP is incompatible
         * with the POLLOUT/POLLWR flags, so somebody should check this
         * all. But careful, it tends to be safer to return too many
         * bits than too few, and you can easily break real applications
         * if you don't tell them that something has hung up!
         *
         * Check-me.
         *
         * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
         * our fs/select.c). It means that after we received EOF,
         * poll always returns immediately, making impossible poll() on write()
         * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
         * if and only if shutdown has been made in both directions.
         * Actually, it is interesting to look how Solaris and DUX
         * solve this dilemma. I would prefer, if POLLHUP were maskable,
         * then we could set it on SND_SHUTDOWN. BTW examples given
         * in Stevens' books assume exactly this behaviour, it explains
         * why POLLHUP is incompatible with POLLOUT.    --ANK
         *
         * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
         * blocking on fresh not-connected or disconnected socket. --ANK
         */
        if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
                mask |= POLLHUP;
        if (sk->sk_shutdown & RCV_SHUTDOWN)
                mask |= POLLIN | POLLRDNORM | POLLRDHUP;

        /* Connected or passive Fast Open socket? */
        if (state != TCP_SYN_SENT &&
            (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
                int target = sock_rcvlowat(sk, 0, INT_MAX);

                if (tp->urg_seq == tp->copied_seq &&
                    !sock_flag(sk, SOCK_URGINLINE) &&
                    tp->urg_data)
                        target++;

                if (tp->rcv_nxt - tp->copied_seq >= target)
                        mask |= POLLIN | POLLRDNORM;

                if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
                        if (sk_stream_is_writeable(sk)) {
                                mask |= POLLOUT | POLLWRNORM;
                        } else {  /* send SIGIO later */
                                sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
                                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                                /* Race breaker. If space is freed after
                                 * wspace test but before the flags are set,
                                 * IO signal will be lost. Memory barrier
                                 * pairs with the input side.
                                 */
                                smp_mb__after_atomic();
                                if (sk_stream_is_writeable(sk))
                                        mask |= POLLOUT | POLLWRNORM;
                        }
                } else
                        mask |= POLLOUT | POLLWRNORM;

                if (tp->urg_data & TCP_URG_VALID)
                        mask |= POLLPRI;
        } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
                /* Active TCP fastopen socket with defer_connect
                 * Return POLLOUT so application can call write()
                 * in order for kernel to generate SYN+data
                 */
                mask |= POLLOUT | POLLWRNORM;
        }
        /* This barrier is coupled with smp_wmb() in tcp_reset() */
        smp_rmb();
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= POLLERR;

        return mask;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds (pre-git)18655.86%2552.08%
David S. Miller339.91%24.17%
Wei Wang257.51%12.08%
Eric Dumazet226.61%510.42%
Tom Marshall144.20%12.08%
Jerry Chu103.00%12.08%
Arnaldo Carvalho de Melo103.00%48.33%
Willem de Bruijn92.70%12.08%
Motohiro Kosaki72.10%12.08%
David Majnemer51.50%12.08%
Jason Baron41.20%12.08%
James Morris20.60%12.08%
Davide Libenzi20.60%12.08%
Will Newton20.60%12.08%
Jiri Olsa10.30%12.08%
Alexandra N. Kossovsky10.30%12.08%
Total333100.00%48100.00%

EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
        struct tcp_sock *tp = tcp_sk(sk);
        int answ;
        bool slow;

        switch (cmd) {
        case SIOCINQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                slow = lock_sock_fast(sk);
                answ = tcp_inq(sk);
                unlock_sock_fast(sk, slow);
                break;
        case SIOCATMARK:
                answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
                break;
        case SIOCOUTQ:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_una;
                break;
        case SIOCOUTQNSD:
                if (sk->sk_state == TCP_LISTEN)
                        return -EINVAL;

                if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
                        answ = 0;
                else
                        answ = tp->write_seq - tp->snd_nxt;
                break;
        default:
                return -ENOIOCTLCMD;
        }

        return put_user(answ, (int __user *)arg);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds (pre-git)13866.99%110.00%
Mario Schuknecht4722.82%110.00%
Eric Dumazet104.85%220.00%
Arnaldo Carvalho de Melo62.91%330.00%
David S. Miller31.46%110.00%
Tom Herbert10.49%110.00%
Al Viro10.49%110.00%
Total206100.00%10100.00%

EXPORT_SYMBOL(tcp_ioctl);
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
        TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
        tp->pushed_seq = tp->write_seq;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds3191.18%125.00%
Eric Dumazet12.94%125.00%
Arnaldo Carvalho de Melo12.94%125.00%
Changli Gao12.94%125.00%
Total34100.00%4100.00%


static inline bool forced_push(const struct tcp_sock *tp)
{
        return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds3090.91%125.00%
Eric Dumazet26.06%250.00%
Arnaldo Carvalho de Melo13.03%125.00%
Total33100.00%4100.00%


static void skb_entail(struct sock *sk, struct sk_buff *skb)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

        skb->csum = 0;
        tcb->seq = tcb->end_seq = tp->write_seq;
        tcb->tcp_flags = TCPHDR_ACK;
        tcb->sacked = 0;
        __skb_header_release(skb);
        tcp_add_write_queue_tail(sk, skb);
        sk->sk_wmem_queued += skb->truesize;
        sk_mem_charge(sk, skb->truesize);
        if (tp->nonagle & TCP_NAGLE_PUSH)
                tp->nonagle &= ~TCP_NAGLE_PUSH;

        tcp_slow_start_after_idle_check(sk);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds5446.96%19.09%
Arnaldo Carvalho de Melo1412.17%19.09%
Alexey Kuznetsov1311.30%19.09%
Hideo Aoki119.57%19.09%
Ilpo Järvinen108.70%19.09%
Eric Dumazet76.09%327.27%
Herbert Xu43.48%19.09%
David S. Miller10.87%19.09%
Changli Gao10.87%19.09%
Total115100.00%11100.00%


static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
        if (flags & MSG_OOB)
                tp->snd_up = tp->write_seq;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Linus Torvalds2896.55%150.00%
Arnaldo Carvalho de Melo13.45%150.00%
Total29100.00%2100.00%

/* If a not yet filled skb is pushed, do not send it if
 * we have data packets in Qdisc or NIC queues :
 * Because TX completion will happen shortly, it gives a chance
 * to coalesce future sendmsg() payload into this skb, without
 * need for a timer, and with no latency trade off.
 * As packets containing data payload have a bigger truesize
 * than pure acks (dataless) packets, the last checks prevent
 * autocorking if we only have an ACK in Qdisc/NIC queues,
 * or if TX completion was delayed after we processed ACK packet.
 */
static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
                                int size_goal)
{
        return skb->len < size_goal &&
               sysctl_tcp_autocorking &&
               skb != tcp_write_queue_head(sk) &&
               atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Eric Dumazet47100.00%2100.00%
Total47100.00%2100.00%


static void tcp_push(struct sock *sk, int flags, int mss_now,
                     int nonagle, int size_goal)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct sk_buff *skb;

        if (!tcp_send_head(sk))
                return;

        skb = tcp_write_queue_tail(sk);
        if (!(flags & MSG_MORE) || forced_push(tp))
                tcp_mark_push(tp, skb);

        tcp_mark_urg(tp, flags);

        if (tcp_should_autocork(sk, skb, size_goal)) {

                /* avoid atomic op if TSQ_THROTTLED bit is already set */
                if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
                 */
                if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
                        return;
        }

        if (flags & MSG_MORE)
                nonagle = TCP_NAGLE_CORK;

        __tcp_push_pending_frames(sk, mss_now, nonagle);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Eric Dumazet9859.04%342.86%
Linus Torvalds6237.35%114.29%
Krishna Kumar31.81%114.29%
David S. Miller21.20%114.29%
Alexey Kuznetsov10.60%114.29%
Total166100.00%7100.00%


static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
                                unsigned int offset, size_t len)
{
        struct tcp_splice_state *tss = rd_desc->arg.data;
        int ret;

        ret = skb_splice_bits(skb, skb->sk, offset, tss->pipe,
                              min(rd_desc->count, len), tss->flags);
        if (ret > 0)
                rd_desc->count -= ret;
        return ret;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Jens Axboe4960.49%120.00%
Willy Tarreau2227.16%120.00%
Dimitris Michailidis56.17%120.00%
Hannes Frederic Sowa44.94%120.00%
Adrian Bunk11.23%120.00%
Total81100.00%5100.00%


static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
        /* Store TCP splice context information in read_descriptor_t. */
        read_descriptor_t rd_desc = {
                .arg.data = tss,
                .count = tss->len,
        };

        return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Jens Axboe4085.11%150.00%
Willy Tarreau714.89%150.00%
Total47100.00%2100.00%

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 * @sock:  socket to splice from
 * @ppos:  position (not valid)
 * @pipe:  pipe to splice to
 * @len:   number of bytes to splice
 * @flags: splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags)
{
        struct sock *sk = sock->sk;
        struct tcp_splice_state tss = {
                .pipe = pipe,
                .len = len,
                .flags = flags,
        };
        long timeo;
        ssize_t spliced;
        int ret;

        sock_rps_record_flow(sk);
        /*
         * We can't seek on a socket input
         */
        if (unlikely(*ppos))
                return -ESPIPE;

        ret = spliced = 0;

        lock_sock(sk);

        timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
        while (tss.len) {
                ret = __tcp_splice_read(sk, &tss);
                if (ret < 0)
                        break;
                else if (!ret) {
                        if (spliced)
                                break;
                        if (sock_flag(sk, SOCK_DONE))
                                break;
                        if (sk->sk_err) {
                                ret = sock_error(sk);
                                break;
                        }
                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;
                        if (sk->sk_state == TCP_CLOSE) {
                                /*
                                 * This occurs when user tries to read
                                 * from never connected socket.
                                 */
                                if (!sock_flag(sk, SOCK_DONE))
                                        ret = -ENOTCONN;
                                break;
                        }
                        if (!timeo) {
                                ret = -EAGAIN;
                                break;
                        }
                        /* if __tcp_splice_read() got nothing while we have
                         * an skb in receive queue, we do not want to loop.
                         * This might happen with URG data.
                         */
                        if (!skb_queue_empty(&sk->sk_receive_queue))
                                break;
                        sk_wait_data(sk, &timeo, NULL);
                        if (signal_pending(current)) {
                                ret = sock_intr_errno(timeo);
                                break;
                        }
                        continue;
                }
                tss.len -= ret;
                spliced += ret;

                if (!timeo)
                        break;
                release_sock(sk);
                lock_sock(sk);

                if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
                    (sk->sk_shutdown & RCV_SHUTDOWN) ||
                    signal_pending(current))
                        break;
        }

        release_sock(sk);

        if (spliced)
                return spliced;

        return ret;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Jens Axboe29890.30%116.67%
Eric Dumazet195.76%233.33%
Willy Tarreau61.82%116.67%
Changli Gao51.52%116.67%
Sabrina Dubroca20.61%116.67%
Total330100.00%6100.00%

EXPORT_SYMBOL(tcp_splice_read);
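
On the user-space side this is the path taken when splice(2) is called with a TCP
socket as its source. A rough sketch of relaying socket data into another descriptor
through a pipe, under the usual splice(2) semantics; relay_from_tcp() is an
illustrative name, _GNU_SOURCE is required for splice(), and error handling is
trimmed:

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Relay up to "len" bytes from a connected TCP socket into "out_fd" via an
 * intermediate pipe; this is the pattern tcp_splice_read() serves in the kernel.
 */
static ssize_t relay_from_tcp(int sockfd, int out_fd, size_t len)
{
        int pipefd[2];
        ssize_t n, total = 0;

        if (pipe(pipefd) < 0)
                return -1;

        while (len > 0) {
                n = splice(sockfd, NULL, pipefd[1], NULL, len,
                           SPLICE_F_MOVE | SPLICE_F_MORE);
                if (n <= 0)
                        break;
                /* Drain what was just queued into the pipe. */
                splice(pipefd[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
                total += n;
                len -= n;
        }

        close(pipefd[0]);
        close(pipefd[1]);
        return total;
}
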
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
                                    bool force_schedule)
{
        struct sk_buff *skb;

        /* The TCP header must be at least 32-bit aligned.  */
        size = ALIGN(size, 4);

        if (unlikely(tcp_under_memory_pressure(sk)))
                sk_mem_reclaim_partial(sk);

        skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
        if (likely(skb)) {
                bool mem_scheduled;

                if (force_schedule) {
                        mem_scheduled = true;
                        sk_forced_mem_schedule(sk, skb->truesize);
                } else {
                        mem_scheduled = sk_wmem_schedule(sk, skb->truesize);
                }
                if (likely(mem_scheduled)) {
                        skb_reserve(skb, sk->sk_prot->max_header);
                        /*
                         * Make sure that we have exactly size bytes
                         * available to the caller, no more, no less.
                         */
                        skb->reserved_tailroom = skb->end - skb->tail - size;
                        return skb;
                }
                __kfree_skb(skb);
        } else {
                sk->sk_prot->enter_memory_pressure(sk);
                sk_stream_moderate_sndbuf(sk);
        }
        return NULL;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Pavel Emelyanov10158.05%337.50%
Eric Dumazet7241.38%450.00%
Hideo Aoki10.57%112.50%
Total174100.00%8100.00%


static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
                                       int large_allowed)
{
        struct tcp_sock *tp = tcp_sk(sk);
        u32 new_size_goal, size_goal;

        if (!large_allowed || !sk_can_gso(sk))
                return mss_now;

        /* Note : tcp_tso_autosize() will eventually split this later */
        new_size_goal = sk->sk_gso_max_size - 1 - MAX_TCP_HEADER;
        new_size_goal = tcp_bound_to_half_wnd(tp, new_size_goal);

        /* We try hard to avoid divides here */
        size_goal = tp->gso_segs * mss_now;
        if (unlikely(new_size_goal < size_goal ||
                     new_size_goal >= size_goal + mss_now)) {
                tp->gso_segs = min_t(u16, new_size_goal / mss_now,
                                     sk->sk_gso_max_segs);
                size_goal = tp->gso_segs * mss_now;
        }

        return max(size_goal, mss_now);
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Ilpo Järvinen8164.29%342.86%
Eric Dumazet3628.57%342.86%
Ben Hutchings97.14%114.29%
Total126100.00%7100.00%


static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
        int mss_now;

        mss_now = tcp_current_mss(sk);
        *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

        return mss_now;
}

Contributors

Person  Tokens  Prop  Commits  CommitProp
Ilpo Järvinen48100.00%1100.00%
Total48100.00%1100.00%


static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset, size_t size, int flags) { struct tcp_sock *tp = tcp_sk(sk); int mss_now, size_goal; int err; ssize_t copied; long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT); /* Wait for a connection to finish. One exception is TCP Fast Open * (passive side) where data is allowed to be sent before a connection * is fully established. */ if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) && !tcp_passive_fastopen(sk)) { err = sk_stream_wait_connect(sk, &timeo); if (err != 0) goto out_err; } sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk); mss_now = tcp_send_mss(sk, &size_goal, flags); copied = 0; err = -EPIPE; if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) goto out_err; while (size > 0) { struct sk_buff *skb = tcp_write_queue_tail(sk); int copy, i; bool can_coalesce; if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0 || !tcp_skb_can_collapse_to(skb)) { new_segment: if (!sk_stream_memory_free(sk)) goto wait_for_sndbuf; skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation, skb_queue_empty(&sk->sk_write_queue)); if (!skb) goto wait_for_memory; skb_entail(sk, skb); copy = size_goal; } if (copy > size) copy = size; i = skb_shinfo(skb)->nr_frags; can_coalesce = skb_can_coalesce(skb, i, page, offset); if (!can_coalesce && i >= sysctl_max_skb_frags) { tcp_mark_push(tp, skb); goto new_segment; } if (!sk_wmem_schedule(sk, copy)) goto wait_for_memory; if (can_coalesce) { skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy); } else { get_page(page); skb_fill_page_desc(skb, i, page, offset, copy); } skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; skb->len += copy; skb->data_len += copy; skb->truesize += copy; sk->sk_wmem_queued += copy; sk_mem_charge(sk, copy); skb->ip_summed = CHECKSUM_PARTIAL; tp->write_seq += copy; TCP_SKB_CB(skb)->end_seq += copy; tcp_skb_pcount_set(skb, 0); if (!copied) TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH; copied += copy; offset += copy; size -= copy; if (!size) goto out; if (skb->len < size_goal || (flags & MSG_OOB)) continue; if (forced_push(tp)) { tcp_mark_push(tp, skb); __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH); } else if (skb == tcp_send_head(sk)) tcp_push_one(sk, mss_now