cregit-Linux: how code gets into the kernel

Release 4.11, net/ipv4/tcp_ipv4.c
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 *              IPv4 specific functions
 *
 *
 *              code split from:
 *              linux/ipv4/tcp.c
 *              linux/ipv4/tcp_input.c
 *              linux/ipv4/tcp_output.c
 *
 *              See tcp.c for author information
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *              David S. Miller :       New socket lookup architecture.
 *                                      This code is dedicated to John Dyson.
 *              David S. Miller :       Change semantics of established hash,
 *                                      half is devoted to TIME_WAIT sockets
 *                                      and the rest go in the other half.
 *              Andi Kleen :            Add support for syncookies and fixed
 *                                      some bugs: ip options weren't passed to
 *                                      the TCP layer, missed a check for an
 *                                      ACK bit.
 *              Andi Kleen :            Implemented fast path mtu discovery.
 *                                      Fixed many serious bugs in the
 *                                      request_sock handling and moved
 *                                      most of it into the af independent code.
 *                                      Added tail drop and some other bugfixes.
 *                                      Added new listen semantics.
 *              Mike McLagan    :       Routing by source
 *      Juan Jose Ciarlante:            ip_dynaddr bits
 *              Andi Kleen:             various fixes.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 *                                      coma.
 *      Andi Kleen              :       Fix new listen.
 *      Andi Kleen              :       Fix accept error reporting.
 *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 *                                      a single port at the same time.
 */


#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <crypto/hash.h>
#include <linux/scatterlist.h>


int sysctl_tcp_low_latency __read_mostly;

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif


struct inet_hashinfo tcp_hashinfo;

EXPORT_SYMBOL(tcp_hashinfo);


static u32 tcp_v4_init_sequence(const struct sk_buff *skb, u32 *tsoff)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
					  ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest,
					  tcp_hdr(skb)->source, tsoff);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo      36      72.00%   3        37.50%
Florian Westphal              7       14.00%   1        12.50%
Linus Torvalds                4       8.00%    1        12.50%
David S. Miller               2       4.00%    2        25.00%
Eric Dumazet                  1       2.00%    1        12.50%
Total                         50      100.00%  8        100.00%
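
tcp_v4_init_sequence() above feeds the 4-tuple into secure_tcp_sequence_number(), which follows the RFC 6528 recipe: ISN = M + F(saddr, daddr, sport, dport, secret), where F is a keyed hash (MD5 at this release) and M a fine-grained clock (the kernel shifts a nanosecond clock right by 6, one tick every 64 ns). A minimal userspace sketch of that shape; toy_mix() is a made-up, deliberately non-cryptographic stand-in for the keyed hash:

#include <stdint.h>
#include <time.h>

/* Toy stand-in for the kernel's keyed hash -- NOT cryptographic. */
static uint32_t toy_mix(uint32_t a, uint32_t b, uint32_t c, uint32_t secret)
{
	uint32_t h = a ^ secret;

	h = (h ^ b) * 0x9e3779b1u;
	h = (h ^ c) * 0x85ebca6bu;
	return h ^ (h >> 16);
}

/* RFC 6528 shape: keyed hash of the 4-tuple plus a ticking clock. */
static uint32_t toy_secure_isn(uint32_t saddr, uint32_t daddr,
			       uint16_t sport, uint16_t dport,
			       uint32_t secret)
{
	struct timespec ts;
	uint64_t ns;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	ns = (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
	return toy_mix(saddr, daddr,
		       ((uint32_t)sport << 16) | dport, secret)
	       + (uint32_t)(ns >> 6);	/* one tick per 64 ns */
}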


int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp) { const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw); struct tcp_sock *tp = tcp_sk(sk); /* With PAWS, it is safe from the viewpoint of data integrity. Even without PAWS it is safe provided sequence spaces do not overlap i.e. at data rates <= 80Mbit/sec. Actually, the idea is close to VJ's one, only timestamp cache is held not per host, but per port pair and TW bucket is used as state holder. If TW bucket has been already destroyed we fall back to VJ's scheme and use initial timestamp retrieved from peer table. */ if (tcptw->tw_ts_recent_stamp && (!twp || (sock_net(sk)->ipv4.sysctl_tcp_tw_reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) { tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2; if (tp->write_seq == 0) tp->write_seq = 1; tp->rx_opt.ts_recent = tcptw->tw_ts_recent; tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp; sock_hold(sktw); return 1; } return 0; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo      121     92.37%   1        25.00%
Haishuang Yan                 7       5.34%    1        25.00%
James Morris                  2       1.53%    1        25.00%
Ian Morris                    1       0.76%    1        25.00%
Total                         131     100.00%  4        100.00%
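
The sock_net(sk)->ipv4.sysctl_tcp_tw_reuse test in tcp_twsk_unique() is the per-namespace net.ipv4.tcp_tw_reuse knob. A minimal sketch of flipping it from userspace through procfs (needs privilege; as the comment in the code implies, it is only safe with TCP timestamps enabled):

#include <stdio.h>

/* Enable time-wait reuse for outgoing connections. */
int enable_tw_reuse(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f)
		return -1;
	fputs("1\n", f);
	fclose(f);
	return 0;
}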

EXPORT_SYMBOL_GPL(tcp_twsk_unique);

/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) { struct sockaddr_in *usin = (struct sockaddr_in *)uaddr; struct inet_sock *inet = inet_sk(sk); struct tcp_sock *tp = tcp_sk(sk); __be16 orig_sport, orig_dport; __be32 daddr, nexthop; struct flowi4 *fl4; struct rtable *rt; int err; u32 seq; struct ip_options_rcu *inet_opt; struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row; if (addr_len < sizeof(struct sockaddr_in)) return -EINVAL; if (usin->sin_family != AF_INET) return -EAFNOSUPPORT; nexthop = daddr = usin->sin_addr.s_addr; inet_opt = rcu_dereference_protected(inet->inet_opt, lockdep_sock_is_held(sk)); if (inet_opt && inet_opt->opt.srr) { if (!daddr) return -EINVAL; nexthop = inet_opt->opt.faddr; } orig_sport = inet->inet_sport; orig_dport = usin->sin_port; fl4 = &inet->cork.fl.u.ip4; rt = ip_route_connect(fl4, nexthop, inet->inet_saddr, RT_CONN_FLAGS(sk), sk->sk_bound_dev_if, IPPROTO_TCP, orig_sport, orig_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); if (err == -ENETUNREACH) IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES); return err; } if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) { ip_rt_put(rt); return -ENETUNREACH; } if (!inet_opt || !inet_opt->opt.srr) daddr = fl4->daddr; if (!inet->inet_saddr) inet->inet_saddr = fl4->saddr; sk_rcv_saddr_set(sk, inet->inet_saddr); if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) { /* Reset inherited state */ tp->rx_opt.ts_recent = 0; tp->rx_opt.ts_recent_stamp = 0; if (likely(!tp->repair)) tp->write_seq = 0; } if (tcp_death_row->sysctl_tw_recycle && !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr) tcp_fetch_timewait_stamp(sk, &rt->dst); inet->inet_dport = usin->sin_port; sk_daddr_set(sk, daddr); inet_csk(sk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT; /* Socket identity is still unknown (sport may be zero). * However we set state to SYN-SENT and not releasing socket * lock select source port, enter ourselves into the hash tables and * complete initialization after this. */ tcp_set_state(sk, TCP_SYN_SENT); err = inet_hash_connect(tcp_death_row, sk); if (err) goto failure; sk_set_txhash(sk); rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, inet->inet_sport, inet->inet_dport, sk); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto failure; } /* OK, now commit destination to socket. */ sk->sk_gso_type = SKB_GSO_TCPV4; sk_setup_caps(sk, &rt->dst); rt = NULL; if (likely(!tp->repair)) { seq = secure_tcp_sequence_number(inet->inet_saddr, inet->inet_daddr, inet->inet_sport, usin->sin_port, &tp->tsoffset); if (!tp->write_seq) tp->write_seq = seq; } inet->inet_id = tp->write_seq ^ jiffies; if (tcp_fastopen_defer_connect(sk, &err)) return err; if (err) goto failure; err = tcp_connect(sk); if (err) goto failure; return 0; failure: /* * This unhashes the socket and releases the local port, * if necessary. */ tcp_set_state(sk, TCP_CLOSE); ip_rt_put(rt); sk->sk_route_caps = 0; inet->inet_dport = 0; return err; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo      206     30.21%   13       21.31%
David S. Miller               143     20.97%   11       18.03%
Alexey Kuznetsov              66      9.68%    2        3.28%
Linus Torvalds (pre-git)      64      9.38%    11       18.03%
Eric Dumazet                  55      8.06%    5        8.20%
Wei Wang                      24      3.52%    1        1.64%
Pavel Emelyanov               22      3.23%    2        3.28%
Alexey Kodanev                19      2.79%    1        1.64%
Stephen Hemminger             18      2.64%    1        1.64%
Haishuang Yan                 16      2.35%    1        1.64%
Linus Torvalds                16      2.35%    3        4.92%
Wei Dong                      12      1.76%    1        1.64%
Herbert Xu                    6       0.88%    1        1.64%
Florian Westphal              5       0.73%    1        1.64%
Sathya Perla                  4       0.59%    1        1.64%
William Allen Simpson         1       0.15%    1        1.64%
Patrick McHardy               1       0.15%    1        1.64%
Al Viro                       1       0.15%    1        1.64%
Steven Cole                   1       0.15%    1        1.64%
Hannes Frederic Sowa          1       0.15%    1        1.64%
Tom Herbert                   1       0.15%    1        1.64%
Total                         682     100.00%  61       100.00%
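
The userspace view of the path above is just a connect() on an AF_INET stream socket: the kernel then routes, picks a source port via inet_hash_connect(), chooses an ISN and emits the SYN from tcp_v4_connect(). A self-contained sketch:

#include <arpa/inet.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

/* Blocking IPv4 TCP dial; any sin_family other than AF_INET would
 * come back from tcp_v4_connect() as -EAFNOSUPPORT. */
int tcp_dial(const char *ip, unsigned short port)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(port);
	if (inet_pton(AF_INET, ip, &dst.sin_addr) != 1 ||
	    connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}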

EXPORT_SYMBOL(tcp_v4_connect);

/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
void tcp_v4_mtu_reduced(struct sock *sk) { struct inet_sock *inet = inet_sk(sk); struct dst_entry *dst; u32 mtu; if ((1 << sk->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)) return; mtu = tcp_sk(sk)->mtu_info; dst = inet_csk_update_pmtu(sk, mtu); if (!dst) return; /* Something is about to be wrong... Remember soft error * for the case, if this connection will not able to recover. */ if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst)) sk->sk_err_soft = EMSGSIZE; mtu = dst_mtu(dst); if (inet->pmtudisc != IP_PMTUDISC_DONT && ip_sk_accept_pmtu(sk) && inet_csk(sk)->icsk_pmtu_cookie > mtu) { tcp_sync_mss(sk, mtu); /* Resend the TCP packet because it's * clear that the old packet has been * dropped. This is the new "fast" path mtu * discovery. */ tcp_simple_retransmit(sk); } /* else let the usual retransmit timer handle it */ }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      61      44.53%   5        31.25%
Eric Dumazet                  35      25.55%   2        12.50%
David S. Miller               16      11.68%   2        12.50%
Alexey Kuznetsov              10      7.30%    1        6.25%
Arnaldo Carvalho de Melo      8       5.84%    4        25.00%
Hannes Frederic Sowa          5       3.65%    1        6.25%
Herbert Xu                    2       1.46%    1        6.25%
Total                         137     100.00%  16       100.00%
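
tcp_v4_mtu_reduced() runs when an ICMP_FRAG_NEEDED for one of this socket's packets arrives. Whether the stack sets DF and honours such indications per flow is governed by the IP_MTU_DISCOVER socket option; a small sketch forcing full path MTU discovery:

#include <netinet/in.h>
#include <sys/socket.h>

/* Set DF on outgoing packets and let ICMP_FRAG_NEEDED drive PMTU. */
int enable_pmtu_discovery(int fd)
{
	int val = IP_PMTUDISC_DO;

	return setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER,
			  &val, sizeof(val));
}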

EXPORT_SYMBOL(tcp_v4_mtu_reduced);
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);

	if (dst)
		dst->ops->redirect(dst, sk, skb);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
David S. Miller               45      100.00%  2        100.00%
Total                         45      100.00%  2        100.00%

/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort) { struct request_sock *req = inet_reqsk(sk); struct net *net = sock_net(sk); /* ICMPs are not backlogged, hence we cannot get * an established socket here. */ if (seq != tcp_rsk(req)->snt_isn) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); } else if (abort) { /* * Still in SYN_RECV, just remove it silently. * There is no good way to pass the error to the newly * created socket, and POSIX does not want network * errors returned from accept(). */ inet_csk_reqsk_queue_drop(req->rsk_listener, req); tcp_listendrop(req->rsk_listener); } reqsk_put(req); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  83      96.51%   5        83.33%
Fan Du                        3       3.49%    1        16.67%
Total                         86      100.00%  6        100.00%

EXPORT_SYMBOL(tcp_req_err);

/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info) { const struct iphdr *iph = (const struct iphdr *)icmp_skb->data; struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2)); struct inet_connection_sock *icsk; struct tcp_sock *tp; struct inet_sock *inet; const int type = icmp_hdr(icmp_skb)->type; const int code = icmp_hdr(icmp_skb)->code; struct sock *sk; struct sk_buff *skb; struct request_sock *fastopen; __u32 seq, snd_una; __u32 remaining; int err; struct net *net = dev_net(icmp_skb->dev); sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr, th->dest, iph->saddr, ntohs(th->source), inet_iif(icmp_skb)); if (!sk) { __ICMP_INC_STATS(net, ICMP_MIB_INERRORS); return; } if (sk->sk_state == TCP_TIME_WAIT) { inet_twsk_put(inet_twsk(sk)); return; } seq = ntohl(th->seq); if (sk->sk_state == TCP_NEW_SYN_RECV) return tcp_req_err(sk, seq, type == ICMP_PARAMETERPROB || type == ICMP_TIME_EXCEEDED || (type == ICMP_DEST_UNREACH && (code == ICMP_NET_UNREACH || code == ICMP_HOST_UNREACH))); bh_lock_sock(sk); /* If too many ICMPs get dropped on busy * servers this needs to be solved differently. * We do take care of PMTU discovery (RFC1191) special case : * we can receive locally generated ICMP messages while socket is held. */ if (sock_owned_by_user(sk)) { if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)) __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS); } if (sk->sk_state == TCP_CLOSE) goto out; if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto out; } icsk = inet_csk(sk); tp = tcp_sk(sk); /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */ fastopen = tp->fastopen_rsk; snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una; if (sk->sk_state != TCP_LISTEN && !between(seq, snd_una, tp->snd_nxt)) { __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS); goto out; } switch (type) { case ICMP_REDIRECT: if (!sock_owned_by_user(sk)) do_redirect(icmp_skb, sk); goto out; case ICMP_SOURCE_QUENCH: /* Just silently ignore these. */ goto out; case ICMP_PARAMETERPROB: err = EPROTO; break; case ICMP_DEST_UNREACH: if (code > NR_ICMP_UNREACH) goto out; if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ /* We are not interested in TCP_LISTEN and open_requests * (SYN-ACKs send out by Linux are always <576bytes so * they should go through unfragmented). */ if (sk->sk_state == TCP_LISTEN) goto out; tp->mtu_info = info; if (!sock_owned_by_user(sk)) { tcp_v4_mtu_reduced(sk); } else { if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &sk->sk_tsq_flags)) sock_hold(sk); } goto out; } err = icmp_err_convert[code].errno; /* check if icmp_skb allows revert of backoff * (see draft-zimmermann-tcp-lcd) */ if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH) break; if (seq != tp->snd_una || !icsk->icsk_retransmits || !icsk->icsk_backoff || fastopen) break; if (sock_owned_by_user(sk)) break; icsk->icsk_backoff--; icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) : TCP_TIMEOUT_INIT; icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX); skb = tcp_write_queue_head(sk); BUG_ON(!skb); remaining = icsk->icsk_rto - min(icsk->icsk_rto, tcp_time_stamp - tcp_skb_timestamp(skb)); if (remaining) { inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, remaining, TCP_RTO_MAX); } else { /* RTO revert clocked out retransmission. 
* Will retransmit now */ tcp_retransmit_timer(sk); } break; case ICMP_TIME_EXCEEDED: err = EHOSTUNREACH; break; default: goto out; } switch (sk->sk_state) { case TCP_SYN_SENT: case TCP_SYN_RECV: /* Only in fast or simultaneous open. If a fast open socket is * is already accepted it is treated as a connected one below. */ if (fastopen && !fastopen->sk) break; if (!sock_owned_by_user(sk)) { sk->sk_err = err; sk->sk_error_report(sk); tcp_done(sk); } else { sk->sk_err_soft = err; } goto out; } /* If we've already connected we will keep trying * until we time out, or the user gives up. * * rfc1122 4.2.3.9 allows to consider as hard errors * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too, * but it is obsoleted by pmtu discovery). * * Note, that in modern internet, where routing is unreliable * and in each dark corner broken firewalls sit, sending random * errors ordered by their masters even this two messages finally lose * their original sense (even Linux sends invalid PORT_UNREACHs) * * Now we are in compliance with RFCs. * --ANK (980905) */ inet = inet_sk(sk); if (!sock_owned_by_user(sk) && inet->recverr) { sk->sk_err = err; sk->sk_error_report(sk); } else { /* Only an error on timeout */ sk->sk_err_soft = err; } out: bh_unlock_sock(sk); sock_put(sk); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      316     40.41%   8        16.33%
Damian Lukowski               133     17.01%   2        4.08%
Eric Dumazet                  126     16.11%   13       26.53%
David S. Miller               37      4.73%    4        8.16%
Yuchung Cheng                 37      4.73%    1        2.04%
Arnaldo Carvalho de Melo      29      3.71%    8        16.33%
Stephen Hemminger             27      3.45%    1        2.04%
Linus Torvalds                22      2.81%    1        2.04%
Pavel Emelyanov               19      2.43%    4        8.16%
Benjamin LaHaise              12      1.53%    1        2.04%
Jerry Chu                     9       1.15%    2        4.08%
Jon Maxwell                   8       1.02%    1        2.04%
Hideaki Yoshifuji / 吉藤英明   6       0.77%    2        4.08%
Ian Morris                    1       0.13%    1        2.04%
Total                         782     100.00%  49       100.00%
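
As the comment above tcp_v4_err() notes, a positive error value is the ICMP type and code packed as (type << 8) | code. Two trivial helpers make the convention explicit:

/* Unpack the err > 0 convention described above. */
static inline int icmp_err_type(int err)
{
	return (err >> 8) & 0xff;
}

static inline int icmp_err_code(int err)
{
	return err & 0xff;
}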


void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr) { struct tcphdr *th = tcp_hdr(skb); if (skb->ip_summed == CHECKSUM_PARTIAL) { th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0); skb->csum_start = skb_transport_header(skb) - skb->head; skb->csum_offset = offsetof(struct tcphdr, check); } else { th->check = tcp_v4_check(skb->len, saddr, daddr, csum_partial(th, th->doff << 2, skb->csum)); } }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds                37      33.04%   1        10.00%
Linus Torvalds (pre-git)      35      31.25%   2        20.00%
Herbert Xu                    28      25.00%   2        20.00%
Arnaldo Carvalho de Melo      9       8.04%    2        20.00%
Al Viro                       1       0.89%    1        10.00%
Patrick McHardy               1       0.89%    1        10.00%
David S. Miller               1       0.89%    1        10.00%
Total                         112     100.00%  10       100.00%
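
__tcp_v4_send_check() either primes the offload fields (CHECKSUM_PARTIAL) or computes the full one's-complement sum over the IPv4 pseudo-header plus segment. A self-contained reference implementation of that sum, for illustration; saddr/daddr are the 4-byte addresses exactly as they appear in the IP header, and seg is the TCP header plus payload with its checksum field zeroed:

#include <stddef.h>
#include <stdint.h>

/* Sum big-endian 16-bit words, padding an odd trailing byte with zero. */
static uint32_t sum16(const uint8_t *p, size_t len)
{
	uint32_t s = 0;
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		s += ((uint32_t)p[i] << 8) | p[i + 1];
	if (len & 1)
		s += (uint32_t)p[len - 1] << 8;
	return s;
}

/* Pseudo-header (saddr, daddr, zero, IPPROTO_TCP, TCP length) + segment,
 * folded to 16 bits and complemented; store the result high byte first. */
static uint16_t tcp4_checksum(const uint8_t saddr[4], const uint8_t daddr[4],
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = sum16(saddr, 4) + sum16(daddr, 4)
		     + 6 /* IPPROTO_TCP */ + (uint32_t)len
		     + sum16(seg, len);

	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}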

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Herbert Xu                    38      97.44%   1        50.00%
Eric Dumazet                  1       2.56%    1        50.00%
Total                         39      100.00%  2        100.00%

EXPORT_SYMBOL(tcp_v4_send_check);

/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 * Answer: if a packet caused RST, it is not for a socket
 *	   existing in our system, if it is matched to a socket,
 *	   it is just duplicate segment or bug in other side's TCP.
 * So that we build reply only basing on parameters
 * arrived with segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; #ifdef CONFIG_TCP_MD5SIG __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)]; #endif } rep; struct ip_reply_arg arg; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key = NULL; const __u8 *hash_location = NULL; unsigned char newhash[16]; int genhash; struct sock *sk1 = NULL; #endif struct net *net; /* Never send a reset in response to a reset. */ if (th->rst) return; /* If sk not NULL, it means we did a successful lookup and incoming * route had to be correct. prequeue might have dropped our dst. */ if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL) return; /* Swap the send and the receive. */ memset(&rep, 0, sizeof(rep)); rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = sizeof(struct tcphdr) / 4; rep.th.rst = 1; if (th->ack) { rep.th.seq = th->ack_seq; } else { rep.th.ack = 1; rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin + skb->len - (th->doff << 2)); } memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev); #ifdef CONFIG_TCP_MD5SIG rcu_read_lock(); hash_location = tcp_parse_md5sig_option(th); if (sk && sk_fullsock(sk)) { key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); } else if (hash_location) { /* * active side is lost. Try to find listening socket through * source port, and then find md5 key through listening socket. * we are not loose security here: * Incoming packet is checked with md5 hash with finding key, * no RST generated if md5 hash doesn't match. */ sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0, ip_hdr(skb)->saddr, th->source, ip_hdr(skb)->daddr, ntohs(th->source), inet_iif(skb)); /* don't send rst if it can't find key */ if (!sk1) goto out; key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *) &ip_hdr(skb)->saddr, AF_INET); if (!key) goto out; genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) goto out; } if (key) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); /* Update length and the length the header thinks exists */ arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len / 4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. * routing might fail in this case. No choice here, if we choose to force * input interface, we will misroute in case of asymmetric route. */ if (sk) arg.bound_dev_if = sk->sk_bound_dev_if; BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) != offsetof(struct inet_timewait_sock, tw_bound_dev_if)); arg.tos = ip_hdr(skb)->tos; arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL); local_bh_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); __TCP_INC_STATS(net, TCP_MIB_OUTRSTS); local_bh_enable(); #ifdef CONFIG_TCP_MD5SIG out: rcu_read_unlock(); #endif }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      226     28.61%   12       26.09%
Hideaki Yoshifuji / 吉藤英明   171     21.65%   2        4.35%
Shawn Lu                      149     18.86%   2        4.35%
Eric Dumazet                  77      9.75%    12       26.09%
Florian Westphal              55      6.96%    2        4.35%
Lorenzo Colitti               20      2.53%    1        2.17%
Arnaldo Carvalho de Melo      18      2.28%    3        6.52%
KOVACS Krisztian              16      2.03%    1        2.17%
David S. Miller               15      1.90%    2        4.35%
Tom Herbert                   11      1.39%    1        2.17%
Pavel Emelyanov               11      1.39%    2        4.35%
Ilpo Järvinen                 10      1.27%    2        4.35%
Alexey Kuznetsov              5       0.63%    1        2.17%
Craig Gallek                  4       0.51%    1        2.17%
Adam Langley                  1       0.13%    1        2.17%
Al Viro                       1       0.13%    1        2.17%
Total                         790     100.00%  46       100.00%
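
The sequence-number selection in tcp_v4_send_reset() follows RFC 793: a reset answering a segment that carried an ACK reuses that ACK value as its own sequence number; otherwise it sends SEQ=0 and ACKs everything the offending segment occupied. A sketch of just that arithmetic, with struct seg_info as a hypothetical summary of the incoming segment:

#include <stdint.h>

struct seg_info {		/* hypothetical: digest of the offending segment */
	uint32_t seq, ack_seq;
	int syn, fin, ack;
	uint32_t payload_len;	/* bytes after the TCP header */
};

/* Mirror of the rep.th setup in tcp_v4_send_reset() above. */
static void rst_numbers(const struct seg_info *s,
			uint32_t *rst_seq, uint32_t *rst_ack, int *rst_has_ack)
{
	if (s->ack) {
		*rst_seq = s->ack_seq;	/* RST.SEQ = their ACK */
		*rst_has_ack = 0;
		*rst_ack = 0;
	} else {
		*rst_seq = 0;
		*rst_has_ack = 1;	/* ACK everything they sent */
		*rst_ack = s->seq + s->syn + s->fin + s->payload_len;
	}
}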

/* The code following below sending ACKs in SYN-RECV and TIME-WAIT states outside socket context is ugly, certainly. What can I do? */
static void tcp_v4_send_ack(const struct sock *sk, struct sk_buff *skb, u32 seq, u32 ack, u32 win, u32 tsval, u32 tsecr, int oif, struct tcp_md5sig_key *key, int reply_flags, u8 tos) { const struct tcphdr *th = tcp_hdr(skb); struct { struct tcphdr th; __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2) #ifdef CONFIG_TCP_MD5SIG + (TCPOLEN_MD5SIG_ALIGNED >> 2) #endif ]; } rep; struct net *net = sock_net(sk); struct ip_reply_arg arg; memset(&rep.th, 0, sizeof(struct tcphdr)); memset(&arg, 0, sizeof(arg)); arg.iov[0].iov_base = (unsigned char *)&rep; arg.iov[0].iov_len = sizeof(rep.th); if (tsecr) { rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP); rep.opt[1] = htonl(tsval); rep.opt[2] = htonl(tsecr); arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED; } /* Swap the send and the receive. */ rep.th.dest = th->source; rep.th.source = th->dest; rep.th.doff = arg.iov[0].iov_len / 4; rep.th.seq = htonl(seq); rep.th.ack_seq = htonl(ack); rep.th.ack = 1; rep.th.window = htons(win); #ifdef CONFIG_TCP_MD5SIG if (key) { int offset = (tsecr) ? 3 : 0; rep.opt[offset++] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) | (TCPOPT_MD5SIG << 8) | TCPOLEN_MD5SIG); arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED; rep.th.doff = arg.iov[0].iov_len/4; tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset], key, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &rep.th); } #endif arg.flags = reply_flags; arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, /* XXX */ arg.iov[0].iov_len, IPPROTO_TCP, 0); arg.csumoffset = offsetof(struct tcphdr, check) / 2; if (oif) arg.bound_dev_if = oif; arg.tos = tos; arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL); local_bh_disable(); ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk), skb, &TCP_SKB_CB(skb)->header.h4.opt, ip_hdr(skb)->saddr, ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len); __TCP_INC_STATS(net, TCP_MIB_OUTSEGS); local_bh_enable(); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      307     52.48%   13       31.71%
Hideaki Yoshifuji / 吉藤英明   140     23.93%   4        9.76%
Eric Dumazet                  40      6.84%    8        19.51%
Lorenzo Colitti               31      5.30%    1        2.44%
Arnaldo Carvalho de Melo      19      3.25%    4        9.76%
David S. Miller               15      2.56%    2        4.88%
KOVACS Krisztian              9       1.54%    1        2.44%
Andrey Vagin                  8       1.37%    1        2.44%
Patrick McHardy               8       1.37%    1        2.44%
Pavel Emelyanov               3       0.51%    2        4.88%
Adam Langley                  3       0.51%    2        4.88%
Al Viro                       1       0.17%    1        2.44%
Craig Schlenter               1       0.17%    1        2.44%
Total                         585     100.00%  41       100.00%


static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb) { struct inet_timewait_sock *tw = inet_twsk(sk); struct tcp_timewait_sock *tcptw = tcp_twsk(sk); tcp_v4_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt, tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale, tcp_time_stamp + tcptw->tw_ts_offset, tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw), tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0, tw->tw_tos ); inet_twsk_put(tw); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      42      44.21%   6        46.15%
Arnaldo Carvalho de Melo      24      25.26%   2        15.38%
Hideaki Yoshifuji / 吉藤英明   9       9.47%    1        7.69%
KOVACS Krisztian              8       8.42%    1        7.69%
Eric Dumazet                  6       6.32%    2        15.38%
Andrey Vagin                  6       6.32%    1        7.69%
Total                         95      100.00%  13       100.00%


static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb, struct request_sock *req) { /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV * sk->sk_state == TCP_SYN_RECV -> for Fast Open. */ u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt; /* RFC 7323 2.3 * The window field (SEG.WND) of every outgoing segment, with the * exception of <SYN> segments, MUST be right-shifted by * Rcv.Wind.Shift bits: */ tcp_v4_send_ack(sk, skb, seq, tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale, tcp_time_stamp + tcp_rsk(req)->ts_off, req->ts_recent, 0, tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr, AF_INET), inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0, ip_hdr(skb)->tos); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  37      28.24%   6        33.33%
Linus Torvalds (pre-git)      31      23.66%   4        22.22%
Jerry Chu                     17      12.98%   1        5.56%
Hideaki Yoshifuji / 吉藤英明   13      9.92%    1        5.56%
KOVACS Krisztian              11      8.40%    1        5.56%
Arnaldo Carvalho de Melo      8       6.11%    2        11.11%
Florian Westphal              7       5.34%    1        5.56%
Gui Jianfeng                  5       3.82%    1        5.56%
Andrey Vagin                  2       1.53%    1        5.56%
Total                         131     100.00%  18       100.00%
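
The RFC 7323 rule quoted in tcp_v4_reqsk_send_ack() is a one-line shift; for example, a 1 MiB receive window with Rcv.Wind.Shift = 7 is advertised as 8192:

#include <stdint.h>

/* SEG.WND of every non-SYN segment is the receive window right-shifted
 * by the negotiated scale: 1048576 >> 7 == 8192 fits the 16-bit field. */
static inline uint16_t advertised_window(uint32_t rcv_wnd, int rcv_wscale)
{
	return (uint16_t)(rcv_wnd >> rcv_wscale);
}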

/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst, struct flowi *fl, struct request_sock *req, struct tcp_fastopen_cookie *foc, enum tcp_synack_type synack_type) { const struct inet_request_sock *ireq = inet_rsk(req); struct flowi4 fl4; int err = -1; struct sk_buff *skb; /* First, grab a route. */ if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL) return -1; skb = tcp_make_synack(sk, dst, req, foc, synack_type); if (skb) { __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr); err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr, ireq->ir_rmt_addr, ireq->opt); err = net_xmit_eval(err); } return err; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      90      58.82%   4        20.00%
Arnaldo Carvalho de Melo      19      12.42%   4        20.00%
Eric Dumazet                  11      7.19%    4        20.00%
David S. Miller               7       4.58%    1        5.00%
Octavian Purdila              6       3.92%    2        10.00%
William Allen Simpson         6       3.92%    1        5.00%
Yuchung Cheng                 5       3.27%    1        5.00%
Gerrit Renker                 4       2.61%    1        5.00%
Denis V. Lunev                4       2.61%    1        5.00%
Herbert Xu                    1       0.65%    1        5.00%
Total                         153     100.00%  20       100.00%

/*
 * IPv4 request_sock destructor.
 */
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
	kfree(inet_rsk(req)->opt);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      16      76.19%   3        60.00%
Arnaldo Carvalho de Melo      5       23.81%   2        40.00%
Total                         21      100.00%  5        100.00%

#ifdef CONFIG_TCP_MD5SIG
/*
 * RFC2385 MD5 checksumming requires a mapping of
 * IP address->MD5 Key.
 * We need to maintain these in the sk structure.
 */

/* Find the Key structure for an address.  */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk, const union tcp_md5_addr *addr, int family) { const struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; unsigned int size = sizeof(struct in_addr); const struct tcp_md5sig_info *md5sig; /* caller either holds rcu_read_lock() or socket lock */ md5sig = rcu_dereference_check(tp->md5sig_info, lockdep_sock_is_held(sk)); if (!md5sig) return NULL; #if IS_ENABLED(CONFIG_IPV6) if (family == AF_INET6) size = sizeof(struct in6_addr); #endif hlist_for_each_entry_rcu(key, &md5sig->head, node) { if (key->family != family) continue; if (!memcmp(&key->addr, addr, size)) return key; } return NULL; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  91      65.47%   5        71.43%
Hideaki Yoshifuji / 吉藤英明   47      33.81%   1        14.29%
Hannes Frederic Sowa          1       0.72%    1        14.29%
Total                         139     100.00%  7        100.00%

EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
					 const struct sock *addr_sk)
{
	const union tcp_md5_addr *addr;

	addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
	return tcp_md5_do_lookup(sk, addr, AF_INET);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  28      58.33%   4        80.00%
Hideaki Yoshifuji / 吉藤英明   20      41.67%   1        20.00%
Total                         48      100.00%  5        100.00%

EXPORT_SYMBOL(tcp_v4_md5_lookup);

/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr, int family, const u8 *newkey, u8 newkeylen, gfp_t gfp) { /* Add Key to the list */ struct tcp_md5sig_key *key; struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_info *md5sig; key = tcp_md5_do_lookup(sk, addr, family); if (key) { /* Pre-existing entry - just update that one. */ memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; return 0; } md5sig = rcu_dereference_protected(tp->md5sig_info, lockdep_sock_is_held(sk)); if (!md5sig) { md5sig = kmalloc(sizeof(*md5sig), gfp); if (!md5sig) return -ENOMEM; sk_nocaps_add(sk, NETIF_F_GSO_MASK); INIT_HLIST_HEAD(&md5sig->head); rcu_assign_pointer(tp->md5sig_info, md5sig); } key = sock_kmalloc(sk, sizeof(*key), gfp); if (!key) return -ENOMEM; if (!tcp_alloc_md5sig_pool()) { sock_kfree_s(sk, key, sizeof(*key)); return -ENOMEM; } memcpy(key->key, newkey, newkeylen); key->keylen = newkeylen; key->family = family; memcpy(&key->addr, addr, (family == AF_INET6) ? sizeof(struct in6_addr) : sizeof(struct in_addr)); hlist_add_head_rcu(&key->node, &md5sig->head); return 0; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  132     48.53%   6        46.15%
Hideaki Yoshifuji / 吉藤英明   127     46.69%   1        7.69%
Arnaldo Carvalho de Melo      4       1.47%    1        7.69%
Zheng Yan                     3       1.10%    1        7.69%
David S. Miller               3       1.10%    1        7.69%
Aydin Arik                    1       0.37%    1        7.69%
Hannes Frederic Sowa          1       0.37%    1        7.69%
Matthias M. Dellweg           1       0.37%    1        7.69%
Total                         272     100.00%  13       100.00%

EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
	struct tcp_md5sig_key *key;

	key = tcp_md5_do_lookup(sk, addr, family);
	if (!key)
		return -ENOENT;
	hlist_del_rcu(&key->node);
	atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
	kfree_rcu(key, rcu);
	return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  46      60.53%   2        50.00%
Hideaki Yoshifuji / 吉藤英明   29      38.16%   1        25.00%
Aydin Arik                    1       1.32%    1        25.00%
Total                         76      100.00%  4        100.00%

EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk) { struct tcp_sock *tp = tcp_sk(sk); struct tcp_md5sig_key *key; struct hlist_node *n; struct tcp_md5sig_info *md5sig; md5sig = rcu_dereference_protected(tp->md5sig_info, 1); hlist_for_each_entry_safe(key, n, &md5sig->head, node) { hlist_del_rcu(&key->node); atomic_sub(sizeof(*key), &sk->sk_omem_alloc); kfree_rcu(key, rcu); } }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  61      69.32%   3        60.00%
Hideaki Yoshifuji / 吉藤英明   26      29.55%   1        20.00%
Stephen Hemminger             1       1.14%    1        20.00%
Total                         88      100.00%  5        100.00%


static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval, int optlen) { struct tcp_md5sig cmd; struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr; if (optlen < sizeof(cmd)) return -EINVAL; if (copy_from_user(&cmd, optval, sizeof(cmd))) return -EFAULT; if (sin->sin_family != AF_INET) return -EINVAL; if (!cmd.tcpm_keylen) return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, AF_INET); if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN) return -EINVAL; return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr, AF_INET, cmd.tcpm_key, cmd.tcpm_keylen, GFP_KERNEL); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明   128     84.77%   1        50.00%
Eric Dumazet                  23      15.23%   1        50.00%
Total                         151     100.00%  2        100.00%
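
tcp_v4_parse_md5_keys() is the kernel side of the TCP_MD5SIG socket option. A userspace sketch installing an RFC 2385 key for a peer; per the code above, a zero tcpm_keylen deletes the key instead (the tcp_md5_do_del() branch):

#include <linux/tcp.h>		/* struct tcp_md5sig, TCP_MD5SIG */
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>

/* Install an MD5 signing key for the given IPv4 peer on fd. */
int set_tcp_md5(int fd, const struct sockaddr_in *peer,
		const void *key, int keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}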


static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp, __be32 daddr, __be32 saddr, const struct tcphdr *th, int nbytes) { struct tcp4_pseudohdr *bp; struct scatterlist sg; struct tcphdr *_th; bp = hp->scratch; bp->saddr = saddr; bp->daddr = daddr; bp->pad = 0; bp->protocol = IPPROTO_TCP; bp->len = cpu_to_be16(nbytes); _th = (struct tcphdr *)(bp + 1); memcpy(_th, th, sizeof(*th)); _th->check = 0; sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th)); ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp) + sizeof(*th)); return crypto_ahash_update(hp->md5_req); }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明   64      39.26%   2        40.00%
Eric Dumazet                  57      34.97%   1        20.00%
Adam Langley                  30      18.40%   1        20.00%
Herbert Xu                    12      7.36%    1        20.00%
Total                         163     100.00%  5        100.00%
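
The scratch buffer assembled by tcp_v4_md5_hash_headers() is the familiar IPv4 pseudo-header followed by the TCP header with its checksum zeroed. Its layout, restated as a standalone sketch of struct tcp4_pseudohdr:

#include <stdint.h>

/* What the function above writes into hp->scratch before hashing. */
struct tcp4_pseudohdr_sketch {
	uint32_t saddr;		/* __be32 source address */
	uint32_t daddr;		/* __be32 destination address */
	uint8_t  pad;		/* always 0 */
	uint8_t  protocol;	/* IPPROTO_TCP == 6 */
	uint16_t len;		/* __be16 TCP length (header + data) */
};	/* followed by the TCP header, th->check zeroed */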


static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key, __be32 daddr, __be32 saddr, const struct tcphdr *th) { struct tcp_md5sig_pool *hp; struct ahash_request *req; hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; req = hp->md5_req; if (crypto_ahash_init(req)) goto clear_hash; if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; ahash_request_set_crypt(req, NULL, md5_hash, 0); if (crypto_ahash_final(req)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Adam Langley                  92      62.59%   1        14.29%
Hideaki Yoshifuji / 吉藤英明   31      21.09%   2        28.57%
Herbert Xu                    19      12.93%   1        14.29%
Eric Dumazet                  5       3.40%    3        42.86%
Total                         147     100.00%  7        100.00%


int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key, const struct sock *sk, const struct sk_buff *skb) { struct tcp_md5sig_pool *hp; struct ahash_request *req; const struct tcphdr *th = tcp_hdr(skb); __be32 saddr, daddr; if (sk) { /* valid for establish/request sockets */ saddr = sk->sk_rcv_saddr; daddr = sk->sk_daddr; } else { const struct iphdr *iph = ip_hdr(skb); saddr = iph->saddr; daddr = iph->daddr; } hp = tcp_get_md5sig_pool(); if (!hp) goto clear_hash_noput; req = hp->md5_req; if (crypto_ahash_init(req)) goto clear_hash; if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len)) goto clear_hash; if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2)) goto clear_hash; if (tcp_md5_hash_key(hp, key)) goto clear_hash; ahash_request_set_crypt(req, NULL, md5_hash, 0); if (crypto_ahash_final(req)) goto clear_hash; tcp_put_md5sig_pool(); return 0; clear_hash: tcp_put_md5sig_pool(); clear_hash_noput: memset(md5_hash, 0, 16); return 1; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Adam Langley                  130     58.30%   1        16.67%
Hideaki Yoshifuji / 吉藤英明   64      28.70%   1        16.67%
Herbert Xu                    19      8.52%    1        16.67%
Eric Dumazet                  10      4.48%    3        50.00%
Total                         223     100.00%  6        100.00%

EXPORT_SYMBOL(tcp_v4_md5_hash_skb);

#endif

/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk, const struct sk_buff *skb) { #ifdef CONFIG_TCP_MD5SIG /* * This gets called for each TCP segment that arrives * so we want to be efficient. * We have 3 drop cases: * o No MD5 hash and one expected. * o MD5 hash and we're not expecting one. * o MD5 hash and its wrong. */ const __u8 *hash_location = NULL; struct tcp_md5sig_key *hash_expected; const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); int genhash; unsigned char newhash[16]; hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr, AF_INET); hash_location = tcp_parse_md5sig_option(th); /* We've parsed the options - do we have a hash? */ if (!hash_expected && !hash_location) return false; if (hash_expected && !hash_location) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND); return true; } if (!hash_expected && hash_location) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED); return true; } /* Okay, so this is hash_expected and hash_location - * so we need to calculate the checksum. */ genhash = tcp_v4_md5_hash_skb(newhash, hash_expected, NULL, skb); if (genhash || memcmp(hash_location, newhash, 16) != 0) { NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE); net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n", &iph->saddr, ntohs(th->source), &iph->daddr, ntohs(th->dest), genhash ? " tcp_v4_calc_md5_hash failed" : ""); return true; } return false; #endif return false; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明   174     74.04%   2        11.76%
Eric Dumazet                  39      16.60%   8        47.06%
Arnaldo Carvalho de Melo      7       2.98%    2        11.76%
David S. Miller               6       2.55%    1        5.88%
Linus Torvalds (pre-git)      3       1.28%    1        5.88%
Harvey Harrison               3       1.28%    1        5.88%
Adam Langley                  2       0.85%    1        5.88%
Joe Perches                   1       0.43%    1        5.88%
Total                         235     100.00%  17       100.00%


static void tcp_v4_init_req(struct request_sock *req,
			    const struct sock *sk_listener,
			    struct sk_buff *skb)
{
	struct inet_request_sock *ireq = inet_rsk(req);

	sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
	sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
	ireq->opt = tcp_v4_save_options(skb);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Octavian Purdila              53      74.65%   1        33.33%
Eric Dumazet                  18      25.35%   2        66.67%
Total                         71      100.00%  3        100.00%


static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
					  struct flowi *fl,
					  const struct request_sock *req,
					  bool *strict)
{
	struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);

	if (strict) {
		if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
			*strict = true;
		else
			*strict = false;
	}

	return dst;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Octavian Purdila              84      98.82%   1        50.00%
Eric Dumazet                  1       1.18%    1        50.00%
Total                         85      100.00%  2        100.00%

struct request_sock_ops tcp_request_sock_ops __read_mostly = {
	.family		=	PF_INET,
	.obj_size	=	sizeof(struct tcp_request_sock),
	.rtx_syn_ack	=	tcp_rtx_synack,
	.send_ack	=	tcp_v4_reqsk_send_ack,
	.destructor	=	tcp_v4_reqsk_destructor,
	.send_reset	=	tcp_v4_send_reset,
	.syn_ack_timeout =	tcp_syn_ack_timeout,
};

static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
	.mss_clamp	=	TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
	.req_md5_lookup	=	tcp_v4_md5_lookup,
	.calc_md5_hash	=	tcp_v4_md5_hash_skb,
#endif
	.init_req	=	tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
	.cookie_init_seq =	cookie_v4_init_sequence,
#endif
	.route_req	=	tcp_v4_route_req,
	.init_seq	=	tcp_v4_init_sequence,
	.send_synack	=	tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
	/* Never answer to SYNs send to broadcast or multicast */
	if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
		goto drop;

	return tcp_conn_request(&tcp_request_sock_ops,
				&tcp_request_sock_ipv4_ops, sk, skb);

drop:
	tcp_listendrop(sk);
	return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      39      67.24%   4        30.77%
Eric Dumazet                  4       6.90%    2        15.38%
Vijay Subramanian             4       6.90%    1        7.69%
Octavian Purdila              4       6.90%    2        15.38%
Arnaldo Carvalho de Melo      3       5.17%    2        15.38%
Paul Moore                    3       5.17%    1        7.69%
Hannes Frederic Sowa          1       1.72%    1        7.69%
Total                         58      100.00%  13       100.00%

EXPORT_SYMBOL(tcp_v4_conn_request);

/*
 * The three way handshake has completed - we got a valid synack -
 * now create the new socket.
 */
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst, struct request_sock *req_unhash, bool *own_req) { struct inet_request_sock *ireq; struct inet_sock *newinet; struct tcp_sock *newtp; struct sock *newsk; #ifdef CONFIG_TCP_MD5SIG struct tcp_md5sig_key *key; #endif struct ip_options_rcu *inet_opt; if (sk_acceptq_is_full(sk)) goto exit_overflow; newsk = tcp_create_openreq_child(sk, req, skb); if (!newsk) goto exit_nonewsk; newsk->sk_gso_type = SKB_GSO_TCPV4; inet_sk_rx_dst_set(newsk, skb); newtp = tcp_sk(newsk); newinet = inet_sk(newsk); ireq = inet_rsk(req); sk_daddr_set(newsk, ireq->ir_rmt_addr); sk_rcv_saddr_set(newsk, ireq->ir_loc_addr); newsk->sk_bound_dev_if = ireq->ir_iif; newinet->inet_saddr = ireq->ir_loc_addr; inet_opt = ireq->opt; rcu_assign_pointer(newinet->inet_opt, inet_opt); ireq->opt = NULL; newinet->mc_index = inet_iif(skb); newinet->mc_ttl = ip_hdr(skb)->ttl; newinet->rcv_tos = ip_hdr(skb)->tos; inet_csk(newsk)->icsk_ext_hdr_len = 0; if (inet_opt) inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen; newinet->inet_id = newtp->write_seq ^ jiffies; if (!dst) { dst = inet_csk_route_child_sock(sk, newsk, req); if (!dst) goto put_and_exit; } else { /* syncookie case : see end of cookie_v4_check() */ } sk_setup_caps(newsk, dst); tcp_ca_openreq_child(newsk, dst); tcp_sync_mss(newsk, dst_mtu(dst)); newtp->advmss = tcp_mss_clamp(tcp_sk(sk), dst_metric_advmss(dst)); tcp_initialize_rcv_mss(newsk); #ifdef CONFIG_TCP_MD5SIG /* Copy over the MD5 key from the original socket */ key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET); if (key) { /* * We're using one, so create a matching key * on the newsk structure. If we fail to get * memory, then we end up not copying the key * across. Shucks. */ tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr, AF_INET, key->key, key->keylen, GFP_ATOMIC); sk_nocaps_add(newsk, NETIF_F_GSO_MASK); } #endif if (__inet_inherit_port(sk, newsk) < 0) goto put_and_exit; *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash)); if (*own_req) tcp_move_syn(newtp, req); return newsk; exit_overflow: NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS); exit_nonewsk: dst_release(dst); exit: tcp_listendrop(sk); return NULL; put_and_exit: inet_csk_prepare_forced_close(newsk); tcp_done(newsk); goto exit; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      146     30.10%   13       23.21%
Eric Dumazet                  119     24.54%   16       28.57%
David S. Miller               52      10.72%   3        5.36%
Hideaki Yoshifuji / 吉藤英明   40      8.25%    2        3.57%
Arnaldo Carvalho de Melo      35      7.22%    8        14.29%
Balazs Scheidler              20      4.12%    1        1.79%
Jiri Benc                     11      2.27%    1        1.79%
Linus Torvalds                8       1.65%    1        1.79%
David Ahern                   8       1.65%    1        1.79%
Pavel Emelyanov               8       1.65%    1        1.79%
Tom Quetchenbach              8       1.65%    1        1.79%
Herbert Xu                    7       1.44%    2        3.57%
Daniel Borkmann               7       1.44%    1        1.79%
Neal Cardwell                 7       1.44%    1        1.79%
Adam Langley                  3       0.62%    1        1.79%
Alexey Kuznetsov              3       0.62%    1        1.79%
Christoph Paasch              2       0.41%    1        1.79%
John Dykstra                  1       0.21%    1        1.79%
Total                         485     100.00%  56       100.00%

EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
	const struct tcphdr *th = tcp_hdr(skb);

	if (!th->syn)
		sk = cookie_v4_check(sk, skb);
#endif
	return sk;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      45      84.91%   3        50.00%
Eric Dumazet                  5       9.43%    2        33.33%
Arnaldo Carvalho de Melo      3       5.66%    1        16.67%
Total                         53      100.00%  6        100.00%

/* The socket must have its spinlock held when we get
 * here, unless it is a TCP_LISTEN socket.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb) { struct sock *rsk; if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */ struct dst_entry *dst = sk->sk_rx_dst; sock_rps_save_rxhash(sk, skb); sk_mark_napi_id(sk, skb); if (dst) { if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif || !dst->ops->check(dst, 0)) { dst_release(dst); sk->sk_rx_dst = NULL; } } tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len); return 0; } if (tcp_checksum_complete(skb)) goto csum_err; if (sk->sk_state == TCP_LISTEN) { struct sock *nsk = tcp_v4_cookie_check(sk, skb); if (!nsk) goto discard; if (nsk != sk) { sock_rps_save_rxhash(nsk, skb); sk_mark_napi_id(nsk, skb); if (tcp_child_process(sk, nsk, skb)) { rsk = nsk; goto reset; } return 0; } } else sock_rps_save_rxhash(sk, skb); if (tcp_rcv_state_process(sk, skb)) { rsk = sk; goto reset; } return 0; reset: tcp_v4_send_reset(rsk, skb); discard: kfree_skb(skb); /* Be careful here. If this function gets more complicated and * gcc suffers from register pressure on the x86, sk (in %ebx) * might be destroyed here. This current version compiles correctly, * but you have been warned. */ return 0; csum_err: TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS); TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS); goto discard; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      145     52.54%   9        34.62%
Eric Dumazet                  69      25.00%   9        34.62%
David S. Miller               29      10.51%   1        3.85%
Hideaki Yoshifuji / 吉藤英明   21      7.61%    2        7.69%
Arnaldo Carvalho de Melo      6       2.17%    3        11.54%
Pavel Emelyanov               5       1.81%    1        3.85%
Ian Morris                    1       0.36%    1        3.85%
Total                         276     100.00%  26       100.00%

EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb) { const struct iphdr *iph; const struct tcphdr *th; struct sock *sk; if (skb->pkt_type != PACKET_HOST) return; if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr))) return; iph = ip_hdr(skb); th = tcp_hdr(skb); if (th->doff < sizeof(struct tcphdr) / 4) return; sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo, iph->saddr, th->source, iph->daddr, ntohs(th->dest), skb->skb_iif); if (sk) { skb->sk = sk; skb->destructor = sock_edemux; if (sk_fullsock(sk)) { struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst); if (dst) dst = dst_check(dst, 0); if (dst && inet_sk(sk)->rx_dst_ifindex == skb->skb_iif) skb_dst_set_noref(skb, dst); } } }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
David S. Miller               172     87.31%   4        40.00%
Eric Dumazet                  19      9.64%    4        40.00%
Vijay Subramanian             3       1.52%    1        10.00%
Michal Kubeček                3       1.52%    1        10.00%
Total                         197     100.00%  10       100.00%

/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see, why it failed. 8)8)				 --ANK
 */
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb) { struct tcp_sock *tp = tcp_sk(sk); if (sysctl_tcp_low_latency || !tp->ucopy.task) return false; if (skb->len <= tcp_hdrlen(skb) && skb_queue_len(&tp->ucopy.prequeue) == 0) return false; /* Before escaping RCU protected region, we need to take care of skb * dst. Prequeue is only enabled for established sockets. * For such sockets, we might need the skb dst only to set sk->sk_rx_dst * Instead of doing full sk_rx_dst validity here, let's perform * an optimistic check. */ if (likely(sk->sk_rx_dst)) skb_dst_drop(skb); else skb_dst_force_safe(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (skb_queue_len(&tp->ucopy.prequeue) >= 32 || tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) { struct sk_buff *skb1; BUG_ON(sock_owned_by_user(sk)); __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED, skb_queue_len(&tp->ucopy.prequeue)); while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) sk_backlog_rcv(sk, skb1); tp->ucopy.memory = 0; } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) { wake_up_interruptible_sync_poll(sk_sleep(sk), POLLIN | POLLRDNORM | POLLRDBAND); if (!inet_csk_ack_scheduled(sk)) inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, (3 * tcp_rto_min(sk)) / 4, TCP_RTO_MAX); } return true; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  266     98.52%   4        80.00%
David S. Miller               4       1.48%    1        20.00%
Total                         270     100.00%  5        100.00%

EXPORT_SYMBOL(tcp_prequeue);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb) { u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf; /* Only socket owner can try to collapse/prune rx queues * to reduce memory overhead, so add a little headroom here. * Few sockets backlog are possibly concurrently non empty. */ limit += 64*1024; /* In case all data was pulled from skb frags (in __pskb_pull_tail()), * we can fix skb->truesize to its real value to avoid future drops. * This is valid because skb is not yet charged to the socket. * It has been noticed pure SACK packets were sometimes dropped * (if cooked by drivers without copybreak feature). */ skb_condense(skb); if (unlikely(sk_add_backlog(sk, skb, limit))) { bh_unlock_sock(sk); __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP); return true; } return false; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  76      100.00%  2        100.00%
Total                         76      100.00%  2        100.00%

EXPORT_SYMBOL(tcp_add_backlog);
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th = (struct tcphdr *)skb->data;
	unsigned int eaten = skb->len;
	int err;

	err = sk_filter_trim_cap(sk, skb, th->doff * 4);
	if (!err) {
		eaten -= skb->len;
		TCP_SKB_CB(skb)->end_seq -= eaten;
	}
	return err;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  80      100.00%  1        100.00%
Total                         80      100.00%  1        100.00%

EXPORT_SYMBOL(tcp_filter);

/*
 *	From tcp_input.c
 */
int tcp_v4_rcv(struct sk_buff *skb) { struct net *net = dev_net(skb->dev); const struct iphdr *iph; const struct tcphdr *th; bool refcounted; struct sock *sk; int ret; if (skb->pkt_type != PACKET_HOST) goto discard_it; /* Count it even if it's bad */ __TCP_INC_STATS(net, TCP_MIB_INSEGS); if (!pskb_may_pull(skb, sizeof(struct tcphdr))) goto discard_it; th = (const struct tcphdr *)skb->data; if (unlikely(th->doff < sizeof(struct tcphdr) / 4)) goto bad_packet; if (!pskb_may_pull(skb, th->doff * 4)) goto discard_it; /* An explanation is required here, I think. * Packet length and doff are validated by header prediction, * provided case of th->doff==0 is eliminated. * So, we defer the checks. */ if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo)) goto csum_error; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB() * barrier() makes sure compiler wont play fool^Waliasing games. */ memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb), sizeof(struct inet_skb_parm)); barrier(); TCP_SKB_CB(skb)->seq = ntohl(th->seq); TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin + skb->len - th->doff * 4); TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq); TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th); TCP_SKB_CB(skb)->tcp_tw_isn = 0; TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph); TCP_SKB_CB(skb)->sacked = 0; lookup: sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source, th->dest, &refcounted); if (!sk) goto no_tcp_socket; process: if (sk->sk_state == TCP_TIME_WAIT) goto do_time_wait; if (sk->sk_state == TCP_NEW_SYN_RECV) { struct request_sock *req = inet_reqsk(sk); struct sock *nsk; sk = req->rsk_listener; if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) { sk_drops_add(sk, skb); reqsk_put(req); goto discard_it; } if (unlikely(sk->sk_state != TCP_LISTEN)) { inet_csk_reqsk_queue_drop_and_put(sk, req); goto lookup; } /* We own a reference on the listener, increase it again * as we might lose it too soon. */ sock_hold(sk); refcounted = true; nsk = tcp_check_req(sk, skb, req, false); if (!nsk) { reqsk_put(req); goto discard_and_relse; } if (nsk == sk) { reqsk_put(req); } else if (tcp_child_process(sk, nsk, skb)) { tcp_v4_send_reset(nsk, skb); goto discard_and_relse; } else { sock_put(sk); return 0; } } if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP); goto discard_and_relse; } if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) goto discard_and_relse; if (tcp_v4_inbound_md5_hash(sk, skb)) goto discard_and_relse; nf_reset(skb); if (tcp_filter(sk, skb)) goto discard_and_relse; th = (const struct tcphdr *)skb->data; iph = ip_hdr(skb); skb->dev = NULL; if (sk->sk_state == TCP_LISTEN) { ret = tcp_v4_do_rcv(sk, skb); goto put_and_return; } sk_incoming_cpu_update(sk); bh_lock_sock_nested(sk); tcp_segs_in(tcp_sk(sk), skb); ret = 0; if (!sock_owned_by_user(sk)) { if (!tcp_prequeue(sk, skb)) ret = tcp_v4_do_rcv(sk, skb); } else if (tcp_add_backlog(sk, skb)) { goto discard_and_relse; } bh_unlock_sock(sk); put_and_return: if (refcounted) sock_put(sk); return ret; no_tcp_socket: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard_it; if (tcp_checksum_complete(skb)) { csum_error: __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS); bad_packet: __TCP_INC_STATS(net, TCP_MIB_INERRS); } else { tcp_v4_send_reset(NULL, skb); } discard_it: /* Discard frame. 
*/ kfree_skb(skb); return 0; discard_and_relse: sk_drops_add(sk, skb); if (refcounted) sock_put(sk); goto discard_it; do_time_wait: if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) { inet_twsk_put(inet_twsk(sk)); goto discard_it; } if (tcp_checksum_complete(skb)) { inet_twsk_put(inet_twsk(sk)); goto csum_error; } switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) { case TCP_TW_SYN: { struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev), &tcp_hashinfo, skb, __tcp_hdrlen(th), iph->saddr, th->source, iph->daddr, th->dest, inet_iif(skb)); if (sk2) { inet_twsk_deschedule_put(inet_twsk(sk)); sk = sk2; refcounted = false; goto process; } /* Fall through to ACK */ } case TCP_TW_ACK: tcp_v4_timewait_ack(sk, skb); break; case TCP_TW_RST: tcp_v4_send_reset(sk, skb); inet_twsk_deschedule_put(inet_twsk(sk)); goto discard_it; case TCP_TW_SUCCESS:; } goto discard_it; }

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  363     36.85%   22       32.35%
Linus Torvalds (pre-git)      362     36.75%   15       22.06%
Linus Torvalds                48      4.87%    1        1.47%
Alexey Kuznetsov              35      3.55%    1        1.47%
Arnaldo Carvalho de Melo      27      2.74%    7        10.29%
Hideaki Yoshifuji / 吉藤英明   22      2.23%    5        7.35%
Stephen Hemminger             17      1.73%    2        2.94%
Florian Westphal              16      1.62%    1        1.47%
Tom Herbert                   13      1.32%    2        2.94%
Craig Gallek                  12      1.22%    1        1.47%
Dmitry Popov                  12      1.22%    1        1.47%
Nivedita Singhvi              11      1.12%    1        1.47%
James Morris                  11      1.12%    1        1.47%
Yi Zhu                        9       0.91%    1        1.47%
Marcelo Ricardo Leitner       8       0.81%    1        1.47%
Pavel Emelyanov               8       0.81%    2        2.94%
Patrick McHardy               5       0.51%    1        1.47%
Benjamin LaHaise              3       0.30%    1        1.47%
Martin KaFai Lau              2       0.20%    1        1.47%
Ingo Molnar                   1       0.10%    1        1.47%
Total                         985     100.00%  68       100.00%

static struct timewait_sock_ops tcp_timewait_sock_ops = {
	.twsk_obj_size	= sizeof(struct tcp_timewait_sock),
	.twsk_unique	= tcp_twsk_unique,
	.twsk_destructor= tcp_twsk_destructor,
};

void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);

	if (dst && dst_hold_safe(dst)) {
		sk->sk_rx_dst = dst;
		inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
	}
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                  54      100.00%  3        100.00%
Total                         54      100.00%  3        100.00%

EXPORT_SYMBOL(inet_sk_rx_dst_set);

const struct inet_connection_sock_af_ops ipv4_specific = {
	.queue_xmit	   = ip_queue_xmit,
	.send_check	   = tcp_v4_send_check,
	.rebuild_header	   = inet_sk_rebuild_header,
	.sk_rx_dst_set	   = inet_sk_rx_dst_set,
	.conn_request	   = tcp_v4_conn_request,
	.syn_recv_sock	   = tcp_v4_syn_recv_sock,
	.net_header_len	   = sizeof(struct iphdr),
	.setsockopt	   = ip_setsockopt,
	.getsockopt	   = ip_getsockopt,
	.addr2sockaddr	   = inet_csk_addr2sockaddr,
	.sockaddr_len	   = sizeof(struct sockaddr_in),
#ifdef CONFIG_COMPAT
	.compat_setsockopt = compat_ip_setsockopt,
	.compat_getsockopt = compat_ip_getsockopt,
#endif
	.mtu_reduced	   = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);

#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
	.md5_lookup	= tcp_v4_md5_lookup,
	.calc_md5_hash	= tcp_v4_md5_hash_skb,
	.md5_parse	= tcp_v4_parse_md5_keys,
};
#endif

/* NOTE: A lot of things set to zero explicitly by call to
 *       sk_alloc() so need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	tcp_init_sock(sk);

	icsk->icsk_af_ops = &ipv4_specific;

#ifdef CONFIG_TCP_MD5SIG
	tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif

	return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)      21      41.18%   1        12.50%
Arnaldo Carvalho de Melo      11      21.57%   2        25.00%
Hideaki Yoshifuji / 吉藤英明   11      21.57%   1        12.50%
David S. Miller               4       7.84%    1        12.50%
Linus Torvalds                2       3.92%    1        12.50%
Thomas Graf                   1       1.96%    1        12.50%
Neal Cardwell                 1       1.96%    1        12.50%
Total                         51      100.00%  8        100.00%


void tcp_v4_destroy_sock(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        tcp_clear_xmit_timers(sk);

        tcp_cleanup_congestion_control(sk);

        /* Clean up the write buffer. */
        tcp_write_queue_purge(sk);

        /* Cleans up our, hopefully empty, out_of_order_queue. */
        skb_rbtree_purge(&tp->out_of_order_queue);

#ifdef CONFIG_TCP_MD5SIG
        /* Clean up the MD5 key list, if any */
        if (tp->md5sig_info) {
                tcp_clear_md5_list(sk);
                kfree_rcu(tp->md5sig_info, rcu);
                tp->md5sig_info = NULL;
        }
#endif

        /* Clean prequeue, it must be empty really */
        __skb_queue_purge(&tp->ucopy.prequeue);

        /* Clean up a referenced TCP bind bucket. */
        if (inet_csk(sk)->icsk_bind_hash)
                inet_put_port(sk);

        BUG_ON(tp->fastopen_rsk);

        /* If socket is aborted during connect operation */
        tcp_free_fastopen_req(tp);
        tcp_saved_syn_free(tp);

        sk_sockets_allocated_dec(sk);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Linus Torvalds (pre-git)          55  42.97%         5      20.83%
Hideaki Yoshifuji / 吉藤英明        28  21.88%         1       4.17%
Eric Dumazet                       9   7.03%         3      12.50%
Arnaldo Carvalho de Melo           8   6.25%         5      20.83%
Jerry Chu                          7   5.47%         1       4.17%
Yuchung Cheng                      6   4.69%         1       4.17%
David S. Miller                    4   3.12%         2       8.33%
Stephen Hemminger                  4   3.12%         1       4.17%
Glauber de Oliveira Costa          2   1.56%         1       4.17%
Christopher Leech                  2   1.56%         1       4.17%
Yaogong Wang                       1   0.78%         1       4.17%
Linus Torvalds                     1   0.78%         1       4.17%
Brian Haley                        1   0.78%         1       4.17%
Total                            128  100.00%       24     100.00%

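tcp_v4_destroy_sock() releases tp->md5sig_info with kfree_rcu(), which defers the actual free until after an RCU grace period so that lockless readers still traversing the key list never touch freed memory. A minimal sketch of that pattern, using a hypothetical demo_obj structure (not part of this file):

#include <linux/slab.h>
#include <linux/rcupdate.h>

/* Hypothetical object retired with the same kfree_rcu() pattern used for
 * tp->md5sig_info: the struct embeds a struct rcu_head, and readers that
 * dereference the pointer under rcu_read_lock() never see freed memory.
 */
struct demo_obj {
        int value;
        struct rcu_head rcu;
};

static struct demo_obj __rcu *demo_ptr;

static void demo_retire(void)
{
        struct demo_obj *old;

        /* caller is assumed to hold the update-side lock */
        old = rcu_dereference_protected(demo_ptr, 1);
        RCU_INIT_POINTER(demo_ptr, NULL);
        if (old)
                kfree_rcu(old, rcu); /* freed only after a grace period */
}
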
EXPORT_SYMBOL(tcp_v4_destroy_sock);

#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */

/*
 * Get next listener socket follow cur. If cur is NULL, get first socket
 * starting from bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
 */
static void *listening_get_next(struct seq_file *seq, void *cur)
{
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        struct inet_listen_hashbucket *ilb;
        struct sock *sk = cur;

        if (!sk) {
get_head:
                ilb = &tcp_hashinfo.listening_hash[st->bucket];
                spin_lock(&ilb->lock);
                sk = sk_head(&ilb->head);
                st->offset = 0;
                goto get_sk;
        }
        ilb = &tcp_hashinfo.listening_hash[st->bucket];
        ++st->num;
        ++st->offset;

        sk = sk_next(sk);
get_sk:
        sk_for_each_from(sk) {
                if (!net_eq(sock_net(sk), net))
                        continue;
                if (sk->sk_family == st->family)
                        return sk;
        }
        spin_unlock(&ilb->lock);
        st->offset = 0;
        if (++st->bucket < INET_LHTABLE_SIZE)
                goto get_head;
        return NULL;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                      85  45.21%         4      22.22%
Arnaldo Carvalho de Melo          50  26.60%         4      22.22%
Linus Torvalds (pre-git)          14   7.45%         2      11.11%
Pavel Emelyanov                   12   6.38%         1       5.56%
Herbert Xu                        11   5.85%         2      11.11%
Hideaki Yoshifuji / 吉藤英明         7   3.72%         2      11.11%
Tom Herbert                        6   3.19%         1       5.56%
Daniel Lezcano                     2   1.06%         1       5.56%
David S. Miller                    1   0.53%         1       5.56%
Total                            188  100.00%       18     100.00%


static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        st->offset = 0;
        rc = listening_get_next(seq, NULL);

        while (rc && *pos) {
                rc = listening_get_next(seq, rc);
                --*pos;
        }
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          41  54.67%         1      25.00%
Tom Herbert                       25  33.33%         1      25.00%
Tim Shepard                        6   8.00%         1      25.00%
Herbert Xu                         3   4.00%         1      25.00%
Total                             75  100.00%        4     100.00%


static inline bool empty_bucket(const struct tcp_iter_state *st)
{
        return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Andi Kleen                        26  89.66%         1      25.00%
Eric Dumazet                       3  10.34%         3      75.00%
Total                             29  100.00%        4     100.00%

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);
        void *rc = NULL;

        st->offset = 0;
        for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
                struct sock *sk;
                struct hlist_nulls_node *node;
                spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

                /* Lockless fast path for the common case of empty buckets */
                if (empty_bucket(st))
                        continue;

                spin_lock_bh(lock);
                sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
                        if (sk->sk_family != st->family ||
                            !net_eq(sock_net(sk), net)) {
                                continue;
                        }
                        rc = sk;
                        goto out;
                }
                spin_unlock_bh(lock);
        }
out:
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          73  48.67%         4      22.22%
Linus Torvalds (pre-git)          24  16.00%         3      16.67%
Eric Dumazet                      15  10.00%         4      22.22%
Hideaki Yoshifuji / 吉藤英明        12   8.00%         3      16.67%
Daniel Lezcano                     9   6.00%         1       5.56%
Andi Kleen                         9   6.00%         1       5.56%
Denis V. Lunev                     4   2.67%         1       5.56%
Tom Herbert                        4   2.67%         1       5.56%
Total                            150  100.00%       18     100.00%


static void *established_get_next(struct seq_file *seq, void *cur)
{
        struct sock *sk = cur;
        struct hlist_nulls_node *node;
        struct tcp_iter_state *st = seq->private;
        struct net *net = seq_file_net(seq);

        ++st->num;
        ++st->offset;

        sk = sk_nulls_next(sk);

        sk_nulls_for_each_from(sk, node) {
                if (sk->sk_family == st->family &&
                    net_eq(sock_net(sk), net))
                        return sk;
        }

        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
        ++st->bucket;
        return established_get_first(seq);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          67  56.30%         4      30.77%
Eric Dumazet                      19  15.97%         2      15.38%
Hideaki Yoshifuji / 吉藤英明        10   8.40%         3      23.08%
Daniel Lezcano                     9   7.56%         1       7.69%
Tim Shepard                        5   4.20%         1       7.69%
Tom Herbert                        5   4.20%         1       7.69%
Denis V. Lunev                     4   3.36%         1       7.69%
Total                            119  100.00%       13     100.00%


static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        st->bucket = 0;
        rc = established_get_first(seq);

        while (rc && pos) {
                rc = established_get_next(seq, rc);
                --pos;
        }
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          39  60.94%         1      33.33%
Tom Herbert                       19  29.69%         1      33.33%
Tim Shepard                        6   9.38%         1      33.33%
Total                             64  100.00%        3     100.00%


static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
        void *rc;
        struct tcp_iter_state *st = seq->private;

        st->state = TCP_SEQ_STATE_LISTENING;
        rc = listening_get_idx(seq, &pos);

        if (!rc) {
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                rc = established_get_idx(seq, pos);
        }

        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          69  100.00%        1     100.00%
Total                             69  100.00%        1     100.00%


static void *tcp_seek_last_pos(struct seq_file *seq)
{
        struct tcp_iter_state *st = seq->private;
        int offset = st->offset;
        int orig_num = st->num;
        void *rc = NULL;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                if (st->bucket >= INET_LHTABLE_SIZE)
                        break;
                st->state = TCP_SEQ_STATE_LISTENING;
                rc = listening_get_next(seq, NULL);
                while (offset-- && rc)
                        rc = listening_get_next(seq, rc);
                if (rc)
                        break;
                st->bucket = 0;
                st->state = TCP_SEQ_STATE_ESTABLISHED;
                /* Fallthrough */
        case TCP_SEQ_STATE_ESTABLISHED:
                if (st->bucket > tcp_hashinfo.ehash_mask)
                        break;
                rc = established_get_first(seq);
                while (offset-- && rc)
                        rc = established_get_next(seq, rc);
        }

        st->num = orig_num;

        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Tom Herbert                      152  97.44%         1      50.00%
Eric Dumazet                       4   2.56%         1      50.00%
Total                            156  100.00%        2     100.00%


static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc;

        if (*pos && *pos == st->last_pos) {
                rc = tcp_seek_last_pos(seq);
                if (rc)
                        goto out;
        }

        st->state = TCP_SEQ_STATE_LISTENING;
        st->num = 0;
        st->bucket = 0;
        st->offset = 0;
        rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
        st->last_pos = *pos;
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Tom Herbert                       58  52.73%         1      20.00%
Arnaldo Carvalho de Melo          30  27.27%         1      20.00%
Tim Shepard                       15  13.64%         1      20.00%
Hirofumi Ogawa                     6   5.45%         1      20.00%
Joe Perches                        1   0.91%         1      20.00%
Total                            110  100.00%        5     100.00%


static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct tcp_iter_state *st = seq->private;
        void *rc = NULL;

        if (v == SEQ_START_TOKEN) {
                rc = tcp_get_idx(seq, 0);
                goto out;
        }

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                rc = listening_get_next(seq, v);
                if (!rc) {
                        st->state = TCP_SEQ_STATE_ESTABLISHED;
                        st->bucket = 0;
                        st->offset = 0;
                        rc = established_get_first(seq);
                }
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
                rc = established_get_next(seq, v);
                break;
        }
out:
        ++*pos;
        st->last_pos = *pos;
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo         108  78.83%         1      33.33%
Tom Herbert                       28  20.44%         1      33.33%
Joe Perches                        1   0.73%         1      33.33%
Total                            137  100.00%        3     100.00%


static void tcp_seq_stop(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st = seq->private;

        switch (st->state) {
        case TCP_SEQ_STATE_LISTENING:
                if (v != SEQ_START_TOKEN)
                        spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
                break;
        case TCP_SEQ_STATE_ESTABLISHED:
                if (v)
                        spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
                break;
        }
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          57  73.08%         3      33.33%
Eric Dumazet                      15  19.23%         4      44.44%
Anders Gustafsson                  5   6.41%         1      11.11%
Joe Perches                        1   1.28%         1      11.11%
Total                             78  100.00%        9     100.00%

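tcp_seq_start/next/stop above implement the standard seq_file iterator contract: start() positions the cursor (taking any needed locks), next() advances it, stop() drops the locks, and show() renders one record; *pos is the resume cookie, much as st->last_pos is for the TCP iterator. A minimal self-contained sketch of the same contract, as a hypothetical out-of-tree module that iterates the numbers 0..4 (written against the ~4.11-era seq_open/seq_release API; not part of this file):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Iterate over a fixed range 0..4; v simply points at *pos. */
static void *demo_start(struct seq_file *s, loff_t *pos)
{
        return (*pos < 5) ? pos : NULL;
}

static void *demo_next(struct seq_file *s, void *v, loff_t *pos)
{
        ++*pos;
        return (*pos < 5) ? pos : NULL;
}

static void demo_stop(struct seq_file *s, void *v)
{
        /* drop any locks taken in start(); nothing to do here */
}

static int demo_show(struct seq_file *s, void *v)
{
        seq_printf(s, "entry %lld\n", *(loff_t *)v);
        return 0;
}

static const struct seq_operations demo_seq_ops = {
        .start = demo_start,
        .next  = demo_next,
        .stop  = demo_stop,
        .show  = demo_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &demo_seq_ops);
}

static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};

static int __init demo_init(void)
{
        return proc_create("seqfile_demo", 0444, NULL, &demo_fops) ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("seqfile_demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
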

int tcp_seq_open(struct inode *inode, struct file *file)
{
        struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
        struct tcp_iter_state *s;
        int err;

        err = seq_open_net(inode, file, &afinfo->seq_ops,
                           sizeof(struct tcp_iter_state));
        if (err < 0)
                return err;

        s = ((struct seq_file *)file->private_data)->private;
        s->family  = afinfo->family;
        s->last_pos = 0;
        return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明        38  40.43%         2      25.00%
Denis V. Lunev                    30  31.91%         2      25.00%
Arnaldo Carvalho de Melo          10  10.64%         1      12.50%
Daniel Lezcano                     9   9.57%         1      12.50%
Tom Herbert                        6   6.38%         1      12.50%
Al Viro                            1   1.06%         1      12.50%
Total                             94  100.00%        8     100.00%

EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        int rc = 0;
        struct proc_dir_entry *p;

        afinfo->seq_ops.start = tcp_seq_start;
        afinfo->seq_ops.next  = tcp_seq_next;
        afinfo->seq_ops.stop  = tcp_seq_stop;

        p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
                             afinfo->seq_fops, afinfo);
        if (!p)
                rc = -ENOMEM;
        return rc;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明        48  57.83%         2      40.00%
Denis V. Lunev                    30  36.14%         2      40.00%
Daniel Lezcano                     5   6.02%         1      20.00%
Total                             83  100.00%        5     100.00%

EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
        remove_proc_entry(afinfo->name, net->proc_net);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明        16  61.54%         1      33.33%
Gao Feng                           5  19.23%         1      33.33%
Daniel Lezcano                     5  19.23%         1      33.33%
Total                             26  100.00%        3     100.00%

EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct request_sock *req,
                         struct seq_file *f, int i)
{
        const struct inet_request_sock *ireq = inet_rsk(req);
        long delta = req->rsk_timer.expires - jiffies;

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
                i,
                ireq->ir_loc_addr,
                ireq->ir_num,
                ireq->ir_rmt_addr,
                ntohs(ireq->ir_rmt_port),
                TCP_SYN_RECV,
                0, 0, /* could print option size, but that is af dependent. */
                1,    /* timers active (only the expire timer) */
                jiffies_delta_to_clock_t(delta),
                req->num_timeout,
                from_kuid_munged(seq_user_ns(f),
                                 sock_i_uid(req->rsk_listener)),
                0,  /* non standard timer */
                0,  /* open_requests have no inode */
                0,
                req);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Hideaki Yoshifuji / 吉藤英明        59  51.30%         1       6.67%
Arnaldo Carvalho de Melo          20  17.39%         3      20.00%
Eric Dumazet                      20  17.39%         7      46.67%
Eric W. Biedermann                 8   6.96%         1       6.67%
Pavel Emelyanov                    5   4.35%         1       6.67%
David S. Miller                    2   1.74%         1       6.67%
Tetsuo Handa                       1   0.87%         1       6.67%
Total                            115  100.00%       15     100.00%


static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
        int timer_active;
        unsigned long timer_expires;
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct inet_sock *inet = inet_sk(sk);
        const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
        __be32 dest = inet->inet_daddr;
        __be32 src = inet->inet_rcv_saddr;
        __u16 destp = ntohs(inet->inet_dport);
        __u16 srcp = ntohs(inet->inet_sport);
        int rx_queue;
        int state;

        if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
            icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
            icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
                timer_active  = 1;
                timer_expires = icsk->icsk_timeout;
        } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
                timer_active  = 4;
                timer_expires = icsk->icsk_timeout;
        } else if (timer_pending(&sk->sk_timer)) {
                timer_active  = 2;
                timer_expires = sk->sk_timer.expires;
        } else {
                timer_active  = 0;
                timer_expires = jiffies;
        }

        state = sk_state_load(sk);
        if (state == TCP_LISTEN)
                rx_queue = sk->sk_ack_backlog;
        else
                /* Because we don't lock the socket,
                 * we might find a transient negative value.
                 */
                rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
                        "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
                i, src, srcp, dest, destp, state,
                tp->write_seq - tp->snd_una,
                rx_queue,
                timer_active,
                jiffies_delta_to_clock_t(timer_expires - jiffies),
                icsk->icsk_retransmits,
                from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
                icsk->icsk_probes_out,
                sock_i_ino(sk),
                atomic_read(&sk->sk_refcnt), sk,
                jiffies_to_clock_t(icsk->icsk_rto),
                jiffies_to_clock_t(icsk->icsk_ack.ato),
                (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
                tp->snd_cwnd,
                state == TCP_LISTEN ?
                    fastopenq->max_qlen :
                    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo         246  66.13%         6      25.00%
Eric Dumazet                      56  15.05%         6      25.00%
Jerry Chu                         20   5.38%         1       4.17%
Ilpo Järvinen                     13   3.49%         2       8.33%
Eric W. Biedermann                 8   2.15%         1       4.17%
Yuchung Cheng                      6   1.61%         1       4.17%
Stephen Hemminger                  6   1.61%         1       4.17%
Nandita Dukkipati                  6   1.61%         1       4.17%
Pavel Emelyanov                    5   1.34%         1       4.17%
Al Viro                            2   0.54%         1       4.17%
David S. Miller                    2   0.54%         1       4.17%
Tetsuo Handa                       1   0.27%         1       4.17%
Hideaki Yoshifuji / 吉藤英明         1   0.27%         1       4.17%
Total                            372  100.00%       24     100.00%


static void get_timewait4_sock(const struct inet_timewait_sock *tw,
                               struct seq_file *f, int i)
{
        long delta = tw->tw_timer.expires - jiffies;
        __be32 dest, src;
        __u16 destp, srcp;

        dest  = tw->tw_daddr;
        src   = tw->tw_rcv_saddr;
        destp = ntohs(tw->tw_dport);
        srcp  = ntohs(tw->tw_sport);

        seq_printf(f, "%4d: %08X:%04X %08X:%04X"
                " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
                i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
                3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
                atomic_read(&tw->tw_refcnt), tw);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          85  69.67%         3      20.00%
Eric Dumazet                      14  11.48%         3      20.00%
Linus Torvalds (pre-git)          13  10.66%         4      26.67%
Pavel Emelyanov                    5   4.10%         1       6.67%
David S. Miller                    2   1.64%         1       6.67%
Tetsuo Handa                       1   0.82%         1       6.67%
Al Viro                            1   0.82%         1       6.67%
Hideaki Yoshifuji / 吉藤英明         1   0.82%         1       6.67%
Total                            122  100.00%       15     100.00%

#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
        struct tcp_iter_state *st;
        struct sock *sk = v;

        seq_setwidth(seq, TMPSZ - 1);
        if (v == SEQ_START_TOKEN) {
                seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
                           "rx_queue tr tm->when retrnsmt   uid  timeout "
                           "inode");
                goto out;
        }
        st = seq->private;

        if (sk->sk_state == TCP_TIME_WAIT)
                get_timewait4_sock(v, seq, st->num);
        else if (sk->sk_state == TCP_NEW_SYN_RECV)
                get_openreq4(v, seq, st->num);
        else
                get_tcp4_sock(v, seq, st->num);
out:
        seq_pad(seq, '\n');
        return 0;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo          45  36.00%         1       9.09%
Linus Torvalds (pre-git)          33  26.40%         4      36.36%
Eric Dumazet                      28  22.40%         2      18.18%
Tetsuo Handa                      14  11.20%         1       9.09%
Pavel Emelyanov                    3   2.40%         1       9.09%
Hideaki Yoshifuji / 吉藤英明         1   0.80%         1       9.09%
Joe Perches                        1   0.80%         1       9.09%
Total                            125  100.00%       11     100.00%

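The output produced by tcp4_seq_show() and the three printers above is what userspace reads from /proc/net/tcp: addresses are the raw __be32 words printed as hex, ports are hex too. An illustrative userspace reader that parses just the first few fields (minimal error handling; a real parser should consume every column):

#include <stdio.h>
#include <arpa/inet.h>

/* Read /proc/net/tcp and print local address, port and state for each
 * socket. Reassigning the hex word straight into s_addr works because
 * the kernel printed the __be32 value as seen by the same CPU.
 */
int main(void)
{
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[512];

        if (!f)
                return 1;
        fgets(line, sizeof(line), f); /* skip the header line */
        while (fgets(line, sizeof(line), f)) {
                unsigned slot, laddr, lport, raddr, rport, state;

                if (sscanf(line, "%u: %X:%X %X:%X %X",
                           &slot, &laddr, &lport, &raddr, &rport, &state) == 6) {
                        struct in_addr a = { .s_addr = laddr };
                        printf("%s:%u state %u\n", inet_ntoa(a), lport, state);
                }
        }
        fclose(f);
        return 0;
}
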
static const struct file_operations tcp_afinfo_seq_fops = {
        .owner   = THIS_MODULE,
        .open    = tcp_seq_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
        .name     = "tcp",
        .family   = AF_INET,
        .seq_fops = &tcp_afinfo_seq_fops,
        .seq_ops  = {
                .show = tcp4_seq_show,
        },
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
        return tcp_proc_register(net, &tcp4_seq_afinfo);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Pavel Emelyanov                   12  57.14%         1      25.00%
Arnaldo Carvalho de Melo           6  28.57%         1      25.00%
Hideaki Yoshifuji / 吉藤英明         2   9.52%         1      25.00%
Alexey Dobriyan                    1   4.76%         1      25.00%
Total                             21  100.00%        4     100.00%


static void __net_exit tcp4_proc_exit_net(struct net *net)
{
        tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Pavel Emelyanov                   15  75.00%         1      25.00%
Hideaki Yoshifuji / 吉藤英明         2  10.00%         1      25.00%
Daniel Lezcano                     2  10.00%         1      25.00%
Alexey Dobriyan                    1   5.00%         1      25.00%
Total                             20  100.00%        4     100.00%

static struct pernet_operations tcp4_net_ops = {
        .init = tcp4_proc_init_net,
        .exit = tcp4_proc_exit_net,
};
int __init tcp4_proc_init(void)
{
        return register_pernet_subsys(&tcp4_net_ops);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Pavel Emelyanov                   13  86.67%         1      50.00%
Linus Torvalds (pre-git)           2  13.33%         1      50.00%
Total                             15  100.00%        2     100.00%

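tcp4_net_ops above (and tcp_sk_ops further below) follow the pernet_operations pattern: the init/exit hooks run once for every network namespace as it is created or torn down, which is how each netns gets its own /proc/net/tcp and its own control sockets. A minimal hypothetical module using the same registration calls (not part of this file):

#include <linux/module.h>
#include <net/net_namespace.h>

/* Log namespace creation and teardown; the hooks run per netns. */
static int __net_init demo_net_init(struct net *net)
{
        pr_info("demo: netns %p created\n", net);
        return 0;
}

static void __net_exit demo_net_exit(struct net *net)
{
        pr_info("demo: netns %p going away\n", net);
}

static struct pernet_operations demo_net_ops = {
        .init = demo_net_init,
        .exit = demo_net_exit,
};

static int __init demo_init(void)
{
        return register_pernet_subsys(&demo_net_ops);
}

static void __exit demo_exit(void)
{
        unregister_pernet_subsys(&demo_net_ops);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
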

void tcp4_proc_exit(void)
{
        unregister_pernet_subsys(&tcp4_net_ops);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Arnaldo Carvalho de Melo           9  69.23%         1      33.33%
Hideaki Yoshifuji / 吉藤英明         2  15.38%         1      33.33%
Pavel Emelyanov                    2  15.38%         1      33.33%
Total                             13  100.00%        3     100.00%

#endif /* CONFIG_PROC_FS */

struct proto tcp_prot = {
        .name                  = "TCP",
        .owner                 = THIS_MODULE,
        .close                 = tcp_close,
        .connect               = tcp_v4_connect,
        .disconnect            = tcp_disconnect,
        .accept                = inet_csk_accept,
        .ioctl                 = tcp_ioctl,
        .init                  = tcp_v4_init_sock,
        .destroy               = tcp_v4_destroy_sock,
        .shutdown              = tcp_shutdown,
        .setsockopt            = tcp_setsockopt,
        .getsockopt            = tcp_getsockopt,
        .keepalive             = tcp_set_keepalive,
        .recvmsg               = tcp_recvmsg,
        .sendmsg               = tcp_sendmsg,
        .sendpage              = tcp_sendpage,
        .backlog_rcv           = tcp_v4_do_rcv,
        .release_cb            = tcp_release_cb,
        .hash                  = inet_hash,
        .unhash                = inet_unhash,
        .get_port              = inet_csk_get_port,
        .enter_memory_pressure = tcp_enter_memory_pressure,
        .stream_memory_free    = tcp_stream_memory_free,
        .sockets_allocated     = &tcp_sockets_allocated,
        .orphan_count          = &tcp_orphan_count,
        .memory_allocated      = &tcp_memory_allocated,
        .memory_pressure       = &tcp_memory_pressure,
        .sysctl_mem            = sysctl_tcp_mem,
        .sysctl_wmem           = sysctl_tcp_wmem,
        .sysctl_rmem           = sysctl_tcp_rmem,
        .max_header            = MAX_TCP_HEADER,
        .obj_size              = sizeof(struct tcp_sock),
        .slab_flags            = SLAB_DESTROY_BY_RCU,
        .twsk_prot             = &tcp_timewait_sock_ops,
        .rsk_prot              = &tcp_request_sock_ops,
        .h.hashinfo            = &tcp_hashinfo,
        .no_autobind           = true,
#ifdef CONFIG_COMPAT
        .compat_setsockopt     = compat_tcp_setsockopt,
        .compat_getsockopt     = compat_tcp_getsockopt,
#endif
        .diag_destroy          = tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
static void __net_exit tcp_sk_exit(struct net *net)
{
        int cpu;

        for_each_possible_cpu(cpu)
                inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));

        free_percpu(net->ipv4.tcp_sk);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                      43  100.00%        1     100.00%
Total                             43  100.00%        1     100.00%


static int __net_init tcp_sk_init(struct net *net)
{
        int res, cpu, cnt;

        net->ipv4.tcp_sk = alloc_percpu(struct sock *);
        if (!net->ipv4.tcp_sk)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                struct sock *sk;

                res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
                                           IPPROTO_TCP, net);
                if (res)
                        goto fail;
                sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
                *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
        }

        net->ipv4.sysctl_tcp_ecn = 2;
        net->ipv4.sysctl_tcp_ecn_fallback = 1;

        net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
        net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
        net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;

        net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
        net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
        net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;

        net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
        net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
        net->ipv4.sysctl_tcp_syncookies = 1;
        net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
        net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
        net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
        net->ipv4.sysctl_tcp_orphan_retries = 0;
        net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
        net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
        net->ipv4.sysctl_tcp_tw_reuse = 0;

        cnt = tcp_hashinfo.ehash_mask + 1;
        net->ipv4.tcp_death_row.sysctl_tw_recycle = 0;
        net->ipv4.tcp_death_row.sysctl_max_tw_buckets = (cnt + 1) / 2;
        net->ipv4.tcp_death_row.hashinfo = &tcp_hashinfo;

        net->ipv4.sysctl_max_syn_backlog = max(128, cnt / 256);

        return 0;

fail:
        tcp_sk_exit(net);

        return res;
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                      96  30.28%         3      11.11%
Nikolay Borisov                   95  29.97%        12      44.44%
Haishuang Yan                     70  22.08%         3      11.11%
Fan Du                            24   7.57%         3      11.11%
Denis V. Lunev                    12   3.79%         1       3.70%
Hannes Frederic Sowa               8   2.52%         1       3.70%
Daniel Borkmann                    8   2.52%         1       3.70%
Linus Torvalds (pre-git)           3   0.95%         2       7.41%
David S. Miller                    1   0.32%         1       3.70%
Total                            317  100.00%       27     100.00%

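The last few defaults in tcp_sk_init() scale with the size of the established hash table: cnt is the bucket count, max_tw_buckets is roughly half of it, and max_syn_backlog is cnt/256 with a floor of 128. A standalone sketch of that arithmetic, assuming a hypothetical ehash_mask of 65535:

#include <stdio.h>

/* Reproduce the sizing in tcp_sk_init() for an assumed 65536-bucket
 * established hash (ehash_mask = 65535); values are illustrative.
 */
static unsigned max_u(unsigned a, unsigned b) { return a > b ? a : b; }

int main(void)
{
        unsigned ehash_mask = 65535; /* assumption for illustration */
        unsigned cnt = ehash_mask + 1;

        unsigned max_tw_buckets  = (cnt + 1) / 2;          /* 32768 */
        unsigned max_syn_backlog = max_u(128, cnt / 256);  /* 256 */

        printf("tw_buckets=%u syn_backlog=%u\n", max_tw_buckets, max_syn_backlog);
        return 0;
}
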

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
        inet_twsk_purge(&tcp_hashinfo, AF_INET);
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric W. Biedermann                11  55.00%         1      33.33%
Daniel Lezcano                     8  40.00%         1      33.33%
Denis V. Lunev                     1   5.00%         1      33.33%
Total                             20  100.00%        3     100.00%

static struct pernet_operations __net_initdata tcp_sk_ops = {
        .init       = tcp_sk_init,
        .exit       = tcp_sk_exit,
        .exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
        if (register_pernet_subsys(&tcp_sk_ops))
                panic("Failed to create the TCP control socket.\n");
}

Contributors

Person                        Tokens  Prop     Commits  CommitProp
Denis V. Lunev                    12  57.14%         2      40.00%
Linus Torvalds (pre-git)           7  33.33%         1      20.00%
Eric W. Biedermann                 1   4.76%         1      20.00%
Linus Torvalds                     1   4.76%         1      20.00%
Total                             21  100.00%        5     100.00%


Overall Contributors

Person                        Tokens  Prop     Commits  CommitProp
Eric Dumazet                    2559  21.87%       126      27.04%
Linus Torvalds (pre-git)        2229  19.05%        45       9.66%
Arnaldo Carvalho de Melo        1758  15.03%        49      10.52%
Hideaki Yoshifuji / 吉藤英明      1460  12.48%        15       3.22%
David S. Miller                  593   5.07%        36       7.73%
Tom Herbert                      330   2.82%         4       0.86%
Adam Langley                     266   2.27%         2       0.43%
Octavian Purdila                 194   1.66%         9       1.93%
Pavel Emelyanov                  167   1.43%        12       2.58%
Herbert Xu                       151   1.29%         8       1.72%
Shawn Lu                         149   1.27%         2       0.43%
Linus Torvalds                   139   1.19%         6       1.29%
Damian Lukowski                  133   1.14%         2       0.43%
Denis V. Lunev                   127   1.09%         8       1.72%
Alexey Kuznetsov                 124   1.06%         5       1.07%
Nikolay Borisov                   95   0.81%        12       2.58%
Haishuang Yan                     93   0.79%         3       0.64%
Florian Westphal                  90   0.77%         3       0.64%
Stephen Hemminger                 77   0.66%         9       1.93%
Rusty Russell                     58   0.50%         1       0.21%
Lorenzo Colitti                   56   0.48%         2       0.43%
Yuchung Cheng                     54   0.46%         4       0.86%
Jerry Chu                         53   0.45%         2       0.43%
Daniel Lezcano                    49   0.42%         3       0.64%
KOVACS Krisztian                  44   0.38%         1       0.21%
Eric W. Biedermann                41   0.35%         5       1.07%
Arjan van de Ven                  40   0.34%         1       0.21%
Andi Kleen                        35   0.30%         1       0.21%
Tim Shepard                       32   0.27%         2       0.43%
Fan Du                            27   0.23%         4       0.86%
Wei Wang                          24   0.21%         1       0.21%
Ilpo Järvinen                     23   0.20%         4       0.86%
Balazs Scheidler                  20   0.17%         1       0.21%
Alexey Kodanev                    19   0.16%         1       0.21%
Neal Cardwell                     18   0.15%         3       0.64%
Hannes Frederic Sowa              17   0.15%         3       0.64%
Tetsuo Handa                      17   0.15%         1       0.21%
Andrey Vagin                      16   0.14%         1       0.21%
Craig Gallek                      16   0.14%         1       0.21%
Benjamin LaHaise                  15   0.13%         1       0.21%
Changli Gao                       15   0.13%         1       0.21%
Patrick McHardy                   15   0.13%         4       0.86%
Dmitry Mishin                     15   0.13%         1       0.21%
Daniel Borkmann                   15   0.13%         2       0.43%
James Morris                      13   0.11%         2       0.43%
Joe Perches                       12   0.10%         3       0.64%
Dmitry Popov                      12   0.10%         1       0.21%
Wei Dong                          12   0.10%         1       0.21%
Nivedita Singhvi                  11   0.09%         1       0.21%
Jiri Benc                         11   0.09%         1       0.21%
Yi Zhu                             9   0.08%         1       0.21%
David Ahern                        8   0.07%         1       0.21%
Marcelo Ricardo Leitner            8   0.07%         1       0.21%
Tom Quetchenbach                   8   0.07%         1       0.21%
Al Viro                            8   0.07%         5       1.07%
Jon Maxwell                        8   0.07%         1       0.21%
Vijay Subramanian                  7   0.06%         2       0.43%
William Allen Simpson              7   0.06%         2       0.43%
Nandita Dukkipati                  6   0.05%         1       0.21%
Andrew Morton                      6   0.05%         1       0.21%
Hirofumi Ogawa                     6   0.05%         1       0.21%
John Dykstra                       6   0.05%         2       0.43%
Gao Feng                           5   0.04%         1       0.21%
Ursula Braun                       5   0.04%         1       0.21%
Gui Jianfeng                       5   0.04%         1       0.21%
Anders Gustafsson                  5   0.04%         1       0.21%
Gerrit Renker                      4   0.03%         1       0.21%
Sathya Perla                       4   0.03%         1       0.21%
Harvey Harrison                    3   0.03%         1       0.21%
Tejun Heo                          3   0.03%         1       0.21%
Paul Moore                         3   0.03%         1       0.21%
Zheng Yan                          3   0.03%         1       0.21%
Michal Kubeček                     3   0.03%         1       0.21%
Eliezer Tamir                      3   0.03%         2       0.43%
Adrian Bunk                        3   0.03%         2       0.43%
Ian Morris                         3   0.03%         1       0.21%
Alexey Dobriyan                    2   0.02%         1       0.21%
Brian Haley                        2   0.02%         2       0.43%
Martin KaFai Lau                   2   0.02%         1       0.21%
Aydin Arik                         2   0.02%         1       0.21%
Christopher Leech                  2   0.02%         1       0.21%
Glauber de Oliveira Costa          2   0.02%         1       0.21%
Christoph Paasch                   2   0.02%         1       0.21%
Yaogong Wang                       1   0.01%         1       0.21%
Matthias M. Dellweg                1   0.01%         1       0.21%
Craig Schlenter                    1   0.01%         1       0.21%
Steven Cole                        1   0.01%         1       0.21%
Ingo Molnar                        1   0.01%         1       0.21%
Thomas Graf                        1   0.01%         1       0.21%
Kris Katterjohn                    1   0.01%         1       0.21%
Total                          11699  100.00%       466     100.00%