Release 4.7 net/ipv4/tcp_ipv4.c
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Implementation of the Transmission Control Protocol(TCP).
*
* IPv4 specific functions
*
*
* code split from:
* linux/ipv4/tcp.c
* linux/ipv4/tcp_input.c
* linux/ipv4/tcp_output.c
*
* See tcp.c for author information
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/*
* Changes:
* David S. Miller : New socket lookup architecture.
* This code is dedicated to John Dyson.
* David S. Miller : Change semantics of established hash,
* half is devoted to TIME_WAIT sockets
* and the rest go in the other half.
* Andi Kleen : Add support for syncookies and fixed
* some bugs: ip options weren't passed to
* the TCP layer, missed a check for an
* ACK bit.
* Andi Kleen : Implemented fast path mtu discovery.
* Fixed many serious bugs in the
* request_sock handling and moved
* most of it into the af independent code.
* Added tail drop and some other bugfixes.
* Added new listen semantics.
* Mike McLagan : Routing by source
* Juan Jose Ciarlante: ip_dynaddr bits
* Andi Kleen: various fixes.
Vitaly E. Lavrov : Transparent proxy revived after a year-long coma.
* Andi Kleen : Fix new listen.
* Andi Kleen : Fix accept error reporting.
YOSHIFUJI Hideaki @USAGI and
Alexey Kuznetsov : Support IPV6_V6ONLY socket option, which
allows both IPv4 and IPv6 sockets to bind
a single port at the same time.
*/
#define pr_fmt(fmt) "TCP: " fmt
#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/secure_seq.h>
#include <net/busy_poll.h>
#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <crypto/hash.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);
#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif
struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr,
tcp_hdr(skb)->dest,
tcp_hdr(skb)->source);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| arnaldo carvalho de melo | 37 | 84.09% | 3 | 42.86% |
| linus torvalds | 4 | 9.09% | 1 | 14.29% |
| david s. miller | 2 | 4.55% | 2 | 28.57% |
| eric dumazet | 1 | 2.27% | 1 | 14.29% |
| Total | 44 | 100.00% | 7 | 100.00% |
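tcp_v4_init_sequence() derives the initial sequence number from the connection 4-tuple via secure_tcp_sequence_number(), which combines a keyed hash of the addresses and ports with a fine-grained clock term (the scheme described in RFC 6528). A minimal userspace sketch of the idea, using a simple non-cryptographic mixer as a stand-in for the kernel's keyed hash:

#include <stdint.h>
#include <time.h>

/* Stand-in mixer; the real code uses a cryptographic hash (MD5/SipHash)
 * keyed with a boot-time random secret. */
static uint32_t keyed_mix(uint32_t saddr, uint32_t daddr,
			  uint16_t sport, uint16_t dport, uint64_t secret)
{
	uint64_t x = secret;

	x ^= ((uint64_t)saddr << 32) | daddr;
	x ^= ((uint64_t)sport << 16) | dport;
	x *= 0x9e3779b97f4a7c15ULL;	/* golden-ratio multiplier */
	return (uint32_t)(x >> 32);
}

/* ISN = F(4-tuple, secret) + clock term, so successive connections on
 * the same 4-tuple get advancing, hard-to-guess sequence spaces. */
static uint32_t isn_sketch(uint32_t saddr, uint32_t daddr,
			   uint16_t sport, uint16_t dport, uint64_t secret)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	/* >> 6 gives ~64 ns granularity, like the kernel's seq_scale() */
	return keyed_mix(saddr, daddr, sport, dport, secret) +
	       (uint32_t)((ts.tv_sec * 1000000000ULL + ts.tv_nsec) >> 6);
}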
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
struct tcp_sock *tp = tcp_sk(sk);
/* With PAWS, it is safe from the viewpoint
of data integrity. Even without PAWS it is safe provided sequence
spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
Actually, the idea is close to VJ's, only the timestamp cache is
held not per host but per port pair, and the TW bucket is used as the
state holder.
If the TW bucket has already been destroyed we fall back to VJ's scheme
and use the initial timestamp retrieved from the peer table.
*/
if (tcptw->tw_ts_recent_stamp &&
(!twp || (sysctl_tcp_tw_reuse &&
get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
if (tp->write_seq == 0)
tp->write_seq = 1;
tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
sock_hold(sktw);
return 1;
}
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| arnaldo carvalho de melo | 121 | 97.58% | 1 | 33.33% |
| james morris | 2 | 1.61% | 1 | 33.33% |
| ian morris | 1 | 0.81% | 1 | 33.33% |
| Total | 124 | 100.00% | 3 | 100.00% |
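tcp_twsk_unique() only allows an outgoing connection to take over a TIME-WAIT socket when the tcp_tw_reuse knob is set (and the cached timestamp proves at least a second has passed). The knob is exposed as net.ipv4.tcp_tw_reuse; a minimal sketch of enabling it from userspace:

#include <stdio.h>

/* Enable reuse of TIME-WAIT sockets for new outgoing connections
 * (net.ipv4.tcp_tw_reuse, read by tcp_twsk_unique() above). */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");

	if (!f) {
		perror("tcp_tw_reuse");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}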
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
__be16 orig_sport, orig_dport;
__be32 daddr, nexthop;
struct flowi4 *fl4;
struct rtable *rt;
int err;
struct ip_options_rcu *inet_opt;
if (addr_len < sizeof(struct sockaddr_in))
return -EINVAL;
if (usin->sin_family != AF_INET)
return -EAFNOSUPPORT;
nexthop = daddr = usin->sin_addr.s_addr;
inet_opt = rcu_dereference_protected(inet->inet_opt,
lockdep_sock_is_held(sk));
if (inet_opt && inet_opt->opt.srr) {
if (!daddr)
return -EINVAL;
nexthop = inet_opt->opt.faddr;
}
orig_sport = inet->inet_sport;
orig_dport = usin->sin_port;
fl4 = &inet->cork.fl.u.ip4;
rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
IPPROTO_TCP,
orig_sport, orig_dport, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
if (err == -ENETUNREACH)
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
return err;
}
if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
ip_rt_put(rt);
return -ENETUNREACH;
}
if (!inet_opt || !inet_opt->opt.srr)
daddr = fl4->daddr;
if (!inet->inet_saddr)
inet->inet_saddr = fl4->saddr;
sk_rcv_saddr_set(sk, inet->inet_saddr);
if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
/* Reset inherited state */
tp->rx_opt.ts_recent = 0;
tp->rx_opt.ts_recent_stamp = 0;
if (likely(!tp->repair))
tp->write_seq = 0;
}
if (tcp_death_row.sysctl_tw_recycle &&
!tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
tcp_fetch_timewait_stamp(sk, &rt->dst);
inet->inet_dport = usin->sin_port;
sk_daddr_set(sk, daddr);
inet_csk(sk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
/* Socket identity is still unknown (sport may be zero).
* However we set state to SYN-SENT and, without releasing the socket
* lock, select a source port, enter ourselves into the hash tables and
* complete initialization afterwards.
*/
tcp_set_state(sk, TCP_SYN_SENT);
err = inet_hash_connect(&tcp_death_row, sk);
if (err)
goto failure;
sk_set_txhash(sk);
rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
inet->inet_sport, inet->inet_dport, sk);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
rt = NULL;
goto failure;
}
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
inet->inet_daddr,
inet->inet_sport,
usin->sin_port);
inet->inet_id = tp->write_seq ^ jiffies;
err = tcp_connect(sk);
rt = NULL;
if (err)
goto failure;
return 0;
failure:
/*
* This unhashes the socket and releases the local port,
* if necessary.
*/
tcp_set_state(sk, TCP_CLOSE);
ip_rt_put(rt);
sk->sk_route_caps = 0;
inet->inet_dport = 0;
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| arnaldo carvalho de melo | 208 | 32.91% | 13 | 22.41% |
| david s. miller | 143 | 22.63% | 11 | 18.97% |
| pre-git | 71 | 11.23% | 11 | 18.97% |
| alexey kuznetsov | 70 | 11.08% | 3 | 5.17% |
| eric dumazet | 55 | 8.70% | 5 | 8.62% |
| pavel emelianov | 23 | 3.64% | 2 | 3.45% |
| stephen hemminger | 18 | 2.85% | 1 | 1.72% |
| linus torvalds | 16 | 2.53% | 3 | 5.17% |
| wei dong | 12 | 1.90% | 1 | 1.72% |
| herbert xu | 6 | 0.95% | 1 | 1.72% |
| sathya perla | 4 | 0.63% | 1 | 1.72% |
| hannes frederic sowa | 1 | 0.16% | 1 | 1.72% |
| al viro | 1 | 0.16% | 1 | 1.72% |
| steven cole | 1 | 0.16% | 1 | 1.72% |
| william allen simpson | 1 | 0.16% | 1 | 1.72% |
| tom herbert | 1 | 0.16% | 1 | 1.72% |
| patrick mchardy | 1 | 0.16% | 1 | 1.72% |
| Total | 632 | 100.00% | 58 | 100.00% |
EXPORT_SYMBOL(tcp_v4_connect);
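tcp_v4_connect() is the kernel half of an ordinary connect(2) on an AF_INET stream socket; the address-family and length checks at its top mirror what the caller passes in. A minimal userspace counterpart (destination address chosen arbitrarily for illustration):

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;		/* anything else: -EAFNOSUPPORT */
	dst.sin_port = htons(80);
	inet_pton(AF_INET, "93.184.216.34", &dst.sin_addr);

	/* Drives tcp_v4_connect(): route lookup, source port selection,
	 * TCP_SYN_SENT, then tcp_connect() emits the SYN. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0)
		perror("connect");
	close(fd);
	return 0;
}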
/*
* This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
* It can be called through tcp_release_cb() if socket was owned by user
* at the time tcp_v4_err() was called to handle ICMP message.
*/
void tcp_v4_mtu_reduced(struct sock *sk)
{
struct dst_entry *dst;
struct inet_sock *inet = inet_sk(sk);
u32 mtu = tcp_sk(sk)->mtu_info;
dst = inet_csk_update_pmtu(sk, mtu);
if (!dst)
return;
/* Something is about to go wrong... Remember the soft error
* for the case where this connection will not be able to recover.
*/
if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
sk->sk_err_soft = EMSGSIZE;
mtu = dst_mtu(dst);
if (inet->pmtudisc != IP_PMTUDISC_DONT &&
ip_sk_accept_pmtu(sk) &&
inet_csk(sk)->icsk_pmtu_cookie > mtu) {
tcp_sync_mss(sk, mtu);
/* Resend the TCP packet because it's
* clear that the old packet has been
* dropped. This is the new "fast" path mtu
* discovery.
*/
tcp_simple_retransmit(sk);
} /* else let the usual retransmit timer handle it */
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 66 | 55.93% | 5 | 33.33% |
| david s. miller | 16 | 13.56% | 2 | 13.33% |
| eric dumazet | 11 | 9.32% | 1 | 6.67% |
| alexey kuznetsov | 10 | 8.47% | 1 | 6.67% |
| arnaldo carvalho de melo | 8 | 6.78% | 4 | 26.67% |
| hannes frederic sowa | 5 | 4.24% | 1 | 6.67% |
| herbert xu | 2 | 1.69% | 1 | 6.67% |
| Total | 118 | 100.00% | 15 | 100.00% |
EXPORT_SYMBOL(tcp_v4_mtu_reduced);
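tcp_v4_mtu_reduced() only shrinks the MSS when the socket's path-MTU-discovery mode permits it (the inet->pmtudisc != IP_PMTUDISC_DONT test). A sketch of pinning that mode from userspace and reading back the path MTU the kernel has learned, using the standard IP_MTU_DISCOVER/IP_MTU socket options:

#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>

/* Force per-route PMTU discovery on a connected TCP socket and
 * report the current path MTU. */
static void show_path_mtu(int fd)
{
	int val = IP_PMTUDISC_DO;	/* always set DF; honour ICMP_FRAG_NEEDED */
	int mtu;
	socklen_t len = sizeof(mtu);

	if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &val, sizeof(val)) < 0)
		perror("IP_MTU_DISCOVER");
	/* IP_MTU is only meaningful on a connected socket. */
	if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
		printf("path MTU: %d\n", mtu);
}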
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
struct dst_entry *dst = __sk_dst_check(sk, 0);
if (dst)
dst->ops->redirect(dst, sk, skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| david s. miller | 45 | 100.00% | 2 | 100.00% |
| Total | 45 | 100.00% | 2 | 100.00% |
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
struct request_sock *req = inet_reqsk(sk);
struct net *net = sock_net(sk);
/* ICMPs are not backlogged, hence we cannot get
* an established socket here.
*/
if (seq != tcp_rsk(req)->snt_isn) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
} else if (abort) {
/*
* Still in SYN_RECV, just remove it silently.
* There is no good way to pass the error to the newly
* created socket, and POSIX does not want network
* errors returned from accept().
*/
inet_csk_reqsk_queue_drop(req->rsk_listener, req);
tcp_listendrop(req->rsk_listener);
}
reqsk_put(req);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 83 | 96.51% | 5 | 83.33% |
| fan du | 3 | 3.49% | 1 | 16.67% |
| Total | 86 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(tcp_req_err);
/*
* This routine is called by the ICMP module when it gets some
* sort of error condition. If err < 0 then the socket should
* be closed and the error returned to the user. If err > 0
* it's just (icmp type << 8) | icmp code. After adjustment, the
* header points to the first 8 bytes of the TCP header. We need
* to find the appropriate port.
*
* The locking strategy used here is very "optimistic". When
* someone else accesses the socket the ICMP is just dropped
* and for some paths there is no check at all.
* A more general error queue to queue errors for later handling
* is probably better.
*
*/
void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
{
const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
struct inet_connection_sock *icsk;
struct tcp_sock *tp;
struct inet_sock *inet;
const int type = icmp_hdr(icmp_skb)->type;
const int code = icmp_hdr(icmp_skb)->code;
struct sock *sk;
struct sk_buff *skb;
struct request_sock *fastopen;
__u32 seq, snd_una;
__u32 remaining;
int err;
struct net *net = dev_net(icmp_skb->dev);
sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
th->dest, iph->saddr, ntohs(th->source),
inet_iif(icmp_skb));
if (!sk) {
__ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
return;
}
if (sk->sk_state == TCP_TIME_WAIT) {
inet_twsk_put(inet_twsk(sk));
return;
}
seq = ntohl(th->seq);
if (sk->sk_state == TCP_NEW_SYN_RECV)
return tcp_req_err(sk, seq,
type == ICMP_PARAMETERPROB ||
type == ICMP_TIME_EXCEEDED ||
(type == ICMP_DEST_UNREACH &&
(code == ICMP_NET_UNREACH ||
code == ICMP_HOST_UNREACH)));
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
* We do take care of the PMTU discovery (RFC 1191) special case:
* we can receive locally generated ICMP messages while the socket is held.
*/
if (sock_owned_by_user(sk)) {
if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
__NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
}
if (sk->sk_state == TCP_CLOSE)
goto out;
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto out;
}
icsk = inet_csk(sk);
tp = tcp_sk(sk);
/* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
fastopen = tp->fastopen_rsk;
snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
if (sk->sk_state != TCP_LISTEN &&
!between(seq, snd_una, tp->snd_nxt)) {
__NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
goto out;
}
switch (type) {
case ICMP_REDIRECT:
do_redirect(icmp_skb, sk);
goto out;
case ICMP_SOURCE_QUENCH:
/* Just silently ignore these. */
goto out;
case ICMP_PARAMETERPROB:
err = EPROTO;
break;
case ICMP_DEST_UNREACH:
if (code > NR_ICMP_UNREACH)
goto out;
if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
/* We are not interested in TCP_LISTEN and open_requests
* (SYN-ACKs sent out by Linux are always < 576 bytes, so
* they should go through unfragmented).
*/
if (sk->sk_state == TCP_LISTEN)
goto out;
tp->mtu_info = info;
if (!sock_owned_by_user(sk)) {
tcp_v4_mtu_reduced(sk);
} else {
if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
sock_hold(sk);
}
goto out;
}
err = icmp_err_convert[code].errno;
/* check if icmp_skb allows revert of backoff
* (see draft-zimmermann-tcp-lcd) */
if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
break;
if (seq != tp->snd_una || !icsk->icsk_retransmits ||
!icsk->icsk_backoff || fastopen)
break;
if (sock_owned_by_user(sk))
break;
icsk->icsk_backoff--;
icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
TCP_TIMEOUT_INIT;
icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
skb = tcp_write_queue_head(sk);
BUG_ON(!skb);
remaining = icsk->icsk_rto -
min(icsk->icsk_rto,
tcp_time_stamp - tcp_skb_timestamp(skb));
if (remaining) {
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
remaining, TCP_RTO_MAX);
} else {
/* RTO revert clocked out retransmission.
* Will retransmit now */
tcp_retransmit_timer(sk);
}
break;
case ICMP_TIME_EXCEEDED:
err = EHOSTUNREACH;
break;
default:
goto out;
}
switch (sk->sk_state) {
case TCP_SYN_SENT:
case TCP_SYN_RECV:
/* Only in fast or simultaneous open. If a fast open socket
* is already accepted it is treated as a connected one below.
*/
if (fastopen && !fastopen->sk)
break;
if (!sock_owned_by_user(sk)) {
sk->sk_err = err;
sk->sk_error_report(sk);
tcp_done(sk);
} else {
sk->sk_err_soft = err;
}
goto out;
}
/* If we've already connected we will keep trying
* until we time out, or the user gives up.
*
* rfc1122 4.2.3.9 allows us to consider as hard errors
* only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
* but it is obsoleted by pmtu discovery).
*
* Note that in the modern internet, where routing is unreliable
* and broken firewalls sit in every dark corner, sending random
* errors ordered by their masters, even these two messages finally lose
* their original sense (even Linux sends invalid PORT_UNREACHs).
*
* Now we are in compliance with RFCs.
* --ANK (980905)
*/
inet = inet_sk(sk);
if (!sock_owned_by_user(sk) && inet->recverr) {
sk->sk_err = err;
sk->sk_error_report(sk);
} else { /* Only an error on timeout */
sk->sk_err_soft = err;
}
out:
bh_unlock_sock(sk);
sock_put(sk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 316 | 40.83% | 8 | 17.02% |
| damian lukowski | 133 | 17.18% | 2 | 4.26% |
| eric dumazet | 126 | 16.28% | 12 | 25.53% |
| david s. miller | 37 | 4.78% | 4 | 8.51% |
| yuchung cheng | 37 | 4.78% | 1 | 2.13% |
| arnaldo carvalho de melo | 29 | 3.75% | 8 | 17.02% |
| stephen hemminger | 27 | 3.49% | 1 | 2.13% |
| linus torvalds | 22 | 2.84% | 1 | 2.13% |
| pavel emelianov | 19 | 2.45% | 4 | 8.51% |
| benjamin lahaise | 12 | 1.55% | 1 | 2.13% |
| jerry chu | 9 | 1.16% | 2 | 4.26% |
| hideaki yoshifuji | 6 | 0.78% | 2 | 4.26% |
| ian morris | 1 | 0.13% | 1 | 2.13% |
| Total | 774 | 100.00% | 47 | 100.00% |
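For unlocked sockets with inet->recverr set, tcp_v4_err() stores the translated errno in sk->sk_err; otherwise it only lands in sk_err_soft. Userspace typically collects it with SO_ERROR after poll() reports the descriptor ready; a minimal sketch:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>

/* Fetch and clear the pending socket error that tcp_v4_err()
 * (or a timeout) recorded on the socket. */
static int pending_error(int fd)
{
	int err = 0;
	socklen_t len = sizeof(err);

	if (getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len) < 0)
		return -1;
	if (err)
		fprintf(stderr, "socket error: %s\n", strerror(err));
	return err;
}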
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
struct tcphdr *th = tcp_hdr(skb);
if (skb->ip_summed == CHECKSUM_PARTIAL) {
th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
skb->csum_start = skb_transport_header(skb) - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
} else {
th->check = tcp_v4_check(skb->len, saddr, daddr,
csum_partial(th,
th->doff << 2,
skb->csum));
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| linus torvalds | 37 | 33.04% | 1 | 10.00% |
| pre-git | 35 | 31.25% | 2 | 20.00% |
| herbert xu | 28 | 25.00% | 2 | 20.00% |
| arnaldo carvalho de melo | 9 | 8.04% | 2 | 20.00% |
| david s. miller | 1 | 0.89% | 1 | 10.00% |
| al viro | 1 | 0.89% | 1 | 10.00% |
| patrick mchardy | 1 | 0.89% | 1 | 10.00% |
| Total | 112 | 100.00% | 10 | 100.00% |
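__tcp_v4_send_check() folds the IPv4 pseudo-header (source, destination, protocol, TCP length) into the one's-complement checksum, either leaving it partial for hardware offload or finishing it in software via csum_partial(). A self-contained userspace sketch of the software path (inputs in host byte order; the result is stored into the header with htons()):

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>

/* One's-complement sum over the pseudo-header and the TCP segment,
 * mirroring csum_partial() + tcp_v4_check() in spirit. */
static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
			      const uint8_t *seg, size_t len)
{
	uint32_t sum = 0;
	size_t i;

	/* Pseudo-header: source, destination, zero+protocol, TCP length. */
	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += IPPROTO_TCP;
	sum += (uint32_t)len;

	/* Segment bytes, taken as big-endian 16-bit words. */
	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)seg[i] << 8) | seg[i + 1];
	if (len & 1)
		sum += (uint32_t)seg[len - 1] << 8;

	/* Fold the carries and complement. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}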
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
const struct inet_sock *inet = inet_sk(sk);
__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| herbert xu | 38 | 97.44% | 1 | 50.00% |
| eric dumazet | 1 | 2.56% | 1 | 50.00% |
| Total | 39 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(tcp_v4_send_check);
/*
* This routine will send an RST to the other tcp.
*
* Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
* for the reset.
* Answer: if a packet caused an RST, it is not for a socket
* existing in our system; if it is matched to a socket,
* it is just a duplicate segment or a bug in the other side's TCP.
* So we build the reply based only on the parameters
* that arrived with the segment.
* Exception: precedence violation. We do not implement it in any case.
*/
static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
{
const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
#ifdef CONFIG_TCP_MD5SIG
__be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
#endif
} rep;
struct ip_reply_arg arg;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key = NULL;
const __u8 *hash_location = NULL;
unsigned char newhash[16];
int genhash;
struct sock *sk1 = NULL;
#endif
struct net *net;
/* Never send a reset in response to a reset. */
if (th->rst)
return;
/* If sk is not NULL, it means we did a successful lookup and the incoming
* route had to be correct. prequeue might have dropped our dst.
*/
if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
return;
/* Swap the send and the receive. */
memset(&rep, 0, sizeof(rep));
rep.th.dest = th->source;
rep.th.source = th->dest;
rep.th.doff = sizeof(struct tcphdr) / 4;
rep.th.rst = 1;
if (th->ack) {
rep.th.seq = th->ack_seq;
} else {
rep.th.ack = 1;
rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
skb->len - (th->doff << 2));
}
memset(&arg, 0, sizeof(arg));
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
#ifdef CONFIG_TCP_MD5SIG
rcu_read_lock();
hash_location = tcp_parse_md5sig_option(th);
if (sk && sk_fullsock(sk)) {
key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
&ip_hdr(skb)->saddr, AF_INET);
} else if (hash_location) {
/*
* The active side is lost. Try to find the listening socket through
* the source port, and then find the md5 key through the listening socket.
* We do not lose security here:
* the incoming packet is checked against the md5 hash of the found key;
* no RST is generated if the md5 hash doesn't match.
*/
sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
ip_hdr(skb)->saddr,
th->source, ip_hdr(skb)->daddr,
ntohs(th->source), inet_iif(skb));
/* don't send rst if it can't find key */
if (!sk1)
goto out;
key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
&ip_hdr(skb)->saddr, AF_INET);
if (!key)
goto out;
genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0)
goto out;
}
if (key) {
rep.opt[0] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
/* Update length and the length the header thinks exists */
arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
rep.th.doff = arg.iov[0].iov_len / 4;
tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
key, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &rep.th);
}
#endif
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr, /* XXX */
arg.iov[0].iov_len, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
/* When the socket is gone, all binding information is lost and
* routing might fail. No choice here: if we choose to force the
* input interface, we will misroute in case of an asymmetric route.
*/
if (sk)
arg.bound_dev_if = sk->sk_bound_dev_if;
BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
offsetof(struct inet_timewait_sock, tw_bound_dev_if));
arg.tos = ip_hdr(skb)->tos;
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
__TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
local_bh_enable();
#ifdef CONFIG_TCP_MD5SIG
out:
rcu_read_unlock();
#endif
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 226 | 29.35% | 12 | 26.67% |
| hideaki yoshifuji | 171 | 22.21% | 2 | 4.44% |
| shawn lu | 149 | 19.35% | 2 | 4.44% |
| eric dumazet | 77 | 10.00% | 12 | 26.67% |
| florian westphal | 55 | 7.14% | 2 | 4.44% |
| arnaldo carvalho de melo | 18 | 2.34% | 3 | 6.67% |
| kovacs krisztian | 16 | 2.08% | 1 | 2.22% |
| david s. miller | 15 | 1.95% | 2 | 4.44% |
| tom herbert | 11 | 1.43% | 1 | 2.22% |
| pavel emelianov | 11 | 1.43% | 2 | 4.44% |
| ilpo jarvinen | 10 | 1.30% | 2 | 4.44% |
| alexey kuznetsov | 5 | 0.65% | 1 | 2.22% |
| craig gallek | 4 | 0.52% | 1 | 2.22% |
| adam langley | 1 | 0.13% | 1 | 2.22% |
| al viro | 1 | 0.13% | 1 | 2.22% |
| Total | 770 | 100.00% | 45 | 100.00% |
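The easiest way to watch tcp_v4_send_reset() in action is to aim a SYN at a port with no listener: the segment matches no socket, the RST comes back, and connect(2) fails with ECONNREFUSED. A tiny demonstration, assuming nothing is listening on the chosen port:

#include <arpa/inet.h>
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(47);		/* assumed-closed port */
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);

	/* The SYN reaches no socket, so tcp_v4_send_reset() answers
	 * with an RST and connect() fails with ECONNREFUSED. */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0 &&
	    errno == ECONNREFUSED)
		puts("got RST: connection refused");
	close(fd);
	return 0;
}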
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
outside socket context, is certainly ugly. What can I do?
*/
static void tcp_v4_send_ack(struct net *net,
struct sk_buff *skb, u32 seq, u32 ack,
u32 win, u32 tsval, u32 tsecr, int oif,
struct tcp_md5sig_key *key,
int reply_flags, u8 tos)
{
const struct tcphdr *th = tcp_hdr(skb);
struct {
struct tcphdr th;
__be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
#ifdef CONFIG_TCP_MD5SIG
+ (TCPOLEN_MD5SIG_ALIGNED >> 2)
#endif
];
} rep;
struct ip_reply_arg arg;
memset(&rep.th, 0, sizeof(struct tcphdr));
memset(&arg, 0, sizeof(arg));
arg.iov[0].iov_base = (unsigned char *)&rep;
arg.iov[0].iov_len = sizeof(rep.th);
if (tsecr) {
rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
(TCPOPT_TIMESTAMP << 8) |
TCPOLEN_TIMESTAMP);
rep.opt[1] = htonl(tsval);
rep.opt[2] = htonl(tsecr);
arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
}
/* Swap the send and the receive. */
rep.th.dest = th->source;
rep.th.source = th->dest;
rep.th.doff = arg.iov[0].iov_len / 4;
rep.th.seq = htonl(seq);
rep.th.ack_seq = htonl(ack);
rep.th.ack = 1;
rep.th.window = htons(win);
#ifdef CONFIG_TCP_MD5SIG
if (key) {
int offset = (tsecr) ? 3 : 0;
rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
(TCPOPT_NOP << 16) |
(TCPOPT_MD5SIG << 8) |
TCPOLEN_MD5SIG);
arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
rep.th.doff = arg.iov[0].iov_len/4;
tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
key, ip_hdr(skb)->saddr,
ip_hdr(skb)->daddr, &rep.th);
}
#endif
arg.flags = reply_flags;
arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
ip_hdr(skb)->saddr, /* XXX */
arg.iov[0].iov_len, IPPROTO_TCP, 0);
arg.csumoffset = offsetof(struct tcphdr, check) / 2;
if (oif)
arg.bound_dev_if = oif;
arg.tos = tos;
local_bh_disable();
ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
skb, &TCP_SKB_CB(skb)->header.h4.opt,
ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
&arg, arg.iov[0].iov_len);
__TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
local_bh_enable();
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 307 | 55.22% | 13 | 32.50% |
| hideaki yoshifuji | 140 | 25.18% | 4 | 10.00% |
| eric dumazet | 42 | 7.55% | 8 | 20.00% |
| arnaldo carvalho de melo | 19 | 3.42% | 4 | 10.00% |
| david s. miller | 15 | 2.70% | 2 | 5.00% |
| kovacs krisztian | 9 | 1.62% | 1 | 2.50% |
| patrick mchardy | 8 | 1.44% | 1 | 2.50% |
| andrey vagin | 8 | 1.44% | 1 | 2.50% |
| pavel emelianov | 3 | 0.54% | 2 | 5.00% |
| adam langley | 3 | 0.54% | 2 | 5.00% |
| craig schlenter | 1 | 0.18% | 1 | 2.50% |
| al viro | 1 | 0.18% | 1 | 2.50% |
| Total | 556 | 100.00% | 40 | 100.00% |
static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
{
struct inet_timewait_sock *tw = inet_twsk(sk);
struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
tcp_v4_send_ack(sock_net(sk), skb,
tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
tcp_time_stamp + tcptw->tw_ts_offset,
tcptw->tw_ts_recent,
tw->tw_bound_dev_if,
tcp_twsk_md5_key(tcptw),
tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
tw->tw_tos
);
inet_twsk_put(tw);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 42 | 42.86% | 6 | 46.15% |
| arnaldo carvalho de melo | 24 | 24.49% | 2 | 15.38% |
| hideaki yoshifuji | 9 | 9.18% | 1 | 7.69% |
| eric dumazet | 9 | 9.18% | 2 | 15.38% |
| kovacs krisztian | 8 | 8.16% | 1 | 7.69% |
| andrey vagin | 6 | 6.12% | 1 | 7.69% |
| Total | 98 | 100.00% | 13 | 100.00% |
static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
/* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
* sk->sk_state == TCP_SYN_RECV -> for Fast Open.
*/
u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
tcp_sk(sk)->snd_nxt;
tcp_v4_send_ack(sock_net(sk), skb, seq,
tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
tcp_time_stamp,
req->ts_recent,
0,
tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
AF_INET),
inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
ip_hdr(skb)->tos);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 32 | 26.89% | 5 | 31.25% |
| pre-git | 31 | 26.05% | 4 | 25.00% |
| jerry chu | 17 | 14.29% | 1 | 6.25% |
| hideaki yoshifuji | 13 | 10.92% | 1 | 6.25% |
| kovacs krisztian | 11 | 9.24% | 1 | 6.25% |
| arnaldo carvalho de melo | 8 | 6.72% | 2 | 12.50% |
| gui jianfeng | 5 | 4.20% | 1 | 6.25% |
| andrey vagin | 2 | 1.68% | 1 | 6.25% |
| Total | 119 | 100.00% | 16 | 100.00% |
/*
* Send a SYN-ACK after having received a SYN.
* This still operates on a request_sock only, not on a big
* socket.
*/
static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl,
struct request_sock *req,
struct tcp_fastopen_cookie *foc,
enum tcp_synack_type synack_type)
{
const struct inet_request_sock *ireq = inet_rsk(req);
struct flowi4 fl4;
int err = -1;
struct sk_buff *skb;
/* First, grab a route. */
if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
return -1;
skb = tcp_make_synack(sk, dst, req, foc, synack_type);
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
ireq->opt);
err = net_xmit_eval(err);
}
return err;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 90 | 58.82% | 4 | 20.00% |
| arnaldo carvalho de melo | 19 | 12.42% | 4 | 20.00% |
| eric dumazet | 11 | 7.19% | 4 | 20.00% |
| david s. miller | 7 | 4.58% | 1 | 5.00% |
| william allen simpson | 6 | 3.92% | 1 | 5.00% |
| octavian purdila | 6 | 3.92% | 2 | 10.00% |
| yuchung cheng | 5 | 3.27% | 1 | 5.00% |
| denis v. lunev | 4 | 2.61% | 1 | 5.00% |
| gerrit renker | 4 | 2.61% | 1 | 5.00% |
| herbert xu | 1 | 0.65% | 1 | 5.00% |
| Total | 153 | 100.00% | 20 | 100.00% |
/*
* IPv4 request_sock destructor.
*/
static void tcp_v4_reqsk_destructor(struct request_sock *req)
{
kfree(inet_rsk(req)->opt);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 16 | 76.19% | 3 | 60.00% |
| arnaldo carvalho de melo | 5 | 23.81% | 2 | 40.00% |
| Total | 21 | 100.00% | 5 | 100.00% |
#ifdef CONFIG_TCP_MD5SIG
/*
* RFC2385 MD5 checksumming requires a mapping of
* IP address->MD5 Key.
* We need to maintain these in the sk structure.
*/
/* Find the Key structure for an address. */
struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
const union tcp_md5_addr *addr,
int family)
{
const struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
unsigned int size = sizeof(struct in_addr);
const struct tcp_md5sig_info *md5sig;
/* caller either holds rcu_read_lock() or socket lock */
md5sig = rcu_dereference_check(tp->md5sig_info,
lockdep_sock_is_held(sk));
if (!md5sig)
return NULL;
#if IS_ENABLED(CONFIG_IPV6)
if (family == AF_INET6)
size = sizeof(struct in6_addr);
#endif
hlist_for_each_entry_rcu(key, &md5sig->head, node) {
if (key->family != family)
continue;
if (!memcmp(&key->addr, addr, size))
return key;
}
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 91 | 65.47% | 5 | 71.43% |
| hideaki yoshifuji | 47 | 33.81% | 1 | 14.29% |
| hannes frederic sowa | 1 | 0.72% | 1 | 14.29% |
| Total | 139 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(tcp_md5_do_lookup);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk)
{
const union tcp_md5_addr *addr;
addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
return tcp_md5_do_lookup(sk, addr, AF_INET);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 28 | 58.33% | 4 | 80.00% |
| hideaki yoshifuji | 20 | 41.67% | 1 | 20.00% |
| Total | 48 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(tcp_v4_md5_lookup);
/* This can be called on a newly created socket, from other files */
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
{
/* Add Key to the list */
struct tcp_md5sig_key *key;
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_info *md5sig;
key = tcp_md5_do_lookup(sk, addr, family);
if (key) {
/* Pre-existing entry - just update that one. */
memcpy(key->key, newkey, newkeylen);
key->keylen = newkeylen;
return 0;
}
md5sig = rcu_dereference_protected(tp->md5sig_info,
lockdep_sock_is_held(sk));
if (!md5sig) {
md5sig = kmalloc(sizeof(*md5sig), gfp);
if (!md5sig)
return -ENOMEM;
sk_nocaps_add(sk, NETIF_F_GSO_MASK);
INIT_HLIST_HEAD(&md5sig->head);
rcu_assign_pointer(tp->md5sig_info, md5sig);
}
key = sock_kmalloc(sk, sizeof(*key), gfp);
if (!key)
return -ENOMEM;
if (!tcp_alloc_md5sig_pool()) {
sock_kfree_s(sk, key, sizeof(*key));
return -ENOMEM;
}
memcpy(key->key, newkey, newkeylen);
key->keylen = newkeylen;
key->family = family;
memcpy(&key->addr, addr,
(family == AF_INET6) ? sizeof(struct in6_addr) :
sizeof(struct in_addr));
hlist_add_head_rcu(&key->node, &md5sig->head);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 132 | 48.53% | 6 | 46.15% |
| hideaki yoshifuji | 127 | 46.69% | 1 | 7.69% |
| arnaldo carvalho de melo | 4 | 1.47% | 1 | 7.69% |
| david s. miller | 3 | 1.10% | 1 | 7.69% |
| zheng yan | 3 | 1.10% | 1 | 7.69% |
| hannes frederic sowa | 1 | 0.37% | 1 | 7.69% |
| aydin arik | 1 | 0.37% | 1 | 7.69% |
| matthias dellweg | 1 | 0.37% | 1 | 7.69% |
| Total | 272 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(tcp_md5_do_add);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
{
struct tcp_md5sig_key *key;
key = tcp_md5_do_lookup(sk, addr, family);
if (!key)
return -ENOENT;
hlist_del_rcu(&key->node);
atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
kfree_rcu(key, rcu);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 46 | 60.53% | 2 | 50.00% |
| hideaki yoshifuji | 29 | 38.16% | 1 | 25.00% |
| aydin arik | 1 | 1.32% | 1 | 25.00% |
| Total | 76 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(tcp_md5_do_del);
static void tcp_clear_md5_list(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_md5sig_key *key;
struct hlist_node *n;
struct tcp_md5sig_info *md5sig;
md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
hlist_del_rcu(&key->node);
atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
kfree_rcu(key, rcu);
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 61 | 69.32% | 3 | 60.00% |
| hideaki yoshifuji | 26 | 29.55% | 1 | 20.00% |
| stephen hemminger | 1 | 1.14% | 1 | 20.00% |
| Total | 88 | 100.00% | 5 | 100.00% |
static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
int optlen)
{
struct tcp_md5sig cmd;
struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
if (optlen < sizeof(cmd))
return -EINVAL;
if (copy_from_user(&cmd, optval, sizeof(cmd)))
return -EFAULT;
if (sin->sin_family != AF_INET)
return -EINVAL;
if (!cmd.tcpm_keylen)
return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
AF_INET);
if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
return -EINVAL;
return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
GFP_KERNEL);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| hideaki yoshifuji | 128 | 84.77% | 1 | 50.00% |
| eric dumazet | 23 | 15.23% | 1 | 50.00% |
| Total | 151 | 100.00% | 2 | 100.00% |
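tcp_v4_parse_md5_keys() is the kernel side of the TCP_MD5SIG socket option (RFC 2385). From userspace a key is installed per peer address through a struct tcp_md5sig; a minimal sketch, assuming glibc's <netinet/tcp.h> definitions:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

/* Install an RFC 2385 MD5 key for one peer on a TCP socket; the
 * call lands in tcp_v4_parse_md5_keys() -> tcp_md5_do_add(). */
static int set_md5_key(int fd, const struct sockaddr_in *peer,
		       const char *key, size_t keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = (unsigned short)keylen; /* <= TCP_MD5SIG_MAXKEYLEN */
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}

Passing tcpm_keylen == 0 takes the tcp_md5_do_del() branch instead, removing the key for that peer.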
static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
__be32 daddr, __be32 saddr, int nbytes)
{
struct tcp4_pseudohdr *bp;
struct scatterlist sg;
bp = &hp->md5_blk.ip4;
/*
* 1. the TCP pseudo-header (in the order: source IP address,
* destination IP address, zero-padded protocol number, and
* segment length)
*/
bp->saddr = saddr;
bp->daddr = daddr;
bp->pad = 0;
bp->protocol = IPPROTO_TCP;
bp->len = cpu_to_be16(nbytes);
sg_init_one(&sg, bp, sizeof(*bp));
ahash_request_set_crypt(hp->md5_req, &sg, NULL, sizeof(*bp));
return crypto_ahash_update(hp->md5_req);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| hideaki yoshifuji | 68 | 60.71% | 2 | 50.00% |
| adam langley | 32 | 28.57% | 1 | 25.00% |
| herbert xu | 12 | 10.71% | 1 | 25.00% |
| Total | 112 | 100.00% | 4 | 100.00% |
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
__be32 daddr, __be32 saddr, const struct tcphdr *th)
{
struct tcp_md5sig_pool *hp;
struct ahash_request *req;
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
if (crypto_ahash_init(req))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
ahash_request_set_crypt(req, NULL, md5_hash, 0);
if (crypto_ahash_final(req))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adam langley | 104 | 66.24% | 1 | 16.67% |
| hideaki yoshifuji | 32 | 20.38% | 2 | 33.33% |
| herbert xu | 19 | 12.10% | 1 | 16.67% |
| eric dumazet | 2 | 1.27% | 2 | 33.33% |
| Total | 157 | 100.00% | 6 | 100.00% |
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk,
const struct sk_buff *skb)
{
struct tcp_md5sig_pool *hp;
struct ahash_request *req;
const struct tcphdr *th = tcp_hdr(skb);
__be32 saddr, daddr;
if (sk) { /* valid for establish/request sockets */
saddr = sk->sk_rcv_saddr;
daddr = sk->sk_daddr;
} else {
const struct iphdr *iph = ip_hdr(skb);
saddr = iph->saddr;
daddr = iph->daddr;
}
hp = tcp_get_md5sig_pool();
if (!hp)
goto clear_hash_noput;
req = hp->md5_req;
if (crypto_ahash_init(req))
goto clear_hash;
if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
goto clear_hash;
if (tcp_md5_hash_header(hp, th))
goto clear_hash;
if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
goto clear_hash;
if (tcp_md5_hash_key(hp, key))
goto clear_hash;
ahash_request_set_crypt(req, NULL, md5_hash, 0);
if (crypto_ahash_final(req))
goto clear_hash;
tcp_put_md5sig_pool();
return 0;
clear_hash:
tcp_put_md5sig_pool();
clear_hash_noput:
memset(md5_hash, 0, 16);
return 1;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| adam langley | 141 | 60.52% | 1 | 20.00% |
| hideaki yoshifuji | 66 | 28.33% | 1 | 20.00% |
| herbert xu | 19 | 8.15% | 1 | 20.00% |
| eric dumazet | 7 | 3.00% | 2 | 40.00% |
| Total | 233 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
#endif
/* Called with rcu_read_lock() */
static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
const struct sk_buff *skb)
{
#ifdef CONFIG_TCP_MD5SIG
/*
* This gets called for each TCP segment that arrives
* so we want to be efficient.
* We have 3 drop cases:
* o No MD5 hash and one expected.
* o MD5 hash and we're not expecting one.
* o MD5 hash and it's wrong.
*/
const __u8 *hash_location = NULL;
struct tcp_md5sig_key *hash_expected;
const struct iphdr *iph = ip_hdr(skb);
const struct tcphdr *th = tcp_hdr(skb);
int genhash;
unsigned char newhash[16];
hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
AF_INET);
hash_location = tcp_parse_md5sig_option(th);
/* We've parsed the options - do we have a hash? */
if (!hash_expected && !hash_location)
return false;
if (hash_expected && !hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
return true;
}
if (!hash_expected && hash_location) {
NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
return true;
}
/* Okay, so this is hash_expected and hash_location -
* so we need to calculate the checksum.
*/
genhash = tcp_v4_md5_hash_skb(newhash,
hash_expected,
NULL, skb);
if (genhash || memcmp(hash_location, newhash, 16) != 0) {
net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
&iph->saddr, ntohs(th->source),
&iph->daddr, ntohs(th->dest),
genhash ? " tcp_v4_calc_md5_hash failed"
: "");
return true;
}
return false;
#endif
return false;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| hideaki yoshifuji | 174 | 77.33% | 2 | 12.50% |
| eric dumazet | 29 | 12.89% | 7 | 43.75% |
| arnaldo carvalho de melo | 7 | 3.11% | 2 | 12.50% |
| david s. miller | 6 | 2.67% | 1 | 6.25% |
| harvey harrison | 3 | 1.33% | 1 | 6.25% |
| pre-git | 3 | 1.33% | 1 | 6.25% |
| adam langley | 2 | 0.89% | 1 | 6.25% |
| joe perches | 1 | 0.44% | 1 | 6.25% |
| Total | 225 | 100.00% | 16 | 100.00% |
static void tcp_v4_init_req(struct request_sock *req,
const struct sock *sk_listener,
struct sk_buff *skb)
{
struct inet_request_sock *ireq = inet_rsk(req);
sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
ireq->no_srccheck = inet_sk(sk_listener)->transparent;
ireq->opt = tcp_v4_save_options(skb);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| octavian purdila | 63 | 76.83% | 1 | 33.33% |
| eric dumazet | 19 | 23.17% | 2 | 66.67% |
| Total | 82 | 100.00% | 3 | 100.00% |
static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
struct flowi *fl,
const struct request_sock *req,
bool *strict)
{
struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
if (strict) {
if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
*strict = true;
else
*strict = false;
}
return dst;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| octavian purdila | 84 | 98.82% | 1 | 50.00% |
| eric dumazet | 1 | 1.18% | 1 | 50.00% |
| Total | 85 | 100.00% | 2 | 100.00% |
struct request_sock_ops tcp_request_sock_ops __read_mostly = {
.family = PF_INET,
.obj_size = sizeof(struct tcp_request_sock),
.rtx_syn_ack = tcp_rtx_synack,
.send_ack = tcp_v4_reqsk_send_ack,
.destructor = tcp_v4_reqsk_destructor,
.send_reset = tcp_v4_send_reset,
.syn_ack_timeout = tcp_syn_ack_timeout,
};
static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
.mss_clamp = TCP_MSS_DEFAULT,
#ifdef CONFIG_TCP_MD5SIG
.req_md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
#endif
.init_req = tcp_v4_init_req,
#ifdef CONFIG_SYN_COOKIES
.cookie_init_seq = cookie_v4_init_sequence,
#endif
.route_req = tcp_v4_route_req,
.init_seq = tcp_v4_init_sequence,
.send_synack = tcp_v4_send_synack,
};
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
/* Never answer SYNs sent to broadcast or multicast */
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop;
return tcp_conn_request(&tcp_request_sock_ops,
&tcp_request_sock_ipv4_ops, sk, skb);
drop:
tcp_listendrop(sk);
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 39 | 67.24% | 4 | 30.77% |
| vijay subramanian | 4 | 6.90% | 1 | 7.69% |
| eric dumazet | 4 | 6.90% | 2 | 15.38% |
| octavian purdila | 4 | 6.90% | 2 | 15.38% |
| arnaldo carvalho de melo | 3 | 5.17% | 2 | 15.38% |
| paul moore | 3 | 5.17% | 1 | 7.69% |
| hannes frederic sowa | 1 | 1.72% | 1 | 7.69% |
| Total | 58 | 100.00% | 13 | 100.00% |
EXPORT_SYMBOL(tcp_v4_conn_request);
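Every SYN arriving at a listening socket lands in tcp_v4_conn_request(), which (after the broadcast/multicast check) hands off to the address-family-independent tcp_conn_request(). The userspace setup that puts a socket in that path (port number chosen arbitrarily):

#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	int fd = socket(AF_INET, SOCK_STREAM, 0), c;

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}
	/* listen() moves the socket to TCP_LISTEN; each arriving SYN now
	 * runs tcp_v4_conn_request() and queues a request sock until the
	 * handshake completes and accept() returns the child socket. */
	listen(fd, 128);
	c = accept(fd, NULL, NULL);
	if (c >= 0) {
		puts("handshake completed");
		close(c);
	}
	close(fd);
	return 0;
}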
/*
* The three way handshake has completed - we got a valid synack -
* now create the new socket.
*/
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst,
struct request_sock *req_unhash,
bool *own_req)
{
struct inet_request_sock *ireq;
struct inet_sock *newinet;
struct tcp_sock *newtp;
struct sock *newsk;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *key;
#endif
struct ip_options_rcu *inet_opt;
if (sk_acceptq_is_full(sk))
goto exit_overflow;
newsk = tcp_create_openreq_child(sk, req, skb);
if (!newsk)
goto exit_nonewsk;
newsk->sk_gso_type = SKB_GSO_TCPV4;
inet_sk_rx_dst_set(newsk, skb);
newtp = tcp_sk(newsk);
newinet = inet_sk(newsk);
ireq = inet_rsk(req);
sk_daddr_set(newsk, ireq->ir_rmt_addr);
sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
newsk->sk_bound_dev_if = ireq->ir_iif;
newinet->inet_saddr = ireq->ir_loc_addr;
inet_opt = ireq->opt;
rcu_assign_pointer(newinet->inet_opt, inet_opt);
ireq->opt = NULL;
newinet->mc_index = inet_iif(skb);
newinet->mc_ttl = ip_hdr(skb)->ttl;
newinet->rcv_tos = ip_hdr(skb)->tos;
inet_csk(newsk)->icsk_ext_hdr_len = 0;
if (inet_opt)
inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
newinet->inet_id = newtp->write_seq ^ jiffies;
if (!dst) {
dst = inet_csk_route_child_sock(sk, newsk, req);
if (!dst)
goto put_and_exit;
} else {
/* syncookie case : see end of cookie_v4_check() */
}
sk_setup_caps(newsk, dst);
tcp_ca_openreq_child(newsk, dst);
tcp_sync_mss(newsk, dst_mtu(dst));
newtp->advmss = dst_metric_advmss(dst);
if (tcp_sk(sk)->rx_opt.user_mss &&
tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
tcp_initialize_rcv_mss(newsk);
#ifdef CONFIG_TCP_MD5SIG
/* Copy over the MD5 key from the original socket */
key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
AF_INET);
if (key) {
/*
* We're using one, so create a matching key
* on the newsk structure. If we fail to get
* memory, then we end up not copying the key
* across. Shucks.
*/
tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
AF_INET, key->key, key->keylen, GFP_ATOMIC);
sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
}
#endif
if (__inet_inherit_port(sk, newsk) < 0)
goto put_and_exit;
*own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
if (*own_req)
tcp_move_syn(newtp, req);
return newsk;
exit_overflow:
NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
exit_nonewsk:
dst_release(dst);
exit:
tcp_listendrop(sk);
return NULL;
put_and_exit:
inet_csk_prepare_forced_close(newsk);
tcp_done(newsk);
goto exit;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 148 | 28.79% | 13 | 23.21% |
| eric dumazet | 115 | 22.37% | 15 | 26.79% |
| david s. miller | 53 | 10.31% | 4 | 7.14% |
| hideaki yoshifuji | 40 | 7.78% | 2 | 3.57% |
| tom quetchenbach | 37 | 7.20% | 1 | 1.79% |
| arnaldo carvalho de melo | 35 | 6.81% | 8 | 14.29% |
| balazs scheidler | 20 | 3.89% | 1 | 1.79% |
| jiri benc | 11 | 2.14% | 1 | 1.79% |
| pavel emelianov | 8 | 1.56% | 1 | 1.79% |
| david ahern | 8 | 1.56% | 1 | 1.79% |
| linus torvalds | 8 | 1.56% | 1 | 1.79% |
| neal cardwell | 7 | 1.36% | 1 | 1.79% |
| herbert xu | 7 | 1.36% | 2 | 3.57% |
| daniel borkmann | 7 | 1.36% | 1 | 1.79% |
| alexey kuznetsov | 4 | 0.78% | 1 | 1.79% |
| adam langley | 3 | 0.58% | 1 | 1.79% |
| christoph paasch | 2 | 0.39% | 1 | 1.79% |
| john dykstra | 1 | 0.19% | 1 | 1.79% |
| Total | 514 | 100.00% | 56 | 100.00% |
EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
{
#ifdef CONFIG_SYN_COOKIES
const struct tcphdr *th = tcp_hdr(skb);
if (!th->syn)
sk = cookie_v4_check(sk, skb);
#endif
return sk;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 45 | 84.91% | 3 | 50.00% |
| eric dumazet | 5 | 9.43% | 2 | 33.33% |
| arnaldo carvalho de melo | 3 | 5.66% | 1 | 16.67% |
| Total | 53 | 100.00% | 6 | 100.00% |
/* The socket must have its spinlock held when we get
* here, unless it is a TCP_LISTEN socket.
*
* We have a potential double-lock case here, so even when
* doing backlog processing we use the BH locking scheme.
* This is because we cannot sleep with the original spinlock
* held.
*/
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
{
struct sock *rsk;
if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
struct dst_entry *dst = sk->sk_rx_dst;
sock_rps_save_rxhash(sk, skb);
sk_mark_napi_id(sk, skb);
if (dst) {
if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
!dst->ops->check(dst, 0)) {
dst_release(dst);
sk->sk_rx_dst = NULL;
}
}
tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
return 0;
}
if (tcp_checksum_complete(skb))
goto csum_err;
if (sk->sk_state == TCP_LISTEN) {
struct sock *nsk = tcp_v4_cookie_check(sk, skb);
if (!nsk)
goto discard;
if (nsk != sk) {
sock_rps_save_rxhash(nsk, skb);
sk_mark_napi_id(nsk, skb);
if (tcp_child_process(sk, nsk, skb)) {
rsk = nsk;
goto reset;
}
return 0;
}
} else
sock_rps_save_rxhash(sk, skb);
if (tcp_rcv_state_process(sk, skb)) {
rsk = sk;
goto reset;
}
return 0;
reset:
tcp_v4_send_reset(rsk, skb);
discard:
kfree_skb(skb);
/* Be careful here. If this function gets more complicated and
* gcc suffers from register pressure on the x86, sk (in %ebx)
* might be destroyed here. This current version compiles correctly,
* but you have been warned.
*/
return 0;
csum_err:
TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
goto discard;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| pre-git | 145 | 52.54% | 9 | 34.62% |
| eric dumazet | 69 | 25.00% | 9 | 34.62% |
| david s. miller | 29 | 10.51% | 1 | 3.85% |
| hideaki yoshifuji | 21 | 7.61% | 2 | 7.69% |
| arnaldo carvalho de melo | 6 | 2.17% | 3 | 11.54% |
| pavel emelianov | 5 | 1.81% | 1 | 3.85% |
| ian morris | 1 | 0.36% | 1 | 3.85% |
| Total | 276 | 100.00% | 26 | 100.00% |
EXPORT_SYMBOL(tcp_v4_do_rcv);
void tcp_v4_early_demux(struct sk_buff *skb)
{
const struct iphdr *iph;
const struct tcphdr *th;
struct sock *sk;
if (skb->pkt_type != PACKET_HOST)
return;
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
return;
iph = ip_hdr(skb);
th = tcp_hdr(skb);
if (th->doff < sizeof(struct tcphdr) / 4)
return;
sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
iph->saddr, th->source,
iph->daddr, ntohs(th->dest),
skb->skb_iif);
if (sk) {
skb->sk = sk;
skb->destructor = sock_edemux;
if (sk_fullsock(sk)) {
struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
if (dst)
dst = dst_check(dst, 0);
if (dst &&
inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
skb_dst_set_noref(skb, dst);
}
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| david s. miller | 172 | 87.31% | 4 | 40.00% |
| eric dumazet | 19 | 9.64% | 4 | 40.00% |
| vijay subramanian | 3 | 1.52% | 1 | 10.00% |
| michal kubecek | 3 | 1.52% | 1 | 10.00% |
| Total | 197 | 100.00% | 10 | 100.00% |
/* Packet is added to the VJ-style prequeue for processing in process
* context, if a reader task is waiting. Apparently, this exciting
* idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
* failed somewhere. Latency? Burstiness? Well, at least now we will
* see why it failed. 8)8) --ANK
*
*/
*/
bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
if (sysctl_tcp_low_latency || !tp->ucopy.task)
return false;
if (skb->len <= tcp_hdrlen(skb) &&
skb_queue_len(&tp->ucopy.prequeue) == 0)
return false;
/* Before escaping the RCU protected region, we need to take care of the
* skb dst. Prequeue is only enabled for established sockets.
* For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
* Instead of doing a full sk_rx_dst validity check here, let's perform
* an optimistic one.
*/
if (likely(sk->sk_rx_dst))
skb_dst_drop(skb);
else
skb_dst_force_safe(skb);
__skb_queue_tail(&tp->ucopy.prequeue, skb);
tp->ucopy.memory += skb->truesize;
if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
struct sk_buff *skb1;
BUG_ON(sock_owned_by_user(sk));
__NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
skb_queue_len(&tp->ucopy.prequeue));
while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
sk_backlog_rcv(sk, skb1);
tp->ucopy.memory = 0;
} else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
wake_up_interruptible_sync_poll(sk_sleep(sk),
POLLIN | POLLRDNORM | POLLRDBAND);
if (!inet_csk_ack_scheduled(sk))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
(3 * tcp_rto_min(sk)) / 4,
TCP_RTO_MAX);
}
return true;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
| --- | --- | --- | --- | --- |
| eric dumazet | 266 | 98.52% | 4 | 80.00% |
| david s. miller | 4 | 1.48% | 1 | 20.00% |
| Total | 270 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(tcp_prequeue);
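tcp_prequeue() is bypassed entirely when sysctl_tcp_low_latency is set, trading the deferred process-context handling for lower per-segment latency. The knob is net.ipv4.tcp_low_latency; a sketch of flipping it:

#include <stdio.h>

int main(void)
{
	/* 1 = skip the prequeue (tcp_prequeue() returns false early),
	 * processing segments immediately in softirq context. */
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_low_latency", "w");

	if (!f) {
		perror("tcp_low_latency");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}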
/*
* From tcp_input.c
*/
int tcp_v4_rcv(struct sk_buff *skb)
{
struct net *net = dev_net(skb->dev);
const struct iphdr *iph;
const struct tcphdr *th;
bool refcounted;
struct sock *sk;
int ret;
if (skb->pkt_type != PACKET_HOST)
goto discard_it;
/* Count it even if it's bad */
__TCP_INC_STATS(net, TCP_MIB_INSEGS);
if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
goto discard_it;
th = (const struct tcphdr *)skb->data;
if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
goto bad_packet;
if (!pskb_may_pull(skb, th->doff * 4))
goto discard_it;
/* An explanation is required here, I think.
* Packet length and doff are validated by header prediction,
* provided the case of th->doff == 0 is eliminated.
* So, we defer the checks. */
if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
goto csum_error;
th = (const struct tcphdr *)skb->data;
iph = ip_hdr(skb);
/* This is tricky: We move IPCB to its correct location in TCP_SKB_CB().
* barrier() makes sure the compiler won't play fool^Waliasing games.
*/
memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
sizeof(struct inet_skb_parm));
barrier();
TCP_SKB_CB(skb)->seq = ntohl(th->seq);
TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
skb->len - th->doff * 4);
TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
TCP_SKB_CB(skb)->tcp_tw_isn = 0;
TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
TCP_SKB_CB(skb)->sacked = 0;
lookup:
sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
th->dest, &refcounted);
if (!sk)
goto no_tcp_socket;
process:
if (sk->sk_state == TCP_TIME_WAIT)
goto do_time_wait;
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
struct sock *nsk;
sk = req->rsk_listener;
if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
reqsk_put(req);
goto discard_it;
}
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
/* We own a reference on the listener, increase it again
* as we might lose it too soon.
*/
sock_hold(sk);
refcounted = true;
nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
goto discard_and_relse;
}
if (nsk == sk) {
reqsk_put(req);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
goto discard_and_relse;
} else {
sock_put(sk);
return 0;
}
}
if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
__NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
goto discard_and_relse;
}
if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
if (tcp_v4_inbound_md5_hash(sk, skb))
goto discard_and_relse;
nf_reset(skb);
if (sk_filter(sk, skb))
goto discard_and_relse;
skb->dev = NULL;
if (sk->sk_state == TCP_LISTEN) {
ret = tcp_v4_do_rcv(sk, skb);
goto put_and_return;
}
sk_incoming_cpu_update(sk);
bh_lock_sock_nested(sk);
tcp_segs_in(tcp_sk(sk), skb);
ret = 0;
if (!sock_owned_by_user(sk)) {
if (!tcp_prequeue(sk, skb))
ret = tcp_v4_do_rcv(sk, skb);
} else if (unlikely(sk_add_backlog(sk, skb,
sk->sk_rcvbuf + sk->sk_sndbuf))) {
bh_unlock_sock(sk);
__NET_INC_STATS(net, LINUX_MIB_TCPBACKLOGDROP);
goto discard_and_relse;
}
bh_unlock_sock(sk);
put_and_return:
if (refcounted)
sock_put(sk);
return ret;
no_tcp_socket:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
if (tcp_checksum_complete(skb)) {
csum_error:
__TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
bad_packet:
__TCP_INC_STATS(net, TCP_MIB_INERRS);
} else {
tcp_v4_send_reset(NULL, skb);
}
discard_it:
/* Discard frame. */
kfree_skb(skb);
return 0;
discard_and_relse:
sk_drops_add(sk, skb);
if (refcounted)
sock_put(sk);
goto discard_it;
do_time_wait:
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
inet_twsk_put(inet_twsk(sk));
goto discard_it;
}
if (tcp_checksum_complete(skb)) {
inet_twsk_put(inet_twsk(sk));
goto csum_error;
}
switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
case TCP_TW_SYN: {
struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
&tcp_hashinfo, skb,
__tcp_hdrlen(th),
iph->saddr, th->source,
iph->daddr, th->dest,
inet_iif(skb));
if (sk2) {
inet_twsk_deschedule_put(inet_twsk(sk));
sk = sk2;
refcounted = false;
goto process;
}
/* Fall through to ACK */
}
case TCP_TW_ACK:
tcp_v4_timewait_ack(sk, skb);
break;
case TCP_TW_RST:
tcp_v4_send_reset(sk, skb);
inet_twsk_deschedule_put(inet_twsk(sk));
goto discard_it;
case TCP_TW_SUCCESS:;
}
goto discard_it;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 364 | 37.07% | 15 | 22.06% |
eric dumazet | eric dumazet | 352 | 35.85% | 21 | 30.88% |
linus torvalds | linus torvalds | 48 | 4.89% | 1 | 1.47% |
alexey kuznetsov | alexey kuznetsov | 35 | 3.56% | 1 | 1.47% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 27 | 2.75% | 7 | 10.29% |
hideaki yoshifuji | hideaki yoshifuji | 22 | 2.24% | 5 | 7.35% |
stephen hemminger | stephen hemminger | 17 | 1.73% | 2 | 2.94% |
florian westphal | florian westphal | 16 | 1.63% | 1 | 1.47% |
yi zhu | yi zhu | 14 | 1.43% | 2 | 2.94% |
tom herbert | tom herbert | 13 | 1.32% | 2 | 2.94% |
craig gallek | craig gallek | 12 | 1.22% | 1 | 1.47% |
james morris | james morris | 12 | 1.22% | 1 | 1.47% |
dmitry popov | dmitry popov | 12 | 1.22% | 1 | 1.47% |
nivedita singhvi | nivedita singhvi | 11 | 1.12% | 1 | 1.47% |
pavel emelianov | pavel emelianov | 8 | 0.81% | 2 | 2.94% |
marcelo ricardo leitner | marcelo ricardo leitner | 8 | 0.81% | 1 | 1.47% |
patrick mchardy | patrick mchardy | 5 | 0.51% | 1 | 1.47% |
benjamin lahaise | benjamin lahaise | 3 | 0.31% | 1 | 1.47% |
martin kafai lau | martin kafai lau | 2 | 0.20% | 1 | 1.47% |
ingo molnar | ingo molnar | 1 | 0.10% | 1 | 1.47% |
| Total | 982 | 100.00% | 68 | 100.00% |
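One subtlety worth calling out in tcp_v4_rcv(): end_seq adds th->syn and th->fin to the payload length because SYN and FIN each consume one unit of sequence space. A standalone userspace sketch of the same arithmetic (the helper name is invented for illustration, not kernel code):
#include <stdint.h>
#include <stdio.h>
/* Sketch of the end_seq arithmetic in tcp_v4_rcv(): SYN and FIN each occupy
 * one sequence number, so end_seq = seq + syn + fin + payload bytes. */
static uint32_t tcp_end_seq(uint32_t seq, unsigned syn, unsigned fin,
                            uint32_t skb_len, uint32_t doff)
{
        uint32_t payload = skb_len - doff * 4;  /* bytes after the TCP header */
        return seq + syn + fin + payload;
}
int main(void)
{
        /* A bare SYN: 20-byte header (doff = 5), no payload. */
        printf("%u\n", tcp_end_seq(1000, 1, 0, 20, 5));  /* prints 1001 */
        return 0;
}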
static struct timewait_sock_ops tcp_timewait_sock_ops = {
.twsk_obj_size = sizeof(struct tcp_timewait_sock),
.twsk_unique = tcp_twsk_unique,
.twsk_destructor= tcp_twsk_destructor,
};
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
if (dst && dst_hold_safe(dst)) {
sk->sk_rx_dst = dst;
inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric dumazet | eric dumazet | 54 | 100.00% | 3 | 100.00% |
| Total | 54 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(inet_sk_rx_dst_set);
const struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
.sk_rx_dst_set = inet_sk_rx_dst_set,
.conn_request = tcp_v4_conn_request,
.syn_recv_sock = tcp_v4_syn_recv_sock,
.net_header_len = sizeof(struct iphdr),
.setsockopt = ip_setsockopt,
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
.bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
#endif
.mtu_reduced = tcp_v4_mtu_reduced,
};
EXPORT_SYMBOL(ipv4_specific);
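ipv4_specific is only ever reached through icsk->icsk_af_ops, which is what lets the bulk of TCP stay address-family independent. A minimal userspace sketch of that dispatch pattern (illustrative types and names, not kernel API):
#include <stdio.h>
/* Sketch of the icsk_af_ops indirection: af-independent code calls through
 * a const ops table, so the same caller serves IPv4 and IPv6. */
struct af_ops {
        int (*queue_xmit)(const char *pkt);
        unsigned int net_header_len;
};
static int v4_xmit(const char *pkt) { printf("v4: %s\n", pkt); return 0; }
static const struct af_ops v4_ops = { v4_xmit, 20 /* sizeof(struct iphdr) */ };
static int send_segment(const struct af_ops *ops, const char *pkt)
{
        return ops->queue_xmit(pkt);   /* never names a family directly */
}
int main(void) { return send_segment(&v4_ops, "SYN"); }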
#ifdef CONFIG_TCP_MD5SIG
static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
.md5_lookup = tcp_v4_md5_lookup,
.calc_md5_hash = tcp_v4_md5_hash_skb,
.md5_parse = tcp_v4_parse_md5_keys,
};
#endif
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
static int tcp_v4_init_sock(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
tcp_init_sock(sk);
icsk->icsk_af_ops = &ipv4_specific;
#ifdef CONFIG_TCP_MD5SIG
tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
#endif
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 21 | 41.18% | 1 | 12.50% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 11 | 21.57% | 2 | 25.00% |
hideaki yoshifuji | hideaki yoshifuji | 11 | 21.57% | 1 | 12.50% |
david s. miller | david s. miller | 4 | 7.84% | 1 | 12.50% |
linus torvalds | linus torvalds | 2 | 3.92% | 1 | 12.50% |
thomas graf | thomas graf | 1 | 1.96% | 1 | 12.50% |
neal cardwell | neal cardwell | 1 | 1.96% | 1 | 12.50% |
| Total | 51 | 100.00% | 8 | 100.00% |
void tcp_v4_destroy_sock(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_clear_xmit_timers(sk);
tcp_cleanup_congestion_control(sk);
/* Clean up the write buffer. */
tcp_write_queue_purge(sk);
/* Cleans up our, hopefully empty, out_of_order_queue. */
__skb_queue_purge(&tp->out_of_order_queue);
#ifdef CONFIG_TCP_MD5SIG
/* Clean up the MD5 key list, if any */
if (tp->md5sig_info) {
tcp_clear_md5_list(sk);
kfree_rcu(tp->md5sig_info, rcu);
tp->md5sig_info = NULL;
}
#endif
/* Clean up the prequeue; it really must be empty */
__skb_queue_purge(&tp->ucopy.prequeue);
/* Clean up a referenced TCP bind bucket. */
if (inet_csk(sk)->icsk_bind_hash)
inet_put_port(sk);
BUG_ON(tp->fastopen_rsk);
/* If the socket was aborted during a connect operation */
tcp_free_fastopen_req(tp);
tcp_saved_syn_free(tp);
local_bh_disable();
sk_sockets_allocated_dec(sk);
local_bh_enable();
if (mem_cgroup_sockets_enabled && sk->sk_memcg)
sock_release_memcg(sk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pre-git | pre-git | 56 | 38.10% | 6 | 21.43% |
hideaki yoshifuji | hideaki yoshifuji | 28 | 19.05% | 1 | 3.57% |
eric dumazet | eric dumazet | 15 | 10.20% | 4 | 14.29% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 8 | 5.44% | 5 | 17.86% |
johannes weiner | johannes weiner | 8 | 5.44% | 2 | 7.14% |
glauber costa | glauber costa | 7 | 4.76% | 2 | 7.14% |
jerry chu | jerry chu | 7 | 4.76% | 1 | 3.57% |
yuchung cheng | yuchung cheng | 6 | 4.08% | 1 | 3.57% |
david s. miller | david s. miller | 4 | 2.72% | 2 | 7.14% |
stephen hemminger | stephen hemminger | 4 | 2.72% | 1 | 3.57% |
chris leech | chris leech | 2 | 1.36% | 1 | 3.57% |
brian haley | brian haley | 1 | 0.68% | 1 | 3.57% |
linus torvalds | linus torvalds | 1 | 0.68% | 1 | 3.57% |
| Total | 147 | 100.00% | 28 | 100.00% |
EXPORT_SYMBOL(tcp_v4_destroy_sock);
#ifdef CONFIG_PROC_FS
/* Proc filesystem TCP sock list dumping. */
/*
* Get the next listener socket after cur. If cur is NULL, get the first
* socket, starting from the bucket given in st->bucket; when st->bucket is
* zero, the very first socket in the hash table is returned.
*/
static void *listening_get_next(struct seq_file *seq, void *cur)
{
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
struct inet_listen_hashbucket *ilb;
struct inet_connection_sock *icsk;
struct sock *sk = cur;
if (!sk) {
get_head:
ilb = &tcp_hashinfo.listening_hash[st->bucket];
spin_lock_bh(&ilb->lock);
sk = sk_head(&ilb->head);
st->offset = 0;
goto get_sk;
}
ilb = &tcp_hashinfo.listening_hash[st->bucket];
++st->num;
++st->offset;
sk = sk_next(sk);
get_sk:
sk_for_each_from(sk) {
if (!net_eq(sock_net(sk), net))
continue;
if (sk->sk_family == st->family)
return sk;
icsk = inet_csk(sk);
}
spin_unlock_bh(&ilb->lock);
st->offset = 0;
if (++st->bucket < INET_LHTABLE_SIZE)
goto get_head;
return NULL;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric dumazet | eric dumazet | 90 | 45.00% | 3 | 16.67% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 55 | 27.50% | 5 | 27.78% |
pre-git | pre-git | 16 | 8.00% | 2 | 11.11% |
pavel emelianov | pavel emelianov | 12 | 6.00% | 1 | 5.56% |
herbert xu | herbert xu | 11 | 5.50% | 2 | 11.11% |
hideaki yoshifuji | hideaki yoshifuji | 7 | 3.50% | 2 | 11.11% |
tom herbert | tom herbert | 6 | 3.00% | 1 | 5.56% |
daniel lezcano | daniel lezcano | 2 | 1.00% | 1 | 5.56% |
david s. miller | david s. miller | 1 | 0.50% | 1 | 5.56% |
| Total | 200 | 100.00% | 18 | 100.00% |
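listening_get_next() is easier to follow as the generic pattern it instantiates: walk one hash chain, and when it is exhausted, advance to the next bucket, keeping a (bucket, offset) cursor so a later call can resume. A userspace sketch under that reading (all names illustrative, not kernel API):
#include <stddef.h>
struct node { struct node *next; int family; };
struct table { struct node *bucket[16]; };
static struct node *next_match(struct table *t, size_t *bucket,
                               struct node *cur, int family)
{
        struct node *n = cur ? cur->next : t->bucket[*bucket];
        for (;;) {
                for (; n; n = n->next)
                        if (n->family == family)
                                return n;       /* match in this bucket */
                if (++*bucket >= 16)
                        return NULL;            /* table exhausted */
                n = t->bucket[*bucket];         /* head of the next chain */
        }
}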
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
struct tcp_iter_state *st = seq->private;
void *rc;
st->bucket = 0;
st->offset = 0;
rc = listening_get_next(seq, NULL);
while (rc && *pos) {
rc = listening_get_next(seq, rc);
--*pos;
}
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 41 | 54.67% | 1 | 25.00% |
tom herbert | tom herbert | 25 | 33.33% | 1 | 25.00% |
tim shepard | tim shepard | 6 | 8.00% | 1 | 25.00% |
herbert xu | herbert xu | 3 | 4.00% | 1 | 25.00% |
| Total | 75 | 100.00% | 4 | 100.00% |
static inline bool empty_bucket(const struct tcp_iter_state *st)
{
return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
andi kleen | andi kleen | 26 | 89.66% | 1 | 25.00% |
eric dumazet | eric dumazet | 3 | 10.34% | 3 | 75.00% |
| Total | 29 | 100.00% | 4 | 100.00% |
/*
* Get the first established socket, starting from the bucket given in st->bucket.
* If st->bucket is zero, the very first socket in the hash is returned.
*/
static void *established_get_first(struct seq_file *seq)
{
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
void *rc = NULL;
st->offset = 0;
for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
struct sock *sk;
struct hlist_nulls_node *node;
spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
/* Lockless fast path for the common case of empty buckets */
if (empty_bucket(st))
continue;
spin_lock_bh(lock);
sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
if (sk->sk_family != st->family ||
!net_eq(sock_net(sk), net)) {
continue;
}
rc = sk;
goto out;
}
spin_unlock_bh(lock);
}
out:
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 73 | 48.67% | 4 | 22.22% |
pre-git | pre-git | 24 | 16.00% | 3 | 16.67% |
eric dumazet | eric dumazet | 15 | 10.00% | 4 | 22.22% |
hideaki yoshifuji | hideaki yoshifuji | 12 | 8.00% | 3 | 16.67% |
andi kleen | andi kleen | 9 | 6.00% | 1 | 5.56% |
daniel lezcano | daniel lezcano | 9 | 6.00% | 1 | 5.56% |
denis v. lunev | denis v. lunev | 4 | 2.67% | 1 | 5.56% |
tom herbert | tom herbert | 4 | 2.67% | 1 | 5.56% |
| Total | 150 | 100.00% | 18 | 100.00% |
static void *established_get_next(struct seq_file *seq, void *cur)
{
struct sock *sk = cur;
struct hlist_nulls_node *node;
struct tcp_iter_state *st = seq->private;
struct net *net = seq_file_net(seq);
++st->num;
++st->offset;
sk = sk_nulls_next(sk);
sk_nulls_for_each_from(sk, node) {
if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
return sk;
}
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
++st->bucket;
return established_get_first(seq);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 67 | 56.30% | 4 | 30.77% |
eric dumazet | eric dumazet | 19 | 15.97% | 2 | 15.38% |
hideaki yoshifuji | hideaki yoshifuji | 10 | 8.40% | 3 | 23.08% |
daniel lezcano | daniel lezcano | 9 | 7.56% | 1 | 7.69% |
tom herbert | tom herbert | 5 | 4.20% | 1 | 7.69% |
tim shepard | tim shepard | 5 | 4.20% | 1 | 7.69% |
denis v. lunev | denis v. lunev | 4 | 3.36% | 1 | 7.69% |
| Total | 119 | 100.00% | 13 | 100.00% |
static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
struct tcp_iter_state *st = seq->private;
void *rc;
st->bucket = 0;
rc = established_get_first(seq);
while (rc && pos) {
rc = established_get_next(seq, rc);
--pos;
}
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 39 | 60.94% | 1 | 33.33% |
tom herbert | tom herbert | 19 | 29.69% | 1 | 33.33% |
tim shepard | tim shepard | 6 | 9.38% | 1 | 33.33% |
| Total | 64 | 100.00% | 3 | 100.00% |
static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
void *rc;
struct tcp_iter_state *st = seq->private;
st->state = TCP_SEQ_STATE_LISTENING;
rc = listening_get_idx(seq, &pos);
if (!rc) {
st->state = TCP_SEQ_STATE_ESTABLISHED;
rc = established_get_idx(seq, pos);
}
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 69 | 100.00% | 1 | 100.00% |
| Total | 69 | 100.00% | 1 | 100.00% |
static void *tcp_seek_last_pos(struct seq_file *seq)
{
struct tcp_iter_state *st = seq->private;
int offset = st->offset;
int orig_num = st->num;
void *rc = NULL;
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
if (st->bucket >= INET_LHTABLE_SIZE)
break;
st->state = TCP_SEQ_STATE_LISTENING;
rc = listening_get_next(seq, NULL);
while (offset-- && rc)
rc = listening_get_next(seq, rc);
if (rc)
break;
st->bucket = 0;
st->state = TCP_SEQ_STATE_ESTABLISHED;
/* Fallthrough */
case TCP_SEQ_STATE_ESTABLISHED:
if (st->bucket > tcp_hashinfo.ehash_mask)
break;
rc = established_get_first(seq);
while (offset-- && rc)
rc = established_get_next(seq, rc);
}
st->num = orig_num;
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
tom herbert | tom herbert | 152 | 97.44% | 1 | 50.00% |
eric dumazet | eric dumazet | 4 | 2.56% | 1 | 50.00% |
| Total | 156 | 100.00% | 2 | 100.00% |
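The point of tcp_seek_last_pos() is that seq_file reads arrive in chunks: without a saved cursor, each read would replay the walk from bucket 0, and a full dump of /proc/net/tcp would go quadratic in the number of sockets. The shape of the optimization, as a userspace sketch (illustrative names):
/* Sketch of the last_pos resume: if the caller asks for exactly the position
 * where the previous read stopped, continue from the saved bucket/offset;
 * otherwise fall back to a rescan from the start. Not kernel API. */
struct cursor { long last_pos; int bucket; int offset; };
static int resume_or_rewind(struct cursor *c, long pos)
{
        if (pos && pos == c->last_pos)
                return 1;       /* resume from c->bucket / c->offset */
        c->bucket = 0;          /* stale cursor: rescan from bucket 0 */
        c->offset = 0;
        return 0;
}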
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
struct tcp_iter_state *st = seq->private;
void *rc;
if (*pos && *pos == st->last_pos) {
rc = tcp_seek_last_pos(seq);
if (rc)
goto out;
}
st->state = TCP_SEQ_STATE_LISTENING;
st->num = 0;
st->bucket = 0;
st->offset = 0;
rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
out:
st->last_pos = *pos;
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
tom herbert | tom herbert | 58 | 52.73% | 1 | 20.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 30 | 27.27% | 1 | 20.00% |
tim shepard | tim shepard | 15 | 13.64% | 1 | 20.00% |
hirofumi ogawa | hirofumi ogawa | 6 | 5.45% | 1 | 20.00% |
joe perches | joe perches | 1 | 0.91% | 1 | 20.00% |
| Total | 110 | 100.00% | 5 | 100.00% |
static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct tcp_iter_state *st = seq->private;
void *rc = NULL;
if (v == SEQ_START_TOKEN) {
rc = tcp_get_idx(seq, 0);
goto out;
}
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
rc = listening_get_next(seq, v);
if (!rc) {
st->state = TCP_SEQ_STATE_ESTABLISHED;
st->bucket = 0;
st->offset = 0;
rc = established_get_first(seq);
}
break;
case TCP_SEQ_STATE_ESTABLISHED:
rc = established_get_next(seq, v);
break;
}
out:
++*pos;
st->last_pos = *pos;
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 108 | 78.83% | 1 | 33.33% |
tom herbert | tom herbert | 28 | 20.44% | 1 | 33.33% |
joe perches | joe perches | 1 | 0.73% | 1 | 33.33% |
| Total | 137 | 100.00% | 3 | 100.00% |
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
struct tcp_iter_state *st = seq->private;
switch (st->state) {
case TCP_SEQ_STATE_LISTENING:
if (v != SEQ_START_TOKEN)
spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
break;
case TCP_SEQ_STATE_ESTABLISHED:
if (v)
spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
break;
}
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 57 | 73.08% | 3 | 37.50% |
eric dumazet | eric dumazet | 15 | 19.23% | 3 | 37.50% |
anders gustafsson | anders gustafsson | 5 | 6.41% | 1 | 12.50% |
joe perches | joe perches | 1 | 1.28% | 1 | 12.50% |
| Total | 78 | 100.00% | 8 | 100.00% |
int tcp_seq_open(struct inode *inode, struct file *file)
{
struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
struct tcp_iter_state *s;
int err;
err = seq_open_net(inode, file, &afinfo->seq_ops,
sizeof(struct tcp_iter_state));
if (err < 0)
return err;
s = ((struct seq_file *)file->private_data)->private;
s->family = afinfo->family;
s->last_pos = 0;
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
hideaki yoshifuji | hideaki yoshifuji | 38 | 40.43% | 2 | 25.00% |
denis v. lunev | denis v. lunev | 30 | 31.91% | 2 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 10 | 10.64% | 1 | 12.50% |
daniel lezcano | daniel lezcano | 9 | 9.57% | 1 | 12.50% |
tom herbert | tom herbert | 6 | 6.38% | 1 | 12.50% |
al viro | al viro | 1 | 1.06% | 1 | 12.50% |
| Total | 94 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL(tcp_seq_open);
int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
int rc = 0;
struct proc_dir_entry *p;
afinfo->seq_ops.start = tcp_seq_start;
afinfo->seq_ops.next = tcp_seq_next;
afinfo->seq_ops.stop = tcp_seq_stop;
p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
afinfo->seq_fops, afinfo);
if (!p)
rc = -ENOMEM;
return rc;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
hideaki yoshifuji | hideaki yoshifuji | 48 | 57.83% | 2 | 40.00% |
denis v. lunev | denis v. lunev | 30 | 36.14% | 2 | 40.00% |
daniel lezcano | daniel lezcano | 5 | 6.02% | 1 | 20.00% |
| Total | 83 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(tcp_proc_register);
void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
remove_proc_entry(afinfo->name, net->proc_net);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
hideaki yoshifuji | hideaki yoshifuji | 16 | 61.54% | 1 | 33.33% |
daniel lezcano | daniel lezcano | 5 | 19.23% | 1 | 33.33% |
gao feng | gao feng | 5 | 19.23% | 1 | 33.33% |
| Total | 26 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(tcp_proc_unregister);
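tcp_proc_register() is a thin wrapper: fill in the seq_ops iterators, then hand everything to proc_create_data(). For contrast, a minimal out-of-tree module doing the same registration with the simpler single_open() variant (a sketch against the 4.7-era procfs interfaces; "tcp_demo" and the demo_* names are invented for illustration):
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
static int demo_show(struct seq_file *m, void *v)
{
        seq_puts(m, "hello from a seq_file\n");
        return 0;
}
static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}
static const struct file_operations demo_fops = {
        .owner   = THIS_MODULE,
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};
static int __init demo_init(void)
{
        return proc_create("tcp_demo", S_IRUGO, NULL, &demo_fops) ? 0 : -ENOMEM;
}
static void __exit demo_exit(void)
{
        remove_proc_entry("tcp_demo", NULL);
}
module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");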
static void get_openreq4(const struct request_sock *req,
struct seq_file *f, int i)
{
const struct inet_request_sock *ireq = inet_rsk(req);
long delta = req->rsk_timer.expires - jiffies;
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
i,
ireq->ir_loc_addr,
ireq->ir_num,
ireq->ir_rmt_addr,
ntohs(ireq->ir_rmt_port),
TCP_SYN_RECV,
0, 0, /* could print option size, but that is af dependent. */
1, /* timers active (only the expire timer) */
jiffies_delta_to_clock_t(delta),
req->num_timeout,
from_kuid_munged(seq_user_ns(f),
sock_i_uid(req->rsk_listener)),
0, /* non standard timer */
0, /* open_requests have no inode */
0,
req);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
hideaki yoshifuji | hideaki yoshifuji | 59 | 51.30% | 1 | 6.67% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 20 | 17.39% | 3 | 20.00% |
eric dumazet | eric dumazet | 20 | 17.39% | 7 | 46.67% |
eric w. biederman | eric w. biederman | 8 | 6.96% | 1 | 6.67% |
pavel emelianov | pavel emelianov | 5 | 4.35% | 1 | 6.67% |
david s. miller | david s. miller | 2 | 1.74% | 1 | 6.67% |
tetsuo handa | tetsuo handa | 1 | 0.87% | 1 | 6.67% |
| Total | 115 | 100.00% | 15 | 100.00% |
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
{
int timer_active;
unsigned long timer_expires;
const struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct inet_sock *inet = inet_sk(sk);
const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
int rx_queue;
int state;
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sk->sk_timer)) {
timer_active = 2;
timer_expires = sk->sk_timer.expires;
} else {
timer_active = 0;
timer_expires = jiffies;
}
state = sk_state_load(sk);
if (state == TCP_LISTEN)
rx_queue = sk->sk_ack_backlog;
else
/* Because we don't lock the socket,
* we might find a transient negative value.
*/
rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
i, src, srcp, dest, destp, state,
tp->write_seq - tp->snd_una,
rx_queue,
timer_active,
jiffies_delta_to_clock_t(timer_expires - jiffies),
icsk->icsk_retransmits,
from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
icsk->icsk_probes_out,
sock_i_ino(sk),
atomic_read(&sk->sk_refcnt), sk,
jiffies_to_clock_t(icsk->icsk_rto),
jiffies_to_clock_t(icsk->icsk_ack.ato),
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd,
state == TCP_LISTEN ?
fastopenq->max_qlen :
(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 246 | 66.13% | 6 | 26.09% |
eric dumazet | eric dumazet | 56 | 15.05% | 6 | 26.09% |
jerry chu | jerry chu | 20 | 5.38% | 1 | 4.35% |
ilpo jarvinen | ilpo jarvinen | 13 | 3.49% | 2 | 8.70% |
nandita dukkipati | nandita dukkipati | 12 | 3.23% | 1 | 4.35% |
eric w. biederman | eric w. biederman | 8 | 2.15% | 1 | 4.35% |
stephen hemminger | stephen hemminger | 6 | 1.61% | 1 | 4.35% |
pavel emelianov | pavel emelianov | 5 | 1.34% | 1 | 4.35% |
al viro | al viro | 2 | 0.54% | 1 | 4.35% |
david s. miller | david s. miller | 2 | 0.54% | 1 | 4.35% |
tetsuo handa | tetsuo handa | 1 | 0.27% | 1 | 4.35% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.27% | 1 | 4.35% |
| Total | 372 | 100.00% | 23 | 100.00% |
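The format string above is effectively a stable userspace contract: each row of /proc/net/tcp carries the local and remote addresses as zero-padded hex, followed by the state byte. A small reader for the leading fields (assumes procfs is mounted; the parsing depth is illustrative):
#include <stdio.h>
/* Parse the leading columns emitted by get_tcp4_sock(), e.g. (illustrative):
 *    0: 0100007F:0016 00000000:0000 0A ...   (127.0.0.1:22, TCP_LISTEN)
 * Addresses are printed with %08X, i.e. hex in host byte order. */
int main(void)
{
        FILE *f = fopen("/proc/net/tcp", "r");
        char line[512];
        unsigned sl, laddr, lport, raddr, rport, state;
        if (!f)
                return 1;
        fgets(line, sizeof(line), f);   /* skip the header row */
        while (fgets(line, sizeof(line), f))
                if (sscanf(line, "%u: %8X:%4X %8X:%4X %2X",
                           &sl, &laddr, &lport, &raddr, &rport, &state) == 6)
                        printf("%08X:%04X -> %08X:%04X st %02X\n",
                               laddr, lport, raddr, rport, state);
        fclose(f);
        return 0;
}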
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
struct seq_file *f, int i)
{
long delta = tw->tw_timer.expires - jiffies;
__be32 dest, src;
__u16 destp, srcp;
dest = tw->tw_daddr;
src = tw->tw_rcv_saddr;
destp = ntohs(tw->tw_dport);
srcp = ntohs(tw->tw_sport);
seq_printf(f, "%4d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
atomic_read(&tw->tw_refcnt), tw);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 85 | 69.67% | 3 | 20.00% |
eric dumazet | eric dumazet | 14 | 11.48% | 3 | 20.00% |
pre-git | pre-git | 13 | 10.66% | 4 | 26.67% |
pavel emelianov | pavel emelianov | 5 | 4.10% | 1 | 6.67% |
david s. miller | david s. miller | 2 | 1.64% | 1 | 6.67% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.82% | 1 | 6.67% |
tetsuo handa | tetsuo handa | 1 | 0.82% | 1 | 6.67% |
al viro | al viro | 1 | 0.82% | 1 | 6.67% |
| Total | 122 | 100.00% | 15 | 100.00% |
#define TMPSZ 150
static int tcp4_seq_show(struct seq_file *seq, void *v)
{
struct tcp_iter_state *st;
struct sock *sk = v;
seq_setwidth(seq, TMPSZ - 1);
if (v == SEQ_START_TOKEN) {
seq_puts(seq, " sl local_address rem_address st tx_queue "
"rx_queue tr tm->when retrnsmt uid timeout "
"inode");
goto out;
}
st = seq->private;
if (sk->sk_state == TCP_TIME_WAIT)
get_timewait4_sock(v, seq, st->num);
else if (sk->sk_state == TCP_NEW_SYN_RECV)
get_openreq4(v, seq, st->num);
else
get_tcp4_sock(v, seq, st->num);
out:
seq_pad(seq, '\n');
return 0;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 45 | 36.00% | 1 | 9.09% |
pre-git | pre-git | 33 | 26.40% | 4 | 36.36% |
eric dumazet | eric dumazet | 28 | 22.40% | 2 | 18.18% |
tetsuo handa | tetsuo handa | 14 | 11.20% | 1 | 9.09% |
pavel emelianov | pavel emelianov | 3 | 2.40% | 1 | 9.09% |
joe perches | joe perches | 1 | 0.80% | 1 | 9.09% |
hideaki yoshifuji | hideaki yoshifuji | 1 | 0.80% | 1 | 9.09% |
| Total | 125 | 100.00% | 11 | 100.00% |
static const struct file_operations tcp_afinfo_seq_fops = {
.owner = THIS_MODULE,
.open = tcp_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net
};
static struct tcp_seq_afinfo tcp4_seq_afinfo = {
.name = "tcp",
.family = AF_INET,
.seq_fops = &tcp_afinfo_seq_fops,
.seq_ops = {
.show = tcp4_seq_show,
},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
return tcp_proc_register(net, &tcp4_seq_afinfo);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pavel emelianov | pavel emelianov | 12 | 57.14% | 1 | 25.00% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 6 | 28.57% | 1 | 25.00% |
hideaki yoshifuji | hideaki yoshifuji | 2 | 9.52% | 1 | 25.00% |
alexey dobriyan | alexey dobriyan | 1 | 4.76% | 1 | 25.00% |
| Total | 21 | 100.00% | 4 | 100.00% |
static void __net_exit tcp4_proc_exit_net(struct net *net)
{
tcp_proc_unregister(net, &tcp4_seq_afinfo);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pavel emelianov | pavel emelianov | 15 | 75.00% | 1 | 25.00% |
hideaki yoshifuji | hideaki yoshifuji | 2 | 10.00% | 1 | 25.00% |
daniel lezcano | daniel lezcano | 2 | 10.00% | 1 | 25.00% |
alexey dobriyan | alexey dobriyan | 1 | 5.00% | 1 | 25.00% |
| Total | 20 | 100.00% | 4 | 100.00% |
static struct pernet_operations tcp4_net_ops = {
.init = tcp4_proc_init_net,
.exit = tcp4_proc_exit_net,
};
int __init tcp4_proc_init(void)
{
return register_pernet_subsys(&tcp4_net_ops);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
pavel emelianov | pavel emelianov | 13 | 86.67% | 1 | 50.00% |
pre-git | pre-git | 2 | 13.33% | 1 | 50.00% |
| Total | 15 | 100.00% | 2 | 100.00% |
void tcp4_proc_exit(void)
{
unregister_pernet_subsys(&tcp4_net_ops);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
arnaldo carvalho de melo | arnaldo carvalho de melo | 9 | 69.23% | 1 | 33.33% |
hideaki yoshifuji | hideaki yoshifuji | 2 | 15.38% | 1 | 33.33% |
pavel emelianov | pavel emelianov | 2 | 15.38% | 1 | 33.33% |
| Total | 13 | 100.00% | 3 | 100.00% |
#endif /* CONFIG_PROC_FS */
struct proto tcp_prot = {
.name = "TCP",
.owner = THIS_MODULE,
.close = tcp_close,
.connect = tcp_v4_connect,
.disconnect = tcp_disconnect,
.accept = inet_csk_accept,
.ioctl = tcp_ioctl,
.init = tcp_v4_init_sock,
.destroy = tcp_v4_destroy_sock,
.shutdown = tcp_shutdown,
.setsockopt = tcp_setsockopt,
.getsockopt = tcp_getsockopt,
.recvmsg = tcp_recvmsg,
.sendmsg = tcp_sendmsg,
.sendpage = tcp_sendpage,
.backlog_rcv = tcp_v4_do_rcv,
.release_cb = tcp_release_cb,
.hash = inet_hash,
.unhash = inet_unhash,
.get_port = inet_csk_get_port,
.enter_memory_pressure = tcp_enter_memory_pressure,
.stream_memory_free = tcp_stream_memory_free,
.sockets_allocated = &tcp_sockets_allocated,
.orphan_count = &tcp_orphan_count,
.memory_allocated = &tcp_memory_allocated,
.memory_pressure = &tcp_memory_pressure,
.sysctl_mem = sysctl_tcp_mem,
.sysctl_wmem = sysctl_tcp_wmem,
.sysctl_rmem = sysctl_tcp_rmem,
.max_header = MAX_TCP_HEADER,
.obj_size = sizeof(struct tcp_sock),
.slab_flags = SLAB_DESTROY_BY_RCU,
.twsk_prot = &tcp_timewait_sock_ops,
.rsk_prot = &tcp_request_sock_ops,
.h.hashinfo = &tcp_hashinfo,
.no_autobind = true,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_tcp_setsockopt,
.compat_getsockopt = compat_tcp_getsockopt,
#endif
.diag_destroy = tcp_abort,
};
EXPORT_SYMBOL(tcp_prot);
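tcp_prot is the table the socket layer dispatches through, so plain userspace socket calls exercise these entries directly: socket(2) ends up in .init (tcp_v4_init_sock) and close(2) in .close (tcp_close). A trivial round trip:
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>
/* Creating and closing an AF_INET stream socket walks the tcp_prot
 * .init and .close entries above. */
int main(void)
{
        int fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
                return 1;
        close(fd);
        return 0;
}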
static void __net_exit tcp_sk_exit(struct net *net)
{
int cpu;
for_each_possible_cpu(cpu)
inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
free_percpu(net->ipv4.tcp_sk);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric dumazet | eric dumazet | 43 | 100.00% | 1 | 100.00% |
| Total | 43 | 100.00% | 1 | 100.00% |
static int __net_init tcp_sk_init(struct net *net)
{
int res, cpu;
net->ipv4.tcp_sk = alloc_percpu(struct sock *);
if (!net->ipv4.tcp_sk)
return -ENOMEM;
for_each_possible_cpu(cpu) {
struct sock *sk;
res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
IPPROTO_TCP, net);
if (res)
goto fail;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
*per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
}
net->ipv4.sysctl_tcp_ecn = 2;
net->ipv4.sysctl_tcp_ecn_fallback = 1;
net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
net->ipv4.sysctl_tcp_syncookies = 1;
net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
net->ipv4.sysctl_tcp_orphan_retries = 0;
net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
return 0;
fail:
tcp_sk_exit(net);
return res;
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric dumazet | eric dumazet | 96 | 38.87% | 3 | 12.50% |
nikolay borisov | nikolay borisov | 95 | 38.46% | 12 | 50.00% |
fan du | fan du | 24 | 9.72% | 3 | 12.50% |
denis v. lunev | denis v. lunev | 12 | 4.86% | 1 | 4.17% |
daniel borkmann | daniel borkmann | 8 | 3.24% | 1 | 4.17% |
hannes frederic sowa | hannes frederic sowa | 8 | 3.24% | 1 | 4.17% |
pre-git | pre-git | 3 | 1.21% | 2 | 8.33% |
david s. miller | david s. miller | 1 | 0.40% | 1 | 4.17% |
| Total | 247 | 100.00% | 24 | 100.00% |
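Everything tcp_sk_init() assigns is per network namespace, and each field is exposed under /proc/sys/net/ipv4/ inside that namespace. A quick userspace check of two of the defaults set above (assumes procfs is mounted):
#include <stdio.h>
/* Read a per-netns TCP sysctl as seen by the current namespace. */
static long read_sysctl(const char *path)
{
        FILE *f = fopen(path, "r");
        long v = -1;
        if (f) {
                if (fscanf(f, "%ld", &v) != 1)
                        v = -1;
                fclose(f);
        }
        return v;
}
int main(void)
{
        printf("tcp_syn_retries = %ld\n",
               read_sysctl("/proc/sys/net/ipv4/tcp_syn_retries"));
        printf("tcp_fin_timeout = %ld\n",
               read_sysctl("/proc/sys/net/ipv4/tcp_fin_timeout"));
        return 0;
}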
static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric w. biederman | eric w. biederman | 11 | 47.83% | 1 | 33.33% |
daniel lezcano | daniel lezcano | 11 | 47.83% | 1 | 33.33% |
denis v. lunev | denis v. lunev | 1 | 4.35% | 1 | 33.33% |
| Total | 23 | 100.00% | 3 | 100.00% |
static struct pernet_operations __net_initdata tcp_sk_ops = {
.init = tcp_sk_init,
.exit = tcp_sk_exit,
.exit_batch = tcp_sk_exit_batch,
};
void __init tcp_v4_init(void)
{
inet_hashinfo_init(&tcp_hashinfo);
if (register_pernet_subsys(&tcp_sk_ops))
panic("Failed to create the TCP control socket.\n");
}
Contributors
| Person | Tokens | Prop | Commits | CommitProp |
denis v. lunev | denis v. lunev | 12 | 44.44% | 2 | 33.33% |
pre-git | pre-git | 7 | 25.93% | 1 | 16.67% |
eric dumazet | eric dumazet | 6 | 22.22% | 1 | 16.67% |
linus torvalds | linus torvalds | 1 | 3.70% | 1 | 16.67% |
eric w. biederman | eric w. biederman | 1 | 3.70% | 1 | 16.67% |
| Total | 27 | 100.00% | 6 | 100.00% |
Overall Contributors
| Person | Tokens | Prop | Commits | CommitProp |
eric dumazet | eric dumazet | 2304 | 20.28% | 118 | 26.05% |
pre-git | pre-git | 2248 | 19.79% | 45 | 9.93% |
arnaldo carvalho de melo | arnaldo carvalho de melo | 1771 | 15.59% | 49 | 10.82% |
hideaki yoshifuji | hideaki yoshifuji | 1467 | 12.91% | 15 | 3.31% |
david s. miller | david s. miller | 595 | 5.24% | 37 | 8.17% |
tom herbert | tom herbert | 330 | 2.90% | 4 | 0.88% |
adam langley | adam langley | 291 | 2.56% | 2 | 0.44% |
octavian purdila | octavian purdila | 204 | 1.80% | 9 | 1.99% |
pavel emelianov | pavel emelianov | 168 | 1.48% | 12 | 2.65% |
herbert xu | herbert xu | 151 | 1.33% | 8 | 1.77% |
shawn lu | shawn lu | 149 | 1.31% | 2 | 0.44% |
linus torvalds | linus torvalds | 139 | 1.22% | 6 | 1.32% |
damian lukowski | damian lukowski | 133 | 1.17% | 2 | 0.44% |
alexey kuznetsov | alexey kuznetsov | 131 | 1.15% | 6 | 1.32% |
denis v. lunev | denis v. lunev | 127 | 1.12% | 8 | 1.77% |
nikolay borisov | nikolay borisov | 95 | 0.84% | 12 | 2.65% |
stephen hemminger | stephen hemminger | 77 | 0.68% | 9 | 1.99% |
florian westphal | florian westphal | 71 | 0.62% | 2 | 0.44% |
rusty russell | rusty russell | 58 | 0.51% | 1 | 0.22% |
jerry chu | jerry chu | 53 | 0.47% | 2 | 0.44% |
daniel lezcano | daniel lezcano | 52 | 0.46% | 3 | 0.66% |
yuchung cheng | yuchung cheng | 48 | 0.42% | 3 | 0.66% |
kovacs krisztian | kovacs krisztian | 44 | 0.39% | 1 | 0.22% |
eric w. biederman | eric w. biederman | 41 | 0.36% | 5 | 1.10% |
arjan van de ven | arjan van de ven | 40 | 0.35% | 1 | 0.22% |
tom quetchenbach | tom quetchenbach | 37 | 0.33% | 1 | 0.22% |
andi kleen | andi kleen | 35 | 0.31% | 1 | 0.22% |
tim shepard | tim shepard | 32 | 0.28% | 2 | 0.44% |
fan du | fan du | 27 | 0.24% | 4 | 0.88% |
ilpo jarvinen | ilpo jarvinen | 23 | 0.20% | 4 | 0.88% |
balazs scheidler | balazs scheidler | 20 | 0.18% | 1 | 0.22% |
neal cardwell | neal cardwell | 18 | 0.16% | 3 | 0.66% |
tetsuo handa | tetsuo handa | 17 | 0.15% | 1 | 0.22% |
hannes frederic sowa | hannes frederic sowa | 17 | 0.15% | 3 | 0.66% |
andrey vagin | andrey vagin | 16 | 0.14% | 1 | 0.22% |
craig gallek | craig gallek | 16 | 0.14% | 1 | 0.22% |
dmitry mishin | dmitry mishin | 15 | 0.13% | 1 | 0.22% |
daniel borkmann | daniel borkmann | 15 | 0.13% | 2 | 0.44% |
benjamin lahaise | benjamin lahaise | 15 | 0.13% | 1 | 0.22% |
patrick mchardy | patrick mchardy | 15 | 0.13% | 4 | 0.88% |
changli gao | changli gao | 15 | 0.13% | 1 | 0.22% |
james morris | james morris | 14 | 0.12% | 2 | 0.44% |
yi zhu | yi zhu | 14 | 0.12% | 2 | 0.44% |
nandita dukkipati | nandita dukkipati | 12 | 0.11% | 1 | 0.22% |
joe perches | joe perches | 12 | 0.11% | 3 | 0.66% |
dmitry popov | dmitry popov | 12 | 0.11% | 1 | 0.22% |
wei dong | wei dong | 12 | 0.11% | 1 | 0.22% |
nivedita singhvi | nivedita singhvi | 11 | 0.10% | 1 | 0.22% |
jiri benc | jiri benc | 11 | 0.10% | 1 | 0.22% |
marcelo ricardo leitner | marcelo ricardo leitner | 8 | 0.07% | 1 | 0.22% |
david ahern | david ahern | 8 | 0.07% | 1 | 0.22% |
al viro | al viro | 8 | 0.07% | 5 | 1.10% |
johannes weiner | johannes weiner | 8 | 0.07% | 2 | 0.44% |
glauber costa | glauber costa | 7 | 0.06% | 2 | 0.44% |
william allen simpson | william allen simpson | 7 | 0.06% | 2 | 0.44% |
vijay subramanian | vijay subramanian | 7 | 0.06% | 2 | 0.44% |
john dykstra | john dykstra | 6 | 0.05% | 2 | 0.44% |
andrew morton | andrew morton | 6 | 0.05% | 1 | 0.22% |
hirofumi ogawa | hirofumi ogawa | 6 | 0.05% | 1 | 0.22% |
gao feng | gao feng | 5 | 0.04% | 1 | 0.22% |
lorenzo colitti | lorenzo colitti | 5 | 0.04% | 1 | 0.22% |
gui jianfeng | gui jianfeng | 5 | 0.04% | 1 | 0.22% |
anders gustafsson | anders gustafsson | 5 | 0.04% | 1 | 0.22% |
gerrit renker | gerrit renker | 4 | 0.04% | 1 | 0.22% |
sathya perla | sathya perla | 4 | 0.04% | 1 | 0.22% |
brian haley | brian haley | 3 | 0.03% | 2 | 0.44% |
paul moore | paul moore | 3 | 0.03% | 1 | 0.22% |
harvey harrison | harvey harrison | 3 | 0.03% | 1 | 0.22% |
zheng yan | zheng yan | 3 | 0.03% | 1 | 0.22% |
ian morris | ian morris | 3 | 0.03% | 1 | 0.22% |
adrian bunk | adrian bunk | 3 | 0.03% | 2 | 0.44% |
eliezer tamir | eliezer tamir | 3 | 0.03% | 2 | 0.44% |
michal kubecek | michal kubecek | 3 | 0.03% | 1 | 0.22% |
tejun heo | tejun heo | 3 | 0.03% | 1 | 0.22% |
alexey dobriyan | alexey dobriyan | 2 | 0.02% | 1 | 0.22% |
aydin arik | aydin arik | 2 | 0.02% | 1 | 0.22% |
christoph paasch | christoph paasch | 2 | 0.02% | 1 | 0.22% |
martin kafai lau | martin kafai lau | 2 | 0.02% | 1 | 0.22% |
chris leech | chris leech | 2 | 0.02% | 1 | 0.22% |
craig schlenter | craig schlenter | 1 | 0.01% | 1 | 0.22% |
steven cole | steven cole | 1 | 0.01% | 1 | 0.22% |
kris katterjohn | kris katterjohn | 1 | 0.01% | 1 | 0.22% |
thomas graf | thomas graf | 1 | 0.01% | 1 | 0.22% |
matthias dellweg | matthias dellweg | 1 | 0.01% | 1 | 0.22% |
ingo molnar | ingo molnar | 1 | 0.01% | 1 | 0.22% |
| Total | 11360 | 100.00% | 453 | 100.00% |