Release 4.11 net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#include <net/netfilter/nf_log.h>
/* Per-netns storage id for struct conntrack4_net (see ipv4_net_ops). */
static int conntrack4_net_id __read_mostly;
/* Serializes hook registration/unregistration across namespaces. */
static DEFINE_MUTEX(register_ipv4_hooks);

/* Per-netns state: how many users requested the IPv4 conntrack hooks. */
struct conntrack4_net {
	unsigned int users;
};
/* Extract the IPv4 source/destination addresses at network offset
 * @nhoff into @tuple.  Returns false when the packet is too short to
 * contain both addresses.
 */
static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
			      struct nf_conntrack_tuple *tuple)
{
	__be32 addrs[2];
	const __be32 *ptr;

	/* saddr and daddr are adjacent in struct iphdr, so one
	 * header-pointer lookup fetches both. */
	ptr = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
				 sizeof(addrs), addrs);
	if (!ptr)
		return false;

	tuple->src.u3.ip = ptr[0];
	tuple->dst.u3.ip = ptr[1];

	return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 87 | 90.62% | 1 | 25.00% |
Jan Engelhardt | 8 | 8.33% | 2 | 50.00% |
Patrick McHardy | 1 | 1.04% | 1 | 25.00% |
Total | 96 | 100.00% | 4 | 100.00% |
/* Fill @tuple with the reverse direction of @orig: the source and
 * destination addresses swap roles. */
static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_tuple *orig)
{
	tuple->dst.u3.ip = orig->src.u3.ip;
	tuple->src.u3.ip = orig->dst.u3.ip;

	return true;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 50 | 96.15% | 1 | 50.00% |
Jan Engelhardt | 2 | 3.85% | 1 | 50.00% |
Total | 52 | 100.00% | 2 | 100.00% |
/* /proc formatting: emit "src=a.b.c.d dst=a.b.c.d " for @tuple. */
static void ipv4_print_tuple(struct seq_file *s,
			     const struct nf_conntrack_tuple *tuple)
{
	seq_printf(s, "src=%pI4 ", &tuple->src.u3.ip);
	seq_printf(s, "dst=%pI4 ", &tuple->dst.u3.ip);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 37 | 88.10% | 1 | 33.33% |
Harvey Harrison | 3 | 7.14% | 1 | 33.33% |
Joe Perches | 2 | 4.76% | 1 | 33.33% |
Total | 42 | 100.00% | 3 | 100.00% |
/* Locate the transport header of the packet whose IP header starts at
 * offset @nhoff.
 *
 * On success returns NF_ACCEPT with *dataoff set to the transport
 * header offset and *protonum to the IP protocol number.  Returns
 * -NF_ACCEPT (leave the packet untracked) for truncated, fragmented
 * or bogus headers.
 */
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    unsigned int *dataoff, u_int8_t *protonum)
{
	const struct iphdr *iph;
	struct iphdr _iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (iph == NULL)
		return -NF_ACCEPT;

	/* Conntrack defragments packets, we might still see fragments
	 * inside ICMP packets though. */
	if (iph->frag_off & htons(IP_OFFSET))
		return -NF_ACCEPT;

	/* RFC 791: the minimum legal IHL is 5 (20 bytes).  A smaller
	 * value would make *dataoff point inside the IP header itself,
	 * so reject it before computing the transport offset. */
	if (iph->ihl < 5)
		return -NF_ACCEPT;

	*dataoff = nhoff + (iph->ihl << 2);
	*protonum = iph->protocol;

	/* Check bogus IP headers */
	if (*dataoff > skb->len) {
		pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: "
			 "nhoff %u, ihl %u, skblen %u\n",
			 nhoff, iph->ihl << 2, skb->len);
		return -NF_ACCEPT;
	}
	return NF_ACCEPT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 93 | 68.89% | 2 | 33.33% |
Jozsef Kadlecsik | 36 | 26.67% | 2 | 33.33% |
Jan Engelhardt | 5 | 3.70% | 1 | 16.67% |
Patrick McHardy | 1 | 0.74% | 1 | 16.67% |
Total | 135 | 100.00% | 6 | 100.00% |
static unsigned int ipv4_helper(void *priv,
struct sk_buff *skb,
const struct nf_hook_state *state)
{
struct nf_conn *ct;
enum ip_conntrack_info ctinfo;
const struct nf_conn_help *help;
const struct nf_conntrack_helper *helper;
/* This is where we call the helper: as the packet goes out. */
ct = nf_ct_get(skb, &ctinfo);
if (!ct || ctinfo == IP_CT_RELATED_REPLY)
return NF_ACCEPT;
help = nfct_help(ct);
if (!help)
return NF_ACCEPT;
/* rcu_read_lock()ed by nf_hook_thresh */
helper = rcu_dereference(help->helper);
if (!helper)
return NF_ACCEPT;
return helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
ct, ctinfo);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 51 | 42.15% | 1 | 5.88% |
Patrick McHardy | 22 | 18.18% | 5 | 29.41% |
Harald Welte | 18 | 14.88% | 1 | 5.88% |
Pablo Neira Ayuso | 11 | 9.09% | 2 | 11.76% |
Arnaldo Carvalho de Melo | 6 | 4.96% | 2 | 11.76% |
Herbert Xu | 5 | 4.13% | 1 | 5.88% |
Eric W. Biedermann | 2 | 1.65% | 1 | 5.88% |
David S. Miller | 2 | 1.65% | 1 | 5.88% |
Jan Engelhardt | 2 | 1.65% | 1 | 5.88% |
Eric Dumazet | 1 | 0.83% | 1 | 5.88% |
Aaron Conole | 1 | 0.83% | 1 | 5.88% |
Total | 121 | 100.00% | 17 | 100.00% |
/* Hook entry (POST_ROUTING / LOCAL_IN): apply any pending TCP sequence
 * adjustment, then confirm the connection. */
static unsigned int ipv4_confirm(void *priv,
				 struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	enum ip_conntrack_info ctinfo;
	struct nf_conn *ct;

	/* adjust seqs for loopback traffic only in outgoing direction;
	 * the &&-chain preserves the original short-circuit order, so
	 * nf_ct_seq_adjust() only runs when all guards hold. */
	ct = nf_ct_get(skb, &ctinfo);
	if (ct && ctinfo != IP_CT_RELATED_REPLY &&
	    test_bit(IPS_SEQ_ADJUST_BIT, &ct->status) &&
	    !nf_is_loopback_packet(skb) &&
	    !nf_ct_seq_adjust(skb, ct, ctinfo, ip_hdrlen(skb))) {
		NF_CT_STAT_INC_ATOMIC(nf_ct_net(ct), drop);
		return NF_DROP;
	}

	/* We've seen it coming out the other side: confirm it */
	return nf_conntrack_confirm(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 59 | 51.30% | 2 | 20.00% |
Patrick McHardy | 43 | 37.39% | 4 | 40.00% |
Julian Anastasov | 7 | 6.09% | 1 | 10.00% |
Yasuyuki Kozakai | 2 | 1.74% | 1 | 10.00% |
David S. Miller | 2 | 1.74% | 1 | 10.00% |
Eric W. Biedermann | 2 | 1.74% | 1 | 10.00% |
Total | 115 | 100.00% | 10 | 100.00% |
/* PRE_ROUTING hook entry: hand every incoming packet to the generic
 * conntrack engine. */
static unsigned int ipv4_conntrack_in(void *priv,
				      struct sk_buff *skb,
				      const struct nf_hook_state *state)
{
	struct net *net = state->net;

	return nf_conntrack_in(net, PF_INET, state->hook, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 24 | 63.16% | 1 | 12.50% |
Eric W. Biedermann | 5 | 13.16% | 3 | 37.50% |
David S. Miller | 4 | 10.53% | 1 | 12.50% |
Patrick McHardy | 2 | 5.26% | 1 | 12.50% |
Herbert Xu | 2 | 5.26% | 1 | 12.50% |
Alexey Dobriyan | 1 | 2.63% | 1 | 12.50% |
Total | 38 | 100.00% | 8 | 100.00% |
/* LOCAL_OUT hook entry: sanity-check locally generated packets before
 * handing them to the conntrack engine. */
static unsigned int ipv4_conntrack_local(void *priv,
					 struct sk_buff *skb,
					 const struct nf_hook_state *state)
{
	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr))
		return NF_ACCEPT;
	if (ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	if (ip_is_fragment(ip_hdr(skb))) /* IP_NODEFRAG setsockopt set */
		return NF_ACCEPT;

	return nf_conntrack_in(state->net, PF_INET, state->hook, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 46 | 58.23% | 1 | 10.00% |
Florian Westphal | 14 | 17.72% | 1 | 10.00% |
Eric W. Biedermann | 5 | 6.33% | 3 | 30.00% |
Herbert Xu | 4 | 5.06% | 1 | 10.00% |
David S. Miller | 4 | 5.06% | 1 | 10.00% |
Arnaldo Carvalho de Melo | 3 | 3.80% | 1 | 10.00% |
Patrick McHardy | 2 | 2.53% | 1 | 10.00% |
Alexey Dobriyan | 1 | 1.27% | 1 | 10.00% |
Total | 79 | 100.00% | 10 | 100.00% |
/* Connection tracking may drop packets, but never alters them, so
   make it the first hook. */
static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
	/* Track incoming packets before routing. */
	{
		.hook		= ipv4_conntrack_in,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK,
	},
	/* Track locally generated packets. */
	{
		.hook		= ipv4_conntrack_local,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_CONNTRACK,
	},
	/* Forwarded/outgoing path: run helpers, then confirm. */
	{
		.hook		= ipv4_helper,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK_HELPER,
	},
	{
		.hook		= ipv4_confirm,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
	},
	/* Local delivery path: run helpers, then confirm. */
	{
		.hook		= ipv4_helper,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_CONNTRACK_HELPER,
	},
	{
		.hook		= ipv4_confirm,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
	},
};
/* Fast function for those who don't want to parse /proc (and I don't
blame them). */
/* Reversing the socket's dst/src point of view gives us the reply
mapping. */
/* Fast function for those who don't want to parse /proc (and I don't
   blame them). */
/* Reversing the socket's dst/src point of view gives us the reply
   mapping. */
/* getsockopt(SO_ORIGINAL_DST) handler: look up the original (pre-NAT)
 * destination of the connection behind the calling TCP/SCTP socket
 * and copy it to userspace as a struct sockaddr_in.
 *
 * Returns 0 on success, -ENOPROTOOPT for other socket protocols,
 * -EINVAL when *len is too small, -EFAULT on copy failure and
 * -ENOENT when no conntrack entry matches.
 */
static int
getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;

	memset(&tuple, 0, sizeof(tuple));

	/* The socket's local/remote endpoints form the reply-direction
	 * tuple of the original connection. */
	tuple.src.u3.ip = inet->inet_rcv_saddr;
	tuple.src.u.tcp.port = inet->inet_sport;
	tuple.dst.u3.ip = inet->inet_daddr;
	tuple.dst.u.tcp.port = inet->inet_dport;
	tuple.src.l3num = PF_INET;
	tuple.dst.protonum = sk->sk_protocol;

	/* We only do TCP and SCTP at the moment: is there a better way? */
	if (sk->sk_protocol != IPPROTO_TCP && sk->sk_protocol != IPPROTO_SCTP) {
		pr_debug("SO_ORIGINAL_DST: Not a TCP/SCTP socket\n");
		return -ENOPROTOOPT;
	}

	if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
		pr_debug("SO_ORIGINAL_DST: len %d not %zu\n",
			 *len, sizeof(struct sockaddr_in));
		return -EINVAL;
	}

	h = nf_conntrack_find_get(sock_net(sk), &nf_ct_zone_dflt, &tuple);
	if (h) {
		struct sockaddr_in sin;
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		sin.sin_family = AF_INET;
		sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u.tcp.port;
		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u3.ip;
		/* Don't leak uninitialized stack bytes to userspace. */
		memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

		pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
			 &sin.sin_addr.s_addr, ntohs(sin.sin_port));
		/* Drop the reference taken by nf_conntrack_find_get(). */
		nf_ct_put(ct);
		if (copy_to_user(user, &sin, sizeof(sin)) != 0)
			return -EFAULT;
		else
			return 0;
	}
	pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
		 &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
		 &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
	return -ENOENT;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 329 | 84.36% | 1 | 8.33% |
Marcel Holtmann | 16 | 4.10% | 1 | 8.33% |
Rafael Laufer | 13 | 3.33% | 1 | 8.33% |
Philip Craig | 8 | 2.05% | 1 | 8.33% |
Alexey Dobriyan | 6 | 1.54% | 2 | 16.67% |
Harvey Harrison | 5 | 1.28% | 1 | 8.33% |
Patrick McHardy | 5 | 1.28% | 2 | 16.67% |
Eric Dumazet | 4 | 1.03% | 1 | 8.33% |
Jan Engelhardt | 2 | 0.51% | 1 | 8.33% |
Daniel Borkmann | 2 | 0.51% | 1 | 8.33% |
Total | 390 | 100.00% | 12 | 100.00% |
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
/* Dump the source/destination addresses of @tuple as netlink
 * attributes.  Returns 0 on success, -1 if the skb ran out of room. */
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	if (nla_put_in_addr(skb, CTA_IP_V4_SRC, tuple->src.u3.ip))
		return -1;
	if (nla_put_in_addr(skb, CTA_IP_V4_DST, tuple->dst.u3.ip))
		return -1;

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 50 | 81.97% | 1 | 20.00% |
David S. Miller | 7 | 11.48% | 1 | 20.00% |
Jiri Benc | 2 | 3.28% | 1 | 20.00% |
Patrick McHardy | 2 | 3.28% | 2 | 40.00% |
Total | 61 | 100.00% | 5 | 100.00% |
/* Netlink attribute policy: both address attributes carry one
 * 32-bit IPv4 address. */
static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = {
	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
};
/* Parse the netlink address attributes in @tb into @t.  Both the
 * source and destination attribute must be present. */
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t)
{
	const struct nlattr *src = tb[CTA_IP_V4_SRC];
	const struct nlattr *dst = tb[CTA_IP_V4_DST];

	if (!src || !dst)
		return -EINVAL;

	t->src.u3.ip = nla_get_in_addr(src);
	t->dst.u3.ip = nla_get_in_addr(dst);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 66 | 94.29% | 1 | 25.00% |
Jiri Benc | 2 | 2.86% | 1 | 25.00% |
Patrick McHardy | 2 | 2.86% | 2 | 50.00% |
Total | 70 | 100.00% | 4 | 100.00% |
/* Worst-case netlink payload needed to dump one IPv4 tuple. */
static int ipv4_nlattr_tuple_size(void)
{
	int len = nla_policy_len(ipv4_nla_policy, CTA_IP_MAX + 1);

	return len;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Holger Eitzenberger | 18 | 100.00% | 1 | 100.00% |
Total | 18 | 100.00% | 1 | 100.00% |
#endif
/* Registration record for the SO_ORIGINAL_DST getsockopt() handler. */
static struct nf_sockopt_ops so_getorigdst = {
	.pf		= PF_INET,
	.get_optmin	= SO_ORIGINAL_DST,
	.get_optmax	= SO_ORIGINAL_DST+1,
	.get		= getorigdst,
	.owner		= THIS_MODULE,
};
static int ipv4_hooks_register(struct net *net)
{
struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id);
int err = 0;
mutex_lock(®ister_ipv4_hooks);
cnet->users++;
if (cnet->users > 1)
goto out_unlock;
err = nf_defrag_ipv4_enable(net);
if (err) {
cnet->users = 0;
goto out_unlock;
}
err = nf_register_net_hooks(net, ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
if (err)
cnet->users = 0;
out_unlock:
mutex_unlock(®ister_ipv4_hooks);
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 107 | 100.00% | 2 | 100.00% |
Total | 107 | 100.00% | 2 | 100.00% |
static void ipv4_hooks_unregister(struct net *net)
{
struct conntrack4_net *cnet = net_generic(net, conntrack4_net_id);
mutex_lock(®ister_ipv4_hooks);
if (cnet->users && (--cnet->users == 0))
nf_unregister_net_hooks(net, ipv4_conntrack_ops,
ARRAY_SIZE(ipv4_conntrack_ops));
mutex_unlock(®ister_ipv4_hooks);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 62 | 100.00% | 1 | 100.00% |
Total | 62 | 100.00% | 1 | 100.00% |
/* Layer-3 protocol descriptor tying the IPv4 callbacks above into the
 * generic conntrack core. */
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
	.l3proto	 = PF_INET,
	.name		 = "ipv4",
	.pkt_to_tuple	 = ipv4_pkt_to_tuple,
	.invert_tuple	 = ipv4_invert_tuple,
	.print_tuple	 = ipv4_print_tuple,
	.get_l4proto	 = ipv4_get_l4proto,
#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
	.tuple_to_nlattr = ipv4_tuple_to_nlattr,
	.nlattr_tuple_size = ipv4_nlattr_tuple_size,
	.nlattr_to_tuple = ipv4_nlattr_to_tuple,
	.nla_policy	 = ipv4_nla_policy,
#endif
	.net_ns_get	 = ipv4_hooks_register,
	.net_ns_put	 = ipv4_hooks_unregister,
	.me		 = THIS_MODULE,
};
/* Expose the conntrack hash table size as a writable (0600) module
 * parameter named "hashsize". */
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
		  &nf_conntrack_htable_size, 0600);

MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
/* Layer-4 trackers registered together with the IPv4 l3 protocol;
 * DCCP/SCTP/UDP-lite entries depend on their config options. */
static struct nf_conntrack_l4proto *builtin_l4proto4[] = {
	&nf_conntrack_l4proto_tcp4,
	&nf_conntrack_l4proto_udp4,
	&nf_conntrack_l4proto_icmp,
#ifdef CONFIG_NF_CT_PROTO_DCCP
	&nf_conntrack_l4proto_dccp4,
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
	&nf_conntrack_l4proto_sctp4,
#endif
#ifdef CONFIG_NF_CT_PROTO_UDPLITE
	&nf_conntrack_l4proto_udplite4,
#endif
};
/* Per-netns init: register the built-in l4 trackers first, then the
 * l3 protocol; unwind the l4 registration if the latter fails. */
static int ipv4_net_init(struct net *net)
{
	int ret;

	ret = nf_ct_l4proto_pernet_register(net, builtin_l4proto4,
					    ARRAY_SIZE(builtin_l4proto4));
	if (ret < 0)
		return ret;

	ret = nf_ct_l3proto_pernet_register(net, &nf_conntrack_l3proto_ipv4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: pernet registration failed\n");
		/* Roll back the l4 registration done above. */
		nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4,
						ARRAY_SIZE(builtin_l4proto4));
	}
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 39 | 50.65% | 1 | 14.29% |
Gao Feng | 26 | 33.77% | 4 | 57.14% |
Davide Caratti | 11 | 14.29% | 1 | 14.29% |
Stephen Hemminger | 1 | 1.30% | 1 | 14.29% |
Total | 77 | 100.00% | 7 | 100.00% |
/* Per-netns teardown: mirrors ipv4_net_init() in reverse order
 * (l3 protocol first, then the l4 trackers). */
static void ipv4_net_exit(struct net *net)
{
	nf_ct_l3proto_pernet_unregister(net, &nf_conntrack_l3proto_ipv4);
	nf_ct_l4proto_pernet_unregister(net, builtin_l4proto4,
					ARRAY_SIZE(builtin_l4proto4));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 24 | 77.42% | 4 | 66.67% |
Davide Caratti | 4 | 12.90% | 1 | 16.67% |
Yasuyuki Kozakai | 3 | 9.68% | 1 | 16.67% |
Total | 31 | 100.00% | 6 | 100.00% |
/* Pernet operations: allocates struct conntrack4_net per namespace
 * and runs the init/exit callbacks above. */
static struct pernet_operations ipv4_net_ops = {
	.init = ipv4_net_init,
	.exit = ipv4_net_exit,
	.id = &conntrack4_net_id,
	.size = sizeof(struct conntrack4_net),
};
/* Module init: register, in order, the SO_ORIGINAL_DST sockopt, the
 * pernet ops, the built-in l4 trackers and the l3 protocol.  On any
 * failure the steps already completed are unwound in reverse order
 * via the goto chain. */
static int __init nf_conntrack_l3proto_ipv4_init(void)
{
	int ret;

	need_conntrack();

	ret = nf_register_sockopt(&so_getorigdst);
	if (ret < 0) {
		pr_err("Unable to register netfilter socket option\n");
		return ret;
	}

	ret = register_pernet_subsys(&ipv4_net_ops);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register pernet ops\n");
		goto err_sockopt;
	}

	ret = nf_ct_l4proto_register(builtin_l4proto4,
				     ARRAY_SIZE(builtin_l4proto4));
	if (ret < 0)
		goto err_pernet;

	ret = nf_ct_l3proto_register(&nf_conntrack_l3proto_ipv4);
	if (ret < 0) {
		pr_err("nf_conntrack_ipv4: can't register ipv4 proto.\n");
		goto err_l4proto;
	}
	return 0;

err_l4proto:
	nf_ct_l4proto_unregister(builtin_l4proto4,
				 ARRAY_SIZE(builtin_l4proto4));
err_pernet:
	unregister_pernet_subsys(&ipv4_net_ops);
err_sockopt:
	nf_unregister_sockopt(&so_getorigdst);
	return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 96 | 66.67% | 3 | 37.50% |
Yasuyuki Kozakai | 35 | 24.31% | 1 | 12.50% |
Davide Caratti | 10 | 6.94% | 1 | 12.50% |
Bastian Stender | 1 | 0.69% | 1 | 12.50% |
Stephen Hemminger | 1 | 0.69% | 1 | 12.50% |
Florian Westphal | 1 | 0.69% | 1 | 12.50% |
Total | 144 | 100.00% | 8 | 100.00% |
/* Module exit: tear everything down in the reverse order of init. */
static void __exit nf_conntrack_l3proto_ipv4_fini(void)
{
	/* Let in-flight packets drain before unregistering. */
	synchronize_net();
	nf_ct_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
	nf_ct_l4proto_unregister(builtin_l4proto4,
				 ARRAY_SIZE(builtin_l4proto4));
	unregister_pernet_subsys(&ipv4_net_ops);
	nf_unregister_sockopt(&so_getorigdst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 18 | 45.00% | 1 | 16.67% |
Gao Feng | 14 | 35.00% | 3 | 50.00% |
Davide Caratti | 4 | 10.00% | 1 | 16.67% |
Yasuyuki Kozakai | 4 | 10.00% | 1 | 16.67% |
Total | 40 | 100.00% | 6 | 100.00% |
/* Module entry/exit points. */
module_init(nf_conntrack_l3proto_ipv4_init);
module_exit(nf_conntrack_l3proto_ipv4_fini);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Yasuyuki Kozakai | 1002 | 45.82% | 2 | 2.53% |
Pablo Neira Ayuso | 274 | 12.53% | 4 | 5.06% |
Florian Westphal | 229 | 10.47% | 3 | 3.80% |
Patrick McHardy | 190 | 8.69% | 24 | 30.38% |
Gao Feng | 177 | 8.09% | 5 | 6.33% |
Davide Caratti | 72 | 3.29% | 4 | 5.06% |
Jozsef Kadlecsik | 36 | 1.65% | 2 | 2.53% |
Jan Engelhardt | 23 | 1.05% | 3 | 3.80% |
Holger Eitzenberger | 23 | 1.05% | 1 | 1.27% |
David S. Miller | 19 | 0.87% | 2 | 2.53% |
Harald Welte | 18 | 0.82% | 1 | 1.27% |
Marcel Holtmann | 16 | 0.73% | 1 | 1.27% |
Eric W. Biedermann | 14 | 0.64% | 3 | 3.80% |
Rafael Laufer | 13 | 0.59% | 1 | 1.27% |
Herbert Xu | 11 | 0.50% | 1 | 1.27% |
Arnaldo Carvalho de Melo | 9 | 0.41% | 2 | 2.53% |
Harvey Harrison | 8 | 0.37% | 1 | 1.27% |
Alexey Dobriyan | 8 | 0.37% | 3 | 3.80% |
Philip Craig | 8 | 0.37% | 1 | 1.27% |
Julian Anastasov | 7 | 0.32% | 1 | 1.27% |
Neil Horman | 5 | 0.23% | 1 | 1.27% |
Eric Dumazet | 5 | 0.23% | 2 | 2.53% |
Jiri Benc | 4 | 0.18% | 2 | 2.53% |
KOVACS Krisztian | 3 | 0.14% | 1 | 1.27% |
Joe Perches | 2 | 0.09% | 1 | 1.27% |
Duan Jiong | 2 | 0.09% | 1 | 1.27% |
Stephen Hemminger | 2 | 0.09% | 1 | 1.27% |
Daniel Borkmann | 2 | 0.09% | 1 | 1.27% |
Andrew Morton | 2 | 0.09% | 1 | 1.27% |
Bastian Stender | 1 | 0.05% | 1 | 1.27% |
Martin Josefsson | 1 | 0.05% | 1 | 1.27% |
Aaron Conole | 1 | 0.05% | 1 | 1.27% |
Total | 2187 | 100.00% | 79 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.