Release 4.11 net/netfilter/nf_conntrack_proto.c
/* L3/L4 protocol support for nf_conntrack. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
* (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
* (C) 2006-2012 Patrick McHardy <kaber@trash.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_core.h>
static struct nf_conntrack_l4proto __rcu **nf_ct_protos[PF_MAX] __read_mostly;
struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[AF_MAX] __read_mostly;
EXPORT_SYMBOL_GPL(nf_ct_l3protos);
static DEFINE_MUTEX(nf_ct_proto_mutex);
#ifdef CONFIG_SYSCTL
static int
nf_ct_register_sysctl(struct net *net,
struct ctl_table_header **header,
const char *path,
struct ctl_table *table)
{
if (*header == NULL) {
*header = register_net_sysctl(net, path, table);
if (*header == NULL)
return -ENOMEM;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 52 | 83.87% | 1 | 25.00% |
Gao Feng | 6 | 9.68% | 1 | 25.00% |
Eric W. Biederman | 4 | 6.45% | 2 | 50.00% |
Total | 62 | 100.00% | 4 | 100.00% |
static void
nf_ct_unregister_sysctl(struct ctl_table_header **header,
struct ctl_table **table,
unsigned int users)
{
if (users > 0)
return;
unregister_net_sysctl_table(*header);
kfree(*table);
*header = NULL;
*table = NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 38 | 74.51% | 1 | 33.33% |
Gao Feng | 12 | 23.53% | 1 | 33.33% |
Eric W. Biederman | 1 | 1.96% | 1 | 33.33% |
Total | 51 | 100.00% | 3 | 100.00% |
#endif
struct nf_conntrack_l4proto *
__nf_ct_l4proto_find(u_int16_t l3proto, u_int8_t l4proto)
{
if (unlikely(l3proto >= AF_MAX || nf_ct_protos[l3proto] == NULL))
return &nf_conntrack_l4proto_generic;
return rcu_dereference(nf_ct_protos[l3proto][l4proto]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 42 | 93.33% | 2 | 66.67% |
Patrick McHardy | 3 | 6.67% | 1 | 33.33% |
Total | 45 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(__nf_ct_l4proto_find);
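For orientation, a hedged usage sketch (not part of this file) of how a caller resolves an L4 handler under RCU; it mirrors nf_ct_l4proto_find_get() further below, and the function name and pr_debug() message are purely illustrative. Standard includes (e.g. for IPPROTO_TCP) are assumed.
/* Illustrative only: look up the TCP handler for IPv4 under RCU.
 * __nf_ct_l4proto_find() never returns NULL; unknown protocols map
 * to nf_conntrack_l4proto_generic.
 */
static void example_l4proto_lookup(void)
{
	struct nf_conntrack_l4proto *l4proto;

	rcu_read_lock();
	l4proto = __nf_ct_l4proto_find(PF_INET, IPPROTO_TCP);
	pr_debug("resolved protocol %d\n", l4proto->l4proto);
	rcu_read_unlock();
}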
/* This is guaranteed to return a valid protocol helper, since
 * it falls back to the generic L3 protocol. */
struct nf_conntrack_l3proto *
nf_ct_l3proto_find_get(u_int16_t l3proto)
{
struct nf_conntrack_l3proto *p;
rcu_read_lock();
p = __nf_ct_l3proto_find(l3proto);
if (!try_module_get(p->me))
p = &nf_conntrack_l3proto_generic;
rcu_read_unlock();
return p;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 44 | 95.65% | 2 | 66.67% |
Patrick McHardy | 2 | 4.35% | 1 | 33.33% |
Total | 46 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_find_get);
int
nf_ct_l3proto_try_module_get(unsigned short l3proto)
{
int ret;
struct nf_conntrack_l3proto *p;
retry: p = nf_ct_l3proto_find_get(l3proto);
if (p == &nf_conntrack_l3proto_generic) {
ret = request_module("nf_conntrack-%d", l3proto);
if (!ret)
goto retry;
return -EPROTOTYPE;
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 59 | 100.00% | 2 | 100.00% |
Total | 59 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_try_module_get);
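The "nf_conntrack-%d" format string used above implies a module-alias convention on the provider side. As a hedged illustration (mirroring the alias the IPv4 tracker declares in its own source file; shown here only for context):
#include <linux/module.h>
#include <linux/stringify.h>
#include <linux/socket.h>	/* AF_INET */

/* Lets request_module("nf_conntrack-%d", AF_INET) autoload the module
 * that provides the AF_INET L3 tracker. Illustrative only; the real
 * declaration lives in the IPv4 tracker, not in this file.
 */
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));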
void nf_ct_l3proto_module_put(unsigned short l3proto)
{
struct nf_conntrack_l3proto *p;
/* rcu_read_lock not necessary since the caller holds a reference, but
 * taken anyway to avoid lockdep warnings in __nf_ct_l3proto_find()
 */
rcu_read_lock();
p = __nf_ct_l3proto_find(l3proto);
module_put(p->me);
rcu_read_unlock();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 28 | 80.00% | 1 | 50.00% |
Patrick McHardy | 7 | 20.00% | 1 | 50.00% |
Total | 35 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_module_put);
int nf_ct_netns_get(struct net *net, u8 nfproto)
{
const struct nf_conntrack_l3proto *l3proto;
int ret;
might_sleep();
ret = nf_ct_l3proto_try_module_get(nfproto);
if (ret < 0)
return ret;
/* we already have a reference, can't fail */
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(nfproto);
rcu_read_unlock();
if (!l3proto->net_ns_get)
return 0;
ret = l3proto->net_ns_get(net);
if (ret < 0)
nf_ct_l3proto_module_put(nfproto);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 88 | 100.00% | 2 | 100.00% |
Total | 88 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_netns_get);
void nf_ct_netns_put(struct net *net, u8 nfproto)
{
const struct nf_conntrack_l3proto *l3proto;
might_sleep();
/* same as nf_ct_netns_get(), reference assumed */
rcu_read_lock();
l3proto = __nf_ct_l3proto_find(nfproto);
rcu_read_unlock();
if (WARN_ON(!l3proto))
return;
if (l3proto->net_ns_put)
l3proto->net_ns_put(net);
nf_ct_l3proto_module_put(nfproto);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 63 | 100.00% | 2 | 100.00% |
Total | 63 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_netns_put);
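A hedged sketch of the intended get/put pairing from a consumer's point of view; the extension and its hooks are hypothetical, and error handling beyond the shown check is omitted.
/* Hypothetical consumer: pin conntrack for IPv4 while the extension is
 * in use in this netns, release it on teardown. nf_ct_netns_get() may
 * sleep, so call it from process context only.
 */
static int example_ext_enable(struct net *net)
{
	int err;

	err = nf_ct_netns_get(net, NFPROTO_IPV4);
	if (err < 0)
		return err;
	/* ... set up the extension ... */
	return 0;
}

static void example_ext_disable(struct net *net)
{
	/* ... tear down the extension ... */
	nf_ct_netns_put(net, NFPROTO_IPV4);
}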
struct nf_conntrack_l4proto *
nf_ct_l4proto_find_get(u_int16_t l3num, u_int8_t l4num)
{
struct nf_conntrack_l4proto *p;
rcu_read_lock();
p = __nf_ct_l4proto_find(l3num, l4num);
if (!try_module_get(p->me))
p = &nf_conntrack_l4proto_generic;
rcu_read_unlock();
return p;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 51 | 100.00% | 1 | 100.00% |
Total | 51 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_find_get);
void nf_ct_l4proto_put(struct nf_conntrack_l4proto *p)
{
module_put(p->me);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 17 | 100.00% | 1 | 100.00% |
Total | 17 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_put);
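A hedged example of pairing nf_ct_l4proto_find_get() with nf_ct_l4proto_put(); the function is illustrative and not part of this file.
/* Take a module reference on the IPv6 UDP handler (or the generic
 * fallback if the real module cannot be pinned), use it, drop it.
 */
static void example_l4proto_get_put(void)
{
	struct nf_conntrack_l4proto *l4proto;

	l4proto = nf_ct_l4proto_find_get(PF_INET6, IPPROTO_UDP);
	pr_debug("using tracker %s\n", l4proto->name);
	nf_ct_l4proto_put(l4proto);
}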
static int kill_l3proto(struct nf_conn *i, void *data)
{
return nf_ct_l3num(i) == ((struct nf_conntrack_l3proto *)data)->l3proto;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 29 | 90.62% | 1 | 50.00% |
Patrick McHardy | 3 | 9.38% | 1 | 50.00% |
Total | 32 | 100.00% | 2 | 100.00% |
static int kill_l4proto(struct nf_conn *i, void *data)
{
struct nf_conntrack_l4proto *l4proto;
l4proto = (struct nf_conntrack_l4proto *)data;
return nf_ct_protonum(i) == l4proto->l4proto &&
nf_ct_l3num(i) == l4proto->l3proto;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 42 | 87.50% | 2 | 66.67% |
Patrick McHardy | 6 | 12.50% | 1 | 33.33% |
Total | 48 | 100.00% | 3 | 100.00% |
int nf_ct_l3proto_register(struct nf_conntrack_l3proto *proto)
{
int ret = 0;
struct nf_conntrack_l3proto *old;
if (proto->l3proto >= AF_MAX)
return -EBUSY;
if (proto->tuple_to_nlattr && !proto->nlattr_tuple_size)
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
old = rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
lockdep_is_held(&nf_ct_proto_mutex));
if (old != &nf_conntrack_l3proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
if (proto->nlattr_tuple_size)
proto->nla_size = 3 * proto->nlattr_tuple_size();
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto], proto);
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 57 | 44.53% | 3 | 33.33% |
Holger Eitzenberger | 32 | 25.00% | 1 | 11.11% |
Eric Dumazet | 20 | 15.62% | 1 | 11.11% |
Patrick McHardy | 18 | 14.06% | 3 | 33.33% |
Gao Feng | 1 | 0.78% | 1 | 11.11% |
Total | 128 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_register);
#ifdef CONFIG_SYSCTL
extern unsigned int nf_conntrack_default_on;
int nf_ct_l3proto_pernet_register(struct net *net,
struct nf_conntrack_l3proto *proto)
{
if (nf_conntrack_default_on == 0)
return 0;
return proto->net_ns_get ? proto->net_ns_get(net) : 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 20 | 52.63% | 1 | 25.00% |
Gao Feng | 17 | 44.74% | 2 | 50.00% |
Pablo Neira Ayuso | 1 | 2.63% | 1 | 25.00% |
Total | 38 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_register);
#endif
void nf_ct_l3proto_unregister(struct nf_conntrack_l3proto *proto)
{
BUG_ON(proto->l3proto >= AF_MAX);
mutex_lock(&nf_ct_proto_mutex);
BUG_ON(rcu_dereference_protected(nf_ct_l3protos[proto->l3proto],
lockdep_is_held(&nf_ct_proto_mutex)
) != proto);
rcu_assign_pointer(nf_ct_l3protos[proto->l3proto],
&nf_conntrack_l3proto_generic);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 40 | 58.82% | 3 | 33.33% |
Patrick McHardy | 17 | 25.00% | 3 | 33.33% |
Eric Dumazet | 9 | 13.24% | 1 | 11.11% |
Gao Feng | 2 | 2.94% | 2 | 22.22% |
Total | 68 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_unregister);
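To make the registration contract concrete, a minimal hypothetical L3 tracker sketch pairing nf_ct_l3proto_register() with nf_ct_l3proto_unregister(); only fields this file inspects are filled in, and a real tracker must also provide the tuple/packet ops declared in nf_conntrack_l3proto.h.
static struct nf_conntrack_l3proto example_l3proto __read_mostly = {
	.l3proto = AF_PHONET,	/* hypothetical; must be below AF_MAX and unclaimed */
	.me	 = THIS_MODULE,
	/* .pkt_to_tuple, .invert_tuple, .get_l4proto, ... omitted */
};

static int __init example_l3_init(void)
{
	return nf_ct_l3proto_register(&example_l3proto);
}

static void __exit example_l3_exit(void)
{
	nf_ct_l3proto_unregister(&example_l3proto);
}

module_init(example_l3_init);
module_exit(example_l3_exit);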
void nf_ct_l3proto_pernet_unregister(struct net *net,
struct nf_conntrack_l3proto *proto)
{
/*
 * nf_conntrack_default_on *might* have registered hooks.
 * ->net_ns_put must cope with more puts() than gets(); i.e.,
 * if nf_conntrack_default_on was 0 at the time of the
 * nf_ct_l3proto_pernet_register() invocation, this net_ns_put()
 * should be a no-op.
 */
if (proto->net_ns_put)
proto->net_ns_put(net);
/* Remove all conntrack entries for this protocol */
nf_ct_iterate_cleanup(net, kill_l3proto, proto, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Florian Westphal | 18 | 41.86% | 2 | 28.57% |
Gao Feng | 14 | 32.56% | 2 | 28.57% |
Martin Josefsson | 9 | 20.93% | 1 | 14.29% |
Alexey Dobriyan | 2 | 4.65% | 2 | 28.57% |
Total | 43 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l3proto_pernet_unregister);
static struct nf_proto_net *nf_ct_l4proto_net(struct net *net,
struct nf_conntrack_l4proto *l4proto)
{
if (l4proto->get_net_proto) {
/* statically built-in protocols use static per-net */
return l4proto->get_net_proto(net);
} else if (l4proto->net_id) {
/* ... and loadable protocols use dynamic per-net */
return net_generic(net, *l4proto->net_id);
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 48 | 81.36% | 4 | 80.00% |
Pablo Neira Ayuso | 11 | 18.64% | 1 | 20.00% |
Total | 59 | 100.00% | 5 | 100.00% |
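As a hedged illustration of the dynamic branch above, a hypothetical loadable tracker keeps its per-net state behind a net_id and net_generic(); the names below are invented, the id would additionally need to be registered via register_pernet_subsys() with a matching .size, and the usual <net/netns/generic.h> include is assumed.
/* Hypothetical per-net state for a loadable tracker. struct nf_proto_net
 * is embedded first so the pointer returned by nf_ct_l4proto_net() can
 * be used by the sysctl helpers below.
 */
static unsigned int example_proto_net_id __read_mostly;

struct example_proto_net {
	struct nf_proto_net pn;
	unsigned int timeout;
};

static int example_init_net(struct net *net, u_int16_t proto)
{
	struct example_proto_net *epn = net_generic(net, example_proto_net_id);

	epn->timeout = 30 * HZ;	/* arbitrary illustrative default */
	return 0;
}

/* Wired into the tracker definition as:
 *	.net_id	  = &example_proto_net_id,
 *	.init_net = example_init_net,
 * so nf_ct_l4proto_net() takes the net_generic() branch.
 */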
static
int nf_ct_l4proto_register_sysctl(struct net *net,
struct nf_proto_net *pn,
struct nf_conntrack_l4proto *l4proto)
{
int err = 0;
#ifdef CONFIG_SYSCTL
if (pn->ctl_table != NULL) {
err = nf_ct_register_sysctl(net,
&pn->ctl_table_header,
"net/netfilter",
pn->ctl_table);
if (err < 0) {
if (!pn->users) {
kfree(pn->ctl_table);
pn->ctl_table = NULL;
}
}
}
#endif /* CONFIG_SYSCTL */
return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 52 | 55.91% | 3 | 50.00% |
Gao Feng | 40 | 43.01% | 2 | 33.33% |
Eric W. Biederman | 1 | 1.08% | 1 | 16.67% |
Total | 93 | 100.00% | 6 | 100.00% |
static
void nf_ct_l4proto_unregister_sysctl(struct net *net,
struct nf_proto_net *pn,
struct nf_conntrack_l4proto *l4proto)
{
#ifdef CONFIG_SYSCTL
if (pn->ctl_table_header != NULL)
nf_ct_unregister_sysctl(&pn->ctl_table_header,
&pn->ctl_table,
pn->users);
#endif /* CONFIG_SYSCTL */
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 35 | 67.31% | 2 | 50.00% |
Gao Feng | 17 | 32.69% | 2 | 50.00% |
Total | 52 | 100.00% | 4 | 100.00% |
/* FIXME: Allow NULL functions and sub in pointers to generic for
them. --RR */
int nf_ct_l4proto_register_one(struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
if (l4proto->l3proto >= PF_MAX)
return -EBUSY;
if ((l4proto->to_nlattr && !l4proto->nlattr_size) ||
(l4proto->tuple_to_nlattr && !l4proto->nlattr_tuple_size))
return -EINVAL;
mutex_lock(&nf_ct_proto_mutex);
if (!nf_ct_protos[l4proto->l3proto]) {
/* l3proto may be loaded later. */
struct nf_conntrack_l4proto __rcu **proto_array;
int i;
proto_array = kmalloc(MAX_NF_CT_PROTO *
sizeof(struct nf_conntrack_l4proto *),
GFP_KERNEL);
if (proto_array == NULL) {
ret = -ENOMEM;
goto out_unlock;
}
for (i = 0; i < MAX_NF_CT_PROTO; i++)
RCU_INIT_POINTER(proto_array[i],
&nf_conntrack_l4proto_generic);
/* Before making proto_array visible to lockless readers,
* we must make sure its content is committed to memory.
*/
smp_wmb();
nf_ct_protos[l4proto->l3proto] = proto_array;
} else if (rcu_dereference_protected(
nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
lockdep_is_held(&nf_ct_proto_mutex)
) != &nf_conntrack_l4proto_generic) {
ret = -EBUSY;
goto out_unlock;
}
l4proto->nla_size = 0;
if (l4proto->nlattr_size)
l4proto->nla_size += l4proto->nlattr_size();
if (l4proto->nlattr_tuple_size)
l4proto->nla_size += 3 * l4proto->nlattr_tuple_size();
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
l4proto);
out_unlock:
mutex_unlock(&nf_ct_proto_mutex);
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 127 | 49.61% | 3 | 25.00% |
Holger Eitzenberger | 66 | 25.78% | 1 | 8.33% |
Patrick McHardy | 44 | 17.19% | 4 | 33.33% |
Eric Dumazet | 18 | 7.03% | 3 | 25.00% |
Davide Caratti | 1 | 0.39% | 1 | 8.33% |
Total | 256 | 100.00% | 12 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_register_one);
int nf_ct_l4proto_pernet_register_one(struct net *net,
struct nf_conntrack_l4proto *l4proto)
{
int ret = 0;
struct nf_proto_net *pn = NULL;
if (l4proto->init_net) {
ret = l4proto->init_net(net, l4proto->l3proto);
if (ret < 0)
goto out;
}
pn = nf_ct_l4proto_net(net, l4proto);
if (pn == NULL)
goto out;
ret = nf_ct_l4proto_register_sysctl(net, pn, l4proto);
if (ret < 0)
goto out;
pn->users++;
out:
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 104 | 99.05% | 4 | 80.00% |
Davide Caratti | 1 | 0.95% | 1 | 20.00% |
Total | 105 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register_one);
void nf_ct_l4proto_unregister_one(struct nf_conntrack_l4proto *l4proto)
{
BUG_ON(l4proto->l3proto >= PF_MAX);
mutex_lock(&nf_ct_proto_mutex);
BUG_ON(rcu_dereference_protected(
nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
lockdep_is_held(&nf_ct_proto_mutex)
) != l4proto);
rcu_assign_pointer(nf_ct_protos[l4proto->l3proto][l4proto->l4proto],
&nf_conntrack_l4proto_generic);
mutex_unlock(&nf_ct_proto_mutex);
synchronize_rcu();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 50 | 64.10% | 3 | 33.33% |
Patrick McHardy | 17 | 21.79% | 3 | 33.33% |
Eric Dumazet | 9 | 11.54% | 1 | 11.11% |
Davide Caratti | 1 | 1.28% | 1 | 11.11% |
Gao Feng | 1 | 1.28% | 1 | 11.11% |
Total | 78 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister_one);
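A hedged single-protocol registration sketch pairing nf_ct_l4proto_register_one() with nf_ct_l4proto_unregister_one(); the tracker is hypothetical, and the per-net side (nf_ct_l4proto_pernet_register_one(), typically driven from pernet_operations) is omitted for brevity.
static struct nf_conntrack_l4proto example_l4proto4 __read_mostly = {
	.l3proto = PF_INET,
	.l4proto = 253,		/* IANA experimental protocol number */
	.name	 = "example",
	.me	 = THIS_MODULE,
	/* .pkt_to_tuple, .packet, .new, .init_net, ... omitted */
};

static int __init example_l4_init(void)
{
	return nf_ct_l4proto_register_one(&example_l4proto4);
}

static void __exit example_l4_exit(void)
{
	nf_ct_l4proto_unregister_one(&example_l4proto4);
}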
void nf_ct_l4proto_pernet_unregister_one(struct net *net,
struct nf_conntrack_l4proto *l4proto)
{
struct nf_proto_net *pn = NULL;
pn = nf_ct_l4proto_net(net, l4proto);
if (pn == NULL)
return;
pn->users--;
nf_ct_l4proto_unregister_sysctl(net, pn, l4proto);
/* Remove all conntrack entries for this protocol */
nf_ct_iterate_cleanup(net, kill_l4proto, l4proto, 0, 0);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 50 | 75.76% | 2 | 25.00% |
Martin Josefsson | 9 | 13.64% | 2 | 25.00% |
Florian Westphal | 4 | 6.06% | 1 | 12.50% |
Alexey Dobriyan | 2 | 3.03% | 2 | 25.00% |
Davide Caratti | 1 | 1.52% | 1 | 12.50% |
Total | 66 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister_one);
int nf_ct_l4proto_register(struct nf_conntrack_l4proto *l4proto[],
unsigned int num_proto)
{
int ret = -EINVAL, ver;
unsigned int i;
for (i = 0; i < num_proto; i++) {
ret = nf_ct_l4proto_register_one(l4proto[i]);
if (ret < 0)
break;
}
if (i != num_proto) {
ver = l4proto[i]->l3proto == PF_INET6 ? 6 : 4;
pr_err("nf_conntrack_ipv%d: can't register %s%d proto.\n",
ver, l4proto[i]->name, ver);
nf_ct_l4proto_unregister(l4proto, i);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davide Caratti | 108 | 100.00% | 1 | 100.00% |
Total | 108 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_register);
int nf_ct_l4proto_pernet_register(struct net *net,
struct nf_conntrack_l4proto *l4proto[],
unsigned int num_proto)
{
int ret = -EINVAL;
unsigned int i;
for (i = 0; i < num_proto; i++) {
ret = nf_ct_l4proto_pernet_register_one(net, l4proto[i]);
if (ret < 0)
break;
}
if (i != num_proto) {
pr_err("nf_conntrack_%s%d: pernet registration failed\n",
l4proto[i]->name,
l4proto[i]->l3proto == PF_INET6 ? 6 : 4);
nf_ct_l4proto_pernet_unregister(net, l4proto, i);
}
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davide Caratti | 109 | 100.00% | 1 | 100.00% |
Total | 109 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_register);
void nf_ct_l4proto_unregister(struct nf_conntrack_l4proto *l4proto[],
unsigned int num_proto)
{
while (num_proto-- != 0)
nf_ct_l4proto_unregister_one(l4proto[num_proto]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davide Caratti | 30 | 100.00% | 1 | 100.00% |
Total | 30 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_unregister);
void nf_ct_l4proto_pernet_unregister(struct net *net,
struct nf_conntrack_l4proto *l4proto[],
unsigned int num_proto)
{
while (num_proto-- != 0)
nf_ct_l4proto_pernet_unregister_one(net, l4proto[num_proto]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Davide Caratti | 37 | 100.00% | 1 | 100.00% |
Total | 37 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL_GPL(nf_ct_l4proto_pernet_unregister);
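Finally, a hedged sketch of batch use in the style of the IPv4 tracker: global registration once at module load plus per-net registration from a pernet init hook. Error unwinding and the pernet_operations wiring are abbreviated, and the array/function names are illustrative.
/* Built-in IPv4 L4 trackers registered as a batch; a failure inside
 * the loops above unwinds the already-registered entries.
 */
static struct nf_conntrack_l4proto *example_builtin_l4protos[] = {
	&nf_conntrack_l4proto_tcp4,
	&nf_conntrack_l4proto_udp4,
	&nf_conntrack_l4proto_icmp,
};

static int example_net_init(struct net *net)
{
	return nf_ct_l4proto_pernet_register(net, example_builtin_l4protos,
					     ARRAY_SIZE(example_builtin_l4protos));
}

static void example_net_exit(struct net *net)
{
	nf_ct_l4proto_pernet_unregister(net, example_builtin_l4protos,
					ARRAY_SIZE(example_builtin_l4protos));
}

static int __init example_batch_init(void)
{
	return nf_ct_l4proto_register(example_builtin_l4protos,
				      ARRAY_SIZE(example_builtin_l4protos));
}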
int nf_conntrack_proto_pernet_init(struct net *net)
{
int err;
struct nf_proto_net *pn = nf_ct_l4proto_net(net,
&nf_conntrack_l4proto_generic);
err = nf_conntrack_l4proto_generic.init_net(net,
nf_conntrack_l4proto_generic.l3proto);
if (err < 0)
return err;
err = nf_ct_l4proto_register_sysctl(net,
pn,
&nf_conntrack_l4proto_generic);
if (err < 0)
return err;
pn->users++;
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 50 | 64.94% | 5 | 83.33% |
Patrick McHardy | 27 | 35.06% | 1 | 16.67% |
Total | 77 | 100.00% | 6 | 100.00% |
void nf_conntrack_proto_pernet_fini(struct net *net)
{
struct nf_proto_net *pn = nf_ct_l4proto_net(net,
&nf_conntrack_l4proto_generic);
pn->users--;
nf_ct_l4proto_unregister_sysctl(net,
pn,
&nf_conntrack_l4proto_generic);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 29 | 76.32% | 4 | 80.00% |
Patrick McHardy | 9 | 23.68% | 1 | 20.00% |
Total | 38 | 100.00% | 5 | 100.00% |
int nf_conntrack_proto_init(void)
{
unsigned int i;
for (i = 0; i < AF_MAX; i++)
rcu_assign_pointer(nf_ct_l3protos[i],
&nf_conntrack_l3proto_generic);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Gao Feng | 38 | 100.00% | 2 | 100.00% |
Total | 38 | 100.00% | 2 | 100.00% |
void nf_conntrack_proto_fini(void)
{
unsigned int i;
/* free l3proto protocol tables */
for (i = 0; i < PF_MAX; i++)
kfree(nf_ct_protos[i]);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 23 | 69.70% | 1 | 33.33% |
Gao Feng | 10 | 30.30% | 2 | 66.67% |
Total | 33 | 100.00% | 3 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Martin Josefsson | 600 | 26.63% | 3 | 6.52% |
Gao Feng | 460 | 20.42% | 11 | 23.91% |
Patrick McHardy | 409 | 18.15% | 13 | 28.26% |
Davide Caratti | 311 | 13.80% | 1 | 2.17% |
Florian Westphal | 213 | 9.45% | 4 | 8.70% |
Holger Eitzenberger | 98 | 4.35% | 1 | 2.17% |
Pablo Neira Ayuso | 90 | 3.99% | 3 | 6.52% |
Eric Dumazet | 56 | 2.49% | 3 | 6.52% |
Eric W. Biederman | 6 | 0.27% | 2 | 4.35% |
Alexey Dobriyan | 4 | 0.18% | 2 | 4.35% |
Tejun Heo | 3 | 0.13% | 1 | 2.17% |
Arnd Bergmann | 2 | 0.09% | 1 | 2.17% |
Adrian Bunk | 1 | 0.04% | 1 | 2.17% |
Total | 2253 | 100.00% | 46 | 100.00% |