Release 4.11 net/netfilter/nfnetlink.c
/* Netfilter messages via netlink socket. Allows for user space
 * protocol helpers and general trouble making from userspace.
 *
 * (C) 2001 by Jay Schulist <jschlst@samba.org>,
 * (C) 2002-2005 by Harald Welte <laforge@gnumonks.org>
 * (C) 2005-2017 by Pablo Neira Ayuso <pablo@netfilter.org>
 *
 * Initial netfilter messages via netlink development funded and
 * generally made possible by Network Robots, Inc. (www.networkrobots.com)
 *
 * Further development of this code funded by Astaro AG (http://www.astaro.com)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/uaccess.h>
#include <net/sock.h>
#include <linux/init.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NETFILTER);
#define nfnl_dereference_protected(id) \
	rcu_dereference_protected(table[(id)].subsys, \
				  lockdep_nfnl_is_held((id)))

static char __initdata nfversion[] = "0.30";

static struct {
	struct mutex mutex;
	const struct nfnetlink_subsystem __rcu *subsys;
} table[NFNL_SUBSYS_COUNT];
static const int nfnl_group2type[NFNLGRP_MAX+1] = {
	[NFNLGRP_CONNTRACK_NEW]		= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_UPDATE]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_DESTROY]	= NFNL_SUBSYS_CTNETLINK,
	[NFNLGRP_CONNTRACK_EXP_NEW]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_UPDATE]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_CONNTRACK_EXP_DESTROY]	= NFNL_SUBSYS_CTNETLINK_EXP,
	[NFNLGRP_NFTABLES]		= NFNL_SUBSYS_NFTABLES,
	[NFNLGRP_ACCT_QUOTA]		= NFNL_SUBSYS_ACCT,
	[NFNLGRP_NFTRACE]		= NFNL_SUBSYS_NFTABLES,
};
void nfnl_lock(__u8 subsys_id)
{
	mutex_lock(&table[subsys_id].mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 8 | 42.11% | 1 | 33.33% |
Patrick McHardy | 6 | 31.58% | 1 | 33.33% |
Harald Welte | 5 | 26.32% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(nfnl_lock);
void nfnl_unlock(__u8 subsys_id)
{
	mutex_unlock(&table[subsys_id].mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 8 | 42.11% | 1 | 33.33% |
Harald Welte | 6 | 31.58% | 1 | 33.33% |
Patrick McHardy | 5 | 26.32% | 1 | 33.33% |
Total | 19 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(nfnl_unlock);
#ifdef CONFIG_PROVE_LOCKING
bool lockdep_nfnl_is_held(u8 subsys_id)
{
	return lockdep_is_held(&table[subsys_id].mutex);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 19 | 95.00% | 1 | 50.00% |
Yaowei Bai | 1 | 5.00% | 1 | 50.00% |
Total | 20 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(lockdep_nfnl_is_held);
#endif
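Writers serialize on the per-subsystem mutex while readers use RCU, and lockdep_nfnl_is_held() lets update-side code use the checked dereference helpers. Below is a minimal sketch of that pattern, not taken from this file: struct demo_state, demo_update() and the reuse of the nf_tables subsystem id are invented for illustration.

/* Illustrative sketch, not part of nfnetlink.c: updating an
 * RCU-protected pointer under the owning subsystem's nfnl mutex.
 * struct demo_state and demo_update() are hypothetical.
 */
#include <linux/netfilter/nfnetlink.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_state {
	int value;
	struct rcu_head rcu;
};

static struct demo_state __rcu *demo_state;

static void demo_update(struct demo_state *new_state)
{
	struct demo_state *old;

	nfnl_lock(NFNL_SUBSYS_NFTABLES);
	/* lockdep can verify the subsystem mutex is really held here */
	old = rcu_dereference_protected(demo_state,
			lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES));
	rcu_assign_pointer(demo_state, new_state);
	nfnl_unlock(NFNL_SUBSYS_NFTABLES);
	if (old)
		kfree_rcu(old, rcu);
}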
int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	if (table[n->subsys_id].subsys) {
		nfnl_unlock(n->subsys_id);
		return -EBUSY;
	}
	rcu_assign_pointer(table[n->subsys_id].subsys, n);
	nfnl_unlock(n->subsys_id);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 40 | 60.61% | 2 | 33.33% |
Pablo Neira Ayuso | 21 | 31.82% | 1 | 16.67% |
Eric Dumazet | 4 | 6.06% | 2 | 33.33% |
Patrick McHardy | 1 | 1.52% | 1 | 16.67% |
Total | 66 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_subsys_register);
int nfnetlink_subsys_unregister(const struct nfnetlink_subsystem *n)
{
	nfnl_lock(n->subsys_id);
	table[n->subsys_id].subsys = NULL;
	nfnl_unlock(n->subsys_id);
	synchronize_rcu();
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 25 | 59.52% | 1 | 25.00% |
Pablo Neira Ayuso | 13 | 30.95% | 1 | 25.00% |
Eric Dumazet | 3 | 7.14% | 1 | 25.00% |
Patrick McHardy | 1 | 2.38% | 1 | 25.00% |
Total | 42 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_subsys_unregister);
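A subsystem plugs into this table by filling in a struct nfnetlink_subsystem whose cb[] array is indexed by the low byte of the message type. The sketch below only illustrates the shape of a registration: the "demo" name, its attribute, its callback and the placeholder subsystem id are invented, not a real user of this API.

/* Illustrative sketch, not part of nfnetlink.c: a minimal subsystem
 * registering one callback.  All "demo" identifiers are invented and
 * the subsystem id below is a placeholder, not an assigned one.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

enum demo_attrs {
	DEMO_ATTR_UNSPEC,
	DEMO_ATTR_VALUE,
	__DEMO_ATTR_MAX
};
#define DEMO_ATTR_MAX	(__DEMO_ATTR_MAX - 1)

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
};

static int demo_get(struct net *net, struct sock *nfnl, struct sk_buff *skb,
		    const struct nlmsghdr *nlh,
		    const struct nlattr * const cda[])
{
	/* cda[] was already validated against demo_policy by the dispatcher */
	if (cda[DEMO_ATTR_VALUE])
		pr_info("demo: value %u\n", nla_get_u32(cda[DEMO_ATTR_VALUE]));
	return 0;
}

static const struct nfnl_callback demo_cb[] = {
	[0] = {
		.call		= demo_get,
		.attr_count	= DEMO_ATTR_MAX,
		.policy		= demo_policy,
	},
};

static const struct nfnetlink_subsystem demo_subsys = {
	.name		= "demo",
	.subsys_id	= NFNL_SUBSYS_NONE,	/* placeholder id for the example */
	.cb_count	= ARRAY_SIZE(demo_cb),
	.cb		= demo_cb,
};

static int __init demo_init(void)
{
	return nfnetlink_subsys_register(&demo_subsys);
}

static void __exit demo_exit(void)
{
	nfnetlink_subsys_unregister(&demo_subsys);
}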
static inline const struct nfnetlink_subsystem *nfnetlink_get_subsys(u16 type)
{
	u8 subsys_id = NFNL_SUBSYS_ID(type);

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return NULL;

	return rcu_dereference(table[subsys_id].subsys);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 32 | 78.05% | 1 | 20.00% |
Pablo Neira Ayuso | 5 | 12.20% | 2 | 40.00% |
Eric Dumazet | 3 | 7.32% | 1 | 20.00% |
Patrick McHardy | 1 | 2.44% | 1 | 20.00% |
Total | 41 | 100.00% | 5 | 100.00% |
static inline const struct nfnl_callback *
nfnetlink_find_client(u16 type, const struct nfnetlink_subsystem *ss)
{
	u8 cb_id = NFNL_MSG_TYPE(type);

	if (cb_id >= ss->cb_count)
		return NULL;

	return &ss->cb[cb_id];
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 43 | 91.49% | 1 | 33.33% |
Patrick McHardy | 2 | 4.26% | 1 | 33.33% |
Pablo Neira Ayuso | 2 | 4.26% | 1 | 33.33% |
Total | 47 | 100.00% | 3 | 100.00% |
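The dispatch key is the 16-bit netlink message type: the upper byte selects the subsystem slot in table[] and the lower byte selects the entry in that subsystem's cb[] array. A small sketch of the encoding; the choice of the nf_tables subsystem is only an example.

/* Illustrative sketch, not part of nfnetlink.c: composing and taking
 * apart a full nlmsg_type.
 */
#include <linux/netfilter/nfnetlink.h>

static u16 demo_compose_type(u8 cb_id)
{
	u16 type = (NFNL_SUBSYS_NFTABLES << 8) | cb_id;

	/* NFNL_SUBSYS_ID(type) recovers NFNL_SUBSYS_NFTABLES (upper byte);
	 * NFNL_MSG_TYPE(type) recovers cb_id (lower byte), which indexes
	 * the subsystem's cb[] array in nfnetlink_find_client().
	 */
	return type;
}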
int nfnetlink_has_listeners(struct net *net, unsigned int group)
{
	return netlink_has_listeners(net->nfnl, group);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 17 | 70.83% | 1 | 50.00% |
Alexey Dobriyan | 7 | 29.17% | 1 | 50.00% |
Total | 24 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_has_listeners);
int nfnetlink_send(struct sk_buff *skb, struct net *net, u32 portid,
		   unsigned int group, int echo, gfp_t flags)
{
	return nlmsg_notify(net->nfnl, skb, portid, group, echo, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 30 | 65.22% | 1 | 16.67% |
Alexey Dobriyan | 7 | 15.22% | 1 | 16.67% |
Pablo Neira Ayuso | 4 | 8.70% | 1 | 16.67% |
Patrick McHardy | 4 | 8.70% | 2 | 33.33% |
Eric Dumazet | 1 | 2.17% | 1 | 16.67% |
Total | 46 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_send);
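nfnetlink_send() is the usual tail end of an event path: a subsystem allocates an skb, writes an nlmsghdr plus struct nfgenmsg, appends attributes and multicasts the result. The sketch below shows that shape only; demo_notify(), the DEMO_ATTR_VALUE attribute from the registration sketch above, and the reuse of the nf_tables subsystem/group are assumptions, not code from any real subsystem.

/* Illustrative sketch, not part of nfnetlink.c: emitting a multicast
 * nfnetlink event.  demo_notify() and DEMO_ATTR_VALUE are hypothetical.
 */
#include <net/netlink.h>
#include <linux/netfilter/nfnetlink.h>

static int demo_notify(struct net *net, u32 portid, u32 value, int report)
{
	struct sk_buff *skb;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	u16 type = NFNL_SUBSYS_NFTABLES << 8;	/* subsystem id in upper byte */

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	nlh = nlmsg_put(skb, portid, 0, type, sizeof(*nfmsg), 0);
	if (!nlh)
		goto nla_put_failure;

	/* every nfnetlink message starts with struct nfgenmsg */
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = AF_UNSPEC;
	nfmsg->version	    = NFNETLINK_V0;
	nfmsg->res_id	    = 0;

	if (nla_put_u32(skb, DEMO_ATTR_VALUE, value))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES,
			      report, GFP_KERNEL);

nla_put_failure:
	kfree_skb(skb);
	return -EMSGSIZE;
}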
int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error)
{
	return netlink_set_err(net->nfnl, portid, group, error);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 24 | 72.73% | 2 | 50.00% |
Alexey Dobriyan | 7 | 21.21% | 1 | 25.00% |
Patrick McHardy | 2 | 6.06% | 1 | 25.00% |
Total | 33 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_set_err);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
		      int flags)
{
	return netlink_unicast(net->nfnl, skb, portid, flags);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 25 | 71.43% | 1 | 33.33% |
Alexey Dobriyan | 7 | 20.00% | 1 | 33.33% |
Patrick McHardy | 3 | 8.57% | 1 | 33.33% |
Total | 35 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL_GPL(nfnetlink_unicast);
/* Process one complete nfnetlink message. */
static int nfnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(skb->sk);
	const struct nfnl_callback *nc;
	const struct nfnetlink_subsystem *ss;
	int type, err;

	/* All the messages must at least contain nfgenmsg */
	if (nlmsg_len(nlh) < sizeof(struct nfgenmsg))
		return 0;

	type = nlh->nlmsg_type;
replay:
	rcu_read_lock();
	ss = nfnetlink_get_subsys(type);
	if (!ss) {
#ifdef CONFIG_MODULES
		rcu_read_unlock();
		request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
		rcu_read_lock();
		ss = nfnetlink_get_subsys(type);
		if (!ss)
#endif
		{
			rcu_read_unlock();
			return -EINVAL;
		}
	}

	nc = nfnetlink_find_client(type, ss);
	if (!nc) {
		rcu_read_unlock();
		return -EINVAL;
	}

	{
		int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
		u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
		struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
		struct nlattr *attr = (void *)nlh + min_len;
		int attrlen = nlh->nlmsg_len - min_len;
		__u8 subsys_id = NFNL_SUBSYS_ID(type);

		err = nla_parse(cda, ss->cb[cb_id].attr_count,
				attr, attrlen, ss->cb[cb_id].policy);
		if (err < 0) {
			rcu_read_unlock();
			return err;
		}

		if (nc->call_rcu) {
			err = nc->call_rcu(net, net->nfnl, skb, nlh,
					   (const struct nlattr **)cda);
			rcu_read_unlock();
		} else {
			rcu_read_unlock();
			nfnl_lock(subsys_id);
			if (nfnl_dereference_protected(subsys_id) != ss ||
			    nfnetlink_find_client(type, ss) != nc)
				err = -EAGAIN;
			else if (nc->call)
				err = nc->call(net, net->nfnl, skb, nlh,
					       (const struct nlattr **)cda);
			else
				err = -EINVAL;
			nfnl_unlock(subsys_id);
		}
		if (err == -EAGAIN)
			goto replay;
		return err;
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Harald Welte | 150 | 38.07% | 3 | 14.29% |
Eric Dumazet | 81 | 20.56% | 1 | 4.76% |
Patrick McHardy | 66 | 16.75% | 5 | 23.81% |
Pablo Neira Ayuso | 49 | 12.44% | 5 | 23.81% |
Tomasz Bursztyka | 18 | 4.57% | 2 | 9.52% |
Alexey Dobriyan | 14 | 3.55% | 1 | 4.76% |
Thomas Graf | 10 | 2.54% | 1 | 4.76% |
Hong Zhi Guo | 4 | 1.02% | 1 | 4.76% |
Florian Westphal | 1 | 0.25% | 1 | 4.76% |
Johannes Berg | 1 | 0.25% | 1 | 4.76% |
Total | 394 | 100.00% | 21 | 100.00% |
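From userspace, a message accepted by the dispatcher above is a plain netlink request whose payload starts with struct nfgenmsg. Below is a sketch using libmnl, here framing a conntrack dump request; error handling is trimmed and the function name is invented.

/* Illustrative userspace sketch (libmnl), not part of this file:
 * framing a request that nfnetlink_rcv_msg() will accept.
 * demo_dump_conntrack() is a hypothetical name; error handling trimmed.
 */
#include <libmnl/libmnl.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

int demo_dump_conntrack(void)
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct mnl_socket *nl;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfg;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl || mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0)
		return -1;

	nlh = mnl_nlmsg_put_header(buf);
	/* subsystem id in the upper byte, message type in the lower byte */
	nlh->nlmsg_type	 = (NFNL_SUBSYS_CTNETLINK << 8) | IPCTNL_MSG_CT_GET;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_DUMP;

	/* every nfnetlink message carries a struct nfgenmsg */
	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_INET;
	nfg->version	  = NFNETLINK_V0;
	nfg->res_id	  = 0;

	if (mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0)
		return -1;
	/* ... receive with mnl_socket_recvfrom()/mnl_cb_run() ... */
	mnl_socket_close(nl);
	return 0;
}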
struct nfnl_err {
	struct list_head head;
	struct nlmsghdr *nlh;
	int err;
};
static int nfnl_err_add(struct list_head *list, struct nlmsghdr *nlh, int err)
{
	struct nfnl_err *nfnl_err;

	nfnl_err = kmalloc(sizeof(struct nfnl_err), GFP_KERNEL);
	if (nfnl_err == NULL)
		return -ENOMEM;

	nfnl_err->nlh = nlh;
	nfnl_err->err = err;
	list_add_tail(&nfnl_err->head, list);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 72 | 100.00% | 1 | 100.00% |
Total | 72 | 100.00% | 1 | 100.00% |
static void nfnl_err_del(struct nfnl_err *nfnl_err)
{
	list_del(&nfnl_err->head);
	kfree(nfnl_err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 24 | 100.00% | 1 | 100.00% |
Total | 24 | 100.00% | 1 | 100.00% |
static void nfnl_err_reset(struct list_head *err_list)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head)
		nfnl_err_del(nfnl_err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 34 | 100.00% | 1 | 100.00% |
Total | 34 | 100.00% | 1 | 100.00% |
static void nfnl_err_deliver(struct list_head *err_list, struct sk_buff *skb)
{
	struct nfnl_err *nfnl_err, *next;

	list_for_each_entry_safe(nfnl_err, next, err_list, head) {
		netlink_ack(skb, nfnl_err->nlh, nfnl_err->err);
		nfnl_err_del(nfnl_err);
	}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 54 | 100.00% | 1 | 100.00% |
Total | 54 | 100.00% | 1 | 100.00% |
enum {
	NFNL_BATCH_FAILURE	= (1 << 0),
	NFNL_BATCH_DONE		= (1 << 1),
	NFNL_BATCH_REPLAY	= (1 << 2),
};
static void nfnetlink_rcv_batch(struct sk_buff *skb, struct nlmsghdr *nlh,
				u16 subsys_id, u32 genid)
{
	struct sk_buff *oskb = skb;
	struct net *net = sock_net(skb->sk);
	const struct nfnetlink_subsystem *ss;
	const struct nfnl_callback *nc;
	LIST_HEAD(err_list);
	u32 status;
	int err;

	if (subsys_id >= NFNL_SUBSYS_COUNT)
		return netlink_ack(skb, nlh, -EINVAL);
replay:
	status = 0;

	skb = netlink_skb_clone(oskb, GFP_KERNEL);
	if (!skb)
		return netlink_ack(oskb, nlh, -ENOMEM);

	nfnl_lock(subsys_id);
	ss = nfnl_dereference_protected(subsys_id);
	if (!ss) {
#ifdef CONFIG_MODULES
		nfnl_unlock(subsys_id);
		request_module("nfnetlink-subsys-%d", subsys_id);
		nfnl_lock(subsys_id);
		ss = nfnl_dereference_protected(subsys_id);
		if (!ss)
#endif
		{
			nfnl_unlock(subsys_id);
			netlink_ack(oskb, nlh, -EOPNOTSUPP);
			return kfree_skb(skb);
		}
	}

	if (!ss->commit || !ss->abort) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -EOPNOTSUPP);
		return kfree_skb(skb);
	}

	if (genid && ss->valid_genid && !ss->valid_genid(net, genid)) {
		nfnl_unlock(subsys_id);
		netlink_ack(oskb, nlh, -ERESTART);
		return kfree_skb(skb);
	}

	while (skb->len >= nlmsg_total_size(0)) {
		int msglen, type;

		nlh = nlmsg_hdr(skb);
		err = 0;

		if (nlh->nlmsg_len < NLMSG_HDRLEN ||
		    skb->len < nlh->nlmsg_len ||
		    nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		}

		/* Only requests are handled by the kernel */
		if (!(nlh->nlmsg_flags & NLM_F_REQUEST)) {
			err = -EINVAL;
			goto ack;
		}

		type = nlh->nlmsg_type;
		if (type == NFNL_MSG_BATCH_BEGIN) {
			/* Malformed: Batch begin twice */
			nfnl_err_reset(&err_list);
			status |= NFNL_BATCH_FAILURE;
			goto done;
		} else if (type == NFNL_MSG_BATCH_END) {
			status |= NFNL_BATCH_DONE;
			goto done;
		} else if (type < NLMSG_MIN_TYPE) {
			err = -EINVAL;
			goto ack;
		}

		/* We only accept a batch with messages for the same
		 * subsystem.
		 */
		if (NFNL_SUBSYS_ID(type) != subsys_id) {
			err = -EINVAL;
			goto ack;
		}

		nc = nfnetlink_find_client(type, ss);
		if (!nc) {
			err = -EINVAL;
			goto ack;
		}

		{
			int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
			u8 cb_id = NFNL_MSG_TYPE(nlh->nlmsg_type);
			struct nlattr *cda[ss->cb[cb_id].attr_count + 1];
			struct nlattr *attr = (void *)nlh + min_len;
			int attrlen = nlh->nlmsg_len - min_len;

			err = nla_parse(cda, ss->cb[cb_id].attr_count,
					attr, attrlen, ss->cb[cb_id].policy);
			if (err < 0)
				goto ack;

			if (nc->call_batch) {
				err = nc->call_batch(net, net->nfnl, skb, nlh,
						     (const struct nlattr **)cda);
			}

			/* The lock was released to autoload some module, we
			 * have to abort and start from scratch using the
			 * original skb.
			 */
			if (err == -EAGAIN) {
				status |= NFNL_BATCH_REPLAY;
				goto next;
			}
		}
ack:
		if (nlh->nlmsg_flags & NLM_F_ACK || err) {
			/* Errors are delivered once the full batch has been
			 * processed, this avoids that the same error is
			 * reported several times when replaying the batch.
			 */
			if (nfnl_err_add(&err_list, nlh, err) < 0) {
				/* We failed to enqueue an error, reset the
				 * list of errors and send OOM to userspace
				 * pointing to the batch header.
				 */
				nfnl_err_reset(&err_list);
				netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
				status |= NFNL_BATCH_FAILURE;
				goto done;
			}
			/* We don't stop processing the batch on errors, thus,
			 * userspace gets all the errors that the batch
			 * triggers.
			 */
			if (err)
				status |= NFNL_BATCH_FAILURE;
		}
next:
		msglen = NLMSG_ALIGN(nlh->nlmsg_len);
		if (msglen > skb->len)
			msglen = skb->len;
		skb_pull(skb, msglen);
	}
done:
	if (status & NFNL_BATCH_REPLAY) {
		ss->abort(net, oskb);
		nfnl_err_reset(&err_list);
		nfnl_unlock(subsys_id);
		kfree_skb(skb);
		goto replay;
	} else if (status == NFNL_BATCH_DONE) {
		ss->commit(net, oskb);
	} else {
		ss->abort(net, oskb);
	}

	nfnl_err_deliver(&err_list, oskb);
	nfnl_unlock(subsys_id);
	kfree_skb(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 748 | 93.62% | 10 | 62.50% |
Phil Turnbull | 24 | 3.00% | 1 | 6.25% |
Denys Fedoryshchenko | 11 | 1.38% | 1 | 6.25% |
Harald Welte | 8 | 1.00% | 1 | 6.25% |
Duan Jiong | 4 | 0.50% | 1 | 6.25% |
Denis V. Lunev | 2 | 0.25% | 1 | 6.25% |
Florian Westphal | 2 | 0.25% | 1 | 6.25% |
Total | 799 | 100.00% | 16 | 100.00% |
static const struct nla_policy nfnl_batch_policy[NFNL_BATCH_MAX + 1] = {
	[NFNL_BATCH_GENID]	= { .type = NLA_U32 },
};
static void nfnetlink_rcv_skb_batch(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	int min_len = nlmsg_total_size(sizeof(struct nfgenmsg));
	struct nlattr *attr = (void *)nlh + min_len;
	struct nlattr *cda[NFNL_BATCH_MAX + 1];
	int attrlen = nlh->nlmsg_len - min_len;
	struct nfgenmsg *nfgenmsg;
	int msglen, err;
	u32 gen_id = 0;
	u16 res_id;

	msglen = NLMSG_ALIGN(nlh->nlmsg_len);
	if (msglen > skb->len)
		msglen = skb->len;

	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < NLMSG_HDRLEN + sizeof(struct nfgenmsg))
		return;

	err = nla_parse(cda, NFNL_BATCH_MAX, attr, attrlen, nfnl_batch_policy);
	if (err < 0) {
		netlink_ack(skb, nlh, err);
		return;
	}
	if (cda[NFNL_BATCH_GENID])
		gen_id = ntohl(nla_get_be32(cda[NFNL_BATCH_GENID]));

	nfgenmsg = nlmsg_data(nlh);
	skb_pull(skb, msglen);
	/* Work around old nft using host byte order */
	if (nfgenmsg->res_id == NFNL_SUBSYS_NFTABLES)
		res_id = NFNL_SUBSYS_NFTABLES;
	else
		res_id = ntohs(nfgenmsg->res_id);

	nfnetlink_rcv_batch(skb, nlh, res_id, gen_id);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 223 | 100.00% | 4 | 100.00% |
Total | 223 | 100.00% | 4 | 100.00% |
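On the wire, a batch is a NFNL_MSG_BATCH_BEGIN message, then any number of requests for the single subsystem named by res_id, then NFNL_MSG_BATCH_END; an optional NFNL_BATCH_GENID attribute on the begin message lets nfnetlink_rcv_batch() reject a stale batch with -ERESTART. Below is a userspace sketch of just the framing (libmnl, invented helper names, payload messages and error handling omitted).

/* Illustrative userspace sketch (libmnl), not part of this file:
 * framing an nf_tables batch.  demo_batch_msg() and demo_set_genid()
 * are hypothetical helpers.
 */
#include <stdint.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <libmnl/libmnl.h>
#include <linux/netfilter/nfnetlink.h>

struct nlmsghdr *demo_batch_msg(char *buf, uint16_t type, uint32_t seq)
{
	struct nlmsghdr *nlh = mnl_nlmsg_put_header(buf);
	struct nfgenmsg *nfg;

	nlh->nlmsg_type	 = type;		/* NFNL_MSG_BATCH_BEGIN or _END */
	nlh->nlmsg_flags = NLM_F_REQUEST;
	nlh->nlmsg_seq	 = seq;

	nfg = mnl_nlmsg_put_extra_header(nlh, sizeof(*nfg));
	nfg->nfgen_family = AF_UNSPEC;
	nfg->version	  = NFNETLINK_V0;
	/* the whole batch is bound to one subsystem via res_id */
	nfg->res_id	  = htons(NFNL_SUBSYS_NFTABLES);
	return nlh;
}

/* Optionally pin the batch to a generation id; if it is stale,
 * nfnetlink_rcv_batch() above answers with -ERESTART.  The attribute
 * is carried big-endian, matching the ntohl() on the kernel side.
 */
void demo_set_genid(struct nlmsghdr *nlh, uint32_t genid)
{
	mnl_attr_put_u32(nlh, NFNL_BATCH_GENID, htonl(genid));
}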
static void nfnetlink_rcv(struct sk_buff *skb)
{
	struct nlmsghdr *nlh = nlmsg_hdr(skb);

	if (nlh->nlmsg_len < NLMSG_HDRLEN ||
	    skb->len < nlh->nlmsg_len)
		return;

	if (!netlink_net_capable(skb, CAP_NET_ADMIN)) {
		netlink_ack(skb, nlh, -EPERM);
		return;
	}

	if (nlh->nlmsg_type == NFNL_MSG_BATCH_BEGIN)
		nfnetlink_rcv_skb_batch(skb, nlh);
	else
		netlink_rcv_skb(skb, &nfnetlink_rcv_msg);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 85 | 100.00% | 2 | 100.00% |
Total | 85 | 100.00% | 2 | 100.00% |
#ifdef CONFIG_MODULES
static int nfnetlink_bind(struct net *net, int group)
{
	const struct nfnetlink_subsystem *ss;
	int type;

	if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
		return 0;

	type = nfnl_group2type[group];

	rcu_read_lock();
	ss = nfnetlink_get_subsys(type << 8);
	rcu_read_unlock();
	if (!ss)
		request_module("nfnetlink-subsys-%d", type);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 59 | 80.82% | 3 | 42.86% |
Richard Guy Briggs | 7 | 9.59% | 2 | 28.57% |
Johannes Berg | 5 | 6.85% | 1 | 14.29% |
Florian Westphal | 2 | 2.74% | 1 | 14.29% |
Total | 73 | 100.00% | 7 | 100.00% |
#endif
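nfnetlink_bind() runs when a socket joins one of the NFNLGRP_* multicast groups; nfnl_group2type[] maps the group back to a subsystem so the backing module can be autoloaded before the first event. A userspace sketch of such a subscription follows (libmnl, invented helper name).

/* Illustrative userspace sketch (libmnl), not part of this file:
 * subscribing to conntrack "new" events, which is what invokes
 * nfnetlink_bind() above and may autoload the conntrack subsystem.
 */
#include <libmnl/libmnl.h>
#include <linux/netlink.h>
#include <linux/netfilter/nfnetlink.h>

struct mnl_socket *demo_subscribe_ct_new(void)
{
	struct mnl_socket *nl;
	int group = NFNLGRP_CONNTRACK_NEW;

	nl = mnl_socket_open(NETLINK_NETFILTER);
	if (!nl)
		return NULL;
	if (mnl_socket_bind(nl, 0, MNL_SOCKET_AUTOPID) < 0 ||
	    mnl_socket_setsockopt(nl, NETLINK_ADD_MEMBERSHIP,
				  &group, sizeof(group)) < 0) {
		mnl_socket_close(nl);
		return NULL;
	}
	return nl;	/* read events with mnl_socket_recvfrom() */
}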
static int __net_init nfnetlink_net_init(struct net *net)
{
	struct sock *nfnl;
	struct netlink_kernel_cfg cfg = {
		.groups	= NFNLGRP_MAX,
		.input	= nfnetlink_rcv,
#ifdef CONFIG_MODULES
		.bind	= nfnetlink_bind,
#endif
	};

	nfnl = netlink_kernel_create(net, NETLINK_NETFILTER, &cfg);
	if (!nfnl)
		return -ENOMEM;
	net->nfnl_stash = nfnl;
	rcu_assign_pointer(net->nfnl, nfnl);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Dobriyan | 51 | 61.45% | 1 | 16.67% |
Pablo Neira Ayuso | 29 | 34.94% | 2 | 33.33% |
Adrian Bunk | 1 | 1.20% | 1 | 16.67% |
Eric Dumazet | 1 | 1.20% | 1 | 16.67% |
Harald Welte | 1 | 1.20% | 1 | 16.67% |
Total | 83 | 100.00% | 6 | 100.00% |
static void __net_exit nfnetlink_net_exit_batch(struct list_head *net_exit_list)
{
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list)
		RCU_INIT_POINTER(net->nfnl, NULL);
	synchronize_net();
	list_for_each_entry(net, net_exit_list, exit_list)
		netlink_kernel_release(net->nfnl_stash);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Dobriyan | 40 | 76.92% | 1 | 25.00% |
Harald Welte | 10 | 19.23% | 1 | 25.00% |
Stephen Hemminger | 1 | 1.92% | 1 | 25.00% |
Denis V. Lunev | 1 | 1.92% | 1 | 25.00% |
Total | 52 | 100.00% | 4 | 100.00% |
static struct pernet_operations nfnetlink_net_ops = {
	.init		= nfnetlink_net_init,
	.exit_batch	= nfnetlink_net_exit_batch,
};
static int __init nfnetlink_init(void)
{
	int i;

	for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
		BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);

	for (i = 0; i < NFNL_SUBSYS_COUNT; i++)
		mutex_init(&table[i].mutex);

	pr_info("Netfilter messages via NETLINK v%s.\n", nfversion);
	return register_pernet_subsys(&nfnetlink_net_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 52 | 69.33% | 2 | 28.57% |
Harald Welte | 15 | 20.00% | 1 | 14.29% |
Alexey Dobriyan | 5 | 6.67% | 1 | 14.29% |
Eric W. Biedermann | 1 | 1.33% | 1 | 14.29% |
Stephen Hemminger | 1 | 1.33% | 1 | 14.29% |
Adrian Bunk | 1 | 1.33% | 1 | 14.29% |
Total | 75 | 100.00% | 7 | 100.00% |
static void __exit nfnetlink_exit(void)
{
	pr_info("Removing netfilter NETLINK layer.\n");
	unregister_pernet_subsys(&nfnetlink_net_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Dobriyan | 14 | 70.00% | 1 | 33.33% |
Harald Welte | 5 | 25.00% | 1 | 33.33% |
Stephen Hemminger | 1 | 5.00% | 1 | 33.33% |
Total | 20 | 100.00% | 3 | 100.00% |
module_init(nfnetlink_init);
module_exit(nfnetlink_exit);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Pablo Neira Ayuso | 1694 | 62.97% | 24 | 38.10% |
Harald Welte | 475 | 17.66% | 4 | 6.35% |
Alexey Dobriyan | 169 | 6.28% | 1 | 1.59% |
Patrick McHardy | 143 | 5.32% | 9 | 14.29% |
Eric Dumazet | 94 | 3.49% | 3 | 4.76% |
Phil Turnbull | 24 | 0.89% | 1 | 1.59% |
Florian Westphal | 19 | 0.71% | 3 | 4.76% |
Tomasz Bursztyka | 18 | 0.67% | 2 | 3.17% |
Denys Fedoryshchenko | 11 | 0.41% | 1 | 1.59% |
Thomas Graf | 10 | 0.37% | 1 | 1.59% |
Richard Guy Briggs | 7 | 0.26% | 2 | 3.17% |
Johannes Berg | 6 | 0.22% | 2 | 3.17% |
Hong Zhi Guo | 5 | 0.19% | 1 | 1.59% |
Duan Jiong | 4 | 0.15% | 1 | 1.59% |
Stephen Hemminger | 3 | 0.11% | 2 | 3.17% |
Denis V. Lunev | 3 | 0.11% | 2 | 3.17% |
Adrian Bunk | 2 | 0.07% | 1 | 1.59% |
Yaowei Bai | 1 | 0.04% | 1 | 1.59% |
Linus Torvalds | 1 | 0.04% | 1 | 1.59% |
Eric W. Biedermann | 1 | 0.04% | 1 | 1.59% |
Total | 2690 | 100.00% | 63 | 100.00% |