Linux kernel release 4.13: net/netfilter/core.c
/* netfilter.c: look after the filters for various protocols.
* Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
*
* Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
* way.
*
* Rusty Russell (C)2000 -- This code is GPL.
* Patrick McHardy (c) 2006-2012
*/
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include "nf_internals.h"
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
mutex_unlock(&afinfo_mutex);
return 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Patrick McHardy | 36 | 94.74% | 3 | 60.00% |
| Pablo Neira Ayuso | 1 | 2.63% | 1 | 20.00% |
| Stephen Hemminger | 1 | 2.63% | 1 | 20.00% |
| Total | 38 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(nf_register_afinfo);
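For context, each address family's netfilter glue registers its nf_afinfo once at init time. A minimal hedged sketch follows; the demo_* names are invented, and a real nf_afinfo also fills in checksum and routing callbacks for its family:
/* Hypothetical, pared-down registration sketch; real users (the IPv4
 * and IPv6 stacks) also supply checksum/route callbacks here.
 */
static const struct nf_afinfo demo_afinfo = {
	.family = NFPROTO_IPV4,
};

static int __init demo_af_init(void)
{
	return nf_register_afinfo(&demo_afinfo);	/* currently always 0 */
}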
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
mutex_lock(&afinfo_mutex);
RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
mutex_unlock(&afinfo_mutex);
synchronize_rcu();
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Patrick McHardy | 37 | 97.37% | 3 | 75.00% |
| Stephen Hemminger | 1 | 2.63% | 1 | 25.00% |
| Total | 38 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(nf_unregister_afinfo);
#ifdef HAVE_JUMP_LABEL
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif
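These static keys let the hot path skip hook processing entirely while nothing is registered. A hedged sketch of the consumer side (the real check lives in the nf_hook() inline in <linux/netfilter.h>; demo_hook_needed is an invented name):
/* Hypothetical illustration: with jump labels enabled, this compiles to a
 * patched-out branch until nf_register_net_hook() increments the key.
 */
static inline bool demo_hook_needed(u_int8_t pf, unsigned int hooknum)
{
	return static_key_false(&nf_hooks_needed[pf][hooknum]);
}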
static DEFINE_MUTEX(nf_hook_mutex);
#define nf_entry_dereference(e) \
rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
static struct nf_hook_entry __rcu **nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
{
if (reg->pf != NFPROTO_NETDEV)
return net->nf.hooks[reg->pf]+reg->hooknum;
#ifdef CONFIG_NETFILTER_INGRESS
if (reg->hooknum == NF_NETDEV_INGRESS) {
if (reg->dev && dev_net(reg->dev) == net)
return &reg->dev->nf_hooks_ingress;
}
#endif
return NULL;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Eric W. Biederman | 39 | 45.35% | 2 | 25.00% |
| Pablo Neira Ayuso | 23 | 26.74% | 2 | 25.00% |
| Linus Torvalds | 13 | 15.12% | 1 | 12.50% |
| Harald Welte | 7 | 8.14% | 1 | 12.50% |
| Aaron Conole | 4 | 4.65% | 2 | 25.00% |
| Total | 86 | 100.00% | 8 | 100.00% |
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
struct nf_hook_entry __rcu **pp;
struct nf_hook_entry *entry, *p;
if (reg->pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
if (reg->hooknum == NF_NETDEV_INGRESS)
return -EOPNOTSUPP;
#endif
if (reg->hooknum != NF_NETDEV_INGRESS ||
!reg->dev || dev_net(reg->dev) != net)
return -EINVAL;
}
pp = nf_hook_entry_head(net, reg);
if (!pp)
return -EINVAL;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return -ENOMEM;
nf_hook_entry_init(entry, reg);
mutex_lock(&nf_hook_mutex);
/* Find the spot in the list */
for (; (p = nf_entry_dereference(*pp)) != NULL; pp = &p->next) {
if (reg->priority < nf_hook_entry_priority(p))
break;
}
rcu_assign_pointer(entry->next, p);
rcu_assign_pointer(*pp, entry);
mutex_unlock(&nf_hook_mutex);
#ifdef CONFIG_NETFILTER_INGRESS
if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Aaron Conole | 91 | 37.45% | 5 | 29.41% |
| Eric W. Biederman | 62 | 25.51% | 4 | 23.53% |
| Linus Torvalds | 42 | 17.28% | 1 | 5.88% |
| Eric Dumazet | 17 | 7.00% | 1 | 5.88% |
| Harald Welte | 15 | 6.17% | 1 | 5.88% |
| Pablo Neira Ayuso | 10 | 4.12% | 2 | 11.76% |
| Zhouyi Zhou | 3 | 1.23% | 1 | 5.88% |
| Patrick McHardy | 2 | 0.82% | 1 | 5.88% |
| Ingo Molnar | 1 | 0.41% | 1 | 5.88% |
| Total | 243 | 100.00% | 17 | 100.00% |
EXPORT_SYMBOL(nf_register_net_hook);
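To show the caller's side of the path above, here is a hedged sketch: a hypothetical module defines an nf_hook_ops for the IPv4 PRE_ROUTING hook and registers it against a namespace. The demo_* names are invented; the nf_hook_ops fields and the 4.13 hook prototype match what in-tree modules use.
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

/* Hypothetical hook: runs for every IPv4 packet entering PRE_ROUTING. */
static unsigned int demo_hook(void *priv, struct sk_buff *skb,
			      const struct nf_hook_state *state)
{
	return NF_ACCEPT;	/* continue to the next hook in the chain */
}

static const struct nf_hook_ops demo_ops = {
	.hook		= demo_hook,
	.pf		= NFPROTO_IPV4,
	.hooknum	= NF_INET_PRE_ROUTING,
	.priority	= NF_IP_PRI_FIRST,	/* sorted into the chain by this */
};

/* int err = nf_register_net_hook(&init_net, &demo_ops); */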
static struct nf_hook_entry *
__nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
struct nf_hook_entry __rcu **pp;
struct nf_hook_entry *p;
pp = nf_hook_entry_head(net, reg);
if (WARN_ON_ONCE(!pp))
return NULL;
mutex_lock(&nf_hook_mutex);
for (; (p = nf_entry_dereference(*pp)) != NULL; pp = &p->next) {
if (nf_hook_entry_ops(p) == reg) {
rcu_assign_pointer(*pp, p->next);
break;
}
}
mutex_unlock(&nf_hook_mutex);
if (!p) {
WARN(1, "nf_unregister_net_hook: hook not found!\n");
return NULL;
}
#ifdef CONFIG_NETFILTER_INGRESS
if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
static_key_slow_dec(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
return p;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Eric W. Biederman | 79 | 45.93% | 1 | 11.11% |
| Linus Torvalds | 34 | 19.77% | 1 | 11.11% |
| Aaron Conole | 24 | 13.95% | 3 | 33.33% |
| Florian Westphal | 15 | 8.72% | 1 | 11.11% |
| Harald Welte | 14 | 8.14% | 1 | 11.11% |
| Pablo Neira Ayuso | 6 | 3.49% | 2 | 22.22% |
| Total | 172 | 100.00% | 9 | 100.00% |
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
struct nf_hook_entry *p = __nf_unregister_net_hook(net, reg);
unsigned int nfq;
if (!p)
return;
synchronize_net();
/* other cpu might still process nfqueue verdict that used reg */
nfq = nf_queue_nf_hook_drop(net);
if (nfq)
synchronize_net();
kfree(p);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Florian Westphal | 47 | 77.05% | 3 | 50.00% |
| Eric W. Biederman | 12 | 19.67% | 1 | 16.67% |
| Pablo Neira Ayuso | 1 | 1.64% | 1 | 16.67% |
| Linus Torvalds | 1 | 1.64% | 1 | 16.67% |
| Total | 61 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(nf_unregister_net_hook);
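The matching teardown for the registration sketch above; once nf_unregister_net_hook() returns, the synchronize_net() calls guarantee that no CPU is still running the hook and that no queued verdict still references it:
/* Hypothetical teardown paired with demo_ops from the earlier sketch. */
static void demo_exit(struct net *net)
{
	nf_unregister_net_hook(net, &demo_ops);
	/* demo_hook() can no longer be invoked from this point on */
}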
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int n)
{
unsigned int i;
int err = 0;
for (i = 0; i < n; i++) {
err = nf_register_net_hook(net, &reg[i]);
if (err)
goto err;
}
return err;
err:
if (i > 0)
nf_unregister_net_hooks(net, reg, i);
return err;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Eric W. Biederman | 87 | 100.00% | 1 | 100.00% |
| Total | 87 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(nf_register_net_hooks);
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
unsigned int hookcount)
{
struct nf_hook_entry *to_free[16];
unsigned int i, n, nfq;
do {
n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));
for (i = 0; i < n; i++)
to_free[i] = __nf_unregister_net_hook(net, &reg[i]);
synchronize_net();
/* need 2nd synchronize_net() if nfqueue is used, skb
* can get reinjected right before nf_queue_hook_drop()
*/
nfq = nf_queue_nf_hook_drop(net);
if (nfq)
synchronize_net();
for (i = 0; i < n; i++)
kfree(to_free[i]);
reg += n;
hookcount -= n;
} while (hookcount > 0);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Florian Westphal | 102 | 76.69% | 2 | 66.67% |
| Eric W. Biederman | 31 | 23.31% | 1 | 33.33% |
| Total | 133 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(nf_unregister_net_hooks);
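Multiple hooks are commonly registered as one array: nf_register_net_hooks() unwinds the first i entries if the i-th registration fails, and the batched unregister above frees at most 16 entries per pass to bound its on-stack to_free[] array. A hedged sketch with invented names, reusing demo_hook from earlier:
/* Hypothetical two-hook batch sharing one hook function. */
static const struct nf_hook_ops demo_batch[] = {
	{
		.hook	  = demo_hook,
		.pf	  = NFPROTO_IPV4,
		.hooknum  = NF_INET_LOCAL_IN,
		.priority = NF_IP_PRI_FILTER,
	},
	{
		.hook	  = demo_hook,
		.pf	  = NFPROTO_IPV4,
		.hooknum  = NF_INET_LOCAL_OUT,
		.priority = NF_IP_PRI_FILTER,
	},
};

/* err = nf_register_net_hooks(net, demo_batch, ARRAY_SIZE(demo_batch)); */
/* ... nf_unregister_net_hooks(net, demo_batch, ARRAY_SIZE(demo_batch)); */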
/* Returns 1 if okfn() needs to be executed by the caller,
* -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock. */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
struct nf_hook_entry *entry)
{
unsigned int verdict;
int ret;
do {
verdict = nf_hook_entry_hookfn(entry, skb, state);
switch (verdict & NF_VERDICT_MASK) {
case NF_ACCEPT:
entry = rcu_dereference(entry->next);
break;
case NF_DROP:
kfree_skb(skb);
ret = NF_DROP_GETERR(verdict);
if (ret == 0)
ret = -EPERM;
return ret;
case NF_QUEUE:
ret = nf_queue(skb, state, &entry, verdict);
if (ret == 1 && entry)
continue;
return ret;
default:
/* Implicit handling for NF_STOLEN, as well as any other
* non-conventional verdicts.
*/
return 0;
}
} while (entry);
return 1;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Harald Welte | 52 | 39.10% | 1 | 7.14% |
| Pablo Neira Ayuso | 51 | 38.35% | 4 | 28.57% |
| Eric Paris | 11 | 8.27% | 1 | 7.14% |
| Florian Westphal | 8 | 6.02% | 3 | 21.43% |
| Aaron Conole | 4 | 3.01% | 2 | 14.29% |
| David S. Miller | 4 | 3.01% | 1 | 7.14% |
| Herbert Xu | 2 | 1.50% | 1 | 7.14% |
| Michael Wang | 1 | 0.75% | 1 | 7.14% |
| Total | 133 | 100.00% | 14 | 100.00% |
EXPORT_SYMBOL(nf_hook_slow);
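The verdict handling above is easiest to see from a hook's point of view. A hedged sketch (invented name and conditions) of the verdict classes nf_hook_slow() dispatches on:
#include <linux/ip.h>

/* Hypothetical hook exercising the verdicts nf_hook_slow() handles. */
static unsigned int verdict_demo(void *priv, struct sk_buff *skb,
				 const struct nf_hook_state *state)
{
	if (skb->len < sizeof(struct iphdr))
		return NF_DROP;		/* skb freed; caller sees -EPERM */
	if (skb->mark == 0xdead)	/* invented policy check */
		return NF_QUEUE_NR(0);	/* to userspace queue 0; resumes on verdict */
	return NF_ACCEPT;		/* walk on to the next entry */
}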
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
if (writable_len > skb->len)
return 0;
/* Not exclusive use of packet? Must copy. */
if (!skb_cloned(skb)) {
if (writable_len <= skb_headlen(skb))
return 1;
} else if (skb_clone_writable(skb, writable_len))
return 1;
if (writable_len <= skb_headlen(skb))
writable_len = 0;
else
writable_len -= skb_headlen(skb);
return !!__pskb_pull_tail(skb, writable_len);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Harald Welte | 47 | 51.09% | 1 | 33.33% |
| Herbert Xu | 39 | 42.39% | 1 | 33.33% |
| Patrick McHardy | 6 | 6.52% | 1 | 33.33% |
| Total | 92 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(skb_make_writable);
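Typical usage, hedged: a hook that rewrites packet headers must first ensure the bytes it touches are linear and not shared with a clone. The rewrite below (function name and TTL value invented) shows the canonical call pattern:
#include <linux/ip.h>
#include <net/ip.h>

/* Hypothetical header rewrite guarded by skb_make_writable(). */
static unsigned int rewrite_ttl(struct sk_buff *skb)
{
	struct iphdr *iph;

	if (!skb_make_writable(skb, sizeof(struct iphdr)))
		return NF_DROP;		/* could not unshare/pull the header */

	iph = ip_hdr(skb);
	iph->ttl = 64;			/* invented rewrite */
	ip_send_check(iph);		/* refresh the IPv4 header checksum */
	return NF_ACCEPT;
}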
/* This needs to be compiled in any case to avoid dependencies between the
* nfnetlink_queue code and nf_conntrack.
*/
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
   connection tracking is in use: without this, the connection may not be in
   the hash table, and hence manufactured ICMP or RST packets will not be
   associated with it. */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *)
__rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
void (*attach)(struct sk_buff *, const struct sk_buff *);
if (skb->_nfct) {
rcu_read_lock();
attach = rcu_dereference(ip_ct_attach);
if (attach)
attach(new, skb);
rcu_read_unlock();
}
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Harald Welte | 46 | 71.88% | 1 | 25.00% |
| Patrick McHardy | 17 | 26.56% | 2 | 50.00% |
| Florian Westphal | 1 | 1.56% | 1 | 25.00% |
| Total | 64 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(nf_ct_attach);
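The intended caller is code that manufactures a reply packet, such as a TCP RST or an ICMP error: attaching the original skb's conntrack reference makes the reply match the same connection. A hedged sketch with an invented function name:
/* Hypothetical reply path: copy the conntrack reference before transmit. */
static void demo_send_reply(struct sk_buff *nskb, const struct sk_buff *oldskb)
{
	nf_ct_attach(nskb, oldskb);	/* no-op if oldskb->_nfct is unset */
	/* ... fill in headers and transmit nskb ... */
}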
void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
void (*destroy)(struct nf_conntrack *);
rcu_read_lock();
destroy = rcu_dereference(nf_ct_destroy);
BUG_ON(destroy == NULL);
destroy(nfct);
rcu_read_unlock();
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Yasuyuki Kozakai | 46 | 100.00% | 1 | 100.00% |
| Total | 46 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(nf_conntrack_destroy);
/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
.id = NF_CT_DEFAULT_ZONE_ID,
.dir = NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */
#ifdef CONFIG_NF_NAT_NEEDED
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
static int __net_init netfilter_net_init(struct net *net)
{
int i, h;
for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
for (h = 0; h < NF_MAX_HOOKS; h++)
RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
}
#ifdef CONFIG_PROC_FS
net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
net->proc_net);
if (!net->nf.proc_netfilter) {
if (!net_eq(net, &init_net))
pr_err("cannot create netfilter proc entry");
return -ENOMEM;
}
#endif
return 0;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Gao Feng | 60 | 48.00% | 1 | 20.00% |
| Eric W. Biederman | 54 | 43.20% | 1 | 20.00% |
| Pablo Neira Ayuso | 7 | 5.60% | 1 | 20.00% |
| Aaron Conole | 3 | 2.40% | 1 | 20.00% |
| Dan Carpenter | 1 | 0.80% | 1 | 20.00% |
| Total | 125 | 100.00% | 5 | 100.00% |
static void __net_exit netfilter_net_exit(struct net *net)
{
remove_proc_entry("netfilter", net->proc_net);
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Gao Feng | 21 | 100.00% | 1 | 100.00% |
| Total | 21 | 100.00% | 1 | 100.00% |
static struct pernet_operations netfilter_net_ops = {
.init = netfilter_net_init,
.exit = netfilter_net_exit,
};
int __init netfilter_init(void)
{
int ret;
ret = register_pernet_subsys(&netfilter_net_ops);
if (ret < 0)
goto err;
ret = netfilter_log_init();
if (ret < 0)
goto err_pernet;
return 0;
err_pernet:
unregister_pernet_subsys(&netfilter_net_ops);
err:
return ret;
}
Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Pablo Neira Ayuso | 32 | 55.17% | 1 | 33.33% |
| Harald Welte | 19 | 32.76% | 1 | 33.33% |
| Gao Feng | 7 | 12.07% | 1 | 33.33% |
| Total | 58 | 100.00% | 3 | 100.00% |
Overall Contributors
| Person | Tokens | Tokens % | Commits | Commits % |
| Eric W. Biederman | 383 | 22.32% | 5 | 7.94% |
| Harald Welte | 283 | 16.49% | 1 | 1.59% |
| Florian Westphal | 201 | 11.71% | 10 | 15.87% |
| Patrick McHardy | 157 | 9.15% | 9 | 14.29% |
| Pablo Neira Ayuso | 142 | 8.28% | 11 | 17.46% |
| Aaron Conole | 137 | 7.98% | 5 | 7.94% |
| Gao Feng | 105 | 6.12% | 1 | 1.59% |
| Linus Torvalds | 90 | 5.24% | 1 | 1.59% |
| Yasuyuki Kozakai | 70 | 4.08% | 2 | 3.17% |
| Eric Dumazet | 47 | 2.74% | 2 | 3.17% |
| Herbert Xu | 41 | 2.39% | 2 | 3.17% |
| Daniel Borkmann | 23 | 1.34% | 1 | 1.59% |
| Eric Paris | 11 | 0.64% | 1 | 1.59% |
| Zhouyi Zhou | 6 | 0.35% | 1 | 1.59% |
| David S. Miller | 4 | 0.23% | 1 | 1.59% |
| Tejun Heo | 3 | 0.17% | 1 | 1.59% |
| Ken-ichirou MATSUZAWA | 3 | 0.17% | 1 | 1.59% |
| Ingo Molnar | 2 | 0.12% | 1 | 1.59% |
| Stephen Hemminger | 2 | 0.12% | 1 | 1.59% |
| Martin Josefsson | 1 | 0.06% | 1 | 1.59% |
| Arnd Bergmann | 1 | 0.06% | 1 | 1.59% |
| Jan Engelhardt | 1 | 0.06% | 1 | 1.59% |
| Igor Maravić | 1 | 0.06% | 1 | 1.59% |
| Michael Wang | 1 | 0.06% | 1 | 1.59% |
| Dan Carpenter | 1 | 0.06% | 1 | 1.59% |
| Total | 1716 | 100.00% | 63 | 100.00% |