cregit-Linux how code gets into the kernel

Release 4.14 net/netfilter/core.c

Directory: net/netfilter
/* netfilter.c: look after the filters for various protocols.
 * Heavily influenced by the old firewall.c by David Bonn and Alan Cox.
 *
 * Thanks to Rob `CmdrTaco' Malda for not influencing this code in any
 * way.
 *
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */
#include <linux/kernel.h>
#include <linux/netfilter.h>
#include <net/protocol.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/netdevice.h>
#include <linux/netfilter_ipv6.h>
#include <linux/inetdevice.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/sock.h>

#include "nf_internals.h"

/* Serializes registration/unregistration of per-family afinfo below. */
static DEFINE_MUTEX(afinfo_mutex);

/* Per-family netfilter ops; written under afinfo_mutex, read under RCU. */
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
EXPORT_SYMBOL(nf_afinfo);

/* IPv6 helper ops, filled in by the ipv6 module when it loads. */
const struct nf_ipv6_ops __rcu *nf_ipv6_ops __read_mostly;
EXPORT_SYMBOL_GPL(nf_ipv6_ops);

/* Per-CPU flag: set while a duplicated skb traverses the hooks, to
 * prevent the dup target from duplicating the duplicate (loops).
 */
DEFINE_PER_CPU(bool, nf_skb_duplicated);
EXPORT_SYMBOL_GPL(nf_skb_duplicated);


/* Publish the address-family info for afinfo->family.
 * Serialized against other (un)registrations by afinfo_mutex; readers
 * look the pointer up under RCU. Always succeeds.
 */
int nf_register_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], afinfo);
	mutex_unlock(&afinfo_mutex);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy3694.74%360.00%
Stephen Hemminger12.63%120.00%
Pablo Neira Ayuso12.63%120.00%
Total38100.00%5100.00%

EXPORT_SYMBOL_GPL(nf_register_afinfo);
/* Withdraw the address-family info for afinfo->family.
 * The slot is cleared under afinfo_mutex; synchronize_rcu() then makes
 * sure no reader still holds a reference before the caller frees it.
 */
void nf_unregister_afinfo(const struct nf_afinfo *afinfo)
{
	mutex_lock(&afinfo_mutex);
	RCU_INIT_POINTER(nf_afinfo[afinfo->family], NULL);
	mutex_unlock(&afinfo_mutex);
	synchronize_rcu();
}

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy3797.37%375.00%
Stephen Hemminger12.63%125.00%
Total38100.00%4100.00%

EXPORT_SYMBOL_GPL(nf_unregister_afinfo);

#ifdef HAVE_JUMP_LABEL
/* Static keys let the hot path skip nf_hook_slow() entirely when no
 * hook is registered for a given (pf, hooknum) pair.
 */
struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
EXPORT_SYMBOL(nf_hooks_needed);
#endif

/* Protects all hook blob mutations (register/unregister/shrink). */
static DEFINE_MUTEX(nf_hook_mutex);

/* max hooks per family/hooknum */
#define MAX_HOOK_COUNT		1024

/* Dereference a hook blob pointer; legal only with nf_hook_mutex held. */
#define nf_entry_dereference(e) \
	rcu_dereference_protected(e, lockdep_is_held(&nf_hook_mutex))
/* Allocate a zeroed hook blob with room for @num entries.
 * Layout: header, then @num nf_hook_entry slots, then @num nf_hook_ops
 * pointers (retrieved via nf_hook_entries_get_hook_ops()).
 * Returns NULL for @num == 0 or on allocation failure.
 */
static struct nf_hook_entries *allocate_hook_entries_size(u16 num)
{
	struct nf_hook_entries *e;
	size_t alloc;

	if (num == 0)
		return NULL;

	alloc = sizeof(*e) +
		sizeof(struct nf_hook_entry) * num +
		sizeof(struct nf_hook_ops *) * num;

	e = kvzalloc(alloc, GFP_KERNEL);
	if (e)
		e->num_hook_entries = num;
	return e;
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole73100.00%1100.00%
Total73100.00%1100.00%


/* No-op hook body used by dummy_ops; unconditionally lets the packet
 * through so nf_hook_slow() proceeds to the next hook.
 */
static unsigned int accept_all(void *priv,
			       struct sk_buff *skb,
			       const struct nf_hook_state *state)
{
	return NF_ACCEPT; /* ACCEPT makes nf_hook_slow call next hook */
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole26100.00%1100.00%
Total26100.00%1100.00%

/* Placeholder installed in place of an unregistered hook so that
 * unregistration never has to fail; skipped/compacted on shrink.
 */
static const struct nf_hook_ops dummy_ops = {
	.hook = accept_all,
	.priority = INT_MIN,
};
/* Build a new hook blob containing all live entries of @old plus @reg,
 * inserted in descending priority order. dummy_ops placeholders from
 * earlier unregistrations are dropped in the process.
 * Returns the new blob, or ERR_PTR(-E2BIG/-ENOMEM). Caller publishes it
 * and frees @old after an RCU grace period.
 */
static struct nf_hook_entries *
nf_hook_entries_grow(const struct nf_hook_entries *old,
		     const struct nf_hook_ops *reg)
{
	unsigned int i, alloc_entries, nhooks, old_entries;
	struct nf_hook_ops **orig_ops = NULL;
	struct nf_hook_ops **new_ops;
	struct nf_hook_entries *new;
	bool inserted = false;

	/* one slot for @reg itself */
	alloc_entries = 1;
	old_entries = old ? old->num_hook_entries : 0;

	if (old) {
		orig_ops = nf_hook_entries_get_hook_ops(old);

		/* count live entries only; dummy slots are not copied */
		for (i = 0; i < old_entries; i++) {
			if (orig_ops[i] != &dummy_ops)
				alloc_entries++;
		}
	}

	if (alloc_entries > MAX_HOOK_COUNT)
		return ERR_PTR(-E2BIG);

	new = allocate_hook_entries_size(alloc_entries);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new_ops = nf_hook_entries_get_hook_ops(new);

	i = 0;
	nhooks = 0;
	/* merge: copy higher-priority old entries, then splice in @reg */
	while (i < old_entries) {
		if (orig_ops[i] == &dummy_ops) {
			++i;
			continue;
		}

		if (inserted || reg->priority > orig_ops[i]->priority) {
			/* old entry ranks before @reg (or @reg already placed) */
			new_ops[nhooks] = (void *)orig_ops[i];
			new->hooks[nhooks] = old->hooks[i];
			i++;
		} else {
			/* first position where @reg outranks the old entry */
			new_ops[nhooks] = (void *)reg;
			new->hooks[nhooks].hook = reg->hook;
			new->hooks[nhooks].priv = reg->priv;
			inserted = true;
		}
		nhooks++;
	}

	/* @reg has the lowest priority of all: append it */
	if (!inserted) {
		new_ops[nhooks] = (void *)reg;
		new->hooks[nhooks].hook = reg->hook;
		new->hooks[nhooks].priv = reg->priv;
	}

	return new;
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole322100.00%1100.00%
Total322100.00%1100.00%


/* Debug-only sanity check: warn if a hook blob is not sorted in
 * descending priority order (dummy placeholders are ignored).
 * Compiles to nothing unless CONFIG_DEBUG_KERNEL is set.
 */
static void hooks_validate(const struct nf_hook_entries *hooks)
{
#ifdef CONFIG_DEBUG_KERNEL
	struct nf_hook_ops **ops = nf_hook_entries_get_hook_ops(hooks);
	int highest = INT_MIN;
	size_t n;

	for (n = 0; n < hooks->num_hook_entries; n++) {
		if (ops[n] == &dummy_ops)
			continue;

		WARN_ON(ops[n]->priority < highest);

		if (ops[n]->priority > highest)
			highest = ops[n]->priority;
	}
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal100100.00%1100.00%
Total100100.00%1100.00%

/*
 * __nf_hook_entries_try_shrink - try to shrink hook array
 *
 * @pp -- location of hook blob
 *
 * Hook unregistration must always succeed, so to-be-removed hooks
 * are replaced by a dummy one that will just move to next hook.
 *
 * This counts the current dummy hooks, attempts to allocate new blob,
 * copies the live hooks, then replaces and discards old one.
 *
 * return values:
 *
 * Returns address to free, or NULL.
 */
/* Compact the blob at *pp, dropping dummy_ops placeholders.
 * Caller must hold nf_hook_mutex; the returned old blob must be freed
 * only after an RCU grace period (caller's responsibility).
 */
static void *__nf_hook_entries_try_shrink(struct nf_hook_entries __rcu **pp)
{
	struct nf_hook_entries *old, *new = NULL;
	unsigned int i, j, skip = 0, hook_entries;
	struct nf_hook_ops **orig_ops;
	struct nf_hook_ops **new_ops;

	old = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!old))
		return NULL;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	/* count dummy placeholders left behind by unregistration */
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			skip++;
	}

	/* if skip == hook_entries all hooks have been removed */
	hook_entries = old->num_hook_entries;
	if (skip == hook_entries)
		goto out_assign;	/* publish NULL, free whole blob */

	if (skip == 0)
		return NULL;	/* nothing to compact */

	hook_entries -= skip;
	new = allocate_hook_entries_size(hook_entries);
	if (!new)
		return NULL;	/* keep the old blob; dummies stay */

	new_ops = nf_hook_entries_get_hook_ops(new);
	/* copy only the live entries, preserving their order */
	for (i = 0, j = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] == &dummy_ops)
			continue;
		new->hooks[j] = old->hooks[i];
		new_ops[j] = (void *)orig_ops[i];
		j++;
	}
	hooks_validate(new);
out_assign:
	rcu_assign_pointer(*pp, new);
	return old;
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole23197.88%150.00%
Florian Westphal52.12%150.00%
Total236100.00%2100.00%


/* Locate the hook-blob slot for @reg: the per-netns table for normal
 * protocol families, or the device's ingress slot for NFPROTO_NETDEV.
 * Returns NULL (with a one-shot warning) for unsupported combinations.
 */
static struct nf_hook_entries __rcu **
nf_hook_entry_head(struct net *net, const struct nf_hook_ops *reg)
{
	/* Every family except NFPROTO_NETDEV lives in the netns table. */
	if (reg->pf != NFPROTO_NETDEV)
		return net->nf.hooks[reg->pf] + reg->hooknum;

#ifdef CONFIG_NETFILTER_INGRESS
	/* netdev ingress hooks hang off the net_device itself. */
	if (reg->hooknum == NF_NETDEV_INGRESS &&
	    reg->dev && dev_net(reg->dev) == net)
		return &reg->dev->nf_hooks_ingress;
#endif
	WARN_ON_ONCE(1);
	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Eric W. Biedermann3942.86%222.22%
Pablo Neira Ayuso2325.27%222.22%
Linus Torvalds1314.29%111.11%
Aaron Conole99.89%333.33%
Harald Welte77.69%111.11%
Total91100.00%9100.00%


/* Register a single netfilter hook in @net.
 * Builds an enlarged blob under nf_hook_mutex, publishes it with
 * rcu_assign_pointer(), then frees the old blob after synchronize_net().
 * Returns 0 or a negative errno.
 */
int nf_register_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entries *p, *new_hooks;
	struct nf_hook_entries __rcu **pp;

	if (reg->pf == NFPROTO_NETDEV) {
#ifndef CONFIG_NETFILTER_INGRESS
		if (reg->hooknum == NF_NETDEV_INGRESS)
			return -EOPNOTSUPP;
#endif
		/* netdev hooks require ingress hooknum and a device in @net */
		if (reg->hooknum != NF_NETDEV_INGRESS ||
		    !reg->dev || dev_net(reg->dev) != net)
			return -EINVAL;
	}

	pp = nf_hook_entry_head(net, reg);
	if (!pp)
		return -EINVAL;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	new_hooks = nf_hook_entries_grow(p, reg);

	if (!IS_ERR(new_hooks))
		rcu_assign_pointer(*pp, new_hooks);	/* publish to readers */

	mutex_unlock(&nf_hook_mutex);
	if (IS_ERR(new_hooks))
		return PTR_ERR(new_hooks);

	hooks_validate(new_hooks);
#ifdef CONFIG_NETFILTER_INGRESS
	if (reg->pf == NFPROTO_NETDEV && reg->hooknum == NF_NETDEV_INGRESS)
		net_inc_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
	static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
#endif
	/* wait out readers of the old blob before freeing it */
	synchronize_net();
	BUG_ON(p == new_hooks);
	kvfree(p);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole11550.88%633.33%
Eric W. Biedermann4218.58%316.67%
Linus Torvalds2812.39%15.56%
Eric Dumazet177.52%15.56%
Harald Welte104.42%15.56%
Florian Westphal52.21%15.56%
Zhouyi Zhou31.33%15.56%
Pablo Neira Ayuso31.33%211.11%
Patrick McHardy20.88%15.56%
Ingo Molnar10.44%15.56%
Total226100.00%18100.00%

EXPORT_SYMBOL(nf_register_net_hook);

/*
 * __nf_unregister_net_hook - remove a hook from blob
 *
 * @oldp: current address of hook blob
 * @unreg: hook to unregister
 *
 * This cannot fail, hook unregistration must always succeed.
 * Therefore replace the to-be-removed hook with a dummy hook.
 */
static void __nf_unregister_net_hook(struct nf_hook_entries *old,
				     const struct nf_hook_ops *unreg)
{
	struct nf_hook_ops **orig_ops;
	bool found = false;
	unsigned int i;

	orig_ops = nf_hook_entries_get_hook_ops(old);
	for (i = 0; i < old->num_hook_entries; i++) {
		if (orig_ops[i] != unreg)
			continue;
		/* swap the live function for accept_all first, then mark the
		 * ops slot as dummy; readers may race, hence WRITE_ONCE
		 */
		WRITE_ONCE(old->hooks[i].hook, accept_all);
		WRITE_ONCE(orig_ops[i], &dummy_ops);
		found = true;
		break;
	}

	if (found) {
#ifdef CONFIG_NETFILTER_INGRESS
		if (unreg->pf == NFPROTO_NETDEV && unreg->hooknum == NF_NETDEV_INGRESS)
			net_dec_ingress_queue();
#endif
#ifdef HAVE_JUMP_LABEL
		static_key_slow_dec(&nf_hooks_needed[unreg->pf][unreg->hooknum]);
#endif
	} else {
		WARN_ONCE(1, "hook not found, pf %d num %d", unreg->pf, unreg->hooknum);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole9155.83%333.33%
Eric W. Biedermann5533.74%111.11%
Harald Welte95.52%111.11%
Pablo Neira Ayuso42.45%222.22%
Florian Westphal21.23%111.11%
Linus Torvalds21.23%111.11%
Total163100.00%9100.00%


/* Unregister a single hook from @net: neutralize it in place, try to
 * shrink the blob, then free the old blob after RCU/nfqueue have
 * quiesced. Never fails.
 */
void nf_unregister_net_hook(struct net *net, const struct nf_hook_ops *reg)
{
	struct nf_hook_entries __rcu **pp;
	struct nf_hook_entries *p;
	unsigned int nfq;

	pp = nf_hook_entry_head(net, reg);
	if (!pp)
		return;

	mutex_lock(&nf_hook_mutex);

	p = nf_entry_dereference(*pp);
	if (WARN_ON_ONCE(!p)) {
		mutex_unlock(&nf_hook_mutex);
		return;
	}

	__nf_unregister_net_hook(p, reg);

	p = __nf_hook_entries_try_shrink(pp);
	mutex_unlock(&nf_hook_mutex);
	if (!p)
		return;

	synchronize_net();

	/* other cpu might still process nfqueue verdict that used reg */
	nfq = nf_queue_nf_hook_drop(net);
	if (nfq)
		synchronize_net();
	kvfree(p);
}

Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole7559.06%114.29%
Florian Westphal3930.71%342.86%
Eric W. Biedermann118.66%114.29%
Pablo Neira Ayuso10.79%114.29%
Linus Torvalds10.79%114.29%
Total127100.00%7100.00%

EXPORT_SYMBOL(nf_unregister_net_hook);
/* Register an array of @n hooks in @net.
 * All-or-nothing: on the first failure every hook registered so far is
 * rolled back and the error is returned; 0 on success.
 */
int nf_register_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			  unsigned int n)
{
	unsigned int i;
	int err;

	for (i = 0; i < n; i++) {
		err = nf_register_net_hook(net, &reg[i]);
		if (err) {
			/* undo the hooks that did register */
			if (i > 0)
				nf_unregister_net_hooks(net, reg, i);
			return err;
		}
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Eric W. Biedermann87100.00%1100.00%
Total87100.00%1100.00%

EXPORT_SYMBOL(nf_register_net_hooks);
/* Unregister @hookcount hooks from @net.
 * Phase 1: neutralize every hook in place under one mutex hold.
 * Phase 2: shrink the affected blobs in batches of up to 16, paying the
 * synchronize_net()/nfqueue-drain cost once per batch instead of once
 * per hook.
 */
void nf_unregister_net_hooks(struct net *net, const struct nf_hook_ops *reg,
			     unsigned int hookcount)
{
	struct nf_hook_entries *to_free[16], *p;
	struct nf_hook_entries __rcu **pp;
	unsigned int i, j, n;

	mutex_lock(&nf_hook_mutex);
	/* phase 1: replace each hook with the dummy placeholder */
	for (i = 0; i < hookcount; i++) {
		pp = nf_hook_entry_head(net, &reg[i]);
		if (!pp)
			continue;

		p = nf_entry_dereference(*pp);
		if (WARN_ON_ONCE(!p))
			continue;
		__nf_unregister_net_hook(p, &reg[i]);
	}
	mutex_unlock(&nf_hook_mutex);

	/* phase 2: compact blobs and free old ones, 16 at a time */
	do {
		n = min_t(unsigned int, hookcount, ARRAY_SIZE(to_free));

		mutex_lock(&nf_hook_mutex);

		for (i = 0, j = 0; i < hookcount && j < n; i++) {
			pp = nf_hook_entry_head(net, &reg[i]);
			if (!pp)
				continue;

			p = nf_entry_dereference(*pp);
			if (!p)
				continue;

			to_free[j] = __nf_hook_entries_try_shrink(pp);
			if (to_free[j])
				++j;
		}

		mutex_unlock(&nf_hook_mutex);

		if (j) {
			unsigned int nfq;

			synchronize_net();

			/* need 2nd synchronize_net() if nfqueue is used, skb
			 * can get reinjected right before nf_queue_hook_drop()
			 */
			nfq = nf_queue_nf_hook_drop(net);
			if (nfq)
				synchronize_net();

			for (i = 0; i < j; i++)
				kvfree(to_free[i]);
		}

		reg += n;
		hookcount -= n;
	} while (hookcount > 0);
}

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal25990.56%250.00%
Eric W. Biedermann269.09%125.00%
Aaron Conole10.35%125.00%
Total286100.00%4100.00%

EXPORT_SYMBOL(nf_unregister_net_hooks);

/* Returns 1 if okfn() needs to be executed by the caller,
 * -EPERM for NF_DROP, 0 otherwise. Caller must hold rcu_read_lock.
 */
/* Walk the hook chain @e starting at index @s and dispatch on each
 * hook's verdict. See the comment above for the return contract.
 */
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
		 const struct nf_hook_entries *e, unsigned int s)
{
	unsigned int verdict;
	int ret;

	for (; s < e->num_hook_entries; s++) {
		verdict = nf_hook_entry_hookfn(&e->hooks[s], skb, state);
		switch (verdict & NF_VERDICT_MASK) {
		case NF_ACCEPT:
			break;	/* fall through to the next hook */
		case NF_DROP:
			kfree_skb(skb);
			/* hooks can encode an errno in the verdict */
			ret = NF_DROP_GETERR(verdict);
			if (ret == 0)
				ret = -EPERM;
			return ret;
		case NF_QUEUE:
			ret = nf_queue(skb, state, e, s, verdict);
			if (ret == 1)
				continue;	/* queueing refused, retry this hook */
			return ret;
		default:
			/* Implicit handling for NF_STOLEN, as well as any other
			 * non conventional verdicts.
			 */
			return 0;
		}
	}
	return 1;
}

Contributors

PersonTokensPropCommitsCommitProp
Harald Welte5035.71%17.14%
Pablo Neira Ayuso3323.57%321.43%
Aaron Conole3222.86%321.43%
Eric Paris117.86%17.14%
Florian Westphal75.00%321.43%
David S. Miller42.86%17.14%
Herbert Xu21.43%17.14%
Michael Wang10.71%17.14%
Total140100.00%14100.00%

EXPORT_SYMBOL(nf_hook_slow);
/* Ensure the first @writable_len bytes of @skb are safe to modify.
 * Returns 1 on success, 0 if the skb is shorter than @writable_len.
 * May pull data from fragments into the (unshared) head.
 */
int skb_make_writable(struct sk_buff *skb, unsigned int writable_len)
{
	unsigned int headlen;

	/* cannot make more bytes writable than the packet holds */
	if (writable_len > skb->len)
		return 0;

	headlen = skb_headlen(skb);

	/* Not exclusive use of packet?  Must copy. */
	if (!skb_cloned(skb)) {
		if (writable_len <= headlen)
			return 1;
	} else if (skb_clone_writable(skb, writable_len)) {
		return 1;
	}

	/* pull in whatever part of the request the head doesn't cover */
	writable_len = (writable_len <= headlen) ? 0 : writable_len - headlen;

	return !!__pskb_pull_tail(skb, writable_len);
}

Contributors

PersonTokensPropCommitsCommitProp
Harald Welte4751.09%133.33%
Herbert Xu3942.39%133.33%
Patrick McHardy66.52%133.33%
Total92100.00%3100.00%

EXPORT_SYMBOL(skb_make_writable);

/* This needs to be compiled in any case to avoid dependencies between the
 * nfnetlink_queue code and nf_conntrack.
 */
struct nfnl_ct_hook __rcu *nfnl_ct_hook __read_mostly;
EXPORT_SYMBOL_GPL(nfnl_ct_hook);

#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* This does not belong here, but locally generated errors need it if
 * connection tracking is in use: without this, a connection may not be in
 * the hash table, and hence manufactured ICMP or RST packets will not be
 * associated with it.
 * Set by the conntrack module; read under RCU by nf_ct_attach() below.
 */
void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu __read_mostly;
EXPORT_SYMBOL(ip_ct_attach);
/* Copy @skb's conntrack association onto @new (e.g. for locally
 * generated ICMP errors/RSTs). No-op when @skb carries no conntrack
 * info or when the conntrack module has not installed ip_ct_attach.
 */
void nf_ct_attach(struct sk_buff *new, const struct sk_buff *skb)
{
	void (*attach)(struct sk_buff *, const struct sk_buff *);

	if (!skb->_nfct)
		return;

	rcu_read_lock();
	attach = rcu_dereference(ip_ct_attach);
	if (attach)
		attach(new, skb);
	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
Harald Welte4671.88%125.00%
Patrick McHardy1726.56%250.00%
Florian Westphal11.56%125.00%
Total64100.00%4100.00%

EXPORT_SYMBOL(nf_ct_attach);

/* Destructor installed by the conntrack module; invoked via
 * nf_conntrack_destroy() below when an skb's conntrack ref drops.
 */
void (*nf_ct_destroy)(struct nf_conntrack *) __rcu __read_mostly;
EXPORT_SYMBOL(nf_ct_destroy);
/* Release @nfct through the destructor registered in nf_ct_destroy.
 * A conntrack object cannot exist without the module that installed the
 * destructor, so a missing destructor is a fatal inconsistency.
 */
void nf_conntrack_destroy(struct nf_conntrack *nfct)
{
	void (*destroy)(struct nf_conntrack *);

	rcu_read_lock();
	destroy = rcu_dereference(nf_ct_destroy);
	BUG_ON(!destroy);
	destroy(nfct);
	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
Yasuyuki Kozakai46100.00%1100.00%
Total46100.00%1100.00%

EXPORT_SYMBOL(nf_conntrack_destroy);

/* Built-in default zone used e.g. by modules. */
const struct nf_conntrack_zone nf_ct_zone_dflt = {
	.id	= NF_CT_DEFAULT_ZONE_ID,
	.dir	= NF_CT_DEFAULT_ZONE_DIR,
};
EXPORT_SYMBOL_GPL(nf_ct_zone_dflt);
#endif /* CONFIG_NF_CONNTRACK */

#ifdef CONFIG_NF_NAT_NEEDED
/* Installed by the NAT core to reverse NAT on policy-lookup flows. */
void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *);
EXPORT_SYMBOL(nf_nat_decode_session_hook);
#endif
/* Per-netns init: clear every hook chain and create the
 * /proc/net/netfilter directory. Returns 0 or -ENOMEM.
 */
static int __net_init netfilter_net_init(struct net *net)
{
	int i, h;

	/* start with no hooks registered in this namespace */
	for (i = 0; i < ARRAY_SIZE(net->nf.hooks); i++) {
		for (h = 0; h < NF_MAX_HOOKS; h++)
			RCU_INIT_POINTER(net->nf.hooks[i][h], NULL);
	}

#ifdef CONFIG_PROC_FS
	net->nf.proc_netfilter = proc_net_mkdir(net, "netfilter",
						net->proc_net);
	if (!net->nf.proc_netfilter) {
		/* init_net failure is handled by netfilter_init()'s caller;
		 * only complain for dynamically created namespaces.
		 * Fix: printk messages must be newline-terminated, otherwise
		 * this line can be merged with the next printk.
		 */
		if (!net_eq(net, &init_net))
			pr_err("cannot create netfilter proc entry\n");

		return -ENOMEM;
	}
#endif
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Gao Feng6048.00%120.00%
Eric W. Biedermann5443.20%120.00%
Pablo Neira Ayuso75.60%120.00%
Aaron Conole32.40%120.00%
Dan Carpenter10.80%120.00%
Total125100.00%5100.00%


/* Per-netns teardown: remove the /proc/net/netfilter directory created
 * by netfilter_net_init().
 */
static void __net_exit netfilter_net_exit(struct net *net)
{
	remove_proc_entry("netfilter", net->proc_net);
}

Contributors

PersonTokensPropCommitsCommitProp
Gao Feng21100.00%1100.00%
Total21100.00%1100.00%

/* Per-network-namespace setup/teardown callbacks for netfilter. */
static struct pernet_operations netfilter_net_ops = {
	.init = netfilter_net_init,
	.exit = netfilter_net_exit,
};
/* Boot-time initialization of the netfilter core: register the pernet
 * callbacks, then bring up the logging infrastructure. On failure the
 * pernet registration is rolled back. Returns 0 or a negative errno.
 */
int __init netfilter_init(void)
{
	int ret;

	ret = register_pernet_subsys(&netfilter_net_ops);
	if (ret < 0)
		return ret;

	ret = netfilter_log_init();
	if (ret < 0) {
		/* undo the pernet registration before bailing out */
		unregister_pernet_subsys(&netfilter_net_ops);
		return ret;
	}

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Pablo Neira Ayuso3255.17%133.33%
Harald Welte1932.76%133.33%
Gao Feng712.07%133.33%
Total58100.00%3100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Aaron Conole101537.55%69.38%
Florian Westphal44616.50%1218.75%
Eric W. Biedermann33312.32%46.25%
Harald Welte27110.03%11.56%
Patrick McHardy1575.81%914.06%
Pablo Neira Ayuso1154.25%1015.62%
Gao Feng1053.88%11.56%
Yasuyuki Kozakai702.59%23.12%
Eric Dumazet471.74%23.12%
Linus Torvalds441.63%11.56%
Herbert Xu411.52%23.12%
Daniel Borkmann230.85%11.56%
Eric Paris110.41%11.56%
Zhouyi Zhou60.22%11.56%
David S. Miller40.15%11.56%
Ken-ichirou MATSUZAWA30.11%11.56%
Ingo Molnar20.07%11.56%
Stephen Hemminger20.07%11.56%
Tejun Heo20.07%11.56%
Jan Engelhardt10.04%11.56%
Michael Wang10.04%11.56%
Dan Carpenter10.04%11.56%
Igor Maravić10.04%11.56%
Arnd Bergmann10.04%11.56%
Martin Josefsson10.04%11.56%
Total2703100.00%64100.00%
Directory: net/netfilter
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.