cregit-Linux: how code gets into the kernel

Release 4.14 net/netfilter/nf_queue.c

Directory: net/netfilter
/*
 * Rusty Russell (C)2000 -- This code is GPL.
 * Patrick McHardy (c) 2006-2012
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
#include <linux/netfilter_bridge.h>
#include <linux/seq_file.h>
#include <linux/rcupdate.h>
#include <net/protocol.h>
#include <net/netfilter/nf_queue.h>
#include <net/dst.h>

#include "nf_internals.h"

/*
 * Hook for nfnetlink_queue to register its queue handler.
 * We do this so that most of the NFQUEUE code can be modular.
 *
 * Once the queue is registered it must reinject all packets it
 * receives, no matter what.
 */

/* Only one queue handler may be registered per network namespace; the
 * kernel's sole queueing backend is nfnetlink_queue, so a duplicate
 * registration indicates a bug (hence the WARN_ON below). */

void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh)
{
        /* should never happen, we only have one queueing backend in kernel */
        WARN_ON(rcu_access_pointer(net->nf.queue_handler));
        rcu_assign_pointer(net->nf.queue_handler, qh);
}


Author                  Tokens       %   Commits       %
Harald Welte                14   35.00%        2   25.00%
Eric W. Biedermann          13   32.50%        1   12.50%
Eric Dumazet                 5   12.50%        2   25.00%
Florian Westphal             4   10.00%        1   12.50%
Yasuyuki Kozakai             3    7.50%        1   12.50%
Patrick McHardy              1    2.50%        1   12.50%

EXPORT_SYMBOL(nf_register_queue_handler);

/* The caller must flush their queue before this */
void nf_unregister_queue_handler(struct net *net)
{
        RCU_INIT_POINTER(net->nf.queue_handler, NULL);
}


Author                  Tokens       %   Commits       %
Harald Welte                 8   38.10%        1   20.00%
Eric W. Biedermann           8   38.10%        1   20.00%
Yasuyuki Kozakai             3   14.29%        1   20.00%
Florian Westphal             1    4.76%        1   20.00%
Stephen Hemminger            1    4.76%        1   20.00%
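For orientation, here is a minimal sketch of the consumer side of this pair: a queueing backend registering itself per network namespace. All my_* names are hypothetical; the only real in-tree backend is nfnetlink_queue (net/netfilter/nfnetlink_queue.c).

/* Hypothetical backend sketch -- the my_* names do not exist in the
 * kernel; they only illustrate the nf_queue_handler contract. */
static int my_outfn(struct nf_queue_entry *entry, unsigned int queuenum)
{
        /* Hand the packet to userspace here.  A negative return tells
         * __nf_queue() (below) to release the entry's references. */
        return -ENOTSUPP;
}

static unsigned int my_hook_drop(struct net *net)
{
        return 0;       /* number of queued packets we dropped */
}

static const struct nf_queue_handler my_qh = {
        .outfn        = my_outfn,
        .nf_hook_drop = my_hook_drop,
};

static int __net_init my_net_init(struct net *net)
{
        nf_register_queue_handler(net, &my_qh);
        return 0;
}

static void __net_exit my_net_exit(struct net *net)
{
        /* Flush any packets still queued to userspace first. */
        nf_unregister_queue_handler(net);
}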

void nf_queue_entry_release_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        /* Release those devices we held, or Alexey will kill me. */
        if (state->in)
                dev_put(state->in);
        if (state->out)
                dev_put(state->out);
        if (state->sk)
                sock_put(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_put(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_put(physdev);
        }
#endif
}


Author                  Tokens       %   Commits       %
Patrick McHardy             64   53.78%        1   20.00%
David S. Miller             29   24.37%        2   40.00%
Florian Westphal            21   17.65%        1   20.00%
Pablo Neira Ayuso            5    4.20%        1   20.00%

EXPORT_SYMBOL_GPL(nf_queue_entry_release_refs);

/* Bump dev refs so they don't vanish while packet is out */
void nf_queue_entry_get_refs(struct nf_queue_entry *entry)
{
        struct nf_hook_state *state = &entry->state;

        if (state->in)
                dev_hold(state->in);
        if (state->out)
                dev_hold(state->out);
        if (state->sk)
                sock_hold(state->sk);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
        if (entry->skb->nf_bridge) {
                struct net_device *physdev;

                physdev = nf_bridge_get_physindev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
                physdev = nf_bridge_get_physoutdev(entry->skb);
                if (physdev)
                        dev_hold(physdev);
        }
#endif
}


Author                  Tokens       %   Commits       %
Florian Westphal            84   71.19%        3   50.00%
David S. Miller             29   24.58%        2   33.33%
Pablo Neira Ayuso            5    4.24%        1   16.67%
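The two helpers above are strictly paired: references are taken once before the packet leaves the stack and dropped exactly once, on whichever path ends the detour. A condensed sketch of that contract, mirroring what __nf_queue() and nf_reinject() below actually do (qh and queuenum stand in for the callers' state):

/* Lifecycle sketch only; not a new API. */
static int queue_detour_sketch(struct nf_queue_entry *entry,
                               const struct nf_queue_handler *qh,
                               unsigned int queuenum)
{
        nf_queue_entry_get_refs(entry);         /* pin devices and socket */
        if (qh->outfn(entry, queuenum) < 0) {
                nf_queue_entry_release_refs(entry);     /* handoff failed */
                return -1;
        }
        /* Otherwise nf_reinject() drops the references once the
         * userspace verdict arrives. */
        return 0;
}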

unsigned int nf_queue_nf_hook_drop(struct net *net)
{
        const struct nf_queue_handler *qh;
        unsigned int count = 0;

        rcu_read_lock();
        qh = rcu_dereference(net->nf.queue_handler);
        if (qh)
                count = qh->nf_hook_drop(net);
        rcu_read_unlock();

        return count;
}


Author                  Tokens       %   Commits       %
Eric W. Biedermann          40   71.43%        2   50.00%
Florian Westphal            13   23.21%        1   25.00%
Pablo Neira Ayuso            3    5.36%        1   25.00%
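nf_queue_nf_hook_drop() exists for hook teardown: queued packets still reference the hook table, so they must be flushed before that table is freed. A simplified sketch of the caller side, loosely modeled on what net/netfilter/core.c does when a hook is unregistered (not the verbatim caller):

static void hook_teardown_sketch(struct net *net)
{
        unsigned int dropped;

        dropped = nf_queue_nf_hook_drop(net);
        if (dropped)
                synchronize_net();      /* wait out in-flight verdicts */
}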

static int __nf_queue(struct sk_buff *skb, const struct nf_hook_state *state,
                      const struct nf_hook_entries *entries,
                      unsigned int index, unsigned int queuenum)
{
        int status = -ENOENT;
        struct nf_queue_entry *entry = NULL;
        const struct nf_afinfo *afinfo;
        const struct nf_queue_handler *qh;
        struct net *net = state->net;

        /* QUEUE == DROP if no one is waiting, to be safe. */
        qh = rcu_dereference(net->nf.queue_handler);
        if (!qh) {
                status = -ESRCH;
                goto err;
        }

        afinfo = nf_get_afinfo(state->pf);
        if (!afinfo)
                goto err;

        entry = kmalloc(sizeof(*entry) + afinfo->route_key_size, GFP_ATOMIC);
        if (!entry) {
                status = -ENOMEM;
                goto err;
        }

        *entry = (struct nf_queue_entry) {
                .skb        = skb,
                .state      = *state,
                .hook_index = index,
                .size       = sizeof(*entry) + afinfo->route_key_size,
        };

        nf_queue_entry_get_refs(entry);
        skb_dst_force(skb);
        afinfo->saveroute(skb, entry);
        status = qh->outfn(entry, queuenum);

        if (status < 0) {
                nf_queue_entry_release_refs(entry);
                goto err;
        }

        return 0;

err:
        kfree(entry);
        return status;
}


Author                  Tokens       %   Commits       %
Harald Welte                84   35.29%        2    8.33%
Patrick McHardy             62   26.05%        7   29.17%
Florian Westphal            37   15.55%        6   25.00%
Eric W. Biedermann          13    5.46%        1    4.17%
Yasuyuki Kozakai            11    4.62%        1    4.17%
Pablo Neira Ayuso            9    3.78%        2    8.33%
Aaron Conole                 9    3.78%        1    4.17%
David S. Miller              7    2.94%        2    8.33%
Eric Dumazet                 5    2.10%        1    4.17%
Lucas De Marchi              1    0.42%        1    4.17%

/* Packets leaving via this function must come back through nf_reinject(). */
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
             const struct nf_hook_entries *entries, unsigned int index,
             unsigned int verdict)
{
        int ret;

        ret = __nf_queue(skb, state, entries, index, verdict >> NF_VERDICT_QBITS);
        if (ret < 0) {
                if (ret == -ESRCH &&
                    (verdict & NF_VERDICT_FLAG_QUEUE_BYPASS))
                        return 1;
                kfree_skb(skb);
        }

        return 0;
}


Author                  Tokens       %   Commits       %
Pablo Neira Ayuso           71   87.65%        2   66.67%
Aaron Conole                10   12.35%        1   33.33%
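The producer side of this path is a hook verdict with a queue number encoded in its upper bits. A hedged sketch (demo_hook is hypothetical; NF_QUEUE_NR() and NF_VERDICT_FLAG_QUEUE_BYPASS are the real UAPI macros from linux/netfilter.h):

static unsigned int demo_hook(void *priv, struct sk_buff *skb,
                              const struct nf_hook_state *state)
{
        /* Queue to userspace queue 3; if no handler is registered
         * (-ESRCH above), the bypass flag means "accept and continue"
         * instead of dropping the packet. */
        return NF_QUEUE_NR(3) | NF_VERDICT_FLAG_QUEUE_BYPASS;
}

nf_queue() recovers the queue number with verdict >> NF_VERDICT_QBITS, and its return value of 1 tells the caller to resume traversal at the next hook rather than treating the packet as consumed.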

static unsigned int nf_iterate(struct sk_buff *skb,
                               struct nf_hook_state *state,
                               const struct nf_hook_entries *hooks,
                               unsigned int *index)
{
        const struct nf_hook_entry *hook;
        unsigned int verdict, i = *index;

        while (i < hooks->num_hook_entries) {
                hook = &hooks->hooks[i];
repeat:
                verdict = nf_hook_entry_hookfn(hook, skb, state);
                if (verdict != NF_ACCEPT) {
                        if (verdict != NF_REPEAT)
                                return verdict;
                        goto repeat;
                }
                i++;
        }

        *index = i;
        return NF_ACCEPT;
}


Author                  Tokens       %   Commits       %
Pablo Neira Ayuso           63   58.88%        1   33.33%
Aaron Conole                44   41.12%        2   66.67%
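A hypothetical hook illustrating the verdicts nf_iterate() folds into its loop: NF_ACCEPT advances to hooks[i + 1], NF_REPEAT re-runs the same entry (so the hook must eventually change its answer), and anything else ends traversal:

static unsigned int mark_once(void *priv, struct sk_buff *skb,
                              const struct nf_hook_state *state)
{
        if (!skb->mark) {
                skb->mark = 1;
                return NF_REPEAT;       /* run this same hook again */
        }
        return NF_ACCEPT;               /* proceed to the next hook */
}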

/* Caller must hold rcu read-side lock */
void nf_reinject(struct nf_queue_entry *entry, unsigned int verdict)
{
        const struct nf_hook_entry *hook_entry;
        const struct nf_hook_entries *hooks;
        struct sk_buff *skb = entry->skb;
        const struct nf_afinfo *afinfo;
        const struct net *net;
        unsigned int i;
        int err;
        u8 pf;

        net = entry->state.net;
        pf = entry->state.pf;

        hooks = rcu_dereference(net->nf.hooks[pf][entry->state.hook]);

        nf_queue_entry_release_refs(entry);

        i = entry->hook_index;
        if (WARN_ON_ONCE(i >= hooks->num_hook_entries)) {
                kfree_skb(skb);
                kfree(entry);
                return;
        }

        hook_entry = &hooks->hooks[i];

        /* Continue traversal iff userspace said ok... */
        if (verdict == NF_REPEAT)
                verdict = nf_hook_entry_hookfn(hook_entry, skb, &entry->state);

        if (verdict == NF_ACCEPT) {
                afinfo = nf_get_afinfo(entry->state.pf);
                if (!afinfo || afinfo->reroute(entry->state.net, skb, entry) < 0)
                        verdict = NF_DROP;
        }

        if (verdict == NF_ACCEPT) {
next_hook:
                ++i;
                verdict = nf_iterate(skb, &entry->state, hooks, &i);
        }

        switch (verdict & NF_VERDICT_MASK) {
        case NF_ACCEPT:
        case NF_STOP:
                local_bh_disable();
                entry->state.okfn(entry->state.net, entry->state.sk, skb);
                local_bh_enable();
                break;
        case NF_QUEUE:
                err = nf_queue(skb, &entry->state, hooks, i, verdict);
                if (err == 1)
                        goto next_hook;
                break;
        case NF_STOLEN:
                break;
        default:
                kfree_skb(skb);
        }

        kfree(entry);
}


Author                  Tokens       %   Commits       %
Aaron Conole               113   34.98%        3   11.54%
Harald Welte                88   27.24%        1    3.85%
Patrick McHardy             63   19.50%        7   26.92%
Florian Westphal            20    6.19%        5   19.23%
David S. Miller             14    4.33%        3   11.54%
Eric W. Biedermann          12    3.72%        2    7.69%
Pablo Neira Ayuso            5    1.55%        2    7.69%
Michael Wang                 4    1.24%        1    3.85%
Eric Dumazet                 3    0.93%        1    3.85%
Julian Anastasov             1    0.31%        1    3.85%
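For completeness, the consumer side of nf_reinject(): when userspace returns a verdict, the queueing backend hands back the entry it was given via outfn(). A sketch under the locking rule stated above (backend_verdict_sketch and its verdict parameter are illustrative):

static void backend_verdict_sketch(struct nf_queue_entry *entry,
                                   unsigned int verdict)
{
        rcu_read_lock();        /* nf_reinject() requires the RCU read lock */
        nf_reinject(entry, verdict);    /* e.g. NF_ACCEPT or NF_DROP */
        rcu_read_unlock();
}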


Overall Contributors

Author                  Tokens       %   Commits       %
Harald Welte               238   20.14%        3    5.77%
Florian Westphal           200   16.92%       12   23.08%
Patrick McHardy            197   16.67%       11   21.15%
Aaron Conole               177   14.97%        3    5.77%
Pablo Neira Ayuso          162   13.71%        5    9.62%
Eric W. Biedermann          86    7.28%        4    7.69%
David S. Miller             79    6.68%        4    7.69%
Yasuyuki Kozakai            17    1.44%        1    1.92%
Eric Dumazet                16    1.35%        4    7.69%
Michael Wang                 4    0.34%        1    1.92%
Tejun Heo                    3    0.25%        1    1.92%
Lucas De Marchi              1    0.08%        1    1.92%
Stephen Hemminger            1    0.08%        1    1.92%
Julian Anastasov             1    0.08%        1    1.92%
Created with cregit.