cregit-Linux: how code gets into the kernel

Release 4.11: net/netfilter/nf_conntrack_core.c

Directory: net/netfilter
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>


#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
				      enum nf_nat_manip_type manip,
				      const struct nlattr *attr) __read_mostly;

EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);


__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];

EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);

EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);


struct hlist_nulls_head *nf_conntrack_hash __read_mostly;

EXPORT_SYMBOL_GPL(nf_conntrack_hash);


struct conntrack_gc_work {
        struct delayed_work     dwork;
        u32                     last_bucket;
        bool                    exiting;
        long                    next_gc_run;
};


static __read_mostly struct kmem_cache *nf_conntrack_cachep;

static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);

static __read_mostly bool nf_conntrack_locks_all;

/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
#define GC_MAX_BUCKETS_DIV      128u

/* upper bound of full table scan */
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)

/* desired ratio of entries found to be expired */
#define GC_EVICT_RATIO          50u


static struct conntrack_gc_work conntrack_gc_work;
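The three GC constants above bound how aggressively the garbage-collection worker behind conntrack_gc_work rescans the table. A minimal user-space sketch, assuming HZ is 250 and that the scan budget is simply divided evenly across runs; the helper and HZ value are illustrative, not taken from this file:

/*
 * Illustrative only -- not the kernel's gc_worker(). Shows how the
 * constants interact: each run scans at most 1/GC_MAX_BUCKETS_DIV of
 * the table, so a full sweep takes up to GC_MAX_BUCKETS_DIV runs, and
 * the per-run delay is capped so that a sweep stays within
 * GC_MAX_SCAN_JIFFIES.
 */
#include <stdio.h>

#define HZ                      250u    /* assumed tick rate */
#define GC_MAX_BUCKETS_DIV      128u
#define GC_MAX_SCAN_JIFFIES     (16u * HZ)
#define GC_EVICT_RATIO          50u

int main(void)
{
        unsigned int max_interval = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;

        printf("max delay between gc runs: %u jiffies (%.2f s)\n",
               max_interval, (double)max_interval / HZ);
        printf("gc is rescheduled sooner once >= %u%% of scanned entries are expired\n",
               GC_EVICT_RATIO);
        return 0;
}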


void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);
        while (unlikely(nf_conntrack_locks_all)) {
                spin_unlock(lock);

                /*
                 * Order the 'nf_conntrack_locks_all' load vs. the
                 * spin_unlock_wait() loads below, to ensure
                 * that 'nf_conntrack_locks_all_lock' is indeed held:
                 */
                smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
                spin_unlock_wait(&nf_conntrack_locks_all_lock);
                spin_lock(lock);
        }
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Sasha Levin             42      87.50%   1        33.33%
Peter Zijlstra          5       10.42%   1        33.33%
Nicholas Mc Guire       1       2.08%    1        33.33%
Total                   48      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(nf_conntrack_lock);
static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  46      100.00%  1        100.00%
Total                   46      100.00%  1        100.00%

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  110     97.35%   1        33.33%
Sasha Levin             2       1.77%    1        33.33%
Florian Westphal        1       0.88%    1        33.33%
Total                   113     100.00%  3        100.00%


static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);
        nf_conntrack_locks_all = true;

        /*
         * Order the above store of 'nf_conntrack_locks_all' against
         * the spin_unlock_wait() loads below, such that if
         * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
         * we must observe nf_conntrack_locks[] held:
         */
        smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_unlock_wait(&nf_conntrack_locks[i]);
        }
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  27      54.00%   1        25.00%
Sasha Levin             17      34.00%   1        25.00%
Peter Zijlstra          5       10.00%   1        25.00%
Nicholas Mc Guire       1       2.00%    1        25.00%
Total                   50      100.00%  4        100.00%


static void nf_conntrack_all_unlock(void)
{
        /*
         * All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
         * critical section:
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  13      56.52%   1        33.33%
Peter Zijlstra          6       26.09%   1        33.33%
Sasha Levin             4       17.39%   1        33.33%
Total                   23      100.00%  3        100.00%
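Taken together, nf_conntrack_lock(), nf_conntrack_all_lock() and nf_conntrack_all_unlock() form a two-level scheme: normal paths take a single bucket spinlock from nf_conntrack_locks[], while a writer that must see the whole table quiesced (such as the resize path) raises nf_conntrack_locks_all and waits for every bucket lock to drain. A minimal sketch of such a writer, built only from the helpers above; example_walk_whole_table() is a hypothetical name, not a function in this file:

/* Sketch of a whole-table writer pairing the all-lock helpers above. */
static void example_walk_whole_table(void)      /* hypothetical */
{
        unsigned int i;

        local_bh_disable();
        nf_conntrack_all_lock();        /* blocks new nf_conntrack_lock() holders */

        for (i = 0; i < nf_conntrack_htable_size; i++) {
                /* ... move or inspect entries in nf_conntrack_hash[i] ... */
        }

        nf_conntrack_all_unlock();
        local_bh_enable();
}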

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
seqcount_t nf_conntrack_generation __read_mostly;

/* nf_conn must be 8 bytes aligned, as the 3 LSB bits are used
 * for the nfctinfo. We cheat by (ab)using the PER CPU cache line
 * alignment to enforce this.
 */
DEFINE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

static unsigned int nf_conntrack_hash_rnd __read_mostly;
static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              const struct net *net)
{
        unsigned int n;
        u32 seed;

        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                       tuple->dst.protonum));
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        46      42.20%   1        14.29%
Florian Westphal        30      27.52%   2        28.57%
Patrick McHardy         23      21.10%   1        14.29%
Changli Gao             5       4.59%    1        14.29%
Al Viro                 4       3.67%    1        14.29%
Sami Farin              1       0.92%    1        14.29%
Total                   109     100.00%  7        100.00%


static u32 scale_hash(u32 hash)
{
        return reciprocal_scale(hash, nf_conntrack_htable_size);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Changli Gao             14      82.35%   1        33.33%
Florian Westphal        3       17.65%   2        66.67%
Total                   17      100.00%  3        100.00%


static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
                            unsigned int size)
{
        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Changli Gao             25      71.43%   1        50.00%
Florian Westphal        10      28.57%   1        50.00%
Total                   35      100.00%  2        100.00%


static u32 hash_conntrack(const struct net *net,
                          const struct nf_conntrack_tuple *tuple)
{
        return scale_hash(hash_conntrack_raw(tuple, net));
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        17      58.62%   1        20.00%
Florian Westphal        5       17.24%   2        40.00%
Patrick McHardy         5       17.24%   1        20.00%
Daniel Borkmann         2       6.90%    1        20.00%
Total                   29      100.00%  5        100.00%
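scale_hash() and __hash_conntrack() both turn the 32-bit jhash value into a bucket index through reciprocal_scale(), which computes ((u64)hash * size) >> 32 and so maps the hash uniformly onto [0, size) without a modulo. A stand-alone user-space rendering of that mapping, with an assumed table size:

/* Illustrative user-space copy of the kernel's reciprocal_scale(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t reciprocal_scale(uint32_t val, uint32_t ep_ro)
{
        return (uint32_t)(((uint64_t)val * ep_ro) >> 32);
}

int main(void)
{
        uint32_t hash = 0xdeadbeef;     /* stand-in for hash_conntrack_raw() output */
        uint32_t htable_size = 16384;   /* assumed nf_conntrack_htable_size */

        printf("bucket = %u\n", reciprocal_scale(hash, htable_size));
        return 0;
}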


bool
nf_ct_get_tuple(const struct sk_buff *skb,
                unsigned int nhoff,
                unsigned int dataoff,
                u_int16_t l3num,
                u_int8_t protonum,
                struct net *net,
                struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_l3proto *l3proto,
                const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        95      81.90%   1        20.00%
Philip Craig            9       7.76%    1        20.00%
Eric W. Biedermann      7       6.03%    1        20.00%
Martin Josefsson        3       2.59%    1        20.00%
Jan Engelhardt          2       1.72%    1        20.00%
Total                   116     100.00%  5        100.00%

EXPORT_SYMBOL_GPL(nf_ct_get_tuple);
bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net, tuple,
                              l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        118     92.91%   1        33.33%
Eric W. Biedermann      7       5.51%    1        33.33%
Jan Engelhardt          2       1.57%    1        33.33%
Total                   127     100.00%  3        100.00%

EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
bool
nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                   const struct nf_conntrack_tuple *orig,
                   const struct nf_conntrack_l3proto *l3proto,
                   const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        90      86.54%   1        25.00%
Philip Craig            9       8.65%    1        25.00%
Martin Josefsson        3       2.88%    1        25.00%
Jan Engelhardt          2       1.92%    1        25.00%
Total                   104     100.00%  4        100.00%

EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
static void
clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Martin Josefsson        22      44.00%   1        16.67%
Yasuyuki Kozakai        19      38.00%   1        16.67%
Eric Dumazet            4       8.00%    1        16.67%
Harald Welte            3       6.00%    1        16.67%
Patrick McHardy         2       4.00%    2        33.33%
Total                   50      100.00%  6        100.00%

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  76      100.00%  1        100.00%
Total                   76      100.00%  1        100.00%

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  76      100.00%  1        100.00%
Total                   76      100.00%  1        100.00%

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload first tuple to link into unconfirmed or dying list.*/
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  80      100.00%  1        100.00%
Total                   80      100.00%  1        100.00%

#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl, *p;

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK) {
                tmpl = kzalloc(sizeof(*tmpl) + NFCT_INFOMASK, flags);
                if (!tmpl)
                        return NULL;

                p = tmpl;
                tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                if (tmpl != p) {
                        tmpl = (struct nf_conn *)NFCT_ALIGN((unsigned long)p);
                        tmpl->proto.tmpl_padto = (char *)tmpl - (char *)p;
                }
        } else {
                tmpl = kzalloc(sizeof(*tmpl), flags);
                if (!tmpl)
                        return NULL;
        }

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
        nf_ct_zone_add(tmpl, zone);
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Florian Westphal        100     54.35%   2        33.33%
Pablo Neira Ayuso       75      40.76%   1        16.67%
Daniel Borkmann         6       3.26%    2        33.33%
Joe Stringer            3       1.63%    1        16.67%
Total                   184     100.00%  6        100.00%

EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
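nf_ct_tmpl_alloc() over-allocates by NFCT_INFOMASK bytes and rounds the pointer up with NFCT_ALIGN() so that the low bits of a template pointer stay clear for the ctinfo field, recording the shift in tmpl_padto so nf_ct_tmpl_free() can recover the original allocation. A stand-alone illustration of the rounding arithmetic, assuming NFCT_INFOMASK is 7 (three low bits); the values are made up for the example:

/* User-space illustration of the NFCT_ALIGN() pointer-rounding trick. */
#include <stdint.h>
#include <stdio.h>

#define NFCT_INFOMASK   7UL     /* assumed: three ctinfo bits in the LSBs */
#define NFCT_ALIGN(len) (((len) + NFCT_INFOMASK) & ~NFCT_INFOMASK)

int main(void)
{
        uintptr_t raw = 0x1003;                 /* pretend kzalloc() result */
        uintptr_t aligned = NFCT_ALIGN(raw);    /* rounded up to 8 bytes: 0x1008 */

        printf("raw %#lx -> aligned %#lx, tmpl_padto = %lu\n",
               (unsigned long)raw, (unsigned long)aligned,
               (unsigned long)(aligned - raw)); /* bytes to subtract on free */
        return 0;
}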
void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);

        if (ARCH_KMALLOC_MINALIGN <= NFCT_INFOMASK)
                kfree((char *)tmpl - tmpl->proto.tmpl_padto);
        else
                kfree(tmpl);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Pablo Neira Ayuso       25      53.19%   1        50.00%
Florian Westphal        22      46.81%   1        50.00%
Total                   47      100.00%  2        100.00%

EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
static void
destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Martin Josefsson        60      42.25%   2        16.67%
Yasuyuki Kozakai        40      28.17%   1        8.33%
Pablo Neira Ayuso       19      13.38%   2        16.67%
Patrick McHardy         14      9.86%    4        33.33%
Jesper Dangaard Brouer  6       4.23%    2        16.67%
Harald Welte            3       2.11%    1        8.33%
Total                   142     100.00%  12       100.00%


static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        local_bh_enable();
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer  69      58.47%   2        28.57%
Pablo Neira Ayuso       43      36.44%   2        28.57%
Daniel Borkmann         4       3.39%    1        14.29%
Florian Westphal        2       1.69%    2        28.57%
Total                   118     100.00%  7        100.00%


bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        if (test_and_set_bit(IPS_DYING_BIT, &ct->status))
                return false;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                      portid, report) < 0) {
                /* destroy event was not delivered. nf_ct_put will
                 * be done by event cache worker on redelivery.
                 */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Pablo Neira Ayuso       59      51.30%   2        28.57%
Florian Westphal        50      43.48%   3        42.86%
Yasuyuki Kozakai        5       4.35%    1        14.29%
Eric Dumazet            1       0.87%    1        14.29%
Total                   115     100.00%  7        100.00%

EXPORT_SYMBOL_GPL(nf_ct_delete);
static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with the equal tuple,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct) &&
               net_eq(net, nf_ct_net(ct));
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Andrey Vagin            51      64.56%   1        25.00%
Florian Westphal        16      20.25%   1        25.00%
Daniel Borkmann         12      15.19%   2        50.00%
Total                   79      100.00%  4        100.00%

/* caller must hold rcu readlock and none of the nf_conntrack_locks */
static void nf_ct_gc_expired(struct nf_conn *ct)
{
        if (!atomic_inc_not_zero(&ct->ct_general.use))
                return;

        if (nf_ct_should_gc(ct))
                nf_ct_kill(ct);

        nf_ct_put(ct);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Florian Westphal        42      100.00%  1        100.00%
Total                   42      100.00%  1        100.00%

/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int bucket, hsize;

begin:
        nf_conntrack_get_ht(&ct_hash, &hsize);
        bucket = reciprocal_scale(hash, hsize);

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
                struct nf_conn *ct;

                ct = nf_ct_tuplehash_to_ctrack(h);
                if (nf_ct_is_expired(ct)) {
                        nf_ct_gc_expired(ct);
                        continue;
                }

                if (nf_ct_is_dying(ct))
                        continue;

                if (nf_ct_key_equal(h, tuple, zone, net))
                        return h;
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        return NULL;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Florian Westphal        56      35.67%   4        26.67%
Yasuyuki Kozakai        36      22.93%   1        6.67%
Eric Dumazet            15      9.55%    1        6.67%
Patrick McHardy         12      7.64%    3        20.00%
Liping Zhang            9       5.73%    1        6.67%
Jesper Dangaard Brouer  8       5.10%    1        6.67%
Changli Gao             8       5.10%    1        6.67%
Alexey Dobriyan         5       3.18%    1        6.67%
Daniel Borkmann         4       2.55%    1        6.67%
Andrey Vagin            4       2.55%    1        6.67%
Total                   157     100.00%  15       100.00%

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Yasuyuki Kozakai        42      32.31%   1        10.00%
Patrick McHardy         38      29.23%   3        30.00%
Eric Dumazet            25      19.23%   1        10.00%
Changli Gao             8       6.15%    1        10.00%
Alexey Dobriyan         7       5.38%    1        10.00%
Andrey Vagin            4       3.08%    1        10.00%
Daniel Borkmann         4       3.08%    1        10.00%
Florian Westphal        2       1.54%    1        10.00%
Total                   130     100.00%  10       100.00%


struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, net));
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Changli Gao             35      85.37%   1        33.33%
Daniel Borkmann         4       9.76%    1        33.33%
Florian Westphal        2       4.88%    1        33.33%
Total                   41      100.00%  3        100.00%

EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
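The Warning above ____nf_conntrack_find() is handled inside __nf_conntrack_find_get(): it takes a reference and re-checks the tuple before returning. A caller of the exported nf_conntrack_find_get() therefore only needs to drop that reference when it is done. A minimal sketch, assuming a hypothetical example_lookup() helper that is not part of this file:

/* Sketch of the usual nf_conntrack_find_get() caller pattern. */
static struct nf_conn *example_lookup(struct net *net,  /* hypothetical */
                                      const struct nf_conntrack_zone *zone,
                                      const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_tuple_hash *h;

        h = nf_conntrack_find_get(net, zone, tuple);
        if (!h)
                return NULL;

        return nf_ct_tuplehash_to_ctrack(h);    /* caller must nf_ct_put() later */
}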
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                                 &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                                 &nf_conntrack_hash[reply_hash]);
}

Contributors

Person                  Tokens  Prop     Commits  CommitProp
Pablo Neira Ayuso       38      66.67%   1        16.67%
Patrick McHardy         9       15.79%   1        16.67%
Eric Dumazet            4       7.02%    1        16.67%
Jesper Dangaard Brouer  2       3.51%    1        16.67%
Florian Westphal        2       3.51%    1        16.67%
Yasuyuki Kozakai        2       3.51%    1        16.67%
Total                   57      100.00%  6        100.00%


int nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                            &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert);
        local_bh_enable();
        return 0;

out:
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net, insert_failed);
        local_bh_enable();
        return -