cregit-Linux: how code gets into the kernel

Release 4.8: net/netfilter/nf_conntrack_core.c
Directory: net/netfilter
/* Connection state tracking for netfilter.  This is separated from,
   but required by, the NAT layer; it can also be used by an iptables
   extension. */

/* (C) 1999-2001 Paul `Rusty' Russell
 * (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
 * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
 * (C) 2005-2012 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */


#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/vmalloc.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/jhash.h>
#include <linux/err.h>
#include <linux/percpu.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/socket.h>
#include <linux/mm.h>
#include <linux/nsproxy.h>
#include <linux/rculist_nulls.h>

#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_timestamp.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_labels.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netns/hash.h>


#define NF_CONNTRACK_VERSION	"0.5.0"

int (*nfnetlink_parse_nat_setup_hook)(struct nf_conn *ct,
                                      enum nf_nat_manip_type manip,
                                      const struct nlattr *attr) __read_mostly;
EXPORT_SYMBOL_GPL(nfnetlink_parse_nat_setup_hook);

__cacheline_aligned_in_smp spinlock_t nf_conntrack_locks[CONNTRACK_LOCKS];
EXPORT_SYMBOL_GPL(nf_conntrack_locks);

__cacheline_aligned_in_smp DEFINE_SPINLOCK(nf_conntrack_expect_lock);
EXPORT_SYMBOL_GPL(nf_conntrack_expect_lock);

struct hlist_nulls_head *nf_conntrack_hash __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_hash);

static __read_mostly struct kmem_cache *nf_conntrack_cachep;
static __read_mostly seqcount_t nf_conntrack_generation;
static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;


void nf_conntrack_lock(spinlock_t *lock) __acquires(lock)
{
        spin_lock(lock);
        while (unlikely(nf_conntrack_locks_all)) {
                spin_unlock(lock);

                /*
                 * Order the 'nf_conntrack_locks_all' load vs. the
                 * spin_unlock_wait() loads below, to ensure
                 * that 'nf_conntrack_locks_all_lock' is indeed held:
                 */
                smp_rmb(); /* spin_lock(&nf_conntrack_locks_all_lock) */
                spin_unlock_wait(&nf_conntrack_locks_all_lock);
                spin_lock(lock);
        }
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
sasha levin                   42   87.50%        1      33.33%
peter zijlstra                 5   10.42%        1      33.33%
nicholas mc guire              1    2.08%        1      33.33%
Total                         48  100.00%        3     100.00%

EXPORT_SYMBOL_GPL(nf_conntrack_lock);

static void nf_conntrack_double_unlock(unsigned int h1, unsigned int h2)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        spin_unlock(&nf_conntrack_locks[h1]);
        if (h1 != h2)
                spin_unlock(&nf_conntrack_locks[h2]);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        46  100.00%        1     100.00%
Total                         46  100.00%        1     100.00%

/* return true if we need to recompute hashes (in case hash table was resized) */
static bool nf_conntrack_double_lock(struct net *net, unsigned int h1,
                                     unsigned int h2, unsigned int sequence)
{
        h1 %= CONNTRACK_LOCKS;
        h2 %= CONNTRACK_LOCKS;
        if (h1 <= h2) {
                nf_conntrack_lock(&nf_conntrack_locks[h1]);
                if (h1 != h2)
                        spin_lock_nested(&nf_conntrack_locks[h2],
                                         SINGLE_DEPTH_NESTING);
        } else {
                nf_conntrack_lock(&nf_conntrack_locks[h2]);
                spin_lock_nested(&nf_conntrack_locks[h1],
                                 SINGLE_DEPTH_NESTING);
        }
        if (read_seqcount_retry(&nf_conntrack_generation, sequence)) {
                nf_conntrack_double_unlock(h1, h2);
                return true;
        }
        return false;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer       110   97.35%        1      33.33%
sasha levin                    2    1.77%        1      33.33%
florian westphal               1    0.88%        1      33.33%
Total                        113  100.00%        3     100.00%
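
Callers pair nf_conntrack_double_lock() with the nf_conntrack_generation
seqcount in a retry loop, recomputing both bucket indices until the locks
were taken against a stable table; the same pattern appears verbatim in
nf_ct_delete_from_lists() and nf_conntrack_hash_check_insert() below:

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));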


static void nf_conntrack_all_lock(void)
{
        int i;

        spin_lock(&nf_conntrack_locks_all_lock);
        nf_conntrack_locks_all = true;

        /*
         * Order the above store of 'nf_conntrack_locks_all' against
         * the spin_unlock_wait() loads below, such that if
         * nf_conntrack_lock() observes 'nf_conntrack_locks_all'
         * we must observe nf_conntrack_locks[] held:
         */
        smp_mb(); /* spin_lock(&nf_conntrack_locks_all_lock) */

        for (i = 0; i < CONNTRACK_LOCKS; i++) {
                spin_unlock_wait(&nf_conntrack_locks[i]);
        }
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        27   54.00%        1      25.00%
sasha levin                   17   34.00%        1      25.00%
peter zijlstra                 5   10.00%        1      25.00%
nicholas mc guire              1    2.00%        1      25.00%
Total                         50  100.00%        4     100.00%


static void nf_conntrack_all_unlock(void)
{
        /*
         * All prior stores must be complete before we clear
         * 'nf_conntrack_locks_all'. Otherwise nf_conntrack_lock()
         * might observe the false value but not the entire
         * critical section:
         */
        smp_store_release(&nf_conntrack_locks_all, false);
        spin_unlock(&nf_conntrack_locks_all_lock);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        13   56.52%        1      33.33%
peter zijlstra                 6   26.09%        1      33.33%
sasha levin                    4   17.39%        1      33.33%
Total                         23  100.00%        3     100.00%
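
An illustrative outline of how the global lock pair brackets whole-table
operations such as a resize; this is a sketch of the intended usage, not
code from this section:

        /* Sketch: quiesce every per-bucket lock holder, mutate the
         * table under the global lock, then let nf_conntrack_lock()
         * callers proceed again.
         */
        local_bh_disable();
        nf_conntrack_all_lock();
        /* ... rehash entries, bump nf_conntrack_generation ... */
        nf_conntrack_all_unlock();
        local_bh_enable();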

unsigned int nf_conntrack_htable_size __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_htable_size);

unsigned int nf_conntrack_max __read_mostly;
EXPORT_SYMBOL_GPL(nf_conntrack_max);

DEFINE_PER_CPU(struct nf_conn, nf_conntrack_untracked);
EXPORT_PER_CPU_SYMBOL(nf_conntrack_untracked);

static unsigned int nf_conntrack_hash_rnd __read_mostly;

static u32 hash_conntrack_raw(const struct nf_conntrack_tuple *tuple,
                              const struct net *net)
{
        unsigned int n;
        u32 seed;

        get_random_once(&nf_conntrack_hash_rnd, sizeof(nf_conntrack_hash_rnd));

        /* The direction must be ignored, so we hash everything up to the
         * destination ports (which is a multiple of 4) and treat the last
         * three bytes manually.
         */
        seed = nf_conntrack_hash_rnd ^ net_hash_mix(net);
        n = (sizeof(tuple->src) + sizeof(tuple->dst.u3)) / sizeof(u32);
        return jhash2((u32 *)tuple, n, seed ^
                      (((__force __u16)tuple->dst.u.all << 16) |
                      tuple->dst.protonum));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              46   42.20%        1      14.29%
florian westphal              30   27.52%        2      28.57%
patrick mchardy               23   21.10%        1      14.29%
changli gao                    5    4.59%        1      14.29%
al viro                        4    3.67%        1      14.29%
sami farin                     1    0.92%        1      14.29%
Total                        109  100.00%        7     100.00%


static u32 scale_hash(u32 hash)
{
        return reciprocal_scale(hash, nf_conntrack_htable_size);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
changli gao                   14   82.35%        1      33.33%
florian westphal               3   17.65%        2      66.67%
Total                         17  100.00%        3     100.00%
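
For reference, reciprocal_scale() is defined in <linux/kernel.h>; it maps
a 32-bit hash uniformly onto [0, ep_ro) with a multiply and a shift
instead of a modulo:

        static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
        {
                /* (val * ep_ro) / 2^32, computed in 64 bits */
                return (u32)(((u64) val * ep_ro) >> 32);
        }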


static u32 __hash_conntrack(const struct net *net,
                            const struct nf_conntrack_tuple *tuple,
                            unsigned int size)
{
        return reciprocal_scale(hash_conntrack_raw(tuple, net), size);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
changli gao                   25   71.43%        1      50.00%
florian westphal              10   28.57%        1      50.00%
Total                         35  100.00%        2     100.00%


static u32 hash_conntrack(const struct net *net,
                          const struct nf_conntrack_tuple *tuple)
{
        return scale_hash(hash_conntrack_raw(tuple, net));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              17   58.62%        1      20.00%
patrick mchardy                5   17.24%        1      20.00%
florian westphal               5   17.24%        2      40.00%
daniel borkmann                2    6.90%        1      20.00%
Total                         29  100.00%        5     100.00%


bool nf_ct_get_tuple(const struct sk_buff *skb,
                     unsigned int nhoff,
                     unsigned int dataoff,
                     u_int16_t l3num,
                     u_int8_t protonum,
                     struct net *net,
                     struct nf_conntrack_tuple *tuple,
                     const struct nf_conntrack_l3proto *l3proto,
                     const struct nf_conntrack_l4proto *l4proto)
{
        memset(tuple, 0, sizeof(*tuple));

        tuple->src.l3num = l3num;
        if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
                return false;

        tuple->dst.protonum = protonum;
        tuple->dst.dir = IP_CT_DIR_ORIGINAL;

        return l4proto->pkt_to_tuple(skb, dataoff, net, tuple);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              95   81.90%        1      20.00%
philip craig                   9    7.76%        1      20.00%
eric w. biederman              7    6.03%        1      20.00%
martin josefsson               3    2.59%        1      20.00%
jan engelhardt                 2    1.72%        1      20.00%
Total                        116  100.00%        5     100.00%

EXPORT_SYMBOL_GPL(nf_ct_get_tuple);

bool nf_ct_get_tuplepr(const struct sk_buff *skb, unsigned int nhoff,
                       u_int16_t l3num,
                       struct net *net, struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_l3proto *l3proto;
        struct nf_conntrack_l4proto *l4proto;
        unsigned int protoff;
        u_int8_t protonum;
        int ret;

        rcu_read_lock();

        l3proto = __nf_ct_l3proto_find(l3num);
        ret = l3proto->get_l4proto(skb, nhoff, &protoff, &protonum);
        if (ret != NF_ACCEPT) {
                rcu_read_unlock();
                return false;
        }

        l4proto = __nf_ct_l4proto_find(l3num, protonum);

        ret = nf_ct_get_tuple(skb, nhoff, protoff, l3num, protonum, net,
                              tuple, l3proto, l4proto);

        rcu_read_unlock();
        return ret;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai             118   92.91%        1      33.33%
eric w. biederman              7    5.51%        1      33.33%
jan engelhardt                 2    1.57%        1      33.33%
Total                        127  100.00%        3     100.00%

EXPORT_SYMBOL_GPL(nf_ct_get_tuplepr);
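
A hypothetical caller sketch: deriving the original-direction tuple of an
IPv4 packet. The surrounding variables (skb, net) are assumed from the
caller's context:

        struct nf_conntrack_tuple tuple;

        /* Sketch: treat the packet as untrackable if no tuple can
         * be extracted.
         */
        if (!nf_ct_get_tuplepr(skb, skb_network_offset(skb),
                               PF_INET, net, &tuple))
                return NF_DROP;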
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
                        const struct nf_conntrack_tuple *orig,
                        const struct nf_conntrack_l3proto *l3proto,
                        const struct nf_conntrack_l4proto *l4proto)
{
        memset(inverse, 0, sizeof(*inverse));

        inverse->src.l3num = orig->src.l3num;
        if (l3proto->invert_tuple(inverse, orig) == 0)
                return false;

        inverse->dst.dir = !orig->dst.dir;

        inverse->dst.protonum = orig->dst.protonum;
        return l4proto->invert_tuple(inverse, orig);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              90   86.54%        1      25.00%
philip craig                   9    8.65%        1      25.00%
martin josefsson               3    2.88%        1      25.00%
jan engelhardt                 2    1.92%        1      25.00%
Total                        104  100.00%        4     100.00%

EXPORT_SYMBOL_GPL(nf_ct_invert_tuple);
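
A minimal sketch of how the reply-direction tuple is derived when a new
connection is set up; tuple, l3proto and l4proto are assumed to be in
scope as they would be in such a caller:

        struct nf_conntrack_tuple repl_tuple;

        /* Sketch: the reply tuple is the inverse of the original. */
        if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, l4proto))
                return NULL;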
static void clean_from_lists(struct nf_conn *ct)
{
        pr_debug("clean_from_lists(%p)\n", ct);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode);

        /* Destroy all pending expectations */
        nf_ct_remove_expectations(ct);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
martin josefsson              22   44.00%        1      16.67%
yasuyuki kozakai              19   38.00%        1      16.67%
eric dumazet                   4    8.00%        1      16.67%
harald welte                   3    6.00%        1      16.67%
patrick mchardy                2    4.00%        2      33.33%
Total                         50  100.00%        6     100.00%

/* must be called with local_bh_disable */
static void nf_ct_add_to_dying_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) dying list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->dying);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        76  100.00%        1     100.00%
Total                         76  100.00%        1     100.00%

/* must be called with local_bh_disable */
static void nf_ct_add_to_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* add this conntrack to the (per cpu) unconfirmed list */
        ct->cpu = smp_processor_id();
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        hlist_nulls_add_head(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                             &pcpu->unconfirmed);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        76  100.00%        1     100.00%
Total                         76  100.00%        1     100.00%

/* must be called with local_bh_disable */
static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
{
        struct ct_pcpu *pcpu;

        /* We overload first tuple to link into unconfirmed or dying list.*/
        pcpu = per_cpu_ptr(nf_ct_net(ct)->ct.pcpu_lists, ct->cpu);

        spin_lock(&pcpu->lock);
        BUG_ON(hlist_nulls_unhashed(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode));
        hlist_nulls_del_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
        spin_unlock(&pcpu->lock);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        80  100.00%        1     100.00%
Total                         80  100.00%        1     100.00%

/* Released via destroy_conntrack() */
struct nf_conn *nf_ct_tmpl_alloc(struct net *net,
                                 const struct nf_conntrack_zone *zone,
                                 gfp_t flags)
{
        struct nf_conn *tmpl;

        tmpl = kzalloc(sizeof(*tmpl), flags);
        if (tmpl == NULL)
                return NULL;

        tmpl->status = IPS_TEMPLATE;
        write_pnet(&tmpl->ct_net, net);
        nf_ct_zone_add(tmpl, zone);
        atomic_set(&tmpl->ct_general.use, 0);

        return tmpl;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
pablo neira ayuso             76   88.37%        1      20.00%
daniel borkmann                6    6.98%        2      40.00%
joe stringer                   3    3.49%        1      20.00%
florian westphal               1    1.16%        1      20.00%
Total                         86  100.00%        5     100.00%

EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);

void nf_ct_tmpl_free(struct nf_conn *tmpl)
{
        nf_ct_ext_destroy(tmpl);
        nf_ct_ext_free(tmpl);
        kfree(tmpl);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
pablo neira ayuso             25  100.00%        1     100.00%
Total                         25  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(nf_ct_tmpl_free);
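
A usage sketch for the template API, roughly as a conntrack target would
use it; error handling is abbreviated and nf_ct_zone_dflt is the default
zone:

        struct nf_conn *tmpl;

        tmpl = nf_ct_tmpl_alloc(net, &nf_ct_zone_dflt, GFP_KERNEL);
        if (!tmpl)
                return -ENOMEM;
        /* ... attach extensions, hand the template to the packet path ... */
        nf_ct_tmpl_free(tmpl);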
static void destroy_conntrack(struct nf_conntrack *nfct)
{
        struct nf_conn *ct = (struct nf_conn *)nfct;
        struct net *net = nf_ct_net(ct);
        struct nf_conntrack_l4proto *l4proto;

        pr_debug("destroy_conntrack(%p)\n", ct);
        NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
        NF_CT_ASSERT(!timer_pending(&ct->timeout));

        if (unlikely(nf_ct_is_template(ct))) {
                nf_ct_tmpl_free(ct);
                return;
        }
        rcu_read_lock();
        l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
        if (l4proto->destroy)
                l4proto->destroy(ct);

        rcu_read_unlock();

        local_bh_disable();
        /* Expectations will have been removed in clean_from_lists,
         * except TFTP can create an expectation on the first packet,
         * before connection is in the list, so we need to clean here,
         * too.
         */
        nf_ct_remove_expectations(ct);

        nf_ct_del_from_dying_or_unconfirmed_list(ct);

        NF_CT_STAT_INC(net, delete);
        local_bh_enable();

        if (ct->master)
                nf_ct_put(ct->master);

        pr_debug("destroy_conntrack: returning ct=%p to slab\n", ct);
        nf_conntrack_free(ct);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
martin josefsson              74   43.27%        2      15.38%
yasuyuki kozakai              43   25.15%        1       7.69%
pablo neira ayuso             19   11.11%        2      15.38%
patrick mchardy               14    8.19%        4      30.77%
alexey dobriyan               12    7.02%        1       7.69%
jesper dangaard brouer         6    3.51%        2      15.38%
harald welte                   3    1.75%        1       7.69%
Total                        171  100.00%       13     100.00%


static void nf_ct_delete_from_lists(struct nf_conn *ct)
{
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        unsigned int sequence;

        nf_ct_helper_destroy(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        clean_from_lists(ct);
        nf_conntrack_double_unlock(hash, reply_hash);

        nf_ct_add_to_dying_list(ct);

        NF_CT_STAT_INC(net, delete_list);
        local_bh_enable();
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
jesper dangaard brouer        73   58.40%        2      28.57%
pablo neira ayuso             46   36.80%        2      28.57%
daniel borkmann                4    3.20%        1      14.29%
florian westphal               2    1.60%        2      28.57%
Total                        125  100.00%        7     100.00%


bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
{
        struct nf_conn_tstamp *tstamp;

        tstamp = nf_conn_tstamp_find(ct);
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_get_real_ns();

        if (nf_ct_is_dying(ct))
                goto delete;

        if (nf_conntrack_event_report(IPCT_DESTROY, ct,
                                      portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
                nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }

        nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
 delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
pablo neira ayuso             73   59.84%        2      33.33%
florian westphal              43   35.25%        2      33.33%
yasuyuki kozakai               5    4.10%        1      16.67%
eric dumazet                   1    0.82%        1      16.67%
Total                        122  100.00%        6     100.00%

EXPORT_SYMBOL_GPL(nf_ct_delete);

static void death_by_timeout(unsigned long ul_conntrack)
{
        nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
florian westphal              23   95.83%        1      50.00%
yasuyuki kozakai               1    4.17%        1      50.00%
Total                         24  100.00%        2     100.00%
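
death_by_timeout() is the conntrack timer callback; it is wired up when a
conntrack is allocated, roughly as below (a sketch consistent with the
allocation path elsewhere in this file):

        /* Sketch: arm the per-conntrack timer at allocation time. */
        setup_timer(&ct->timeout, death_by_timeout, (unsigned long)ct);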


static inline bool
nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
                const struct nf_conntrack_tuple *tuple,
                const struct nf_conntrack_zone *zone,
                const struct net *net)
{
        struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

        /* A conntrack can be recreated with the equal tuple,
         * so we need to check that the conntrack is confirmed
         */
        return nf_ct_tuple_equal(tuple, &h->tuple) &&
               nf_ct_zone_equal(ct, zone, NF_CT_DIRECTION(h)) &&
               nf_ct_is_confirmed(ct) &&
               net_eq(net, nf_ct_net(ct));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
andrey vagin                  51   64.56%        1      25.00%
florian westphal              16   20.25%        1      25.00%
daniel borkmann               12   15.19%        2      50.00%
Total                         79  100.00%        4     100.00%

/* must be called with rcu read lock held */
void nf_conntrack_get_ht(struct hlist_nulls_head **hash, unsigned int *hsize)
{
        struct hlist_nulls_head *hptr;
        unsigned int sequence, hsz;

        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hsz = nf_conntrack_htable_size;
                hptr = nf_conntrack_hash;
        } while (read_seqcount_retry(&nf_conntrack_generation, sequence));

        *hash = hptr;
        *hsize = hsz;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
liping zhang                  67  100.00%        1     100.00%
Total                         67  100.00%        1     100.00%

EXPORT_SYMBOL_GPL(nf_conntrack_get_ht);
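
A hedged usage sketch: snapshot the table under rcu_read_lock() and walk
each bucket (the iterator variables here are illustrative):

        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int i, hsize;

        rcu_read_lock();
        nf_conntrack_get_ht(&ct_hash, &hsize);
        for (i = 0; i < hsize; i++) {
                hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[i], hnnode) {
                        /* ... inspect nf_ct_tuplehash_to_ctrack(h) ... */
                }
        }
        rcu_read_unlock();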
/*
 * Warning :
 * - Caller must take a reference on returned object
 *   and recheck nf_ct_tuple_equal(tuple, &h->tuple)
 */
static struct nf_conntrack_tuple_hash *
____nf_conntrack_find(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_head *ct_hash;
        struct hlist_nulls_node *n;
        unsigned int bucket, sequence;

begin:
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                bucket = scale_hash(hash);
                ct_hash = nf_conntrack_hash;
        } while (read_seqcount_retry(&nf_conntrack_generation, sequence));

        hlist_nulls_for_each_entry_rcu(h, n, &ct_hash[bucket], hnnode) {
                if (nf_ct_key_equal(h, tuple, zone, net)) {
                        NF_CT_STAT_INC_ATOMIC(net, found);
                        return h;
                }
                NF_CT_STAT_INC_ATOMIC(net, searched);
        }
        /*
         * if the nulls value we got at the end of this lookup is
         * not the expected one, we must restart lookup.
         * We probably met an item that was moved to another chain.
         */
        if (get_nulls_value(n) != bucket) {
                NF_CT_STAT_INC_ATOMIC(net, search_restart);
                goto begin;
        }

        return NULL;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              47   30.72%        1       6.67%
florian westphal              46   30.07%        4      26.67%
eric dumazet                  15    9.80%        1       6.67%
patrick mchardy               12    7.84%        3      20.00%
alexey dobriyan                9    5.88%        2      13.33%
changli gao                    8    5.23%        1       6.67%
jesper dangaard brouer         8    5.23%        1       6.67%
andrey vagin                   4    2.61%        1       6.67%
daniel borkmann                4    2.61%        1       6.67%
Total                        153  100.00%       15     100.00%
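
The restart test above relies on every chain being terminated by a nulls
marker equal to its bucket index, so a lookup that ends on a foreign
marker knows the entry was moved mid-walk. A sketch of the corresponding
table initialization, consistent with the hash table allocator:

        /* Each bucket's end-marker encodes the bucket index. */
        for (i = 0; i < nr_slots; i++)
                INIT_HLIST_NULLS_HEAD(&hash[i], i);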

/* Find a connection corresponding to a tuple. */
static struct nf_conntrack_tuple_hash *
__nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                        const struct nf_conntrack_tuple *tuple, u32 hash)
{
        struct nf_conntrack_tuple_hash *h;
        struct nf_conn *ct;

        rcu_read_lock();
begin:
        h = ____nf_conntrack_find(net, zone, tuple, hash);
        if (h) {
                ct = nf_ct_tuplehash_to_ctrack(h);
                if (unlikely(nf_ct_is_dying(ct) ||
                             !atomic_inc_not_zero(&ct->ct_general.use)))
                        h = NULL;
                else {
                        if (unlikely(!nf_ct_key_equal(h, tuple, zone, net))) {
                                nf_ct_put(ct);
                                goto begin;
                        }
                }
        }
        rcu_read_unlock();

        return h;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
yasuyuki kozakai              42   32.31%        1      10.00%
patrick mchardy               38   29.23%        3      30.00%
eric dumazet                  25   19.23%        1      10.00%
changli gao                    8    6.15%        1      10.00%
alexey dobriyan                7    5.38%        1      10.00%
andrey vagin                   4    3.08%        1      10.00%
daniel borkmann                4    3.08%        1      10.00%
florian westphal               2    1.54%        1      10.00%
Total                        130  100.00%       10     100.00%


struct nf_conntrack_tuple_hash *
nf_conntrack_find_get(struct net *net, const struct nf_conntrack_zone *zone,
                      const struct nf_conntrack_tuple *tuple)
{
        return __nf_conntrack_find_get(net, zone, tuple,
                                       hash_conntrack_raw(tuple, net));
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
changli gao                   35   85.37%        1      33.33%
daniel borkmann                4    9.76%        1      33.33%
florian westphal               2    4.88%        1      33.33%
Total                         41  100.00%        3     100.00%

EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
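
Usage sketch: a successful lookup returns a tuple hash whose conntrack
already carries a reference taken on the caller's behalf, which must be
dropped with nf_ct_put() (variable names illustrative):

        struct nf_conntrack_tuple_hash *h;

        h = nf_conntrack_find_get(net, &nf_ct_zone_dflt, &tuple);
        if (h) {
                struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

                /* ... use ct ... */
                nf_ct_put(ct);
        }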
static void __nf_conntrack_hash_insert(struct nf_conn *ct,
                                       unsigned int hash,
                                       unsigned int reply_hash)
{
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
                                 &nf_conntrack_hash[hash]);
        hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
                                 &nf_conntrack_hash[reply_hash]);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
pablo neira ayuso             38   66.67%        1      16.67%
patrick mchardy                9   15.79%        1      16.67%
eric dumazet                   4    7.02%        1      16.67%
florian westphal               2    3.51%        1      16.67%
jesper dangaard brouer         2    3.51%        1      16.67%
yasuyuki kozakai               2    3.51%        1      16.67%
Total                         57  100.00%        6     100.00%


int nf_conntrack_hash_check_insert(struct nf_conn *ct)
{
        const struct nf_conntrack_zone *zone;
        struct net *net = nf_ct_net(ct);
        unsigned int hash, reply_hash;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        unsigned int sequence;

        zone = nf_ct_zone(ct);

        local_bh_disable();
        do {
                sequence = read_seqcount_begin(&nf_conntrack_generation);
                hash = hash_conntrack(net,
                                      &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
                reply_hash = hash_conntrack(net,
                                           &ct->tuplehash[IP_CT_DIR_REPLY].tuple);
        } while (nf_conntrack_double_lock(net, hash, reply_hash, sequence));

        /* See if there's one in the list already, including reverse */
        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
                                    zone, net))
                        goto out;

        hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[reply_hash], hnnode)
                if (nf_ct_key_equal(h, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
                                    zone, net))
                        goto out;

        add_timer(&ct->timeout);
        smp_wmb();
        /* The caller holds a reference to this object */
        atomic_set(&ct->ct_general.use, 2);
        __nf_conntrack_hash_insert(ct, hash, reply_hash);
        nf_conntrack_double_unlock(hash, reply_hash);
        NF_CT_STAT_INC(net,