Release 4.11 net/core/neighbour.c
/*
* Generic address resolution entity
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Fixes:
* Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
* Harald Welte Add neighbour cache statistics like rtstat
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/socket.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <linux/times.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/log2.h>
#include <linux/inetdevice.h>
#include <net/addrconf.h>
#define DEBUG
#define NEIGH_DEBUG 1
#define neigh_dbg(level, fmt, ...) \
do { \
if (level <= NEIGH_DEBUG) \
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
#define PNEIGH_HASHMASK 0xF
static void neigh_timer_handler(unsigned long arg);
static void __neigh_notify(struct neighbour *n, int type, int flags);
static void neigh_update_notify(struct neighbour *neigh);
static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
#ifdef CONFIG_PROC_FS
static const struct file_operations neigh_stat_seq_fops;
#endif
/*
Neighbour hash table buckets are protected with rwlock tbl->lock.
- All the scans/updates to hash buckets MUST be made under this lock.
- NOTHING clever should be done under this lock: no callbacks
to protocol backends, no attempts to send anything to the network.
Doing so will result in deadlocks if the backend/driver wants to use
the neighbour cache.
- If the entry requires some non-trivial actions, increase
its reference count and release table lock.
Neighbour entries are protected:
- with reference count.
- with rwlock neigh->lock
Reference count prevents destruction.
neigh->lock mainly serializes the link-layer address data and its validity state.
However, the same lock is also used to protect other entry fields:
- timer
- resolution queue
Again, nothing clever shall be done under neigh->lock;
the most complicated procedure we allow is dev->hard_header.
It is assumed that dev->hard_header is simple and does
not call back into the neighbour tables.
*/
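An illustrative sketch of the pattern the comment prescribes (not part of this file; the names are the ones used throughout it): pin the entry with a reference while the table lock is held, drop the lock, and only then do the non-trivial work.
static void example_use_entry(struct neigh_table *tbl, struct neighbour *n)
{
write_lock_bh(&tbl->lock);
neigh_hold(n); /* pin the entry under tbl->lock... */
write_unlock_bh(&tbl->lock); /* ...then release the table lock */
/* Now it is safe to call into drivers or send to the network. */
neigh_release(n); /* drop the pinning reference */
}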
static int neigh_blackhole(struct neighbour *neigh, struct sk_buff *skb)
{
kfree_skb(skb);
return -ENETDOWN;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 20 | 80.00% | 2 | 66.67% |
David S. Miller | 5 | 20.00% | 1 | 33.33% |
Total | 25 | 100.00% | 3 | 100.00% |
static void neigh_cleanup_and_release(struct neighbour *neigh)
{
if (neigh->parms->neigh_cleanup)
neigh->parms->neigh_cleanup(neigh);
__neigh_notify(neigh, RTM_DELNEIGH, 0);
call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
neigh_release(neigh);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Thomas Graf | 42 | 85.71% | 2 | 66.67% |
Ido Schimmel | 7 | 14.29% | 1 | 33.33% |
Total | 49 | 100.00% | 3 | 100.00% |
/*
* Returns a value uniformly distributed in the interval (1/2)*base ... (3/2)*base.
* It corresponds to the default IPv6 settings and is not overridable,
* because it is a really reasonable choice.
*/
unsigned long neigh_rand_reach_time(unsigned long base)
{
return base ? (prandom_u32() % base) + (base >> 1) : 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 22 | 78.57% | 1 | 33.33% |
Hideaki Yoshifuji / 吉藤英明 | 5 | 17.86% | 1 | 33.33% |
Aruna-Hewapathirane | 1 | 3.57% | 1 | 33.33% |
Total | 28 | 100.00% | 3 | 100.00% |
EXPORT_SYMBOL(neigh_rand_reach_time);
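A worked example of the interval, with illustrative values:
/* With base = 30 * HZ:
* prandom_u32() % (30 * HZ) is uniform over [0, 30 * HZ - 1]
* + (30 * HZ >> 1) shifts it to [15 * HZ, 45 * HZ - 1]
* i.e. the result is uniform over [base/2, 3*base/2).
*/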
static int neigh_forced_gc(struct neigh_table *tbl)
{
int shrunk = 0;
int i;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (i = 0; i < (1 << nht->hash_shift); i++) {
struct neighbour *n;
struct neighbour __rcu **np;
np = &nht->hash_buckets[i];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
/* A neighbour record may be discarded if:
* - nobody refers to it, and
* - it is not permanent
*/
write_lock(&n->lock);
if (atomic_read(&n->refcnt) == 1 &&
!(n->nud_state & NUD_PERMANENT)) {
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
n->dead = 1;
shrunk = 1;
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
continue;
}
write_unlock(&n->lock);
np = &n->next;
}
}
tbl->last_flush = jiffies;
write_unlock_bh(&tbl->lock);
return shrunk;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 147 | 63.36% | 4 | 33.33% |
Eric Dumazet | 54 | 23.28% | 2 | 16.67% |
David S. Miller | 30 | 12.93% | 5 | 41.67% |
Thomas Graf | 1 | 0.43% | 1 | 8.33% |
Total | 232 | 100.00% | 12 | 100.00% |
static void neigh_add_timer(struct neighbour *n, unsigned long when)
{
neigh_hold(n);
if (unlikely(mod_timer(&n->timer, when))) {
printk("NEIGH: BUG, double timer add, state is %x\n",
n->nud_state);
dump_stack();
}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Pavel Emelyanov | 49 | 100.00% | 1 | 100.00% |
Total | 49 | 100.00% | 1 | 100.00% |
static int neigh_del_timer(struct neighbour *n)
{
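/* A pending timer holds the reference taken in neigh_add_timer();
* drop it only if we actually cancelled a pending timer.
*/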
if ((n->nud_state & NUD_IN_TIMER) &&
del_timer(&n->timer)) {
neigh_release(n);
return 1;
}
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 39 | 92.86% | 1 | 50.00% |
Arnaldo Carvalho de Melo | 3 | 7.14% | 1 | 50.00% |
Total | 42 | 100.00% | 2 | 100.00% |
static void pneigh_queue_purge(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(list)) != NULL) {
dev_put(skb->dev);
kfree_skb(skb);
}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
{
int i;
struct neigh_hash_table *nht;
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
for (i = 0; i < (1 << nht->hash_shift); i++) {
struct neighbour *n;
struct neighbour __rcu **np = &nht->hash_buckets[i];
while ((n = rcu_dereference_protected(*np,
lockdep_is_held(&tbl->lock))) != NULL) {
if (dev && n->dev != dev) {
np = &n->next;
continue;
}
rcu_assign_pointer(*np,
rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock)));
write_lock(&n->lock);
neigh_del_timer(n);
n->dead = 1;
if (atomic_read(&n->refcnt) != 1) {
/* This is the most unpleasant situation:
we must destroy the neighbour entry,
but someone still uses it.
Destruction will be delayed until
the last user releases the entry, but
we must kill the timers etc. and move
it to a safe state.
*/
__skb_queue_purge(&n->arp_queue);
n->arp_queue_len_bytes = 0;
n->output = neigh_blackhole;
if (n->nud_state & NUD_VALID)
n->nud_state = NUD_NOARP;
else
n->nud_state = NUD_NONE;
neigh_dbg(2, "neigh %p is stray\n", n);
}
write_unlock(&n->lock);
neigh_cleanup_and_release(n);
}
}
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 165 | 67.62% | 6 | 35.29% |
Eric Dumazet | 61 | 25.00% | 4 | 23.53% |
David S. Miller | 7 | 2.87% | 2 | 11.76% |
Joe Perches | 4 | 1.64% | 1 | 5.88% |
Herbert Xu | 3 | 1.23% | 1 | 5.88% |
Arnaldo Carvalho de Melo | 2 | 0.82% | 1 | 5.88% |
Tommi Virtanen | 1 | 0.41% | 1 | 5.88% |
Thomas Graf | 1 | 0.41% | 1 | 5.88% |
Total | 244 | 100.00% | 17 | 100.00% |
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
write_unlock_bh(&tbl->lock);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Herbert Xu | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(neigh_changeaddr);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
{
write_lock_bh(&tbl->lock);
neigh_flush_dev(tbl, dev);
pneigh_ifdown(tbl, dev);
write_unlock_bh(&tbl->lock);
del_timer_sync(&tbl->proxy_timer);
pneigh_queue_purge(&tbl->proxy_queue);
return 0;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 35 | 54.69% | 6 | 85.71% |
Herbert Xu | 29 | 45.31% | 1 | 14.29% |
Total | 64 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(neigh_ifdown);
static struct neighbour *neigh_alloc(struct neigh_table *tbl, struct net_device *dev)
{
struct neighbour *n = NULL;
unsigned long now = jiffies;
int entries;
entries = atomic_inc_return(&tbl->entries) - 1;
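/* Force a synchronous GC pass when we hit gc_thresh3, or when we are
* past gc_thresh2 and more than 5 seconds have elapsed since the last
* flush.
*/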
if (entries >= tbl->gc_thresh3 ||
(entries >= tbl->gc_thresh2 &&
time_after(now, tbl->last_flush + 5 * HZ))) {
if (!neigh_forced_gc(tbl) &&
entries >= tbl->gc_thresh3) {
net_info_ratelimited("%s: neighbor table overflow!\n",
tbl->id);
NEIGH_CACHE_STAT_INC(tbl, table_fulls);
goto out_entries;
}
}
n = kzalloc(tbl->entry_size + dev->neigh_priv_len, GFP_ATOMIC);
if (!n)
goto out_entries;
__skb_queue_head_init(&n->arp_queue);
rwlock_init(&n->lock);
seqlock_init(&n->ha_lock);
n->updated = n->used = now;
n->nud_state = NUD_NONE;
n->output = neigh_blackhole;
seqlock_init(&n->hh.hh_lock);
n->parms = neigh_parms_clone(&tbl->parms);
setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
NEIGH_CACHE_STAT_INC(tbl, allocs);
n->tbl = tbl;
atomic_set(&n->refcnt, 1);
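/* Born dead: the entry only goes live once __neigh_create() links it
* into the hash table.
*/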
n->dead = 1;
out:
return n;
out_entries:
atomic_dec(&tbl->entries);
goto out;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 151 | 56.77% | 5 | 27.78% |
Herbert Xu | 36 | 13.53% | 2 | 11.11% |
David S. Miller | 29 | 10.90% | 4 | 22.22% |
Rick Jones | 18 | 6.77% | 1 | 5.56% |
Arnaldo Carvalho de Melo | 10 | 3.76% | 1 | 5.56% |
Eric Dumazet | 9 | 3.38% | 2 | 11.11% |
Hideaki Yoshifuji / 吉藤英明 | 5 | 1.88% | 1 | 5.56% |
Andrew Morton | 4 | 1.50% | 1 | 5.56% |
Pavel Emelyanov | 4 | 1.50% | 1 | 5.56% |
Total | 266 | 100.00% | 18 | 100.00% |
static void neigh_get_hash_rnd(u32 *x)
{
get_random_bytes(x, sizeof(*x));
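/* Force the value odd: the per-protocol hash functions (e.g.
* arp_hashfn()) multiply by it, and an odd multiplier is invertible
* modulo 2^32, so no hash state is lost.
*/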
*x |= 1;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David S. Miller | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
static struct neigh_hash_table *neigh_hash_alloc(unsigned int shift)
{
size_t size = (1 << shift) * sizeof(struct neighbour *);
struct neigh_hash_table *ret;
struct neighbour __rcu **buckets;
int i;
ret = kmalloc(sizeof(*ret), GFP_ATOMIC);
if (!ret)
return NULL;
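/* Bucket arrays up to one page come from kmalloc; larger ones are
* taken directly from the page allocator.
*/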
if (size <= PAGE_SIZE)
buckets = kzalloc(size, GFP_ATOMIC);
else
buckets = (struct neighbour __rcu **)
__get_free_pages(GFP_ATOMIC | __GFP_ZERO,
get_order(size));
if (!buckets) {
kfree(ret);
return NULL;
}
ret->hash_buckets = buckets;
ret->hash_shift = shift;
for (i = 0; i < NEIGH_NUM_HASH_RND; i++)
neigh_get_hash_rnd(&ret->hash_rnd[i]);
return ret;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David S. Miller | 88 | 56.77% | 4 | 57.14% |
Eric Dumazet | 64 | 41.29% | 2 | 28.57% |
Andrew Morton | 3 | 1.94% | 1 | 14.29% |
Total | 155 | 100.00% | 7 | 100.00% |
static void neigh_hash_free_rcu(struct rcu_head *head)
{
struct neigh_hash_table *nht = container_of(head,
struct neigh_hash_table,
rcu);
size_t size = (1 << nht->hash_shift) * sizeof(struct neighbour *);
struct neighbour __rcu **buckets = nht->hash_buckets;
if (size <= PAGE_SIZE)
kfree(buckets);
else
free_pages((unsigned long)buckets, get_order(size));
kfree(nht);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David S. Miller | 45 | 52.33% | 2 | 50.00% |
Eric Dumazet | 41 | 47.67% | 2 | 50.00% |
Total | 86 | 100.00% | 4 | 100.00% |
static struct neigh_hash_table *neigh_hash_grow(struct neigh_table *tbl,
unsigned long new_shift)
{
unsigned int i, hash;
struct neigh_hash_table *new_nht, *old_nht;
NEIGH_CACHE_STAT_INC(tbl, hash_grows);
old_nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
new_nht = neigh_hash_alloc(new_shift);
if (!new_nht)
return old_nht;
for (i = 0; i < (1 << old_nht->hash_shift); i++) {
struct neighbour *n, *next;
for (n = rcu_dereference_protected(old_nht->hash_buckets[i],
lockdep_is_held(&tbl->lock));
n != NULL;
n = next) {
hash = tbl->hash(n->primary_key, n->dev,
new_nht->hash_rnd);
hash >>= (32 - new_nht->hash_shift);
next = rcu_dereference_protected(n->next,
lockdep_is_held(&tbl->lock));
rcu_assign_pointer(n->next,
rcu_dereference_protected(
new_nht->hash_buckets[hash],
lockdep_is_held(&tbl->lock)));
rcu_assign_pointer(new_nht->hash_buckets[hash], n);
}
}
rcu_assign_pointer(tbl->nht, new_nht);
call_rcu(&old_nht->rcu, neigh_hash_free_rcu);
return new_nht;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David S. Miller | 124 | 52.54% | 4 | 66.67% |
Eric Dumazet | 112 | 47.46% | 2 | 33.33% |
Total | 236 | 100.00% | 6 | 100.00% |
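An illustrative sketch (made-up values) of why rehashing only needs the shift to change: the bucket index is always the top hash_shift bits of the 32-bit hash value.
u32 hash = 0xdeadbeef; /* some 32-bit hash value */
unsigned int shift = 3; /* 8 buckets */
unsigned int bucket = hash >> (32 - shift); /* top 3 bits: 0b110 == 6 */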
struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
struct net_device *dev)
{
struct neighbour *n;
NEIGH_CACHE_STAT_INC(tbl, lookups);
rcu_read_lock_bh();
n = __neigh_lookup_noref(tbl, pkey, dev);
if (n) {
if (!atomic_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
}
rcu_read_unlock_bh();
return n;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 42 | 51.22% | 4 | 44.44% |
Eric Dumazet | 20 | 24.39% | 2 | 22.22% |
David S. Miller | 14 | 17.07% | 1 | 11.11% |
Eric W. Biedermann | 3 | 3.66% | 1 | 11.11% |
Pavel Emelyanov | 3 | 3.66% | 1 | 11.11% |
Total | 82 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(neigh_lookup);
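A hedged usage sketch; it assumes a caller that holds a valid dev, and uses arp_tbl, the IPv4 neigh_table instance declared in <net/arp.h>:
__be32 ip = htonl(0xc0a80101); /* 192.168.1.1, for illustration */
struct neighbour *n = neigh_lookup(&arp_tbl, &ip, dev);
if (n) {
/* ... use the entry ... */
neigh_release(n); /* drop the reference the lookup took */
}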
struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
const void *pkey)
{
struct neighbour *n;
int key_len = tbl->key_len;
u32 hash_val;
struct neigh_hash_table *nht;
NEIGH_CACHE_STAT_INC(tbl, lookups);
rcu_read_lock_bh();
nht = rcu_dereference_bh(tbl->nht);
hash_val = tbl->hash(pkey, NULL, nht->hash_rnd) >> (32 - nht->hash_shift);
for (n = rcu_dereference_bh(nht->hash_buckets[hash_val]);
n != NULL;
n = rcu_dereference_bh(n->next)) {
if (!memcmp(n->primary_key, pkey, key_len) &&
net_eq(dev_net(n->dev), net)) {
if (!atomic_inc_not_zero(&n->refcnt))
n = NULL;
NEIGH_CACHE_STAT_INC(tbl, hits);
break;
}
}
rcu_read_unlock_bh();
return n;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
David S. Miller | 101 | 59.41% | 3 | 33.33% |
Eric Dumazet | 43 | 25.29% | 2 | 22.22% |
Eric W. Biedermann | 9 | 5.29% | 1 | 11.11% |
Pavel Emelyanov | 9 | 5.29% | 1 | 11.11% |
Hideaki Yoshifuji / 吉藤英明 | 8 | 4.71% | 2 | 22.22% |
Total | 170 | 100.00% | 9 | 100.00% |
EXPORT_SYMBOL(neigh_lookup_nodev);
struct neighbour *__neigh_create(struct neigh_table *tbl, const void *pkey,
struct net_device *dev, bool want_ref)
{
u32 hash_val;
int key_len = tbl->key_len;
int error;
struct neighbour *n1, *rc, *n = neigh_alloc(tbl, dev);
struct neigh_hash_table *nht;
if (!n) {
rc = ERR_PTR(-ENOBUFS);
goto out;
}
memcpy(n->primary_key, pkey, key_len);
n->dev = dev;
dev_hold(dev);
/* Protocol specific setup. */
if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
rc = ERR_PTR(error);
goto out_neigh_release;
}
if (dev->netdev_ops->ndo_neigh_construct) {
error = dev->netdev_ops->ndo_neigh_construct(dev, n);
if (error < 0) {
rc = ERR_PTR(error);
goto out_neigh_release;
}
}
/* Device specific setup. */
if (n->parms->neigh_setup &&
(error = n->parms->neigh_setup(n)) < 0) {
rc = ERR_PTR(error);
goto out_neigh_release;
}
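/* Backdate the confirmation time so a fresh entry is never treated
* as recently confirmed.
*/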
n->confirmed = jiffies - (NEIGH_VAR(n->parms, BASE_REACHABLE_TIME) << 1);
write_lock_bh(&tbl->lock);
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
if (atomic_read(&tbl->entries) > (1 << nht->hash_shift))
nht = neigh_hash_grow(tbl, nht->hash_shift + 1);
hash_val = tbl->hash(pkey, dev, nht->hash_rnd) >> (32 - nht->hash_shift);
if (n->parms->dead) {
rc = ERR_PTR(-EINVAL);
goto out_tbl_unlock;
}
for (n1 = rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock));
n1 != NULL;
n1 = rcu_dereference_protected(n1->next,
lockdep_is_held(&tbl->lock))) {
if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
if (want_ref)
neigh_hold(n1);
rc = n1;
goto out_tbl_unlock;
}
}
n->dead = 0;
if (want_ref)
neigh_hold(n);
rcu_assign_pointer(n->next,
rcu_dereference_protected(nht->hash_buckets[hash_val],
lockdep_is_held(&tbl->lock)));
rcu_assign_pointer(nht->hash_buckets[hash_val], n);
write_unlock_bh(&tbl->lock);
neigh_dbg(2, "neigh %p is created\n", n);
rc = n;
out:
return rc;
out_tbl_unlock:
write_unlock_bh(&tbl->lock);
out_neigh_release:
neigh_release(n);
goto out;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Linus Torvalds (pre-git) | 200 | 39.22% | 6 | 27.27% |
David S. Miller | 108 | 21.18% | 7 | 31.82% |
Eric Dumazet | 87 | 17.06% | 2 | 9.09% |
Herbert Xu | 55 | 10.78% | 3 | 13.64% |
Arnaldo Carvalho de Melo | 49 | 9.61% | 1 | 4.55% |
Jiri Pirko | 7 | 1.37% | 2 | 9.09% |
Joe Perches | 4 | 0.78% | 1 | 4.55% |
Total | 510 | 100.00% | 22 | 100.00% |
EXPORT_SYMBOL(__neigh_create);
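For reference, in-tree callers normally go through the inline wrapper in <net/neighbour.h>, which is approximately the following:
static inline struct neighbour *neigh_create(struct neigh_table *tbl,
const void *pkey,
struct net_device *dev)
{
return __neigh_create(tbl, pkey, dev, true);
}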
static u32 pneigh_hash(const void *pkey, int key_len)
{
u32 hash_val = *(u32 *)(pkey + key_len - 4);
hash_val ^= (hash_val >> 16);
hash_val ^= hash_val >> 8;
hash_val ^= hash_val >> 4;
hash_val &= PNEIGH_HASHMASK;
return hash_val;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Pavel Emelyanov | 49 | 85.96% | 1 | 50.00% |
Hideaki Yoshifuji / 吉藤英明 | 8 | 14.04% | 1 | 50.00% |
Total | 57 | 100.00% | 2 | 100.00% |
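A worked example with illustrative values: hashing the IPv4 key 192.168.1.1 (key_len == 4). The last 4 bytes of the key are XOR-folded down to a 4-bit bucket index.
u32 h = 0xc0a80101; /* 192.168.1.1 in host order, for illustration */
h ^= h >> 16; /* 0xc0a80101 ^ 0x0000c0a8 == 0xc0a8c1a9 */
h ^= h >> 8; /* 0xc0a8c1a9 ^ 0x00c0a8c1 == 0xc0686968 */
h ^= h >> 4; /* 0xc0686968 ^ 0x0c068696 == 0xcc6eeffe */
h &= PNEIGH_HASHMASK; /* low 4 bits: bucket 0xe == 14 */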
static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
struct net *net,
const void *pkey,
int key_len,
struct net_device *dev)
{
while (n) {
if (!memcmp(n->key, pkey, key_len) &&
net_eq(pneigh_net(n), net) &&
(n->dev == dev || !n->dev))
return n;
n = n->next;
}
return NULL;
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hideaki Yoshifuji / 吉藤英明 | 47 | 54.65% | 1 | 33.33% |
Pavel Emelyanov | 36 | 41.86% | 1 | 33.33% |
David S. Miller | 3 | 3.49% | 1 | 33.33% |
Total | 86 | 100.00% | 3 | 100.00% |
struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
struct net *net, const void *pkey, struct net_device *dev)
{
int key_len = tbl->key_len;
u32 hash_val = pneigh_hash(pkey, key_len);
return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
net, pkey, key_len, dev);
}
Contributors
Person | Tokens | Token % | Commits | Commit % |
Hideaki Yoshifuji / 吉藤英明 | 62 | 98.41% | 1 | 50.00% |
Pavel Emelyanov | 1 | 1.59% | 1 | 50.00% |
Total | 63 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL_GPL(__pneigh_lookup);
struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
struct net *net, const void *pkey,
struct net_device *dev, int creat)
{
struct pneigh_entry *n;
int key_len = tbl->key_len;
u32 hash_val = pneigh_hash(pkey, key_len);
read_lock_bh(&tbl->lock);
n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
net, pkey, key_len, dev);
read_unlock_bh(&tbl->lock);
if (n || !creat)
goto out;
ASSERT_RTNL();
n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);