/* Release 4.14 — net/core/dst.c */
/*
* net/core/dst.c Protocol independent destination cache.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>
#include <net/lwtunnel.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH context.
 * 2) In order to keep spinlock held for a small delay,
 *    we use a second list where are stored long lived
 *    entries, that are handled by the garbage collect thread
 *    fired by a workqueue.
 * 3) This list is guarded by a mutex,
 *    so that the gc_task and dst_dev_event() can be synchronized.
 *
 * NOTE(review): the lists, spinlock, gc task and dst_dev_event()
 * described above do not appear anywhere in this file — this comment
 * looks stale; confirm against history and consider removing it.
 */
/*
 * We want to keep lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 *
 * NOTE(review): no lock/list pair or __dst_free() exists in this file —
 * likely left over from an earlier version; confirm and consider removing.
 */
/* Fallback dst->output handler: unconditionally drop the packet.
 * Installed by dst_init() and dst_dev_put() so that a dead or freshly
 * initialized route silently discards traffic instead of touching
 * protocol state that was never (or is no longer) valid.
 * Always returns 0 ("handled").
 */
int dst_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	kfree_skb(skb);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 18 | 64.29% | 1 | 20.00% |
Eric Dumazet | 4 | 14.29% | 1 | 20.00% |
Alexey Kuznetsov | 3 | 10.71% | 1 | 20.00% |
Andrew Morton | 2 | 7.14% | 1 | 20.00% |
Ingo Molnar | 1 | 3.57% | 1 | 20.00% |
Total | 28 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(dst_discard_out);
/* Shared read-only metrics block that every freshly initialized dst
 * points at (see dst_init_metrics() call in dst_init()); writers must
 * copy-on-write via dst_cow_metrics_generic() instead of modifying it.
 */
const struct dst_metrics dst_default_metrics = {
	/* This initializer is needed to force linker to place this variable
	 * into const section. Otherwise it might end into bss section.
	 * We really want to avoid false sharing on this variable, and catch
	 * any writes on it.
	 */
	.refcnt = REFCOUNT_INIT(1),
};
/* Initialize an already-allocated dst_entry to a sane baseline.
 *
 * @dst:              entry to initialize (not yet visible to other CPUs)
 * @ops:              protocol operations table
 * @dev:              backing net device (may be NULL); a reference is taken
 * @initial_ref:      starting value for __refcnt
 * @initial_obsolete: starting obsolete state
 * @flags:            DST_* flags; DST_NOCOUNT skips the ops entry counter
 */
void dst_init(struct dst_entry *dst, struct dst_ops *ops,
	      struct net_device *dev, int initial_ref, int initial_obsolete,
	      unsigned short flags)
{
	/* Device linkage: pin the backing device, if there is one. */
	dst->dev = dev;
	if (dev)
		dev_hold(dev);

	/* Identity, chaining and attached state. */
	dst->ops = ops;
	dst->child = NULL;
	dst->path = dst;
	dst->from = NULL;
	dst->next = NULL;
#ifdef CONFIG_XFRM
	dst->xfrm = NULL;
#endif
	dst->lwtstate = NULL;

	/* Metrics start out pointing at the shared read-only defaults. */
	dst_init_metrics(dst, dst_default_metrics.metrics, true);

	/* Packets are dropped until a protocol installs real handlers. */
	dst->input = dst_discard;
	dst->output = dst_discard_out;

	/* Remaining bookkeeping. */
	dst->error = 0;
	dst->expires = 0UL;
	dst->obsolete = initial_obsolete;
	dst->header_len = 0;
	dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
	dst->tclassid = 0;
#endif
	dst->flags = flags;
	atomic_set(&dst->__refcnt, initial_ref);
	dst->__use = 0;
	dst->lastuse = jiffies;

	if (!(flags & DST_NOCOUNT))
		dst_entries_add(ops, 1);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 129 | 64.50% | 1 | 10.00% |
Eric Dumazet | 23 | 11.50% | 2 | 20.00% |
Linus Torvalds (pre-git) | 22 | 11.00% | 2 | 20.00% |
Alexey Kuznetsov | 15 | 7.50% | 2 | 20.00% |
Denis V. Lunev | 5 | 2.50% | 1 | 10.00% |
Arjan van de Ven | 5 | 2.50% | 1 | 10.00% |
David S. Miller | 1 | 0.50% | 1 | 10.00% |
Total | 200 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(dst_init);
/* Allocate and initialize a dst_entry from the protocol's kmem cache.
 *
 * When the fast entry count exceeds ops->gc_thresh, the protocol's
 * garbage collector gets a chance to reclaim entries first; a non-zero
 * gc return refuses the allocation.  Returns the new entry, or NULL on
 * gc refusal or allocation failure.
 */
void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
		int initial_ref, int initial_obsolete, unsigned short flags)
{
	struct dst_entry *entry;

	/* Short-circuit: run gc only when over threshold; gc failure
	 * means we must not allocate. */
	if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh &&
	    ops->gc(ops))
		return NULL;

	entry = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
	if (!entry)
		return NULL;

	dst_init(entry, ops, dev, initial_ref, initial_obsolete, flags);
	return entry;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 56 | 57.73% | 1 | 14.29% |
David S. Miller | 26 | 26.80% | 4 | 57.14% |
Linus Torvalds (pre-git) | 13 | 13.40% | 1 | 14.29% |
Thomas Graf | 2 | 2.06% | 1 | 14.29% |
Total | 97 | 100.00% | 7 | 100.00% |
EXPORT_SYMBOL(dst_alloc);
/* Tear down a dst_entry whose last reference is gone.
 *
 * Undoes what dst_alloc()/dst_init() set up: decrements the ops entry
 * counter (unless DST_NOCOUNT), runs the protocol destroy hook, drops
 * the device and lwtunnel references, and frees the memory — metadata
 * dsts go through metadata_dst_free() since they are not carved from
 * the ops kmem cache.  A linked child then loses one reference too.
 * Always returns NULL.
 */
struct dst_entry *dst_destroy(struct dst_entry * dst)
{
	struct dst_entry *child;

	/* NOTE(review): presumably pairs with the store that published
	 * dst->child so we observe a fully initialized child here —
	 * confirm against the child-linking path. */
	smp_rmb();

	child = dst->child;
	if (!(dst->flags & DST_NOCOUNT))
		dst_entries_add(dst->ops, -1);

	if (dst->ops->destroy)
		dst->ops->destroy(dst);
	if (dst->dev)
		dev_put(dst->dev);

	lwtstate_put(dst->lwtstate);

	/* Metadata dsts have their own allocation/free path. */
	if (dst->flags & DST_METADATA)
		metadata_dst_free((struct metadata_dst *)dst);
	else
		kmem_cache_free(dst->ops->kmem_cachep, dst);

	/* Drop the reference the freed parent held on its child. */
	dst = child;
	if (dst)
		dst_release_immediate(dst);
	return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 85 | 65.38% | 1 | 9.09% |
David S. Miller | 30 | 23.08% | 4 | 36.36% |
Linus Torvalds (pre-git) | 10 | 7.69% | 2 | 18.18% |
Jiri Benc | 2 | 1.54% | 1 | 9.09% |
Alexey Kuznetsov | 1 | 0.77% | 1 | 9.09% |
Thomas Graf | 1 | 0.77% | 1 | 9.09% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.77% | 1 | 9.09% |
Total | 130 | 100.00% | 11 | 100.00% |
EXPORT_SYMBOL(dst_destroy);
/* RCU callback scheduled by dst_release(): runs the real teardown once
 * a grace period has elapsed, so concurrent RCU readers are done with
 * the entry before it is freed.
 */
static void dst_destroy_rcu(struct rcu_head *head)
{
	struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

	/* dst_destroy() always returns NULL; the former
	 * "dst = dst_destroy(dst)" was a dead store and is dropped. */
	dst_destroy(dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 18 | 54.55% | 1 | 33.33% |
Wei Wang | 14 | 42.42% | 1 | 33.33% |
Linus Torvalds (pre-git) | 1 | 3.03% | 1 | 33.33% |
Total | 33 | 100.00% | 3 | 100.00% |
/* Operations to mark dst as DEAD and clean up the net device referenced
* by dst:
* 1. put the dst under loopback interface and discard all tx/rx packets
* on this route.
* 2. release the net_device
* This function should be called when removing routes from the fib tree
* in preparation for a NETDEV_DOWN/NETDEV_UNREGISTER event and also to
* make the next dst_ops->check() fail.
*/
void dst_dev_put(struct dst_entry *dst)
{
struct net_device *dev = dst->dev;
dst->obsolete = DST_OBSOLETE_DEAD;
if (dst->ops->ifdown)
dst->ops->ifdown(dst, dev, true);
dst->input = dst_discard;
dst->output = dst_discard_out;
dst->dev = dev_net(dst->dev)->loopback_dev;
dev_hold(dst->dev);
dev_put(dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 51 | 61.45% | 1 | 16.67% |
Linus Torvalds (pre-git) | 28 | 33.73% | 2 | 33.33% |
Eric Dumazet | 2 | 2.41% | 1 | 16.67% |
Alexey Kuznetsov | 1 | 1.20% | 1 | 16.67% |
Eric W. Biedermann | 1 | 1.20% | 1 | 16.67% |
Total | 83 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(dst_dev_put);
/* Drop one reference on dst (NULL is a no-op).  When the count hits
 * zero, teardown is deferred past an RCU grace period via
 * dst_destroy_rcu().  A negative count indicates an imbalance and is
 * reported (rate-limited) rather than acted on.
 */
void dst_release(struct dst_entry *dst)
{
	int refs;

	if (!dst)
		return;

	refs = atomic_dec_return(&dst->__refcnt);
	if (unlikely(refs < 0))
		net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
				     __func__, dst, refs);
	if (!refs)
		call_rcu(&dst->rcu_head, dst_destroy_rcu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 39 | 60.94% | 1 | 16.67% |
Linus Torvalds (pre-git) | 13 | 20.31% | 2 | 33.33% |
Alexey Kuznetsov | 9 | 14.06% | 1 | 16.67% |
Vinay K. Nallamothu | 2 | 3.12% | 1 | 16.67% |
Tejun Heo | 1 | 1.56% | 1 | 16.67% |
Total | 64 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(dst_release);
/* Like dst_release(), but destroys the entry synchronously on the
 * final put instead of waiting for an RCU grace period.  Only safe
 * when no RCU reader can still hold the entry.
 */
void dst_release_immediate(struct dst_entry *dst)
{
	int refs;

	if (!dst)
		return;

	refs = atomic_dec_return(&dst->__refcnt);
	if (unlikely(refs < 0))
		net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
				     __func__, dst, refs);
	if (!refs)
		dst_destroy(dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 32 | 54.24% | 1 | 16.67% |
Linus Torvalds (pre-git) | 20 | 33.90% | 2 | 33.33% |
Alexey Kuznetsov | 5 | 8.47% | 1 | 16.67% |
Eric Dumazet | 1 | 1.69% | 1 | 16.67% |
David S. Miller | 1 | 1.69% | 1 | 16.67% |
Total | 59 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL(dst_release_immediate);
/* Copy-on-write a dst's metrics block.
 *
 * Called when writable metrics are needed but dst->_metrics currently
 * holds "old" (a pointer with flag bits encoded in its low bits).
 * Allocates a private copy, duplicates the old values, and atomically
 * installs it with cmpxchg.
 *
 * Returns the writable u32 metrics array, or NULL if allocation failed
 * or a racing CPU installed a read-only block first.
 */
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	struct dst_metrics *p = kmalloc(sizeof(*p), GFP_ATOMIC);

	if (p) {
		struct dst_metrics *old_p = (struct dst_metrics *)__DST_METRICS_PTR(old);
		unsigned long prev, new;

		refcount_set(&p->refcnt, 1);
		memcpy(p->metrics, old_p->metrics, sizeof(p->metrics));

		new = (unsigned long) p;
		prev = cmpxchg(&dst->_metrics, old, new);

		if (prev != old) {
			/* Lost the race: discard our copy and use the
			 * winner's block — unless it is read-only. */
			kfree(p);
			p = (struct dst_metrics *)__DST_METRICS_PTR(prev);
			if (prev & DST_METRICS_READ_ONLY)
				p = NULL;
		} else if (prev & DST_METRICS_REFCOUNTED) {
			/* We replaced a refcounted block: drop the
			 * reference this dst held on it. */
			if (refcount_dec_and_test(&old_p->refcnt))
				kfree(old_p);
		}
	}
	/* metrics must stay the first member so the array pointer and the
	 * block pointer are interchangeable. */
	BUILD_BUG_ON(offsetof(struct dst_metrics, metrics) != 0);
	return (u32 *)p;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 152 | 81.28% | 2 | 20.00% |
Linus Torvalds (pre-git) | 11 | 5.88% | 2 | 20.00% |
Alexey Kuznetsov | 9 | 4.81% | 2 | 20.00% |
Américo Wang | 5 | 2.67% | 1 | 10.00% |
Paolo Abeni | 4 | 2.14% | 1 | 10.00% |
Thomas Graf | 4 | 2.14% | 1 | 10.00% |
Eric Dumazet | 2 | 1.07% | 1 | 10.00% |
Total | 187 | 100.00% | 10 | 100.00% |
EXPORT_SYMBOL(dst_cow_metrics_generic);
/* Caller asserts that dst_metrics_read_only(dst) is false. */
/* Release a dst's private (writable) metrics block, swinging _metrics
 * back to the shared read-only defaults.  Only the CPU that wins the
 * cmpxchg race may free the private copy.  Caller asserts that
 * dst_metrics_read_only(dst) is false.
 */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
	unsigned long dflt = ((unsigned long) &dst_default_metrics) |
			     DST_METRICS_READ_ONLY;

	if (cmpxchg(&dst->_metrics, old, dflt) == old)
		kfree(__DST_METRICS_PTR(old));
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 48 | 78.69% | 1 | 50.00% |
Eric Dumazet | 13 | 21.31% | 1 | 50.00% |
Total | 61 | 100.00% | 2 | 100.00% |
EXPORT_SYMBOL(__dst_destroy_metrics_generic);
/* Bare-bones dst_ops shared by every metadata dst; such dsts never do
 * real routing work (their input/output stubs below warn and drop), so
 * only the address family is filled in. */
static struct dst_ops md_dst_ops = {
	.family = AF_UNSPEC,
};
/* dst->output stub for metadata dsts: transmitting on a metadata route
 * is a caller bug, so warn once and drop the skb.  Always returns 0. */
static int dst_md_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call output on metadata dst\n");
	kfree_skb(skb);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 36 | 100.00% | 1 | 100.00% |
Total | 36 | 100.00% | 1 | 100.00% |
/* dst->input stub for metadata dsts: receiving on a metadata route is
 * a caller bug, so warn once and drop the skb.  Always returns 0. */
static int dst_md_discard(struct sk_buff *skb)
{
	WARN_ONCE(1, "Attempting to call input on metadata dst\n");
	kfree_skb(skb);
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
/* Common initializer for a metadata_dst whose storage (including
 * optslen trailing option bytes) has already been allocated.  Sets up
 * the embedded dst_entry as an uncounted DST_METADATA entry, installs
 * the warn-and-drop I/O stubs, and zeroes everything that follows the
 * dst_entry within the allocation.
 */
static void __metadata_dst_init(struct metadata_dst *md_dst,
				enum metadata_type type, u8 optslen)
{
	struct dst_entry *dst;

	dst = &md_dst->dst;
	dst_init(dst, &md_dst_ops, NULL, 1, DST_OBSOLETE_NONE,
		 DST_METADATA | DST_NOCOUNT);

	/* Metadata dsts must never carry real traffic. */
	dst->input = dst_md_discard;
	dst->output = dst_md_discard_out;

	/* Zero the remainder of the allocation: everything past the
	 * embedded dst_entry, up through the optslen option bytes. */
	memset(dst + 1, 0, sizeof(*md_dst) + optslen - sizeof(*dst));
	md_dst->type = type;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 79 | 88.76% | 2 | 66.67% |
Jakub Kiciński | 10 | 11.24% | 1 | 33.33% |
Total | 89 | 100.00% | 3 | 100.00% |
/* Allocate and initialize a single metadata_dst with room for optslen
 * trailing option bytes.  Returns NULL on allocation failure; free
 * with metadata_dst_free() (reached via dst_destroy()).
 */
struct metadata_dst *metadata_dst_alloc(u8 optslen, enum metadata_type type,
					gfp_t flags)
{
	struct metadata_dst *md;

	/* One allocation covers the struct plus its option bytes. */
	md = kmalloc(sizeof(*md) + optslen, flags);
	if (!md)
		return NULL;

	__metadata_dst_init(md, type, optslen);
	return md;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 30 | 52.63% | 1 | 12.50% |
Eric Dumazet | 13 | 22.81% | 4 | 50.00% |
Jakub Kiciński | 6 | 10.53% | 1 | 12.50% |
Ilpo Järvinen | 6 | 10.53% | 1 | 12.50% |
Konstantin Khlebnikov | 2 | 3.51% | 1 | 12.50% |
Total | 57 | 100.00% | 8 | 100.00% |
EXPORT_SYMBOL_GPL(metadata_dst_alloc);
/* Free a metadata_dst allocated by metadata_dst_alloc().  IP-tunnel
 * metadata carries a dst_cache that must be torn down first (the cache
 * only exists when CONFIG_DST_CACHE is enabled). */
void metadata_dst_free(struct metadata_dst *md_dst)
{
#ifdef CONFIG_DST_CACHE
	if (md_dst->type == METADATA_IP_TUNNEL)
		dst_cache_destroy(&md_dst->u.tun_info.dst_cache);
#endif
	kfree(md_dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 32 | 80.00% | 2 | 66.67% |
David Lamparter | 8 | 20.00% | 1 | 33.33% |
Total | 40 | 100.00% | 3 | 100.00% |
/* Per-CPU variant of metadata_dst_alloc(): one metadata_dst (plus
 * optslen option bytes) per possible CPU, each fully initialized.
 * Returns NULL on allocation failure.
 */
struct metadata_dst __percpu *
metadata_dst_alloc_percpu(u8 optslen, enum metadata_type type, gfp_t flags)
{
	struct metadata_dst __percpu *md;
	size_t sz = sizeof(struct metadata_dst) + optslen;
	int cpu;

	md = __alloc_percpu_gfp(sz, __alignof__(struct metadata_dst), flags);
	if (!md)
		return NULL;

	/* Every CPU's copy gets the full metadata-dst setup. */
	for_each_possible_cpu(cpu)
		__metadata_dst_init(per_cpu_ptr(md, cpu), type, optslen);

	return md;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 44 | 57.14% | 1 | 25.00% |
David S. Miller | 22 | 28.57% | 1 | 25.00% |
Jakub Kiciński | 6 | 7.79% | 1 | 25.00% |
Eric Dumazet | 5 | 6.49% | 1 | 25.00% |
Total | 77 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL_GPL(metadata_dst_alloc_percpu);
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Wei Wang | 927 | 65.79% | 4 | 6.90% |
Linus Torvalds (pre-git) | 142 | 10.08% | 8 | 13.79% |
David S. Miller | 84 | 5.96% | 9 | 15.52% |
Eric Dumazet | 77 | 5.46% | 10 | 17.24% |
Alexey Kuznetsov | 43 | 3.05% | 3 | 5.17% |
Thomas Graf | 32 | 2.27% | 1 | 1.72% |
Jakub Kiciński | 22 | 1.56% | 1 | 1.72% |
Ilpo Järvinen | 9 | 0.64% | 1 | 1.72% |
Arnaldo Carvalho de Melo | 9 | 0.64% | 1 | 1.72% |
David Lamparter | 8 | 0.57% | 1 | 1.72% |
Américo Wang | 5 | 0.35% | 1 | 1.72% |
Denis V. Lunev | 5 | 0.35% | 1 | 1.72% |
Jiri Benc | 5 | 0.35% | 1 | 1.72% |
Arjan van de Ven | 5 | 0.35% | 1 | 1.72% |
Nicolas Dichtel | 4 | 0.28% | 1 | 1.72% |
Laurent Chavey | 4 | 0.28% | 1 | 1.72% |
Tejun Heo | 4 | 0.28% | 2 | 3.45% |
Paolo Abeni | 4 | 0.28% | 1 | 1.72% |
Herbert Xu | 4 | 0.28% | 1 | 1.72% |
Eric W. Biedermann | 4 | 0.28% | 2 | 3.45% |
Linus Torvalds | 3 | 0.21% | 1 | 1.72% |
Konstantin Khlebnikov | 2 | 0.14% | 1 | 1.72% |
Vinay K. Nallamothu | 2 | 0.14% | 1 | 1.72% |
Andrew Morton | 2 | 0.14% | 1 | 1.72% |
Christoph Hellwig | 1 | 0.07% | 1 | 1.72% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.07% | 1 | 1.72% |
Ingo Molnar | 1 | 0.07% | 1 | 1.72% |
Total | 1409 | 100.00% | 58 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.