Release 4.11 net/ipv4/tunnel4.c
/* tunnel4.c: Generic IP tunnel transformer.
 *
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/mpls.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/xfrm.h>
static struct xfrm_tunnel __rcu *tunnel4_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnel64_handlers __read_mostly;
static struct xfrm_tunnel __rcu *tunnelmpls4_handlers __read_mostly;
static DEFINE_MUTEX(tunnel4_mutex);
/* Pick the handler list for the given inner family; MPLS is the fallback. */
static inline struct xfrm_tunnel __rcu **fam_handlers(unsigned short family)
{
        return (family == AF_INET) ? &tunnel4_handlers :
                (family == AF_INET6) ? &tunnel64_handlers :
                &tunnelmpls4_handlers;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Pavel Emelyanov | 27 | 72.97% | 1 | 33.33% |
Simon Horman | 9 | 24.32% | 1 | 33.33% |
Eric Dumazet | 1 | 2.70% | 1 | 33.33% |
Total | 37 | 100.00% | 3 | 100.00% |
int xfrm4_tunnel_register(struct xfrm_tunnel *handler, unsigned short family)
{
        struct xfrm_tunnel __rcu **pprev;
        struct xfrm_tunnel *t;
        int ret = -EEXIST;
        int priority = handler->priority;

        mutex_lock(&tunnel4_mutex);

        /* Keep the list sorted by ascending priority; a duplicate priority
         * is rejected with -EEXIST. */
        for (pprev = fam_handlers(family);
             (t = rcu_dereference_protected(*pprev,
                        lockdep_is_held(&tunnel4_mutex))) != NULL;
             pprev = &t->next) {
                if (t->priority > priority)
                        break;
                if (t->priority == priority)
                        goto err;
        }

        handler->next = *pprev;
        rcu_assign_pointer(*pprev, handler);

        ret = 0;

err:
        mutex_unlock(&tunnel4_mutex);

        return ret;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 95 | 72.52% | 1 | 20.00% |
Eric Dumazet | 28 | 21.37% | 2 | 40.00% |
Kazunori Miyazawa | 5 | 3.82% | 1 | 20.00% |
Pavel Emelyanov | 3 | 2.29% | 1 | 20.00% |
Total | 131 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL(xfrm4_tunnel_register);
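As a usage sketch only (not part of tunnel4.c; the names my_tunnel_rcv, my_tunnel_err, my_tunnel_handler and my_tunnel_init are hypothetical), a decapsulating module registers its callbacks on the AF_INET list roughly like this, which mirrors how in-tree users of this API hook in:

static int my_tunnel_rcv(struct sk_buff *skb);                  /* hypothetical, defined by the module */
static int my_tunnel_err(struct sk_buff *skb, u32 info);        /* hypothetical, defined by the module */

static struct xfrm_tunnel my_tunnel_handler __read_mostly = {
        .handler     = my_tunnel_rcv,
        .err_handler = my_tunnel_err,
        .priority    = 1,       /* lower values are tried first on receive */
};

static int __init my_tunnel_init(void)
{
        /* Fails with -EEXIST if another handler already claimed priority 1. */
        if (xfrm4_tunnel_register(&my_tunnel_handler, AF_INET))
                return -EAGAIN;
        return 0;
}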
int xfrm4_tunnel_deregister(struct xfrm_tunnel *handler, unsigned short family)
{
        struct xfrm_tunnel __rcu **pprev;
        struct xfrm_tunnel *t;
        int ret = -ENOENT;

        mutex_lock(&tunnel4_mutex);

        for (pprev = fam_handlers(family);
             (t = rcu_dereference_protected(*pprev,
                        lockdep_is_held(&tunnel4_mutex))) != NULL;
             pprev = &t->next) {
                if (t == handler) {
                        *pprev = handler->next;
                        ret = 0;
                        break;
                }
        }

        mutex_unlock(&tunnel4_mutex);

        /* Wait for in-flight RCU readers before the caller frees the handler. */
        synchronize_net();

        return ret;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 75 | 70.75% | 1 | 25.00% |
Eric Dumazet | 23 | 21.70% | 1 | 25.00% |
Kazunori Miyazawa | 5 | 4.72% | 1 | 25.00% |
Pavel Emelyanov | 3 | 2.83% | 1 | 25.00% |
Total | 106 | 100.00% | 4 | 100.00% |
EXPORT_SYMBOL(xfrm4_tunnel_deregister);
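The matching teardown for the hypothetical handler above is a single call; because xfrm4_tunnel_deregister() already issues synchronize_net(), once it returns no RCU reader can still be walking through the handler and the module may safely unload. Again a sketch, not code from this file:

static void __exit my_tunnel_exit(void)
{
        /* Returns -ENOENT if the handler was never on the AF_INET list. */
        if (xfrm4_tunnel_deregister(&my_tunnel_handler, AF_INET))
                pr_warn("my_tunnel: handler was not registered\n");
}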
#define for_each_tunnel_rcu(head, handler)              \
        for (handler = rcu_dereference(head);           \
             handler != NULL;                           \
             handler = rcu_dereference(handler->next))
static int tunnel4_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct iphdr)))
                goto drop;

        /* A handler returning 0 has consumed the packet. */
        for_each_tunnel_rcu(tunnel4_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 67 | 90.54% | 2 | 66.67% |
Eric Dumazet | 7 | 9.46% | 1 | 33.33% |
Total | 74 | 100.00% | 3 | 100.00% |
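For reference, tunnel4_rcv() treats a handler return value of 0 as "packet consumed" and any non-zero value as "not mine, try the next handler"; only when every registered handler declines does it answer with ICMP port unreachable and drop the skb. A minimal handler shape, purely as a sketch with hypothetical names (my_tunnel_local is an assumed, pre-configured endpoint address), could look like this:

static __be32 my_tunnel_local;          /* assumed to be set at configuration time */

static int my_tunnel_rcv(struct sk_buff *skb)
{
        const struct iphdr *iph = ip_hdr(skb);

        if (iph->daddr != my_tunnel_local)
                return -1;              /* not ours: let the next handler look */

        /* A real handler would decapsulate and re-inject the inner packet;
         * this sketch simply consumes it.  Returning 0 stops the iteration. */
        kfree_skb(skb);
        return 0;
}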
#if IS_ENABLED(CONFIG_IPV6)
static int tunnel64_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
                goto drop;

        for_each_tunnel_rcu(tunnel64_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Kazunori Miyazawa | 66 | 90.41% | 1 | 33.33% |
Eric Dumazet | 6 | 8.22% | 1 | 33.33% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 1.37% | 1 | 33.33% |
Total | 73 | 100.00% | 3 | 100.00% |
#endif
#if IS_ENABLED(CONFIG_MPLS)
static int tunnelmpls4_rcv(struct sk_buff *skb)
{
        struct xfrm_tunnel *handler;

        if (!pskb_may_pull(skb, sizeof(struct mpls_label)))
                goto drop;

        for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
                if (!handler->handler(skb))
                        return 0;

        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);

drop:
        kfree_skb(skb);
        return 0;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Simon Horman | 73 | 100.00% | 1 | 100.00% |
Total | 73 | 100.00% | 1 | 100.00% |
#endif
static void tunnel4_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnel4_handlers, handler)
                if (!handler->err_handler(skb, info))
                        break;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 32 | 84.21% | 1 | 50.00% |
Eric Dumazet | 6 | 15.79% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
#if IS_ENABLED(CONFIG_IPV6)
static void tunnel64_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnel64_handlers, handler)
                if (!handler->err_handler(skb, info))
                        break;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Pavel Emelyanov | 32 | 84.21% | 1 | 50.00% |
Eric Dumazet | 6 | 15.79% | 1 | 50.00% |
Total | 38 | 100.00% | 2 | 100.00% |
#endif
#if IS_ENABLED(CONFIG_MPLS)
static void tunnelmpls4_err(struct sk_buff *skb, u32 info)
{
        struct xfrm_tunnel *handler;

        for_each_tunnel_rcu(tunnelmpls4_handlers, handler)
                if (!handler->err_handler(skb, info))
                        break;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Simon Horman | 38 | 100.00% | 1 | 100.00% |
Total | 38 | 100.00% | 1 | 100.00% |
#endif
static const struct net_protocol tunnel4_protocol = {
        .handler     = tunnel4_rcv,
        .err_handler = tunnel4_err,
        .no_policy   = 1,
        .netns_ok    = 1,
};

#if IS_ENABLED(CONFIG_IPV6)
static const struct net_protocol tunnel64_protocol = {
        .handler     = tunnel64_rcv,
        .err_handler = tunnel64_err,
        .no_policy   = 1,
        .netns_ok    = 1,
};
#endif

#if IS_ENABLED(CONFIG_MPLS)
static const struct net_protocol tunnelmpls4_protocol = {
        .handler     = tunnelmpls4_rcv,
        .err_handler = tunnelmpls4_err,
        .no_policy   = 1,
        .netns_ok    = 1,
};
#endif
static int __init tunnel4_init(void)
{
        if (inet_add_protocol(&tunnel4_protocol, IPPROTO_IPIP))
                goto err;
#if IS_ENABLED(CONFIG_IPV6)
        if (inet_add_protocol(&tunnel64_protocol, IPPROTO_IPV6)) {
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
                goto err;
        }
#endif
#if IS_ENABLED(CONFIG_MPLS)
        if (inet_add_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS)) {
                inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP);
#if IS_ENABLED(CONFIG_IPV6)
                inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6);
#endif
                goto err;
        }
#endif
        return 0;

err:
        pr_err("%s: can't add protocol\n", __func__);
        return -EAGAIN;
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Simon Horman | 74 | 63.79% | 2 | 40.00% |
Kazunori Miyazawa | 23 | 19.83% | 1 | 20.00% |
Herbert Xu | 18 | 15.52% | 1 | 20.00% |
Eric Dumazet | 1 | 0.86% | 1 | 20.00% |
Total | 116 | 100.00% | 5 | 100.00% |
static void __exit tunnel4_fini(void)
{
#if IS_ENABLED(CONFIG_MPLS)
        if (inet_del_protocol(&tunnelmpls4_protocol, IPPROTO_MPLS))
                pr_err("tunnelmpls4 close: can't remove protocol\n");
#endif
#if IS_ENABLED(CONFIG_IPV6)
        if (inet_del_protocol(&tunnel64_protocol, IPPROTO_IPV6))
                pr_err("tunnel64 close: can't remove protocol\n");
#endif
        if (inet_del_protocol(&tunnel4_protocol, IPPROTO_IPIP))
                pr_err("tunnel4 close: can't remove protocol\n");
}
Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 23 | 32.86% | 1 | 20.00% |
Simon Horman | 23 | 32.86% | 1 | 20.00% |
Kazunori Miyazawa | 21 | 30.00% | 1 | 20.00% |
Joe Perches | 2 | 2.86% | 1 | 20.00% |
Eric Dumazet | 1 | 1.43% | 1 | 20.00% |
Total | 70 | 100.00% | 5 | 100.00% |
module_init(tunnel4_init);
module_exit(tunnel4_fini);
MODULE_LICENSE("GPL");
Overall Contributors
Person | Tokens | Proportion | Commits | Commit Proportion |
Herbert Xu | 397 | 38.73% | 2 | 11.11% |
Simon Horman | 280 | 27.32% | 2 | 11.11% |
Kazunori Miyazawa | 161 | 15.71% | 1 | 5.56% |
Eric Dumazet | 96 | 9.37% | 5 | 27.78% |
Pavel Emelyanov | 83 | 8.10% | 4 | 22.22% |
Tejun Heo | 3 | 0.29% | 1 | 5.56% |
Alexey Dobriyan | 2 | 0.20% | 1 | 5.56% |
Joe Perches | 2 | 0.20% | 1 | 5.56% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.10% | 1 | 5.56% |
Total | 1025 | 100.00% | 18 | 100.00% |