Linux kernel release 4.11 — net/ipv4/xfrm4_policy.c
/*
* xfrm4_policy.c
*
* Changes:
* Kazunori MIYAZAWA @USAGI
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/inetdevice.h>
#include <linux/if_tunnel.h>
#include <net/dst.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/l3mdev.h>
/* Resolve an IPv4 route for the given selector addresses.
 *
 * Fills in *@fl4 with the flow description used for the lookup (callers
 * such as xfrm4_get_saddr() read the chosen source address back out of it).
 * Returns the route's dst_entry on success, or an ERR_PTR from the routing
 * core on failure.
 */
static struct dst_entry *__xfrm4_dst_lookup(struct net *net, struct flowi4 *fl4,
					    int tos, int oif,
					    const xfrm_address_t *saddr,
					    const xfrm_address_t *daddr)
{
	struct rtable *rt;

	memset(fl4, 0, sizeof(*fl4));
	fl4->daddr = daddr->a4;
	fl4->flowi4_tos = tos;
	/* Route in the master VRF device's table when oif is enslaved */
	fl4->flowi4_oif = l3mdev_master_ifindex_by_index(net, oif);
	fl4->flowi4_flags = FLOWI_FLAG_SKIP_NH_OIF;
	if (saddr)
		fl4->saddr = saddr->a4;

	rt = __ip_route_output_key(net, fl4);
	if (IS_ERR(rt))
		return ERR_CAST(rt);

	return &rt->dst;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 51 | 41.13% | 4 | 33.33% |
Herbert Xu | 33 | 26.61% | 1 | 8.33% |
David Ahern | 20 | 16.13% | 3 | 25.00% |
Hideaki Yoshifuji / 吉藤英明 | 11 | 8.87% | 1 | 8.33% |
Alexey Dobriyan | 6 | 4.84% | 1 | 8.33% |
Changli Gao | 2 | 1.61% | 1 | 8.33% |
Denis V. Lunev | 1 | 0.81% | 1 | 8.33% |
Total | 124 | 100.00% | 12 | 100.00% |
/* afinfo ->dst_lookup hook: route lookup with a throwaway on-stack flowi4,
 * for callers that do not need the flow description back.
 */
static struct dst_entry *xfrm4_dst_lookup(struct net *net, int tos, int oif,
					  const xfrm_address_t *saddr,
					  const xfrm_address_t *daddr)
{
	struct flowi4 fl4;

	return __xfrm4_dst_lookup(net, &fl4, tos, oif, saddr, daddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 45 | 90.00% | 1 | 50.00% |
David Ahern | 5 | 10.00% | 1 | 50.00% |
Total | 50 | 100.00% | 2 | 100.00% |
/* afinfo ->get_saddr hook: pick the source address the routing table would
 * use to reach @daddr (via @oif) and store it in *@saddr.
 * Returns 0 on success or -EHOSTUNREACH when no route exists.
 */
static int xfrm4_get_saddr(struct net *net, int oif,
			   xfrm_address_t *saddr, xfrm_address_t *daddr)
{
	struct flowi4 fl4;
	struct dst_entry *dst;

	dst = __xfrm4_dst_lookup(net, &fl4, 0, oif, NULL, daddr);
	if (IS_ERR(dst))
		return -EHOSTUNREACH;

	/* fl4 is on-stack; the lookup wrote the selected saddr into it,
	 * so the route reference can be dropped before reading it. */
	dst_release(dst);
	saddr->a4 = fl4.saddr;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Patrick McHardy | 34 | 44.74% | 1 | 16.67% |
Herbert Xu | 21 | 27.63% | 1 | 16.67% |
David S. Miller | 9 | 11.84% | 1 | 16.67% |
Alexey Dobriyan | 7 | 9.21% | 2 | 33.33% |
David Ahern | 5 | 6.58% | 1 | 16.67% |
Total | 76 | 100.00% | 6 | 100.00% |
/* afinfo ->get_tos hook: TOS for the bundle's routes.  ECN bits are
 * stripped so they never select a distinct xfrm route. */
static int xfrm4_get_tos(const struct flowi *fl)
{
	return fl->u.ip4.flowi4_tos & IPTOS_RT_MASK;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 9 | 37.50% | 1 | 16.67% |
Herbert Xu | 6 | 25.00% | 2 | 33.33% |
David S. Miller | 6 | 25.00% | 2 | 33.33% |
Ulrich Weber | 3 | 12.50% | 1 | 16.67% |
Total | 24 | 100.00% | 6 | 100.00% |
/* afinfo ->init_path hook: IPv4 needs no per-path setup (nfheader_len is
 * only meaningful for IPv6 mobility headers), so this is a no-op. */
static int xfrm4_init_path(struct xfrm_dst *path, struct dst_entry *dst,
			   int nfheader_len)
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Masahide Nakamura | 22 | 100.00% | 1 | 100.00% |
Total | 22 | 100.00% | 1 | 100.00% |
/* afinfo ->fill_dst hook: copy routing state from the underlying route
 * (@xdst->route) into the bundle's embedded rtable so the xfrm dst behaves
 * like the IPv4 route it wraps.  Takes a reference on @dev.  Always
 * returns 0.
 */
static int xfrm4_fill_dst(struct xfrm_dst *xdst, struct net_device *dev,
			  const struct flowi *fl)
{
	struct rtable *rt = (struct rtable *)xdst->route;
	const struct flowi4 *fl4 = &fl->u.ip4;

	xdst->u.rt.rt_iif = fl4->flowi4_iif;

	xdst->u.dst.dev = dev;
	dev_hold(dev);	/* dropped when the xfrm dst is torn down */

	/* Sheit... I remember I did this right. Apparently,
	 * it was magically lost, so this code needs audit */
	xdst->u.rt.rt_is_input = rt->rt_is_input;
	/* Only the flags relevant to output classification are carried over */
	xdst->u.rt.rt_flags = rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST |
					      RTCF_LOCAL);
	xdst->u.rt.rt_type = rt->rt_type;
	xdst->u.rt.rt_gateway = rt->rt_gateway;
	xdst->u.rt.rt_uses_gateway = rt->rt_uses_gateway;
	xdst->u.rt.rt_pmtu = rt->rt_pmtu;
	xdst->u.rt.rt_table_id = rt->rt_table_id;
	INIT_LIST_HEAD(&xdst->u.rt.rt_uncached);

	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 57 | 30.98% | 7 | 46.67% |
Hideaki Yoshifuji / 吉藤英明 | 55 | 29.89% | 1 | 6.67% |
Herbert Xu | 40 | 21.74% | 4 | 26.67% |
David Ahern | 12 | 6.52% | 1 | 6.67% |
Julian Anastasov | 12 | 6.52% | 1 | 6.67% |
Zheng Yan | 8 | 4.35% | 1 | 6.67% |
Total | 184 | 100.00% | 15 | 100.00% |
/* afinfo ->decode_session hook: build a flowi4 flow description from an
 * IPv4 skb for xfrm policy lookup.  When @reverse is set, addresses and
 * ports are swapped so the flow describes the opposite direction (used
 * e.g. when matching policies for replies/ICMP errors).
 */
static void
_decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
{
	const struct iphdr *iph = ip_hdr(skb);
	u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
	struct flowi4 *fl4 = &fl->u.ip4;
	int oif = 0;

	if (skb_dst(skb))
		oif = skb_dst(skb)->dev->ifindex;

	memset(fl4, 0, sizeof(struct flowi4));
	fl4->flowi4_mark = skb->mark;
	fl4->flowi4_oif = reverse ? skb->skb_iif : oif;

	/* Upper-layer selectors are only present on the first fragment;
	 * later fragments carry no transport header. */
	if (!ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_UDP:
		case IPPROTO_UDPLITE:
		case IPPROTO_TCP:
		case IPPROTO_SCTP:
		case IPPROTO_DCCP:
			/* Either the first 4 bytes of the transport header are
			 * already in the linear area, or pull them in.  Note
			 * pskb_may_pull() may reallocate the head, which is
			 * why xprth is recomputed inside each branch. */
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ports;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ports = (__be16 *)xprth;

				/* ports[0]=source, ports[1]=dest; swap on reverse */
				fl4->fl4_sport = ports[!!reverse];
				fl4->fl4_dport = ports[!reverse];
			}
			break;

		case IPPROTO_ICMP:
			if (xprth + 2 < skb->data ||
			    pskb_may_pull(skb, xprth + 2 - skb->data)) {
				u8 *icmp;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				icmp = xprth;

				fl4->fl4_icmp_type = icmp[0];
				fl4->fl4_icmp_code = icmp[1];
			}
			break;

		case IPPROTO_ESP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be32 *ehdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ehdr = (__be32 *)xprth;

				/* ESP: SPI is the first 32-bit word */
				fl4->fl4_ipsec_spi = ehdr[0];
			}
			break;

		case IPPROTO_AH:
			if (xprth + 8 < skb->data ||
			    pskb_may_pull(skb, xprth + 8 - skb->data)) {
				__be32 *ah_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ah_hdr = (__be32 *)xprth;

				/* AH: SPI is the second 32-bit word */
				fl4->fl4_ipsec_spi = ah_hdr[1];
			}
			break;

		case IPPROTO_COMP:
			if (xprth + 4 < skb->data ||
			    pskb_may_pull(skb, xprth + 4 - skb->data)) {
				__be16 *ipcomp_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				ipcomp_hdr = (__be16 *)xprth;

				/* IPComp CPI is 16 bits; widen it into the SPI slot */
				fl4->fl4_ipsec_spi = htonl(ntohs(ipcomp_hdr[1]));
			}
			break;

		case IPPROTO_GRE:
			if (xprth + 12 < skb->data ||
			    pskb_may_pull(skb, xprth + 12 - skb->data)) {
				__be16 *greflags;
				__be32 *gre_hdr;

				xprth = skb_network_header(skb) + iph->ihl * 4;
				greflags = (__be16 *)xprth;
				gre_hdr = (__be32 *)xprth;

				/* GRE key follows the flags word, shifted by one
				 * word when a checksum field is present */
				if (greflags[0] & GRE_KEY) {
					if (greflags[0] & GRE_CSUM)
						gre_hdr++;
					fl4->fl4_gre_key = gre_hdr[1];
				}
			}
			break;

		default:
			fl4->fl4_ipsec_spi = 0;
			break;
		}
	}
	fl4->flowi4_proto = iph->protocol;
	fl4->daddr = reverse ? iph->saddr : iph->daddr;
	fl4->saddr = reverse ? iph->daddr : iph->saddr;
	fl4->flowi4_tos = iph->tos;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 188 | 29.79% | 1 | 3.85% |
Steffen Klassert | 175 | 27.73% | 4 | 15.38% |
Timo Teräs | 65 | 10.30% | 1 | 3.85% |
David S. Miller | 42 | 6.66% | 2 | 7.69% |
Masahide Nakamura | 39 | 6.18% | 1 | 3.85% |
James Morris | 39 | 6.18% | 1 | 3.85% |
Herbert Xu | 37 | 5.86% | 3 | 11.54% |
Al Viro | 8 | 1.27% | 2 | 7.69% |
Wei Yongjun | 8 | 1.27% | 1 | 3.85% |
David Ahern | 8 | 1.27% | 2 | 7.69% |
Arnaldo Carvalho de Melo | 6 | 0.95% | 2 | 7.69% |
Peter Kosyh | 6 | 0.95% | 1 | 3.85% |
Gerrit Renker | 3 | 0.48% | 1 | 3.85% |
Patrick McHardy | 3 | 0.48% | 1 | 3.85% |
Paul Gortmaker | 2 | 0.32% | 1 | 3.85% |
Alexey Dobriyan | 1 | 0.16% | 1 | 3.85% |
Eric Dumazet | 1 | 0.16% | 1 | 3.85% |
Total | 631 | 100.00% | 26 | 100.00% |
/* dst_ops ->gc hook: kick deferred xfrm garbage collection and report
 * memory pressure.  Returns nonzero when the cached entry count exceeds
 * twice the configured threshold.
 */
static inline int xfrm4_garbage_collect(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, xfrm.xfrm4_dst_ops);

	xfrm_garbage_collect_deferred(net);

	/* Widen before doubling: gc_thresh defaults to INT_MAX (see
	 * xfrm4_dst_ops_template), and INT_MAX * 2 in int arithmetic is
	 * signed-overflow undefined behavior. */
	return (dst_entries_get_slow(ops) > 2LL * ops->gc_thresh);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Dobriyan | 23 | 47.92% | 2 | 33.33% |
Hideaki Yoshifuji / 吉藤英明 | 19 | 39.58% | 1 | 16.67% |
Daniel Lezcano | 4 | 8.33% | 1 | 16.67% |
Eric Dumazet | 1 | 2.08% | 1 | 16.67% |
Florian Westphal | 1 | 2.08% | 1 | 16.67% |
Total | 48 | 100.00% | 6 | 100.00% |
/* dst_ops ->update_pmtu hook: forward the PMTU update to the route
 * underlying the xfrm bundle. */
static void xfrm4_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct dst_entry *path = ((struct xfrm_dst *)dst)->route;

	path->ops->update_pmtu(path, sk, skb, mtu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 32 | 53.33% | 1 | 33.33% |
David S. Miller | 14 | 23.33% | 1 | 33.33% |
Herbert Xu | 14 | 23.33% | 1 | 33.33% |
Total | 60 | 100.00% | 3 | 100.00% |
/* dst_ops ->redirect hook: forward the redirect notification to the route
 * underlying the xfrm bundle. */
static void xfrm4_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb)
{
	struct dst_entry *path = ((struct xfrm_dst *)dst)->route;

	path->ops->redirect(path, sk, skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 55 | 100.00% | 2 | 100.00% |
Total | 55 | 100.00% | 2 | 100.00% |
/* dst_ops ->destroy hook: release generic metrics, then the xfrm-specific
 * bundle state. */
static void xfrm4_dst_destroy(struct dst_entry *dst)
{
	dst_destroy_metrics_generic(dst);
	xfrm_dst_destroy((struct xfrm_dst *)dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 28 | 84.85% | 1 | 50.00% |
David S. Miller | 5 | 15.15% | 1 | 50.00% |
Total | 33 | 100.00% | 2 | 100.00% |
/* dst_ops ->ifdown hook: only act when the device is actually being
 * unregistered, not merely brought down. */
static void xfrm4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			     int unregister)
{
	if (unregister)
		xfrm_dst_ifdown(dst, dev);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Herbert Xu | 32 | 100.00% | 1 | 100.00% |
Total | 32 | 100.00% | 1 | 100.00% |
/* Template dst_ops for IPv4 xfrm bundles; memcpy'd into each netns by
 * xfrm4_net_init().  gc_thresh of INT_MAX effectively disables the GC
 * threshold until overridden via the xfrm4_gc_thresh sysctl. */
static struct dst_ops xfrm4_dst_ops_template = {
	.family =		AF_INET,
	.gc =			xfrm4_garbage_collect,
	.update_pmtu =		xfrm4_update_pmtu,
	.redirect =		xfrm4_redirect,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		xfrm4_dst_destroy,
	.ifdown =		xfrm4_dst_ifdown,
	.local_out =		__ip_local_out,
	.gc_thresh =		INT_MAX,
};
/* IPv4 hooks registered with the generic xfrm policy layer in
 * xfrm4_policy_init(). */
static const struct xfrm_policy_afinfo xfrm4_policy_afinfo = {
	.dst_ops =		&xfrm4_dst_ops_template,
	.dst_lookup =		xfrm4_dst_lookup,
	.get_saddr =		xfrm4_get_saddr,
	.decode_session =	_decode_session4,
	.get_tos =		xfrm4_get_tos,
	.init_path =		xfrm4_init_path,
	.fill_dst =		xfrm4_fill_dst,
	.blackhole_route =	ipv4_blackhole_route,
};
#ifdef CONFIG_SYSCTL
/* net.ipv4.xfrm4_gc_thresh sysctl.  .data points at init_net's counter by
 * default and is redirected per-netns in xfrm4_net_sysctl_init(). */
static struct ctl_table xfrm4_policy_table[] = {
	{
		.procname       = "xfrm4_gc_thresh",
		.data           = &init_net.xfrm.xfrm4_dst_ops.gc_thresh,
		.maxlen         = sizeof(int),
		.mode           = 0644,
		.proc_handler   = proc_dointvec,
	},
	{ }	/* sentinel */
};
/* Register the xfrm4 sysctl table for @net.  For non-init namespaces the
 * table is duplicated so its .data can point at that namespace's own
 * gc_thresh.  Returns 0 or -ENOMEM.
 */
static __net_init int xfrm4_net_sysctl_init(struct net *net)
{
	struct ctl_table *table = xfrm4_policy_table;
	struct ctl_table_header *hdr;

	if (!net_eq(net, &init_net)) {
		table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table),
				GFP_KERNEL);
		if (!table)
			return -ENOMEM;
		table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
	}

	hdr = register_net_sysctl(net, "net/ipv4", table);
	if (!hdr) {
		/* Only the per-netns copy is ours to free */
		if (table != xfrm4_policy_table)
			kfree(table);
		return -ENOMEM;
	}

	net->ipv4.xfrm4_hdr = hdr;
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Kubeček | 125 | 95.42% | 1 | 25.00% |
Neil Horman | 4 | 3.05% | 1 | 25.00% |
Dan Streetman | 1 | 0.76% | 1 | 25.00% |
Arnd Bergmann | 1 | 0.76% | 1 | 25.00% |
Total | 131 | 100.00% | 4 | 100.00% |
static __net_exit void xfrm4_net_sysctl_exit(struct net *net)
{
struct ctl_table *table;
if (!net->ipv4.xfrm4_hdr)
return;
table = net->ipv4.xfrm4_hdr->ctl_table_arg;
unregister_net_sysctl_table(net->ipv4.xfrm4_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Michal Kubeček | 59 | 95.16% | 1 | 25.00% |
Ian Morris | 1 | 1.61% | 1 | 25.00% |
Dan Streetman | 1 | 1.61% | 1 | 25.00% |
Arnd Bergmann | 1 | 1.61% | 1 | 25.00% |
Total | 62 | 100.00% | 4 | 100.00% |
#else /* CONFIG_SYSCTL */
/* CONFIG_SYSCTL disabled: sysctl registration is a no-op that succeeds. */
static inline int xfrm4_net_sysctl_init(struct net *net)
{
	return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan Streetman | 14 | 93.33% | 1 | 50.00% |
Arnd Bergmann | 1 | 6.67% | 1 | 50.00% |
Total | 15 | 100.00% | 2 | 100.00% |
/* CONFIG_SYSCTL disabled: nothing to unregister. */
static inline void xfrm4_net_sysctl_exit(struct net *net)
{
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan Streetman | 10 | 90.91% | 1 | 50.00% |
Arnd Bergmann | 1 | 9.09% | 1 | 50.00% |
Total | 11 | 100.00% | 2 | 100.00% |
#endif
/* Per-netns init: clone the dst_ops template, set up its entry counter,
 * then register the sysctl.  Unwinds the counter if sysctl setup fails.
 * Returns 0 or a negative errno.
 */
static int __net_init xfrm4_net_init(struct net *net)
{
	int err;

	memcpy(&net->xfrm.xfrm4_dst_ops, &xfrm4_dst_ops_template,
	       sizeof(xfrm4_dst_ops_template));

	err = dst_entries_init(&net->xfrm.xfrm4_dst_ops);
	if (err)
		return err;

	err = xfrm4_net_sysctl_init(net);
	if (!err)
		return 0;

	dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
	return err;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan Streetman | 76 | 100.00% | 1 | 100.00% |
Total | 76 | 100.00% | 1 | 100.00% |
/* Per-netns teardown: mirror of xfrm4_net_init() in reverse order. */
static void __net_exit xfrm4_net_exit(struct net *net)
{
	xfrm4_net_sysctl_exit(net);
	dst_entries_destroy(&net->xfrm.xfrm4_dst_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Dan Streetman | 27 | 100.00% | 1 | 100.00% |
Total | 27 | 100.00% | 1 | 100.00% |
/* Per-network-namespace lifecycle hooks, registered in xfrm4_init(). */
static struct pernet_operations __net_initdata xfrm4_net_ops = {
	.init = xfrm4_net_init,
	.exit = xfrm4_net_exit,
};
/* Register the IPv4 hooks with the generic xfrm policy layer. */
static void __init xfrm4_policy_init(void)
{
	xfrm_policy_register_afinfo(&xfrm4_policy_afinfo, AF_INET);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 14 | 82.35% | 1 | 33.33% |
Florian Westphal | 2 | 11.76% | 1 | 33.33% |
Patrick McHardy | 1 | 5.88% | 1 | 33.33% |
Total | 17 | 100.00% | 3 | 100.00% |
/* Boot-time entry point: bring up IPv4 xfrm state, policy, and protocol
 * handlers, then the per-netns ops.
 * NOTE(review): register_pernet_subsys()'s return value is ignored here —
 * presumably acceptable at boot time, but worth confirming. */
void __init xfrm4_init(void)
{
	xfrm4_state_init();
	xfrm4_policy_init();
	xfrm4_protocol_init();
	register_pernet_subsys(&xfrm4_net_ops);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 7 | 30.43% | 1 | 16.67% |
Alexey Dobriyan | 6 | 26.09% | 1 | 16.67% |
Neil Horman | 4 | 17.39% | 1 | 16.67% |
Steffen Klassert | 4 | 17.39% | 2 | 33.33% |
Michal Kubeček | 2 | 8.70% | 1 | 16.67% |
Total | 23 | 100.00% | 6 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 388 | 20.42% | 2 | 2.44% |
David S. Miller | 299 | 15.74% | 17 | 20.73% |
Herbert Xu | 247 | 13.00% | 12 | 14.63% |
Michal Kubeček | 203 | 10.68% | 1 | 1.22% |
Steffen Klassert | 180 | 9.47% | 7 | 8.54% |
Dan Streetman | 136 | 7.16% | 1 | 1.22% |
Timo Teräs | 68 | 3.58% | 1 | 1.22% |
Masahide Nakamura | 66 | 3.47% | 2 | 2.44% |
David Ahern | 53 | 2.79% | 7 | 8.54% |
Neil Horman | 52 | 2.74% | 1 | 1.22% |
Alexey Dobriyan | 47 | 2.47% | 5 | 6.10% |
Patrick McHardy | 45 | 2.37% | 3 | 3.66% |
James Morris | 39 | 2.05% | 1 | 1.22% |
Julian Anastasov | 12 | 0.63% | 1 | 1.22% |
Zheng Yan | 8 | 0.42% | 1 | 1.22% |
Al Viro | 8 | 0.42% | 2 | 2.44% |
Wei Yongjun | 8 | 0.42% | 1 | 1.22% |
Arnaldo Carvalho de Melo | 6 | 0.32% | 2 | 2.44% |
Peter Kosyh | 6 | 0.32% | 1 | 1.22% |
Florian Westphal | 4 | 0.21% | 3 | 3.66% |
Daniel Lezcano | 4 | 0.21% | 1 | 1.22% |
Arnd Bergmann | 4 | 0.21% | 1 | 1.22% |
Gerrit Renker | 3 | 0.16% | 1 | 1.22% |
Ulrich Weber | 3 | 0.16% | 1 | 1.22% |
Randy Dunlap | 3 | 0.16% | 1 | 1.22% |
Changli Gao | 2 | 0.11% | 1 | 1.22% |
Eric Dumazet | 2 | 0.11% | 2 | 2.44% |
Paul Gortmaker | 2 | 0.11% | 1 | 1.22% |
Denis V. Lunev | 1 | 0.05% | 1 | 1.22% |
Ian Morris | 1 | 0.05% | 1 | 1.22% |
Total | 1900 | 100.00% | 82 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.