cregit-Linux: how code gets into the kernel

Release 4.11 net/ipv6/route.c

Directory: net/ipv6
/*
 *      Linux INET6 implementation
 *      FIB front-end.
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*      Changes:
 *
 *      YOSHIFUJI Hideaki @USAGI
 *              reworked default router selection.
 *              - respect outgoing interface
 *              - select from (probably) reachable routers (i.e.
 *              routers in REACHABLE, STALE, DELAY or PROBE states).
 *              - always select the same router if it is (probably)
 *              reachable.  otherwise, round-robin the list.
 *      Ville Nuorvala
 *              Fixed routing subtrees.
 */


#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif


enum rt6_nud_state {
	
RT6_NUD_FAIL_HARD = -3,
	
RT6_NUD_FAIL_PROBE = -2,
	
RT6_NUD_FAIL_DO_RR = -1,
	
RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif


struct uncached_list {
	
spinlock_t		lock;
	
struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);


/* Put @rt on this CPU's uncached list.  Uncached routes live outside the
 * fib6 tree, so this list is the only way to find (and later flush) them
 * when their device goes away.
 */
static void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->dst.flags |= DST_NOCACHE;
	/* Remember which CPU's list we joined so deletion can find it. */
	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau65100.00%1100.00%
Total65100.00%1100.00%


/* Remove @rt from the uncached list it was added to, if any.
 * Safe to call for routes that were never list_add'ed (list is empty).
 */
static void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		/* Lock the list recorded at add time - may belong to
		 * a different CPU than the one we run on now.
		 */
		struct uncached_list *ul = rt->rt6i_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau57100.00%1100.00%
Total57100.00%1100.00%


/* Device-unregister hook for uncached routes: walk every CPU's uncached
 * list and repoint any reference to @dev (either the dst device or the
 * rt6i_idev) at the namespace loopback device, so the rt6_info stays
 * usable after @dev is gone.
 */
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	/* Loopback is the fallback target; nothing to migrate from it. */
	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			/* Swap the inet6_dev reference for loopback's. */
			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			/* Swap the device reference, keeping refcounts
			 * balanced (hold new before putting old).
			 */
			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau15295.60%150.00%
Eric W. Biedermann74.40%150.00%
Total159100.00%2100.00%


/* Per-cpu route clones do not own metrics; write through to the
 * route they were copied from (dst.from).
 */
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	return dst_metrics_write_ptr(rt->dst.from);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau22100.00%1100.00%
Total22100.00%1100.00%


/* dst_ops->cow_metrics: pick the copy-on-write strategy by route kind.
 * PCPU clones forward to their parent, RTF_CACHE clones never COW
 * (return NULL keeps the shared read-only metrics), everything else
 * uses the generic COW path.
 */
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	else
		return dst_cow_metrics_generic(dst, old);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3047.62%120.00%
Martin KaFai Lau2742.86%360.00%
Zheng Yan69.52%120.00%
Total63100.00%5100.00%


/* Select the address used as the neighbour-table key for @rt:
 * a configured gateway wins, then the packet's destination from
 * @skb, and finally the caller-supplied @daddr.
 */
static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	struct in6_addr *gw = &rt->rt6i_gateway;

	if (!ipv6_addr_any(gw))
		return (const void *)gw;
	if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller67100.00%3100.00%
Total67100.00%3100.00%


/* dst_ops->neigh_lookup: find (or create) the ndisc neighbour entry for
 * this dst, keying on the gateway / packet daddr / caller daddr as
 * resolved by choose_neigh_daddr().
 */
static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
					  struct sk_buff *skb,
					  const void *daddr)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct neighbour *n;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);
	if (n)
		return n;
	/* Not in the table yet - create a fresh nd_tbl entry. */
	return neigh_create(&nd_tbl, daddr, dst->dev);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller83100.00%4100.00%
Total83100.00%4100.00%


/* dst_ops->confirm_neigh: mark the neighbour for this dst as confirmed
 * (reachability feedback from upper layers).  Skips devices that do no
 * neighbour resolution and multicast destinations, which have no
 * neighbour entry to confirm.
 */
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

Contributors

PersonTokensPropCommitsCommitProp
Julian Anastasov89100.00%1100.00%
Total89100.00%1100.00%

/* Template dst_ops for regular IPv6 routes; copied per-netns into
 * net->ipv6.ip6_dst_ops.
 */
static struct dst_ops ip6_dst_ops_template = {
	.family			= AF_INET6,
	.gc			= ip6_dst_gc,
	.gc_thresh		= 1024,
	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,
	.mtu			= ip6_mtu,
	.cow_metrics		= ipv6_cow_metrics,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_neigh_lookup,
	.confirm_neigh		= ip6_confirm_neigh,
};
/* MTU for blackhole dsts: a raw RTAX_MTU metric if one was set,
 * otherwise fall back to the device MTU.
 */
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;
	return dst->dev->mtu;
}

Contributors

PersonTokensPropCommitsCommitProp
Steffen Klassert2058.82%375.00%
Roland Dreier1441.18%125.00%
Total34100.00%4100.00%


/* Blackhole dsts ignore PMTU updates - intentional no-op. */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller23100.00%2100.00%
Total23100.00%2100.00%


/* Blackhole dsts ignore redirects - intentional no-op. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller20100.00%2100.00%
Total20100.00%2100.00%

/* dst_ops for blackhole routes (used e.g. by xfrm): mostly shares the
 * regular handlers but PMTU/redirect events are swallowed.
 */
static struct dst_ops ip6_dst_blackhole_ops = {
	.family			= AF_INET6,
	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= dst_cow_metrics_generic,
	.neigh_lookup		= ip6_neigh_lookup,
};

/* Metrics template for the static error-route entries below. */
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

/* Catch-all "no route" entry: discards packets with -ENETUNREACH.
 * rt6i_metric is maximal so real routes always win.
 */
static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol  = RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

/* Policy-routing "prohibit" entry: rejects with -EACCES. */
static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol  = RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

/* Policy-routing "blackhole" entry: silently discards (-EINVAL). */
static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol  = RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#endif
/* Initialize the rt6-specific part of a freshly allocated rt6_info:
 * zero everything after the embedded dst_entry (dst_alloc already set
 * that up) and init the list heads so list_empty() works before the
 * route is linked anywhere.
 */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller2136.21%222.22%
Steffen Klassert1424.14%111.11%
Martin KaFai Lau1118.97%222.22%
Nicolas Dichtel813.79%111.11%
Hideaki Yoshifuji / 吉藤英明23.45%222.22%
Benjamin Thery23.45%111.11%
Total58100.00%9100.00%

/* allocate dst with ip6_dst_ops */
/* Allocate a bare rt6_info from the per-netns ip6_dst_ops pool and run
 * the rt6-specific init.  Returns NULL on allocation failure.
 */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					0, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt)
		rt6_info_init(rt);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5292.86%133.33%
David S. Miller23.57%133.33%
Kazunori Miyazawa23.57%133.33%
Total56100.00%3100.00%


/* Allocate a rt6_info plus its per-cpu clone pointer array.  Each
 * per-cpu slot starts NULL; clones are created lazily on lookup.
 * On percpu allocation failure the route itself is destroyed and
 * NULL is returned.
 */
struct rt6_info *ip6_dst_alloc(struct net *net,
			       struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	if (rt) {
		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
		if (rt->rt6i_pcpu) {
			int cpu;

			for_each_possible_cpu(cpu) {
				struct rt6_info **p;

				p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
				/* no one shares rt */
				*p =  NULL;
			}
		} else {
			dst_destroy((struct dst_entry *)rt);
			return NULL;
		}
	}

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau112100.00%1100.00%
Total112100.00%1100.00%

EXPORT_SYMBOL(ip6_dst_alloc);
/* dst_ops->destroy: release everything a rt6_info owns - metrics,
 * per-cpu clone array, uncached-list membership, the inet6_dev ref and
 * finally the reference on the parent route (dst->from).
 */
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct dst_entry *from = dst->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	/* Clear before releasing so no stale pointer survives the put. */
	dst->from = NULL;
	dst_release(from);
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明4955.68%337.50%
Martin KaFai Lau2326.14%225.00%
Gao Feng89.09%112.50%
Zheng Yan77.95%112.50%
David S. Miller11.14%112.50%
Total88100.00%8100.00%


/* dst_ops->ifdown: when @dev goes down, migrate this dst's inet6_dev
 * reference to the namespace loopback device so the dst remains valid.
 */
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev) {
		if (idev && idev->dev == dev) {
			struct inet6_dev *loopback_idev =
				in6_dev_get(loopback_dev);
			if (loopback_idev) {
				rt->rt6i_idev = loopback_idev;
				in6_dev_put(idev);
			}
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7575.76%457.14%
Herbert Xu1010.10%114.29%
Denis V. Lunev99.09%114.29%
David S. Miller55.05%114.29%
Total99100.00%7100.00%


/* Non-recursive expiry check: true only when the route itself carries
 * RTF_EXPIRES and its deadline has passed (parent routes are ignored).
 */
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & RTF_EXPIRES) &&
	       time_after(jiffies, rt->dst.expires);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau36100.00%1100.00%
Total36100.00%1100.00%


/* Expiry check that follows dst.from: a clone without RTF_EXPIRES
 * inherits the expiry state of the route it was copied from.
 */
static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->dst.from) {
		/* Recurse into the parent route. */
		return rt6_check_expired((struct rt6_info *) rt->dst.from);
	}
	return false;
}

Contributors

PersonTokensPropCommitsCommitProp
Gao Feng4465.67%125.00%
Hideaki Yoshifuji / 吉藤英明1725.37%125.00%
Li RongQing34.48%125.00%
Eric Dumazet34.48%125.00%
Total67100.00%4100.00%

/* Multipath route selection: * Hash based function using packet header and flowlabel. * Adapted from fib_info_hashfn() */
/* Map a flow hash onto one of @candidate_count multipath candidates. */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
{
	return get_hash_from_flowi6(fl6) % candidate_count;
}

Contributors

PersonTokensPropCommitsCommitProp
Nicolas Dichtel2083.33%133.33%
Hideaki Yoshifuji / 吉藤英明28.33%133.33%
Tom Herbert28.33%133.33%
Total24100.00%3100.00%


/* Multipath selection: hash the flow onto @match or one of its
 * siblings.  Falls back to @match if the hashed sibling scores
 * negatively (unusable).
 */
static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
					     struct flowi6 *fl6, int oif,
					     int strict)
{
	struct rt6_info *sibling, *next_sibling;
	int route_choosen;

	route_choosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
	/* Don't change the route, if route_choosen == 0
	 * (siblings does not include ourself)
	 */
	if (route_choosen)
		list_for_each_entry_safe(sibling, next_sibling,
				&match->rt6i_siblings, rt6i_siblings) {
			route_choosen--;
			if (route_choosen == 0) {
				/* Keep the original match if the hashed
				 * sibling is not usable.
				 */
				if (rt6_score_route(sibling, oif, strict) < 0)
					break;
				match = sibling;
				break;
			}
		}
	return match;
}

Contributors

PersonTokensPropCommitsCommitProp
Nicolas Dichtel98100.00%2100.00%
Total98100.00%2100.00%

/* * Route lookup. Any table->tb6_lock is implied. */
/* Walk the leaf chain starting at @rt and pick the entry matching the
 * requested output interface (@oif) or source address (@saddr).
 * Loopback routes whose idev matches @oif are remembered as a weaker
 * "local" fallback.  With RT6_LOOKUP_F_IFACE set and no match, the
 * null entry is returned instead of the head.
 */
static inline struct rt6_info *rt6_device_match(struct net *net,
						    struct rt6_info *rt,
						    const struct in6_addr *saddr,
						    int oif,
						    int flags)
{
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	/* Nothing to constrain on - head of the chain wins. */
	if (!oif && ipv6_addr_any(saddr))
		goto out;

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->dst.dev;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE)
						continue;
					if (local &&
					    local->rt6i_idev->dev->ifindex == oif)
						continue;
				}
				local = sprt;
			}
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif) {
		if (local)
			return local;

		if (flags & RT6_LOOKUP_F_IFACE)
			return net->ipv6.ip6_null_entry;
	}
out:
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明9344.50%433.33%
Linus Torvalds (pre-git)7133.97%325.00%
Daniel Lezcano3717.70%18.33%
Eric Dumazet41.91%216.67%
David S. Miller41.91%216.67%
Total209100.00%12100.00%

#ifdef CONFIG_IPV6_ROUTER_PREF struct __rt6_probe_work { struct work_struct work; struct in6_addr target; struct net_device *dev; };
/* Workqueue body for rt6_probe(): send a unicast-target NS to the
 * solicited-node multicast address, then drop the device ref taken
 * when the work was queued and free the work item.
 */
static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

Contributors

PersonTokensPropCommitsCommitProp
Hannes Frederic Sowa6995.83%133.33%
Erik Nordmark22.78%133.33%
Michael Büsch11.39%133.33%
Total72100.00%3100.00%


/* Router Reachability Probing (RFC 4191): if the gateway neighbour of
 * @rt is not known-valid and the rate limit has elapsed, queue a
 * deferred neighbour solicitation.  Deferral is needed because we are
 * under rcu_read_lock_bh here and cannot send directly.
 */
static void rt6_probe(struct rt6_info *rt)
{
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		work = NULL;
		write_lock(&neigh->lock);
		/* Re-check state under the neigh lock and enforce the
		 * rtr_probe_interval rate limit.
		 */
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		/* No neighbour entry yet - probe unconditionally. */
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		/* Hold the device until rt6_probe_deferred runs. */
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明11352.07%541.67%
Hannes Frederic Sowa4721.66%18.33%
Martin KaFai Lau4420.28%216.67%
Eric Dumazet62.76%18.33%
Jiri Benc31.38%18.33%
David S. Miller31.38%18.33%
Daniel Lezcano10.46%18.33%
Total217100.00%12100.00%

#else
/* No router reachability probing without CONFIG_IPV6_ROUTER_PREF. */
static inline void rt6_probe(struct rt6_info *rt)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明1090.91%150.00%
Joe Perches19.09%150.00%
Total11100.00%2100.00%

#endif /* * Default Router Selection (RFC 2461 6.3.6) */
static inline int rt6_check_dev(struct rt6_info *rt, int oif) { struct net_device *dev = rt->dst.dev; if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明5578.57%457.14%
David S. Miller1420.00%228.57%
Dave Jones11.43%114.29%
Total70100.00%7100.00%


/* Classify the reachability of @rt's gateway neighbour for route
 * scoring.  Non-gateway routes always succeed.  With router
 * preferences enabled, anything short of NUD_FAILED still succeeds
 * and a failed entry asks for a probe; without them a missing entry
 * triggers round-robin (RT6_NUD_FAIL_DO_RR).
 */
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明8559.44%642.86%
Hannes Frederic Sowa2114.69%214.29%
Linus Torvalds (pre-git)2114.69%321.43%
Paul Marks85.59%17.14%
Jiri Benc64.20%17.14%
Eric Dumazet21.40%17.14%
Total143100.00%14100.00%


/* Compute the selection score for @rt: device match (bits 0-1), then
 * the RFC 4191 router preference (bits 2+).  Returns a negative
 * rt6_nud_state when the route is unusable under @strict.
 */
static int rt6_score_route(struct rt6_info *rt, int oif, int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6572.22%562.50%
Hannes Frederic Sowa1718.89%112.50%
Linus Torvalds (pre-git)66.67%112.50%
Paul Marks22.22%112.50%
Total90100.00%8100.00%


/* Compare @rt against the current best (@match / *mpri) and return the
 * better of the two.  Routes that are link-down (when configured to be
 * ignored) or expired are skipped.  *do_rr is set when the winner only
 * scored RT6_NUD_FAIL_DO_RR, telling rt6_select() to rotate rr_ptr.
 */
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *dev = rt->dst.dev;

	if (dev && !netif_carrier_ok(dev) &&
	    idev->cnf.ignore_routes_with_linkdown &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (rt6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6235.23%650.00%
Hannes Frederic Sowa4022.73%18.33%
Andy Gospodarek3922.16%18.33%
David S. Miller2111.93%18.33%
David Ahern73.98%18.33%
Jiri Benc42.27%18.33%
Linus Torvalds (pre-git)31.70%18.33%
Total176100.00%12100.00%


/* Find the best route among the leaf entries sharing @metric, starting
 * the scan at the round-robin head @rr_head and wrapping to fn->leaf.
 * If neither segment yields a match, fall back to scanning the
 * remaining (different-metric) entries recorded in @cont.
 */
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	/* First segment: rr_head to the end of the metric group. */
	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	/* Second segment: wrap from the leaf head up to rr_head. */
	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	/* Last resort: routes with other metrics. */
	for (rt = cont; rt; rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10650.24%114.29%
Steffen Klassert8138.39%114.29%
Hideaki Yoshifuji / 吉藤英明104.74%342.86%
Hannes Frederic Sowa83.79%114.29%
Eric Dumazet62.84%114.29%
Total211100.00%7100.00%


/* Default Router Selection (RFC 4861 6.3.6) over the node's leaf chain.
 * Uses fn->rr_ptr as the round-robin cursor and advances it when
 * find_rr_leaf asked for rotation (do_rr).  Returns the null entry
 * when nothing matched.
 */
static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
	struct rt6_info *match, *rt0;
	struct net *net;
	bool do_rr = false;

	rt0 = fn->rr_ptr;
	if (!rt0)
		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = fn->leaf;

		if (next != rt0)
			fn->rr_ptr = next;
	}

	net = dev_net(rt0->dst.dev);
	return match ? match : net->ipv6.ip6_null_entry;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10771.81%220.00%
Daniel Lezcano149.40%110.00%
Hideaki Yoshifuji / 吉藤英明138.72%440.00%
Hannes Frederic Sowa96.04%110.00%
Linus Torvalds (pre-git)32.01%110.00%
Eric Dumazet32.01%110.00%
Total149100.00%10100.00%


/* True when @rt has a gateway next hop or is flagged RTF_NONEXTHOP. */
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return !!(rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau25100.00%1100.00%
Total25100.00%1100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Process a received Route Information option (RFC 4191): validate the
 * option, then add, update or delete the corresponding RTF_ROUTEINFO
 * route (or the default router entry for prefix length 0) according to
 * the advertised lifetime and preference.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	/* Zero lifetime withdraws an existing route. */
	if (rt && !lifetime) {
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev, pref);
	else if (rt)
		/* Refresh the preference bits on an existing route. */
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明30379.53%320.00%
Linus Torvalds (pre-git)359.19%533.33%
Duan Jiong184.72%16.67%
Daniel Lezcano112.89%16.67%
Gao Feng71.84%16.67%
Jens Rosenboom41.05%16.67%
Eric Dumazet10.26%16.67%
Al Viro10.26%16.67%
Américo Wang10.26%16.67%
Total381100.00%15100.00%

#endif
/* Walk back up the fib6 tree from @fn, descending into source-routing
 * subtrees along the way, until a node carrying route info is found.
 * Returns NULL at the tree root (no more candidates).
 */
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = fn->parent;
		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau8695.56%133.33%
Hideaki Yoshifuji / 吉藤英明33.33%133.33%
Daniel Lezcano11.11%133.33%
Total90100.00%3100.00%


/* Policy-rule lookup backend for simple (non-cloning) lookups: find the
 * fib6 node, match device/multipath, and backtrack toward the root if
 * only the null entry was found.  Takes a usage reference on the
 * returned route; caller must release it.
 */
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = fn->leaf;
	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明5630.43%321.43%
Thomas Graf3317.93%17.14%
Nicolas Dichtel2714.67%214.29%
Martin KaFai Lau2312.50%17.14%
David Ahern137.07%17.14%
David S. Miller126.52%17.14%
Daniel Lezcano94.89%17.14%
Linus Torvalds (pre-git)84.35%321.43%
Pavel Emelyanov31.63%17.14%
Total184100.00%14100.00%


/* Public entry point: run the simple lookup through the policy-rule
 * framework (fib6_rule_lookup picks the table).
 */
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				    int flags)
{
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal32100.00%1100.00%
Total32100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_lookup);
/* Convenience lookup by address pair: build the flowi6, run the rule
 * lookup, and return the route only on success (NULL on error dsts).
 * The returned route carries a reference; caller must put it.
 */
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf9273.60%222.22%
Linus Torvalds (pre-git)1512.00%222.22%
Daniel Lezcano86.40%222.22%
David S. Miller75.60%111.11%
Hideaki Yoshifuji / 吉藤英明32.40%222.22%
Total125100.00%9100.00%

EXPORT_SYMBOL(rt6_lookup); /* ip6_ins_rt is called with FREE table->tb6_lock. It takes new route entry, the addition fails by any reason the route is freed. In any case, if caller does not hold it, it may be destroyed. */
/* Insert @rt into its fib6 table under the table write lock.  On
 * failure fib6_add frees the route (see comment above); caller must
 * not touch it afterwards unless it holds its own reference.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc);
	write_unlock_bh(&table->tb6_lock);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3448.57%330.00%
Thomas Graf2535.71%330.00%
Michal Kubeček45.71%110.00%
Florian Westphal34.29%110.00%
Mathew Richardson34.29%110.00%
Jamal Hadi Salim11.43%110.00%
Total70100.00%10100.00%


/* Insert @rt with default netlink info (netns from the route's device)
 * and no extra metrics.
 */
int ip6_ins_rt(struct rt6_info *rt)
{
	struct nl_info info = {	.nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	return __ip6_ins_rt(rt, &info, &mxc);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1733.33%112.50%
Florian Westphal1427.45%112.50%
Denis V. Lunev1121.57%225.00%
David S. Miller35.88%112.50%
Hideaki Yoshifuji / 吉藤英明35.88%112.50%
Daniel Lezcano23.92%112.50%
Michal Kubeček11.96%112.50%
Total51100.00%8100.00%


/* Create an RTF_CACHE clone of @ort keyed on the exact destination
 * (/128) - used e.g. for PMTU exceptions and FLOWI_FLAG_KNOWN_NH.
 * If @ort is itself a clone, the copy is made from its parent.
 */
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);

	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	/* Narrow the clone to the exact destination. */
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau9847.80%320.00%
Linus Torvalds (pre-git)8139.51%533.33%
Hideaki Yoshifuji / 吉藤英明199.27%320.00%
Eric Dumazet41.95%213.33%
Alexey Dobriyan20.98%16.67%
David S. Miller10.49%16.67%
Total205100.00%15100.00%


/* Allocate a per-cpu clone of @rt (flagged RTF_PCPU so metrics COW is
 * redirected to the parent).  Returns NULL on allocation failure.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
				  rt->dst.dev, rt->dst.flags);

	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau76100.00%1100.00%
Total76100.00%1100.00%

/* It should be called with read_lock_bh(&tb6_lock) acquired */
/* Fetch this CPU's existing clone of @rt, taking a reference, or NULL
 * if none has been created yet.  Caller holds read_lock_bh(tb6_lock).
 */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt) {
		dst_hold(&pcpu_rt->dst);
		rt6_dst_from_metrics_check(pcpu_rt);
	}
	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau58100.00%2100.00%
Total58100.00%2100.00%


/* Create and install this CPU's clone of @rt.  Called without
 * tb6_lock, so both a concurrent installer (handled via cmpxchg) and
 * concurrent removal of @rt from the tree (rt6i_pcpu check under the
 * lock) must be tolerated.  Returns a referenced route - the new
 * clone, a concurrently installed one, @rt itself, or the null entry
 * on allocation failure.
 */
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		prev = cmpxchg(p, NULL, pcpu_rt);
		if (prev) {
			/* If someone did it before us, return prev instead */
			dst_destroy(&pcpu_rt->dst);
			pcpu_rt = prev;
		}
	} else {
		/* rt has been removed from the fib6 tree
		 * before we have a chance to acquire the read_lock.
		 * In this case, don't brother to create a pcpu rt
		 * since rt is going away anyway.  The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_destroy(&pcpu_rt->dst);
		pcpu_rt = rt;
	}
	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau176100.00%3100.00%
Total176100.00%3100.00%


/* Core policy-routing lookup for @table.  Selects a route (retrying with
 * backtracking, and once more without RT6_LOOKUP_F_REACHABLE if nothing
 * reachable was found), then returns either the entry itself (null entry
 * or RTF_CACHE clone), a tree-unowned uncached clone (FLOWI_FLAG_KNOWN_NH
 * without a gateway), or a per-cpu copy.  Always returns a route with a
 * dst reference held; never NULL.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

		trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt)
			rt6_uncached_list_add(uncached_rt);
		else
			uncached_rt = net->ipv6.ip6_null_entry;

		dst_hold(&uncached_rt->dst);

		trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */
		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;
		rt->dst.__use++;
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (pcpu_rt) {
			read_unlock_bh(&table->tb6_lock);
		} else {
			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
		}

		trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
		return pcpu_rt;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau26656.48%827.59%
David Ahern5712.10%310.34%
Linus Torvalds (pre-git)388.07%413.79%
Hideaki Yoshifuji / 吉藤英明357.43%724.14%
Thomas Graf255.31%13.45%
Nicolas Dichtel194.03%26.90%
David S. Miller194.03%26.90%
Daniel Lezcano71.49%13.45%
Pavel Emelyanov51.06%13.45%
Total471100.00%29100.00%

EXPORT_SYMBOL_GPL(ip6_pol_route);
/* fib6 rule-lookup callback for input: route using the flow's iif. */
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Pavel Emelyanov3071.43%133.33%
Daniel Lezcano716.67%133.33%
David S. Miller511.90%133.33%
Total42100.00%3100.00%


/* Input-path route lookup through the policy-rule engine.  Strict-scope
 * destinations force an interface match, except on PIM register vifs.
 */
struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6, int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}

Contributors

PersonTokensPropCommitsCommitProp
Shmulik Ladkani57100.00%1100.00%
Total57100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
/* Resolve and attach the input route for @skb from its IPv6 header,
 * carrying over RX-side tunnel metadata (tun_id) when present.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	skb_dst_drop(skb);
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf6140.67%315.79%
Jiri Benc4228.00%315.79%
Daniel Lezcano128.00%210.53%
David S. Miller96.00%15.26%
Hideaki Yoshifuji / 吉藤英明74.67%315.79%
Eric Dumazet53.33%210.53%
Shmulik Ladkani53.33%15.26%
Linus Torvalds (pre-git)42.67%210.53%
Arnaldo Carvalho de Melo32.00%15.26%
David Ahern21.33%15.26%
Total150100.00%19100.00%


/* fib6 rule-lookup callback for output: route using the flow's oif. */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1228.57%225.00%
Thomas Graf1126.19%112.50%
Daniel Lezcano716.67%112.50%
David S. Miller511.90%112.50%
Hideaki Yoshifuji / 吉藤英明49.52%225.00%
Pavel Emelyanov37.14%112.50%
Total42100.00%8100.00%


/* Output-path route lookup.  Strict-scope destinations are first offered
 * to the l3mdev (VRF) layer; otherwise the flow's lookup flags are
 * derived from the socket binding and source address before going
 * through the policy-rule engine.
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern5537.67%420.00%
Thomas Graf3926.71%210.00%
Hideaki Yoshifuji / 吉藤英明1812.33%315.00%
Daniel Lezcano74.79%210.00%
Brian Haley64.11%15.00%
David McCullough53.42%15.00%
David S. Miller53.42%15.00%
Paolo Abeni42.74%15.00%
Linus Torvalds (pre-git)32.05%210.00%
Jiri Olsa21.37%15.00%
Pavel Emelyanov10.68%15.00%
Florian Westphal10.68%15.00%
Total146100.00%20100.00%

EXPORT_SYMBOL_GPL(ip6_route_output_flags);
/* Build a blackhole copy of @dst_orig (used e.g. by xfrm): same keys and
 * metrics, but input/output discard all packets.  Consumes the reference
 * on @dst_orig; returns the new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1,
		       DST_OBSOLETE_NONE, 0);
	if (rt) {
		rt6_info_init(rt);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);
		rt->rt6i_idev = ort->rt6i_idev;
		if (rt->rt6i_idev)
			in6_dev_hold(rt->rt6i_idev);

		rt->rt6i_gateway = ort->rt6i_gateway;
		/* the copy is never a pcpu clone even if the original was */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
		dst_free(new);
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller19992.56%753.85%
Martin KaFai Lau83.72%17.69%
Steffen Klassert41.86%17.69%
Alexey Dobriyan10.47%17.69%
Herbert Xu10.47%17.69%
Gao Feng10.47%17.69%
Eric W. Biedermann10.47%17.69%
Total215100.00%13100.00%

/*
 *	Destination cache support functions
 */

/* Re-point this dst's metrics at its parent's (dst.from) if the parent's
 * metrics pointer has changed since the last check.
 */
static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	if (rt->dst.from &&
	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau55100.00%1100.00%
Total55100.00%1100.00%


/* Validate a tree-owned route: it must still be attached to a fib6 node
 * whose serial number matches @cookie, and must not be expired.
 * Returns the dst on success, NULL if the route is stale.
 */
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2955.77%350.00%
Hannes Frederic Sowa1121.15%116.67%
Martin KaFai Lau1019.23%116.67%
Nicolas Dichtel23.85%116.67%
Total52100.00%6100.00%


/* Validate a clone via its parent (dst.from): the clone must not be
 * expired, must still be in the force-check obsolete state, and the
 * parent must itself pass rt6_check() against @cookie.
 */
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
		return &rt->dst;
	else
		return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau60100.00%2100.00%
Total60100.00%2100.00%


/* dst_ops->check hook: refresh inherited metrics, then validate pcpu and
 * uncached clones through their parent and everything else directly.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7596.15%466.67%
Hannes Frederic Sowa22.56%116.67%
Linus Torvalds (pre-git)11.28%116.67%
Total78100.00%6100.00%


/* dst_ops->negative_advice hook: drop an expired RTF_CACHE entry from the
 * tree, or release any non-cache route so the caller re-looks it up.
 * Returns the (possibly NULL) dst the caller should keep using.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5270.27%375.00%
Hideaki Yoshifuji / 吉藤英明2229.73%125.00%
Total74100.00%4100.00%


/* dst_ops->link_failure hook: report destination unreachable to the
 * sender, remove a failed RTF_CACHE clone, or invalidate the fib6 node
 * serial of a default route so cached lookups are redone.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			dst_hold(&rt->dst);
			ip6_del_rt(rt);
		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
			rt->rt6i_node->fn_sernum = -1;
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7882.98%250.00%
Hannes Frederic Sowa1313.83%125.00%
Eric Dumazet33.19%125.00%
Total94100.00%4100.00%


/* Record a new path MTU on @rt, mark it RTF_MODIFIED, and arm the
 * per-netns PMTU expiry timer (ip6_rt_mtu_expires).
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5196.23%150.00%
Alexey Kuznetsov23.77%150.00%
Total53100.00%2100.00%


/* True if a PMTU update on @rt should spawn an RTF_CACHE clone: the route
 * is not itself a cache entry, and is either a pcpu copy or still
 * attached to the fib6 tree.
 */
static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau34100.00%1100.00%
Total34100.00%1100.00%


/* Apply a PMTU update to @dst.  Addresses are taken from @iph when given,
 * else from @sk; local routes and locked-MTU dsts are ignored, as are
 * MTUs not smaller than the current one (after clamping to IPV6_MIN_MTU).
 * The neighbour toward @daddr is confirmed, and the update is stored
 * either directly on the route or on a freshly inserted cache clone.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */
			ip6_ins_rt(nrt6);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau8239.42%225.00%
Julian Anastasov7234.62%112.50%
Alexey Kuznetsov2512.02%112.50%
David S. Miller157.21%225.00%
Xin Long104.81%112.50%
Shirley Ma41.92%112.50%
Total208100.00%8100.00%


/* dst_ops->update_pmtu hook: thin wrapper passing the skb's IPv6 header
 * (if any) to __ip6_rt_update_pmtu().
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau42100.00%1100.00%
Total42100.00%1100.00%


/* Update the path MTU for the flow described by @skb's embedded IPv6
 * header (typically from an ICMPv6 Packet Too Big): look up the output
 * route for that flow and apply the new MTU to it.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller13083.33%337.50%
Lorenzo Colitti2012.82%225.00%
Hideaki Yoshifuji / 吉藤英明31.92%112.50%
Martin KaFai Lau21.28%112.50%
Alexey Kuznetsov10.64%112.50%
Total156100.00%8100.00%

EXPORT_SYMBOL_GPL(ip6_update_pmtu);
/* Socket-oriented PMTU update: apply the new MTU for @sk's flow, then,
 * if the socket's cached dst failed validation, refresh the datagram
 * dst under the socket lock (skipping v4-mapped destinations).
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu, sk->sk_bound_dev_if,
			sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7363.48%125.00%
David S. Miller3732.17%125.00%
Lorenzo Colitti43.48%125.00%
Alexey Kuznetsov10.87%125.00%
Total115100.00%4100.00%

EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */
/* flowi6 extended with the address of the router that sent the redirect,
 * so __ip6_route_redirect() can match it against candidate gateways.
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
/* Find the route a redirect applies to: walk the fib6 leaves for the
 * destination and pick the first live gateway route on the right
 * interface whose gateway equals the redirecting router.  Falls back to
 * ip6_null_entry; always returns a held route.
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
			continue;
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	dst_hold(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong21983.91%120.00%
Martin KaFai Lau2810.73%240.00%
David Ahern134.98%120.00%
Alexander Alemayhu10.38%120.00%
Total261100.00%5100.00%

;
/* Wrap @fl6 and the redirecting router's address in an ip6rd_flowi and
 * run it through the rule engine with __ip6_route_redirect().
 */
static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, flags, __ip6_route_redirect);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong63100.00%1100.00%
Total63100.00%1100.00%


/* Process a redirect for the flow in @skb's embedded IPv6 header: find
 * the affected route (keyed by the redirecting router, taken from the
 * outer header's saddr) and let rt6_do_redirect() update it.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller11580.99%233.33%
Lorenzo Colitti96.34%116.67%
Duan Jiong96.34%116.67%
Julian Anastasov64.23%116.67%
Hideaki Yoshifuji / 吉藤英明32.11%116.67%
Total142100.00%6100.00%

EXPORT_SYMBOL_GPL(ip6_redirect);
/* Like ip6_redirect() but for redirects without an embedded packet:
 * the destination comes from the ND redirect message (rd_msg->dest) and
 * the router from the outer header's saddr.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong12788.19%250.00%
Lorenzo Colitti117.64%125.00%
Julian Anastasov64.17%125.00%
Total144100.00%4100.00%


/* Socket convenience wrapper: process a redirect using @sk's netns,
 * bound device, mark and uid.
 */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3389.19%150.00%
Lorenzo Colitti410.81%150.00%
Total37100.00%2100.00%

EXPORT_SYMBOL_GPL(ip6_sk_redirect);
/* dst_ops->default_advmss hook: advertised MSS derived from the dst MTU
 * minus IPv6+TCP headers, clamped below by the ip6_rt_min_advmss sysctl
 * and above by IPV6_MAXPLEN.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}

Contributors

PersonTokensPropCommitsCommitProp
Al Viro4445.36%116.67%
David S. Miller3131.96%116.67%
Daniel Lezcano1414.43%233.33%
Shirley Ma77.22%116.67%
Hideaki Yoshifuji / 吉藤英明11.03%116.67%
Total97100.00%6100.00%


/* dst_ops->mtu hook: prefer a learned PMTU, then the raw metric, then the
 * device's IPv6 MTU (falling back to IPV6_MIN_MTU); cap at IP6_MAX_MTU
 * and subtract any lwtunnel encapsulation headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller4336.13%116.67%
Martin KaFai Lau3025.21%116.67%
Steffen Klassert1915.97%233.33%
Roopa Prabhu1411.76%116.67%
Eric Dumazet1310.92%116.67%
Total119100.00%6100.00%

/* Singly linked list (via dst->next) of ICMPv6 dsts created by
 * icmp6_dst_alloc(), reaped by icmp6_dst_gc(); protected by
 * icmp6_dst_lock.
 */
static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);
/* Allocate a standalone host route for sending an ICMPv6 packet to
 * fl6->daddr on @dev.  The dst is chained onto icmp6_dst_gc_list for
 * later reaping, fib6 GC is kicked, and the result is passed through an
 * xfrm lookup.  Returns the dst or an ERR_PTR on failure.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.output = ip6_output;
	atomic_set(&rt->dst.__refcnt, 1);
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	spin_lock_bh(&icmp6_dst_lock);
	rt->dst.next = icmp6_dst_gc_list;
	icmp6_dst_gc_list = &rt->dst;
	spin_unlock_bh(&icmp6_dst_lock);

	fib6_force_start_gc(net);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller11851.53%521.74%
Hideaki Yoshifuji / 吉藤英明3515.28%521.74%
Zheng Yan2410.48%14.35%
Gao Feng114.80%14.35%
Daniel Lezcano104.37%28.70%
Julian Anastasov83.49%14.35%
Patrick McHardy73.06%14.35%
Eric Dumazet52.18%14.35%
Benjamin Thery31.31%14.35%
Shirley Ma31.31%14.35%
Thomas Graf20.87%14.35%
Li RongQing10.44%14.35%
Alexey Dobriyan10.44%14.35%
Kazunori Miyazawa10.44%14.35%
Total229100.00%23100.00%


/* Reap unreferenced entries from icmp6_dst_gc_list.  Returns the number
 * of entries still in use (non-zero means GC should run again later).
 */
int icmp6_dst_gc(void)
{
	struct dst_entry *dst, **pprev;
	int more = 0;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;

	while ((dst = *pprev) != NULL) {
		if (!atomic_read(&dst->__refcnt)) {
			*pprev = dst->next;
			dst_free(dst);
		} else {
			pprev = &dst->next;
			++more;
		}
	}

	spin_unlock_bh(&icmp6_dst_lock);
	return more;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller7278.26%125.00%
Thomas Graf1010.87%125.00%
Stephen Hemminger66.52%125.00%
Hideaki Yoshifuji / 吉藤英明44.35%125.00%
Total92100.00%4100.00%


/* Walk icmp6_dst_gc_list and free every entry for which @func(rt, arg)
 * returns non-zero (e.g. routes on a device being torn down).
 */
static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
			    void *arg)
{
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;
	while ((dst = *pprev) != NULL) {
		struct rt6_info *rt = (struct rt6_info *) dst;
		if (func(rt, arg)) {
			*pprev = dst->next;
			dst_free(dst);
		} else {
			pprev = &dst->next;
		}
	}
	spin_unlock_bh(&icmp6_dst_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller110100.00%1100.00%
Total110100.00%1100.00%


/* dst_ops->gc hook: run fib6 garbage collection when the entry count
 * exceeds ip6_rt_max_size or the minimum GC interval has elapsed, with
 * an adaptive expiry (ip6_rt_gc_expire) that decays by the elasticity
 * sysctl.  Returns non-zero while the table is still over its limit.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano6838.64%531.25%
Linus Torvalds (pre-git)3821.59%318.75%
Benjamin Thery3620.45%16.25%
Eric Dumazet179.66%16.25%
Alexey Dobriyan105.68%16.25%
Randy Dunlap21.14%16.25%
Michal Kubeček21.14%212.50%
Arnaldo Carvalho de Melo21.14%16.25%
Li RongQing10.57%16.25%
Total176100.00%16100.00%


/* Convert the netlink RTA_METRICS attributes in @cfg into the kzalloc'd
 * metrics array + validity bitmap in @mxc.  RTAX_CC_ALGO names are
 * resolved to keys (possibly setting the ECN-CA feature bit) and
 * RTAX_HOPLIMIT is clamped to 255.  Returns 0, -ENOMEM, or -EINVAL on a
 * bad attribute; on success the caller owns and must kfree mxc->mx.
 */
static int ip6_convert_metrics(struct mx6_config *mxc,
			       const struct fib6_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;
	u32 *mp;

	if (!cfg->fc_mx)
		return 0;

	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
	if (unlikely(!mp))
		return -ENOMEM;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (unlikely(type > RTAX_MAX))
			goto err;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				goto err;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			goto err;

		mp[type - 1] = val;
		__set_bit(type - 1, mxc->mx_valid);
	}

	if (ecn_ca) {
		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
	}

	mxc->mx = mp;
	return 0;
err:
	kfree(mp);
	return -EINVAL;
}

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal13953.05%114.29%
Daniel Borkmann10841.22%457.14%
Paolo Abeni145.34%114.29%
Ian Morris10.38%114.29%
Total262100.00%7100.00%


/* Look up the nexthop @gw_addr in the specific table named by
 * cfg->fc_table (used when validating a RTF_GATEWAY route).  Returns a
 * held route, or NULL if the table does not exist or only the null entry
 * matched (letting the caller fall back to a full lookup).
 */
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;

	table = fib6_get_table(net, cfg->fc_table);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern14399.31%266.67%
Paolo Abeni10.69%133.33%
Total144100.00%3100.00%


/* Build (but do not insert) a new rt6_info from the netlink/ioctl route
 * config in @cfg: validates prefix lengths and flags, resolves the
 * device and table, sets up input/output handlers (including lwtunnel
 * encap and reject-route variants), and validates any RTF_GATEWAY
 * nexthop.  Returns the route or an ERR_PTR; all acquired device/idev
 * references are dropped on error.
 */
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU)
		goto out;

	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)
		goto out;
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len)
		goto out;
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0))
			goto out;

		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly inhibits using not link-local
			 * addresses as nexthop address.
			 * Otherwise, router will not able to send redirects.
			 * It is very good, but in some (rare!) circumstances
			 * (SIT, PtP, NBMA NOARP links) it is handy to allow
			 * some exceptions. --ANK
			 * We allow IPv4-mapped nexthops to support RFC4798-type
			 * addressing
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED)))
				goto out;

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev || (dev->flags & IFF_LOOPBACK))
			goto out;
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_free(&rt->dst);

	return ERR_PTR(err);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)41932.33%1218.18%
Hideaki Yoshifuji / 吉藤英明1269.72%812.12%
Roopa Prabhu1078.26%46.06%
Thomas Graf856.56%23.03%
Daniel Walter644.94%11.52%
Matti Vaittinen604.63%11.52%
Nicolas Dichtel604.63%46.06%
Kamala R534.09%11.52%
Tom Herbert453.47%23.03%
Vincent Bernat393.01%11.52%
Florian Westphal372.85%23.03%
David Ahern362.78%23.03%
Daniel Lezcano282.16%46.06%
Maciej Żenczykowski262.01%11.52%
David S. Miller181.39%69.09%
Jiri Benc171.31%11.52%
Nikola Forró141.08%11.52%
Gao Feng141.08%11.52%
Mathew Richardson110.85%11.52%
Sabrina Dubroca100.77%11.52%
Patrick McHardy80.62%11.52%
Erik Nordmark50.39%11.52%
Alexey Dobriyan30.23%11.52%
Benjamin Thery30.23%11.52%
Américo Wang20.15%11.52%
Joe Perches20.15%11.52%
Eric W. Biedermann20.15%23.03%
Jamal Hadi Salim10.08%11.52%
Eric Dumazet10.08%11.52%
Total1296100.00%66100.00%


/* Create a route from @cfg, convert its netlink metrics, and insert it
 * into the fib6 tree.  Returns 0 or a negative errno; the route is
 * freed on any failure before insertion.
 */
int ip6_route_add(struct fib6_config *cfg)
{
	struct mx6_config mxc = { .mx = NULL, };
	struct rt6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto out;
	}

	err = ip6_convert_metrics(&mxc, cfg);
	if (err)
		goto out;

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);

	kfree(mxc.mx);

	return err;
out:
	if (rt)
		dst_free(&rt->dst);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu5849.15%214.29%
Florian Westphal3126.27%17.14%
Linus Torvalds (pre-git)1714.41%535.71%
Thomas Graf54.24%214.29%
Hideaki Yoshifuji / 吉藤英明32.54%17.14%
Patrick McHardy21.69%17.14%
Michal Kubeček10.85%17.14%
Mathew Richardson10.85%17.14%
Total118100.00%14100.00%


/* Unlink @rt from its FIB table under the table write lock and drop the
 * caller's reference.  The null entry and DST_NOCACHE routes live outside
 * the tree, so they yield -ENOENT; the reference is released either way.
 */
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
	int err;
	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry ||
	    rt->dst.flags & DST_NOCACHE) {
		err = -ENOENT;
		goto out;
	}

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	write_unlock_bh(&table->tb6_lock);

out:
	/* release the reference the caller handed us */
	ip6_rt_put(rt);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3028.04%423.53%
Thomas Graf1917.76%317.65%
Gao Feng1413.08%15.88%
Daniel Lezcano1211.21%15.88%
Martin KaFai Lau87.48%15.88%
Patrick McHardy87.48%15.88%
Herbert Xu54.67%15.88%
Hideaki Yoshifuji / 吉藤英明32.80%15.88%
David S. Miller32.80%15.88%
Mathew Richardson32.80%15.88%
Américo Wang10.93%15.88%
Jamal Hadi Salim10.93%15.88%
Total107100.00%17100.00%


/* Delete @rt with a default netlink info block: no requesting socket,
 * namespace taken from the route's device.
 */
int ip6_del_rt(struct rt6_info *rt)
{
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};
	return __ip6_del_rt(rt, &info);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1745.95%116.67%
Denis V. Lunev1232.43%233.33%
David S. Miller38.11%116.67%
Hideaki Yoshifuji / 吉藤英明38.11%116.67%
Daniel Lezcano25.41%116.67%
Total37100.00%6100.00%


/* Delete a multipath route: remove @rt and all of its siblings under one
 * table write lock.  A single RTM_DELROUTE notification covering every hop
 * is built up front (suppressing fib6_del()'s per-hop notify) and sent
 * after the lock is released.  Consumes the caller's reference on @rt.
 */
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.ip6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		/* _safe: each sibling is unlinked as we walk the list */
		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	write_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern23488.30%266.67%
Américo Wang3111.70%133.33%
Total265100.00%3100.00%


/* Delete the route matching @cfg.  Walks the leaf chain of the located
 * fib6 node under the table read lock; on a match the lock is dropped
 * (after taking a dst reference) before the actual deletion, which needs
 * the write lock.  Returns -ESRCH when nothing matches.
 */
static int ip6_route_del(struct fib6_config *cfg)
{
	struct fib6_table *table;
	struct fib6_node *fn;
	struct rt6_info *rt;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table)
		return err;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len);

	if (fn) {
		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
			/* only delete cached clones when RTF_CACHE was asked for */
			if ((rt->rt6i_flags & RTF_CACHE) &&
			    !(cfg->fc_flags & RTF_CACHE))
				continue;
			if (cfg->fc_ifindex &&
			    (!rt->dst.dev ||
			     rt->dst.dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	read_unlock_bh(&table->tb6_lock);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)13850.55%936.00%
Thomas Graf6021.98%312.00%
Martin KaFai Lau207.33%14.00%
David Ahern176.23%14.00%
Mantas M155.49%14.00%
David S. Miller82.93%28.00%
Daniel Lezcano62.20%28.00%
Eric Dumazet31.10%14.00%
Hideaki Yoshifuji / 吉藤英明20.73%14.00%
James Morris10.37%14.00%
Stephen Rothwell10.37%14.00%
Jamal Hadi Salim10.37%14.00%
Mathew Richardson10.37%14.00%
Total273100.00%25100.00%


/* Handle a received ICMPv6 Redirect for @dst: validate the message and its
 * ND options, update the neighbour cache for the new first hop, and install
 * a cached route (RTF_CACHE) pointing at the redirect target.  Invalid or
 * unacceptable redirects are dropped with a ratelimited debug message.
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* dest == target means the destination is itself on-link;
	 * otherwise the target must be a link-local unicast router.
	 */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* routers don't accept redirects; nor do hosts configured not to */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the
	 *	current first-hop router for the specified ICMP Destination
	 *	Address.
	 */
	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */
	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	if (ip6_ins_rt(nrt))
		goto out;

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

	/* the old cached entry is superseded by the new one */
	if (rt->rt6i_flags & RTF_CACHE) {
		rt = (struct rt6_info *) dst_clone(&rt->dst);
		ip6_del_rt(rt);
	}

out:
	neigh_release(neigh);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller30357.93%520.00%
Hideaki Yoshifuji / 吉藤英明7714.72%624.00%
Linus Torvalds (pre-git)7514.34%520.00%
Tom Tucker264.97%14.00%
Alexander Aring142.68%14.00%
Julian Anastasov91.72%14.00%
Simon Horman61.15%14.00%
Martin KaFai Lau30.57%14.00%
Daniel Lezcano30.57%14.00%
Matthias Schiffer30.57%14.00%
Eric Dumazet20.38%14.00%
Alexey Dobriyan20.38%14.00%
Total523100.00%25100.00%

/* * Misc support functions */
/* Link @rt to the route it was derived from: take a reference on
 * @from's dst, record it in rt->dst.from and share its metrics.
 * @from must not itself be derived from another route.
 */
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	struct dst_entry *parent = &from->dst;

	BUG_ON(from->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(parent);
	rt->dst.from = parent;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(parent), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau69100.00%1100.00%
Total69100.00%1100.00%


/* Initialize @rt as a copy of @ort: dst callbacks, addressing, flags,
 * metrics (shared via rt6_set_from) and table membership.  Takes a
 * reference on the idev and the lwtunnel state being shared.
 */
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
{
	rt->dst.input = ort->dst.input;
	rt->dst.output = ort->dst.output;
	rt->rt6i_dst = ort->rt6i_dst;
	rt->dst.error = ort->dst.error;
	rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
	rt->dst.lastuse = jiffies;
	rt->rt6i_gateway = ort->rt6i_gateway;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7344.51%526.32%
Hideaki Yoshifuji / 吉藤英明2112.80%15.26%
Nicolas Dichtel106.10%210.53%
Martin KaFai Lau106.10%15.26%
Ville Nuorvala106.10%15.26%
Thomas Graf84.88%15.26%
Gao Feng84.88%15.26%
Florian Westphal74.27%15.26%
Jiri Benc63.66%15.26%
Eric Dumazet53.05%15.26%
Zheng Yan21.22%15.26%
Alexey Dobriyan21.22%15.26%
Benjamin Thery10.61%15.26%
David S. Miller10.61%15.26%
Total164100.00%19100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Look up an RFC 4191 route-information route for @prefix/@prefixlen via
 * @gwaddr on @dev.  Returns the route with a dst reference held, or NULL.
 */
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	/* enslaved devices keep their routes in the l3mdev table */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
	if (!fn)
		goto out;

	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->dst.dev->ifindex != ifindex)
			continue;
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
			continue;
		dst_hold(&rt->dst);
		break;
	}
out:
	read_unlock_bh(&table->tb6_lock);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明14167.46%110.00%
Thomas Graf2712.92%110.00%
David Ahern2311.00%110.00%
Daniel Lezcano73.35%220.00%
Eric Dumazet52.39%220.00%
David S. Miller41.91%220.00%
Li RongQing20.96%110.00%
Total209100.00%10100.00%


static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref) { struct fib6_config cfg = { .fc_metric = IP6_RT_PRIO_USER, .fc_ifindex = dev->ifindex, .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, }; cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, cfg.fc_dst = *prefix; cfg.fc_gateway = *gwaddr; /* We should treat it as a default route if prefix length is 0. */ if (!prefixlen) cfg.fc_flags |= RTF_DEFAULT; ip6_route_add(&cfg); return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7246.45%218.18%
Thomas Graf2818.06%19.09%
Daniel Lezcano2717.42%19.09%
David Ahern1912.26%218.18%
Alexey Dobriyan42.58%19.09%
Eric Dumazet31.94%218.18%
Rami Rosen10.65%19.09%
Eric W. Biedermann10.65%19.09%
Total155100.00%11100.00%

#endif
/* Find the RA-learnt default route via gateway @addr on @dev.
 * Returns the route with a dst reference held, or NULL.
 */
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
	/* enslaved devices keep their routes in the l3mdev table */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), tb_id);
	if (!table)
		return NULL;

	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (dev == rt->dst.dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
			break;
	}
	if (rt)
		dst_hold(&rt->dst);
	read_unlock_bh(&table->tb6_lock);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)8253.59%631.58%
Hideaki Yoshifuji / 吉藤英明2415.69%315.79%
Thomas Graf2214.38%15.26%
David Ahern127.84%15.26%
Eric Dumazet42.61%210.53%
David S. Miller42.61%210.53%
Li RongQing21.31%15.26%
Daniel Lezcano21.31%210.53%
James Morris10.65%15.26%
Total153100.00%19100.00%


/* Install an RA-learnt default route via @gwaddr on @dev with router
 * preference @pref, mark the table as holding a default router, and
 * return the installed entry (looked back up with a reference held).
 */
struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg)) {
		struct fib6_table *table;

		/* remember the table holds a default router so purges
		 * can skip tables without one
		 */
		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(gwaddr, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4631.08%426.67%
David Ahern4027.03%213.33%
Thomas Graf2315.54%16.67%
Daniel Lezcano2013.51%16.67%
Hideaki Yoshifuji / 吉藤英明149.46%320.00%
Alexey Dobriyan21.35%16.67%
Rami Rosen10.68%16.67%
Eric W. Biedermann10.68%16.67%
Eric Dumazet10.68%16.67%
Total148100.00%15100.00%


/* Remove every RA-learnt default route from @table (honouring
 * accept_ra == 2, which keeps them).  The read lock must be dropped
 * before ip6_del_rt(), so the scan restarts from the top after each
 * deletion.
 */
static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
	struct rt6_info *rt;

restart:
	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			ip6_del_rt(rt);
			goto restart;
		}
	}
	read_unlock_bh(&table->tb6_lock);

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6756.78%440.00%
Lorenzo Colitti1714.41%110.00%
Thomas Graf1210.17%110.00%
David Ahern1210.17%110.00%
Hideaki Yoshifuji / 吉藤英明54.24%110.00%
Eric Dumazet32.54%110.00%
Daniel Lezcano21.69%110.00%
Total118100.00%10100.00%


/* Purge RA-learnt default routers from every FIB table in @net that is
 * flagged as holding one.  Tables are walked under RCU.
 */
void rt6_purge_dflt_routers(struct net *net)
{
	unsigned int i;

	rcu_read_lock();
	for (i = 0; i < FIB6_TABLE_HASHSZ; i++) {
		struct fib6_table *table;

		hlist_for_each_entry_rcu(table, &net->ipv6.fib_table_hash[i],
					 tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(table);
		}
	}
	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern7998.75%150.00%
Linus Torvalds (pre-git)11.25%150.00%
Total80100.00%2100.00%


static void rtmsg_to_fib6_config(struct net *net, struct in6_rtmsg *rtmsg, struct fib6_config *cfg) { memset(cfg, 0, sizeof(*cfg)); cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? : RT6_TABLE_MAIN; cfg->fc_ifindex = rtmsg->rtmsg_ifindex; cfg->fc_metric = rtmsg->rtmsg_metric; cfg->fc_expires = rtmsg->rtmsg_info; cfg->fc_dst_len = rtmsg->rtmsg_dst_len; cfg->fc_src_len = rtmsg->rtmsg_src_len; cfg->fc_flags = rtmsg->rtmsg_flags; cfg->fc_nlinfo.nl_net = net; cfg->fc_dst = rtmsg->rtmsg_dst; cfg->fc_src = rtmsg->rtmsg_src; cfg->fc_gateway = rtmsg->rtmsg_gateway; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf10480.00%120.00%
David Ahern107.69%120.00%
Benjamin Thery75.38%120.00%
Daniel Lezcano64.62%120.00%
Alexey Dobriyan32.31%120.00%
Total130100.00%5100.00%


/* Legacy SIOCADDRT/SIOCDELRT ioctl entry point.  Requires CAP_NET_ADMIN
 * in the namespace; any other command yields -EINVAL.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
		return -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&rtmsg, arg, sizeof(struct in6_rtmsg)))
		return -EFAULT;

	rtmsg_to_fib6_config(net, &rtmsg, &cfg);

	rtnl_lock();
	if (cmd == SIOCADDRT)
		err = ip6_route_add(&cfg);
	else
		err = ip6_route_del(&cfg);
	rtnl_unlock();

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)11579.86%550.00%
Thomas Graf1510.42%110.00%
Daniel Lezcano74.86%110.00%
Eric W. Biedermann53.47%110.00%
Al Viro10.69%110.00%
David S. Miller10.69%110.00%
Total144100.00%10100.00%

/* * Drop the packet on the floor */
/* Drop @skb with an ICMPv6 destination-unreachable of the given @code,
 * bumping the appropriate no-route SNMP counter (an unspecified
 * destination counts as an address error instead).
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明3529.41%214.29%
Linus Torvalds (pre-git)3126.05%535.71%
Denis V. Lunev2117.65%17.14%
Lv Liangying2016.81%17.14%
Arnaldo Carvalho de Melo43.36%214.29%
Thomas Graf43.36%17.14%
Eric Dumazet32.52%17.14%
Brian Haley10.84%17.14%
Total119100.00%14100.00%


/* dst.input handler for blackhole/no-route entries on the input path. */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb) { skb->dev = skb_dst(skb)->dev; return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES); }

Contributors

PersonTokensPropCommitsCommitProp
James Morris1330.95%112.50%
Eric Dumazet819.05%225.00%
Dave Craig614.29%112.50%
Hideaki Yoshifuji / 吉藤英明511.90%112.50%
Eric W. Biedermann511.90%112.50%
Herbert Xu49.52%112.50%
Arnaldo Carvalho de Melo12.38%112.50%
Total42100.00%8100.00%


/* dst.input handler for administratively prohibited routes. */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb) { skb->dev = skb_dst(skb)->dev; return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES); }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2457.14%120.00%
Eric Dumazet819.05%240.00%
Hideaki Yoshifuji / 吉藤英明511.90%120.00%
Eric W. Biedermann511.90%120.00%
Total42100.00%5100.00%

/* * Allocate a dst for local (unicast / anycast) address. */
/* Allocate a host route for a local unicast or anycast address on @idev.
 * Returns the new route with refcount 1 (DST_NOCACHE, not yet in a table)
 * or ERR_PTR(-ENOMEM).  Takes a reference on @idev.
 */
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
	struct net *net = dev_net(idev->dev);
	struct net_device *dev = net->loopback_dev;
	struct rt6_info *rt;

	/* use L3 Master device as loopback for host routes if device
	 * is enslaved and address is not link local or multicast
	 */
	if (!rt6_need_strict(addr))
		dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;

	rt->rt6i_gateway = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	/* local routes of an enslaved device live in the l3mdev table */
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);
	rt->dst.flags |= DST_NOCACHE;

	atomic_set(&rt->dst.__refcnt, 1);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7833.91%517.86%
David Ahern5724.78%310.71%
Hideaki Yoshifuji / 吉藤英明3716.09%517.86%
Alexey Kuznetsov125.22%13.57%
Daniel Lezcano114.78%27.14%
Martin KaFai Lau83.48%13.57%
Thomas Graf83.48%13.57%
Julian Anastasov73.04%13.57%
David S. Miller62.61%621.43%
Benjamin Thery31.30%13.57%
Alexey Dobriyan20.87%13.57%
Hannes Frederic Sowa10.43%13.57%
Total230100.00%28100.00%

/* remove deleted ip from prefsrc entries */
/* Argument block passed to fib6_remove_prefsrc() via fib6_clean_all(). */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device being cleaned; NULL matches all */
	struct net *net;
	struct in6_addr *addr;	/* the preferred source address removed */
};
/* fib6_clean_all() callback: clear the preferred-source setting of any
 * route whose prefsrc matches the address being removed (on the given
 * device, or on any device when adni->dev is NULL).  Always returns 0,
 * i.e. never asks the walker to delete the route itself.
 */
static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
	struct arg_dev_net_ip *adni = arg;
	struct net_device *dev = adni->dev;
	struct net *net = adni->net;
	struct in6_addr *addr = adni->addr;

	if (((void *)rt->dst.dev == dev || !dev) &&
	    rt != net->ipv6.ip6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter11296.55%133.33%
David S. Miller43.45%266.67%
Total116100.00%3100.00%


/* Strip @ifp's address from the prefsrc field of every route in its
 * namespace that still references it (the address is being deleted).
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter62100.00%1100.00%
Total62100.00%1100.00%

/* Flag combinations matched by fib6_clean_tohost() below. */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Remove routers and update dst entries when gateway turn into host. */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg) { struct in6_addr *gateway = (struct in6_addr *)arg; if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) || ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) && ipv6_addr_equal(gateway, &rt->rt6i_gateway)) { return -1; } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong74100.00%1100.00%
Total74100.00%1100.00%


/* Purge routes through @gateway across all tables in @net after the
 * gateway stopped being a router.
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong24100.00%1100.00%
Total24100.00%1100.00%

/* Argument block passed to fib6_ifdown() via fib6_clean_all(). */
struct arg_dev_net {
	struct net_device *dev;	/* device going down; NULL matches all */
	struct net *net;
};

/* called with write lock held for table with rt */
/* fib6_clean_all()/icmp6_clean_all() callback used when a device goes
 * down: return -1 to delete routes on that device (or all devices when
 * adn->dev is NULL).  Multipath members survive when the idev is set to
 * ignore routes with a down link; the null entry is never removed.
 */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if (dev && rt->dst.dev != dev)
		return 0;
	if (rt == adn->net->ipv6.ip6_null_entry)
		return 0;
	if (rt->rt6i_nsiblings &&
	    rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
		return 0;

	return -1;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3542.68%228.57%
David Ahern1720.73%114.29%
Daniel Lezcano1619.51%114.29%
Stephen Hemminger1012.20%114.29%
David S. Miller44.88%228.57%
Total82100.00%7100.00%


/* Remove routes referencing @dev (all devices when @dev is NULL) from the
 * FIB and the ICMPv6 socket caches, then flush uncached routes that still
 * point at the device.
 */
void rt6_ifdown(struct net *net, struct net_device *dev)
{
	struct arg_dev_net adn = {
		.dev = dev,
		.net = net,
	};

	fib6_clean_all(net, fib6_ifdown, &adn);
	icmp6_clean_all(fib6_ifdown, &adn);
	if (dev)
		rt6_uncached_list_flush_dev(net, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano2541.67%222.22%
Linus Torvalds (pre-git)1525.00%333.33%
David S. Miller813.33%111.11%
Martin KaFai Lau711.67%111.11%
Eric W. Biedermann46.67%111.11%
Thomas Graf11.67%111.11%
Total60100.00%9100.00%

/* Argument block passed to rt6_mtu_change_route() via fib6_clean_all(). */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* the new device MTU */
};
/* fib6_clean_all() callback: propagate a device MTU change into route
 * MTU metrics.  Never deletes routes (always returns 0).
 */
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	 * so that RTAX_MTU lock cannot disable it.
	 * We still use this lock to block changes
	 * caused by addrconf/ndisc.
	 */
	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	 * IPv6 PMTU increase, so PMTU increase should be updated here.
	 * Since RFC 1981 doesn't include administrative MTU increase
	 * update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	/* If new MTU is less than route PMTU, this new MTU will be the
	 * lowest MTU in the path, update the route PMTU to reflect PMTU
	 * decreases; if new MTU is greater than route PMTU, and the
	 * old MTU is the lowest MTU in the path, update the route PMTU
	 * to reflect the increase. In this case if the other nodes' MTU
	 * also have the lowest MTU, TOO BIG MESSAGE will be lead to
	 * PMTU discovery.
	 */
	if (rt->dst.dev == arg->dev &&
	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* For RTF_CACHE with rt6i_pmtu == 0
			 * (i.e. a redirected route),
			 * the metrics of its rt->dst.from has already
			 * been updated.
			 */
			if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
				rt->rt6i_pmtu = arg->mtu;
		} else if (dst_mtu(&rt->dst) >= arg->mtu ||
			   (dst_mtu(&rt->dst) < arg->mtu &&
			    dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
		}
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5128.02%318.75%
Shirley Ma4725.82%16.25%
Martin KaFai Lau3820.88%16.25%
Alexey Kuznetsov126.59%212.50%
David S. Miller105.49%318.75%
Maciej Żenczykowski105.49%16.25%
Linus Torvalds73.85%16.25%
Herbert Xu31.65%16.25%
Simon Arlott21.10%16.25%
Jim Paris10.55%16.25%
Alexander Alemayhu10.55%16.25%
Total182100.00%16100.00%


/* Walk all routes in @dev's namespace and update their MTU metrics after
 * the device MTU changed to @mtu.
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3172.09%342.86%
Thomas Graf613.95%114.29%
Hideaki Yoshifuji / 吉藤英明36.98%114.29%
Daniel Lezcano24.65%114.29%
Eric Dumazet12.33%114.29%
Total43100.00%7100.00%

/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE. */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
/* Parse an RTM_NEWROUTE/RTM_DELROUTE netlink message into @cfg.
 * Validates attributes against rtm_ipv6_policy and lwtunnel encap types.
 * Returns 0 on success or a negative errno.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	/* all "negative" route types map onto RTF_REJECT; fc_type keeps
	 * the distinction for the error code returned to senders
	 */
	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	/* RTA_TABLE overrides the rtm_table byte (tables > 255) */
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		pref = nla_get_u8(tb[RTA_PREF]);
		/* unknown preference values fall back to medium (RFC 4191) */
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf23433.96%15.56%
Linus Torvalds (pre-git)15622.64%316.67%
Nicolas Dichtel598.56%316.67%
Xin Long527.55%15.56%
Lubomir Rintel466.68%15.56%
David Ahern426.10%15.56%
Roopa Prabhu355.08%15.56%
Daniel Walter162.32%15.56%
Maciej Żenczykowski142.03%15.56%
Martin KaFai Lau142.03%15.56%
Benjamin Thery101.45%15.56%
Jiri Benc60.87%15.56%
Hideaki Yoshifuji / 吉藤英明30.44%15.56%
Eric W. Biedermann20.29%15.56%
Total689100.00%18100.00%

/* One pending nexthop while building a multipath route
 * (see ip6_route_multipath_add()).
 */
struct rt6_nh {
	struct rt6_info *rt6_info;	/* created but not yet inserted route */
	struct fib6_config r_cfg;	/* per-nexthop config, for rollback */
	struct mx6_config mxc;		/* converted metrics for insertion */
	struct list_head next;
};
/* Warn about every nexthop of a multipath replace that failed part-way,
 * since the FIB may now be inconsistent with what userspace requested.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu3466.67%133.33%
Nicolas Dichtel1631.37%133.33%
David Ahern11.96%133.33%
Total51100.00%3100.00%


/* Queue @rt (with its config @r_cfg) on @rt6_nh_list for a later batch
 * insert.  Returns -EEXIST if an equivalent nexthop (same device, idev
 * and gateway) is already queued, -ENOMEM on allocation failure.
 */
static int ip6_route_info_append(struct list_head *rt6_nh_list,
				 struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	struct rt6_info *rtnh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		rtnh = nh->rt6_info;

		if (rtnh->dst.dev == rt->dst.dev &&
		    rtnh->rt6i_idev == rt->rt6i_idev &&
		    ipv6_addr_equal(&rtnh->rt6i_gateway,
				    &rt->rt6i_gateway))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(&nh->mxc, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu175100.00%1100.00%
Total175100.00%1100.00%


/* Send the single RTM_NEWROUTE notification for a multipath add/replace.
 * @rt is the first route inserted, @rt_last the last; either may be NULL.
 */
static void ip6_route_mpath_notify(struct rt6_info *rt,
				   struct rt6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
		rt = list_first_entry(&rt_last->rt6i_siblings,
				      struct rt6_info,
				      rt6i_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern71100.00%1100.00%
Total71100.00%1100.00%


/* Insert a multipath (RTA_MULTIPATH) route: build one rt6_info per
 * nexthop, insert them all, and send a single notification covering
 * the whole route.  On a partial failure, already-inserted nexthops
 * are deleted again so the operation is all-or-nothing.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg)
{
	struct rt6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct rt6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * rt6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		/* each nexthop inherits the route-wide config, then
		 * overrides ifindex/gateway/encap from its own attrs */
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		rt = ip6_route_info_create(&r_cfg);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}

		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
		if (err) {
			dst_free(&rt->dst);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		rt_last = nh->rt6_info;
		err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc);
		/* save reference to first route for notification */
		if (!rt_notif && !err)
			rt_notif = nh->rt6_info;

		/* nh->rt6_info is used or freed at this point, reset to NULL*/
		nh->rt6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg);
	}

cleanup:
	/* free every queued entry; rt6_info still owned by the list
	 * (i.e. never inserted) must be released via dst_free() */
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->rt6_info)
			dst_free(&nh->rt6_info->dst);
		kfree(nh->mxc.mx);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu45678.49%240.00%
David Ahern10818.59%120.00%
Nicolas Dichtel162.75%120.00%
Jiri Benc10.17%120.00%
Total581100.00%5100.00%


static int ip6_route_multipath_del(struct fib6_config *cfg) { struct fib6_config r_cfg; struct rtnexthop *rtnh; int remaining; int attrlen; int err = 1, last_err = 0; remaining = cfg->fc_mp_len; rtnh = (struct rtnexthop *)cfg->fc_mp; /* Parse a Multipath Entry */ while (rtnh_ok(rtnh, remaining)) { memcpy(&r_cfg, cfg, sizeof(*cfg)); if (rtnh->rtnh_ifindex) r_cfg.fc_ifindex = rtnh->rtnh_ifindex; attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *attrs = rtnh_attrs(rtnh); nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla) { nla_memcpy(&r_cfg.fc_gateway, nla, 16); r_cfg.fc_flags |= RTF_GATEWAY; } } err = ip6_route_del(&r_cfg); if (err) last_err = err; rtnh = rtnh_next(rtnh, &remaining); } return last_err; }

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu15282.61%266.67%
Nicolas Dichtel3217.39%133.33%
Total184100.00%3100.00%


/* RTM_DELROUTE netlink handler: translate the message into a
 * fib6_config and dispatch to the multipath or single-route delete.
 */
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_del(&cfg);

	/* single-nexthop delete removes the route even if it still has
	 * siblings */
	cfg.fc_delete_all_nh = 1;
	return ip6_route_del(&cfg);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3142.47%225.00%
Thomas Graf1926.03%225.00%
Nicolas Dichtel1317.81%112.50%
David Ahern810.96%112.50%
Patrick McHardy11.37%112.50%
Roopa Prabhu11.37%112.50%
Total73100.00%8100.00%


/* RTM_NEWROUTE netlink handler: translate the message into a
 * fib6_config and dispatch to the multipath or single-route add.
 */
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	return cfg.fc_mp ? ip6_route_multipath_add(&cfg)
			 : ip6_route_add(&cfg);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3147.69%337.50%
Thomas Graf1929.23%225.00%
Nicolas Dichtel1320.00%112.50%
Patrick McHardy11.54%112.50%
Roopa Prabhu11.54%112.50%
Total65100.00%8100.00%


static size_t rt6_nlmsg_size(struct rt6_info *rt) { int nexthop_len = 0; if (rt->rt6i_nsiblings) { nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16) /* RTA_GATEWAY */ + lwtunnel_get_encap_size(rt->dst.lwtstate); nexthop_len *= rt->rt6i_nsiblings; } return NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(16) /* RTA_SRC */ + nla_total_size(16) /* RTA_DST */ + nla_total_size(16) /* RTA_GATEWAY */ + nla_total_size(16) /* RTA_PREFSRC */ + nla_total_size(4) /* RTA_TABLE */ + nla_total_size(4) /* RTA_IIF */ + nla_total_size(4) /* RTA_OIF */ + nla_total_size(4) /* RTA_PRIORITY */ + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ + nla_total_size(sizeof(struct rta_cacheinfo)) + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ + nla_total_size(1) /* RTA_PREF */ + lwtunnel_get_encap_size(rt->dst.lwtstate) + nexthop_len; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf7245.00%114.29%
David Ahern5534.38%114.29%
Roopa Prabhu106.25%114.29%
Noriaki Takamiya85.00%114.29%
Daniel Borkmann63.75%114.29%
Lubomir Rintel63.75%114.29%
Jiri Benc31.88%114.29%
Total160100.00%7100.00%


/* Fill the nexthop attributes (gateway, oif, encap) for @rt into @skb
 * and report RTNH_F_* state bits through @flags.  @skip_oif suppresses
 * RTA_OIF for multipath encoding, where the ifindex already lives in
 * the rtnexthop header.  Returns 0 or -EMSGSIZE.
 */
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	struct net_device *dev = rt->dst.dev;

	if (!netif_running(dev) || !netif_carrier_ok(dev)) {
		*flags |= RTNH_F_LINKDOWN;
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			*flags |= RTNH_F_DEAD;
	}

	if ((rt->rt6i_flags & RTF_GATEWAY) &&
	    nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
		goto nla_put_failure;

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && dev &&
	    nla_put_u32(skb, RTA_OIF, dev->ifindex))
		goto nla_put_failure;

	if (rt->dst.lwtstate &&
	    lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern160100.00%2100.00%
Total160100.00%2100.00%

/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt) { struct rtnexthop *rtnh; unsigned int flags = 0; rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh)); if (!rtnh) goto nla_put_failure; rtnh->rtnh_hops = 0; rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0; if (rt6_nexthop_info(skb, rt, &flags, true) < 0) goto nla_put_failure; rtnh->rtnh_flags = flags; /* length of rtnetlink header + attributes */ rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh; return 0; nla_put_failure: return -EMSGSIZE; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern12298.39%250.00%
Thomas Graf10.81%125.00%
Roopa Prabhu10.81%125.00%
Total124100.00%4100.00%


/* Serialise @rt as a routing netlink message of @type into @skb.
 * @dst/@src, when non-NULL, are the concrete addresses of a
 * RTM_GETROUTE query and force full-length prefixes in the reply;
 * @iif marks an input-interface query.  Returns 0 or -EMSGSIZE (the
 * partially built message is cancelled on failure).
 */
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	/* map the dst error of a reject route back to the route type
	 * userspace originally requested */
	if (rt->rt6i_flags & RTF_REJECT) {
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_flags & RTF_ANYCAST)
		rtm->rtm_type = RTN_ANYCAST;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
			rtm->rtm_protocol = RTPROT_RA;
		else
			rtm->rtm_protocol = RTPROT_KERNEL;
	}

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	/* a concrete query destination overrides the route prefix */
	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		/* multicast destinations are resolved by the mroute
		 * code, which fills the message itself */
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;

		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	/* a cached path MTU overrides the generic metric */
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)38141.96%613.64%
David Ahern10211.23%49.09%
David S. Miller738.04%49.09%
Hideaki Yoshifuji / 吉藤英明626.83%511.36%
Thomas Graf545.95%36.82%
Nicolas Dichtel475.18%24.55%
Martin KaFai Lau394.30%12.27%
Daniel Walter293.19%12.27%
Patrick McHardy222.42%24.55%
Lubomir Rintel192.09%12.27%
Maciej Żenczykowski151.65%12.27%
Denis Ovsienko131.43%12.27%
Brian Haley131.43%24.55%
Eric Dumazet101.10%24.55%
Jiri Benc60.66%12.27%
Li Wei50.55%12.27%
Jamal Hadi Salim50.55%12.27%
Mathew Richardson40.44%12.27%
Johannes Berg30.33%12.27%
Benjamin Thery20.22%12.27%
Eric W. Biedermann20.22%12.27%
Alexey Dobriyan10.11%12.27%
Nikolay Aleksandrov10.11%12.27%
Total908100.00%44100.00%


int rt6_dump_route(struct rt6_info *rt, void *p_arg) { struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; struct net *net = arg->net; if (rt == net->ipv6.ip6_null_entry) return 0; if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); /* user wants prefix routes only */ if (rtm->rtm_flags & RTM_F_PREFIX && !(rt->rt6i_flags & RTF_PREFIX_RT)) { /* success since this is not a prefix route */ return 1; } } return rt6_fill_node(net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, NLM_F_MULTI); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6041.10%325.00%
David Ahern4027.40%216.67%
Ville Nuorvala2114.38%18.33%
Krishna Kumar149.59%18.33%
Thomas Graf42.74%18.33%
Brian Haley42.74%18.33%
Eric W. Biedermann10.68%18.33%
Mathew Richardson10.68%18.33%
Jamal Hadi Salim10.68%18.33%
Total146100.00%12100.00%


/* RTM_GETROUTE handler: perform a route lookup for the addresses and
 * interfaces given in the request and unicast the resulting route
 * back to the requester.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	int err, iif = 0, oif = 0;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	/* an explicit RTA_UID wins; otherwise input lookups are
	 * uid-less and output lookups use the caller's uid */
	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (iif) {
		/* input-path lookup on the given interface */
		struct net_device *dev;
		int flags = 0;

		dev = __dev_get_by_index(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
							       flags);
	} else {
		/* output-path lookup */
		fl6.flowi6_oif = oif;

		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* skb now owns the dst reference held by rt */
	skb_dst_set(skb, &rt->dst);

	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
			    nlh->nlmsg_seq, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)16228.72%413.33%
Thomas Graf13624.11%310.00%
Shmulik Ladkani8014.18%26.67%
Lorenzo Colitti569.93%26.67%
Américo Wang295.14%26.67%
David S. Miller244.26%413.33%
Hannes Frederic Sowa203.55%13.33%
James Morris173.01%13.33%
Alexey Dobriyan142.48%13.33%
Denis V. Lunev101.77%26.67%
Eric Dumazet40.71%13.33%
Eric W. Biedermann30.53%26.67%
Hideaki Yoshifuji / 吉藤英明30.53%13.33%
Daniel Lezcano20.35%13.33%
Brian Haley20.35%13.33%
Krishna Kumar10.18%13.33%
Mathew Richardson10.18%13.33%
Total564100.00%30100.00%


void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, unsigned int nlm_flags) { struct sk_buff *skb; struct net *net = info->nl_net; u32 seq; int err; err = -ENOBUFS; seq = info->nlh ? info->nlh->nlmsg_seq : 0; skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); if (!skb) goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, event, info->portid, seq, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); return; errout: if (err < 0) rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5733.14%522.73%
Thomas Graf4526.16%313.64%
Patrick McHardy2112.21%29.09%
Denis V. Lunev169.30%29.09%
Daniel Lezcano116.40%14.55%
Roopa Prabhu84.65%29.09%
Jamal Hadi Salim42.33%14.55%
Mathew Richardson31.74%14.55%
Brian Haley21.16%14.55%
Eric W. Biedermann21.16%14.55%
David S. Miller10.58%14.55%
Krishna Kumar10.58%14.55%
Pablo Neira Ayuso10.58%14.55%
Total172100.00%22100.00%


static int ip6_route_dev_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) { net->ipv6.ip6_null_entry->dst.dev = dev; net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES net->ipv6.ip6_prohibit_entry->dst.dev = dev; net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); #endif } return NOTIFY_OK; }

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano13094.20%133.33%
Jiri Pirko53.62%133.33%
Hideaki Yoshifuji / 吉藤英明32.17%133.33%
Total138100.00%3100.00%

/*
 *	/proc
 */
#ifdef CONFIG_PROC_FS

/* /proc/net/ipv6_route: seq_file dump of the routing table */
static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
static int rt6_stats_seq_show(struct seq_file *seq, void *v) { struct net *net = (struct net *)seq->private; seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n", net->ipv6.rt6_stats->fib_nodes, net->ipv6.rt6_stats->fib_route_nodes, net->ipv6.rt6_stats->fib_rt_alloc, net->ipv6.rt6_stats->fib_rt_entries, net->ipv6.rt6_stats->fib_rt_cache, dst_entries_get_slow(&net->ipv6.ip6_dst_ops), net->ipv6.rt6_stats->fib_discarded_routes); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3334.02%330.00%
Daniel Lezcano3232.99%220.00%
Benjamin Thery1616.49%220.00%
Randy Dunlap1515.46%220.00%
Eric Dumazet11.03%110.00%
Total97100.00%10100.00%


/* open() for /proc/net/rt6_stats: single-shot, per-netns seq_file */
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

Contributors

PersonTokensPropCommitsCommitProp
Randy Dunlap1973.08%120.00%
Pavel Emelyanov415.38%240.00%
Daniel Lezcano27.69%120.00%
Linus Torvalds (pre-git)13.85%120.00%
Total26100.00%5100.00%

/* /proc/net/rt6_stats: FIB statistics, one read per open */
static const struct file_operations rt6_stats_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt6_stats_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release_net,
};
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL
/* sysctl handler for net.ipv6.route.flush: writing a value triggers an
 * immediate garbage collection of the routing cache.  The file is
 * write-only; reads return -EINVAL.
 */
static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	/* propagate parse errors (e.g. non-numeric input) instead of
	 * silently ignoring them and flushing anyway
	 */
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4342.16%111.11%
Lucian Adrian Grijincu2423.53%111.11%
Daniel Lezcano1817.65%222.22%
Linus Torvalds65.88%111.11%
Michal Kubeček54.90%111.11%
Hideaki Yoshifuji / 吉藤英明43.92%111.11%
Al Viro10.98%111.11%
Joe Perches10.98%111.11%
Total102100.00%9100.00%

/* Template for the per-netns net.ipv6.route sysctl table.  The .data
 * pointers reference init_net here and are rewritten per namespace by
 * ipv6_route_sysctl_init() — keep the entry order in sync with the
 * indices used there.
 */
struct ctl_table ipv6_route_table_template[] = {
	{
		/* write-only trigger: flush the route cache */
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* same variable as gc_min_interval, but in milliseconds */
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
/* Duplicate the sysctl template for namespace @net and point every
 * entry's .data at the namespace's own variables.  The indices below
 * must match the order of ipv6_route_table_template[].  Returns the
 * table, or NULL on allocation failure.
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;	/* handler recovers the netns from here */
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明13458.52%111.11%
Daniel Lezcano4519.65%222.22%
Eric W. Biedermann198.30%111.11%
Alexey Dobriyan187.86%333.33%
Lucian Adrian Grijincu93.93%111.11%
Benjamin Thery41.75%111.11%
Total229100.00%9100.00%

#endif
/* Per-netns init: set up the dst ops, duplicate the template routes
 * (null, and with multiple tables also prohibit/blackhole) and seed
 * the sysctl defaults.  Unwinds allocations on failure.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* sysctl defaults; overridable via net.ipv6.route.* */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

	/* error unwind: free in reverse order of allocation */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano23447.85%426.67%
Peter Zijlstra12325.15%213.33%
David S. Miller489.82%213.33%
Benjamin Thery449.00%213.33%
Eric Dumazet306.13%16.67%
Alexey Dobriyan81.64%213.33%
Pavel Emelyanov10.20%16.67%
Denis V. Lunev10.20%16.67%
Total489100.00%15100.00%


/* Per-netns teardown: release the template routes allocated by
 * ip6_route_net_init() and the dst entry counter.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano4379.63%133.33%
Xiaotian Feng1018.52%133.33%
Alexey Dobriyan11.85%133.33%
Total54100.00%3100.00%


/* Late per-netns init: create the /proc/net entries.  Check the
 * proc_create() results — the original ignored them, leaving the
 * namespace half-initialised on failure — and unwind on error.
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	if (!proc_create("ipv6_route", 0, net->proc_net,
			 &ipv6_route_proc_fops))
		return -ENOMEM;

	if (!proc_create("rt6_stats", S_IRUGO, net->proc_net,
			 &rt6_stats_seq_fops)) {
		remove_proc_entry("ipv6_route", net->proc_net);
		return -ENOMEM;
	}
#endif
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf3879.17%150.00%
Gao Feng1020.83%150.00%
Total48100.00%2100.00%


/* Late per-netns teardown: remove the /proc/net entries created by
 * ip6_route_net_init_late().
 */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2571.43%150.00%
Gao Feng1028.57%150.00%
Total35100.00%2100.00%

/* main per-netns lifecycle of the IPv6 routing subsystem */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
/* Allocate and initialise the per-netns inet_peer base for IPv6.
 * Returns 0 or -ENOMEM.
 */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *base;

	base = kmalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	inet_peer_base_init(base);
	net->ipv6.peers = base;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller53100.00%1100.00%
Total53100.00%1100.00%


/* Tear down the per-netns inet_peer base allocated by
 * ipv6_inetpeer_init().
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *base = net->ipv6.peers;

	/* detach before invalidating so nothing finds a dying base */
	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(base);
	kfree(base);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller41100.00%2100.00%
Total41100.00%2100.00%

/* per-netns lifecycle of the IPv6 inet_peer base */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

/* per-netns /proc entries; registered after the main route ops */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

/* binds the template routes to each namespace's loopback device */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = 0,
};
/* Boot-time initialisation of the IPv6 routing subsystem.  Each step's
 * failure unwinds the preceding ones via the label ladder at the end.
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* blackhole routes share the rt6_info slab cache */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	/* per-cpu lists of uncached (e.g. cloned/pcpu) routes */
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

	/* error unwind in reverse registration order */
out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano21251.71%627.27%
Thomas Graf5914.39%313.64%
Martin KaFai Lau389.27%14.55%
David S. Miller297.07%313.64%
Linus Torvalds (pre-git)297.07%418.18%
Eric Dumazet235.61%14.55%
Arnaud Ebalard81.95%14.55%
Greg Rose61.46%14.55%
Hideaki Yoshifuji / 吉藤英明30.73%14.55%
Benjamin Thery30.73%14.55%
Total410100.00%22100.00%


/*
 * ip6_route_cleanup - teardown counterpart of ip6_route_init().
 *
 * Unregisters the notifier, the per-net subsystems, rules, xfrm6 and
 * FIB state, then releases dst entry accounting and the rt6_info slab
 * cache.  The call sequence largely mirrors init in reverse; do not
 * reorder these calls, as later teardown steps assume the earlier ones
 * have already detached their users.
 */
void ip6_route_cleanup(void) {
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	/* Must be last: all rt6_info objects come from this cache. */
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git)1324.53%428.57%
Daniel Lezcano1120.75%214.29%
David S. Miller611.32%17.14%
Xiaotian Feng611.32%17.14%
Thomas Graf611.32%17.14%
Daniel McNeil59.43%17.14%
Benjamin Thery23.77%17.14%
Hideaki Yoshifuji / 吉藤英明23.77%17.14%
Kazunori Miyazawa11.89%17.14%
Eric W. Biedermann11.89%17.14%
Total53100.00%14100.00%


Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git)317615.13%265.30%
David S. Miller233411.12%5010.18%
Hideaki Yoshifuji / 吉藤英明225810.76%8316.90%
Martin KaFai Lau225210.73%234.68%
Thomas Graf17768.46%183.67%
David Ahern16377.80%265.30%
Daniel Lezcano12656.03%214.28%
Roopa Prabhu10905.19%61.22%
Duan Jiong5582.66%40.81%
Nicolas Dichtel4872.32%91.83%
Daniel Walter3041.45%10.20%
Hannes Frederic Sowa2941.40%81.63%
Florian Westphal2691.28%61.22%
Eric Dumazet2141.02%142.85%
Julian Anastasov2020.96%40.81%
Steffen Klassert1430.68%51.02%
Shmulik Ladkani1420.68%20.41%
Benjamin Thery1360.65%61.22%
Lorenzo Colitti1320.63%51.02%
Gao Feng1270.61%51.02%
Peter Zijlstra1230.59%20.41%
Daniel Borkmann1140.54%40.81%
Jiri Benc1060.51%71.43%
Alexey Kuznetsov920.44%30.61%
Alexey Dobriyan910.43%51.02%
Lubomir Rintel820.39%10.20%
Denis V. Lunev800.38%71.43%
Eric W. Biedermann770.37%91.83%
Kamala R730.35%10.20%
Xin Long730.35%20.41%
Patrick McHardy710.34%71.43%
Randy Dunlap680.32%40.81%
Maciej Żenczykowski650.31%20.41%
Américo Wang640.30%30.61%
Shirley Ma610.29%30.61%
Matti Vaittinen600.29%10.20%
Pavel Emelyanov480.23%71.43%
Al Viro470.22%30.61%
Tom Herbert470.22%30.61%
James Morris420.20%30.61%
Zheng Yan390.19%10.20%
Andy Gospodarek390.19%10.20%
Vincent Bernat390.19%10.20%
Lucian Adrian Grijincu330.16%10.20%
Herbert Xu320.15%61.22%
Ville Nuorvala310.15%20.41%
Tom Tucker290.14%10.20%
Brian Haley280.13%40.81%
Mathew Richardson280.13%10.20%
Paolo Abeni210.10%30.61%
Krishna Kumar200.10%20.41%
Lv Liangying200.10%10.20%
Roland Dreier170.08%10.20%
Stephen Hemminger160.08%20.41%
Xiaotian Feng160.08%10.20%
Jean-Mickael Guerin150.07%10.20%
Mantas M150.07%10.20%
Jamal Hadi Salim140.07%20.41%
Nikola Forró140.07%10.20%
Linus Torvalds140.07%30.61%
Alexander Aring140.07%10.20%
Michal Kubeček130.06%30.61%
Denis Ovsienko130.06%10.20%
Joe Perches120.06%30.61%
Liping Zhang110.05%10.20%
Paul Marks100.05%10.20%
Arnaldo Carvalho de Melo100.05%30.61%
Sabrina Dubroca100.05%10.20%
Li RongQing100.05%40.81%
Kazunori Miyazawa100.05%20.41%
Noriaki Takamiya80.04%10.20%
Arnaud Ebalard80.04%10.20%
Erik Nordmark70.03%20.41%
Dave Craig60.03%10.20%
Simon Horman60.03%10.20%
Greg Rose60.03%10.20%
Jiri Pirko50.02%10.20%
Mahesh Bandewar50.02%10.20%
Daniel McNeil50.02%10.20%
Li Wei50.02%10.20%
David McCullough50.02%10.20%
Held Bernhard40.02%10.20%
Jens Rosenboom40.02%10.20%
Tejun Heo30.01%10.20%
Changli Gao30.01%10.20%
Matthias Schiffer30.01%10.20%
Adrian Bunk30.01%30.61%
Johannes Berg30.01%10.20%
Paul Gortmaker30.01%10.20%
Alexander Alemayhu20.01%10.20%
Simon Arlott20.01%10.20%
Jiri Olsa20.01%10.20%
Dave Jones20.01%20.41%
Rami Rosen20.01%10.20%
Stephen Rothwell20.01%10.20%
Min Zhang20.01%10.20%
Ian Morris10.00%10.20%
Arjan van de Ven10.00%10.20%
Nikolay Aleksandrov10.00%10.20%
Pablo Neira Ayuso10.00%10.20%
Michael Büsch10.00%10.20%
Jim Paris10.00%10.20%
Total20985100.00%491100.00%
Directory: net/ipv6
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.