cregit-Linux how code gets into the kernel

Release 4.15 net/ipv6/route.c

Directory: net/ipv6
/*
 *      Linux INET6 implementation
 *      FIB front-end.
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*      Changes:
 *
 *      YOSHIFUJI Hideaki @USAGI
 *              reworked default router selection.
 *              - respect outgoing interface
 *              - select from (probably) reachable routers (i.e.
 *              routers in REACHABLE, STALE, DELAY or PROBE states).
 *              - always select the same router if it is (probably)
 *              reachable.  otherwise, round-robin the list.
 *      Ville Nuorvala
 *              Fixed routing subtrees.
 */


#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif


/* Outcome of a next-hop reachability (NUD) check used when scoring routes.
 * Negative values are failures of increasing recoverability; see
 * rt6_check_neigh() and rt6_score_route() below.
 */
enum rt6_nud_state {
	/* next hop is unreachable; never select this route */
RT6_NUD_FAIL_HARD = -3,
	/* not reachable now, but worth probing (CONFIG_IPV6_ROUTER_PREF) */
RT6_NUD_FAIL_PROBE = -2,
	/* no neigh entry; fall back to round-robin among siblings */
RT6_NUD_FAIL_DO_RR = -1,
	/* next hop is (probably) reachable */
RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif


/* Per-cpu list of RTF_CACHE/uncached rt6_info entries that are not in the
 * fib tree; allows device-unregister to retarget their dev/idev references
 * (see rt6_uncached_list_flush_dev()).
 */
struct uncached_list {
	/* protects 'head'; taken with BH disabled */
spinlock_t		lock;
	/* list of rt6_info linked via rt->rt6i_uncached */
struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);


static void rt6_uncached_list_add(struct rt6_info *rt) { struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); rt->rt6i_uncached_list = ul; spin_lock_bh(&ul->lock); list_add_tail(&rt->rt6i_uncached, &ul->head); spin_unlock_bh(&ul->lock); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau57100.00%1100.00%
Total57100.00%1100.00%


static void rt6_uncached_list_del(struct rt6_info *rt) { if (!list_empty(&rt->rt6i_uncached)) { struct uncached_list *ul = rt->rt6i_uncached_list; struct net *net = dev_net(rt->dst.dev); spin_lock_bh(&ul->lock); list_del(&rt->rt6i_uncached); atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache); spin_unlock_bh(&ul->lock); } }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5768.67%150.00%
Wei Wang2631.33%150.00%
Total83100.00%2100.00%


static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) { struct net_device *loopback_dev = net->loopback_dev; int cpu; if (dev == loopback_dev) return; for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); struct rt6_info *rt; spin_lock_bh(&ul->lock); list_for_each_entry(rt, &ul->head, rt6i_uncached) { struct inet6_dev *rt_idev = rt->rt6i_idev; struct net_device *rt_dev = rt->dst.dev; if (rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(loopback_dev); in6_dev_put(rt_idev); } if (rt_dev == dev) { rt->dst.dev = loopback_dev; dev_hold(rt->dst.dev); dev_put(rt_dev); } } spin_unlock_bh(&ul->lock); } }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau15295.60%150.00%
Eric W. Biedermann74.40%150.00%
Total159100.00%2100.00%


static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt) { return dst_metrics_write_ptr(rt->dst.from); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau22100.00%1100.00%
Total22100.00%1100.00%


static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old) { struct rt6_info *rt = (struct rt6_info *)dst; if (rt->rt6i_flags & RTF_PCPU) return rt6_pcpu_cow_metrics(rt); else if (rt->rt6i_flags & RTF_CACHE) return NULL; else return dst_cow_metrics_generic(dst, old); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3657.14%125.00%
Martin KaFai Lau2133.33%250.00%
Zheng Yan69.52%125.00%
Total63100.00%4100.00%


static inline const void *choose_neigh_daddr(struct rt6_info *rt, struct sk_buff *skb, const void *daddr) { struct in6_addr *p = &rt->rt6i_gateway; if (!ipv6_addr_any(p)) return (const void *) p; else if (skb) return &ipv6_hdr(skb)->daddr; return daddr; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller67100.00%3100.00%
Total67100.00%3100.00%


static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct rt6_info *rt = (struct rt6_info *) dst; struct neighbour *n; daddr = choose_neigh_daddr(rt, skb, daddr); n = __ipv6_neigh_lookup(dst->dev, daddr); if (n) return n; return neigh_create(&nd_tbl, daddr, dst->dev); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller83100.00%4100.00%
Total83100.00%4100.00%


static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) { struct net_device *dev = dst->dev; struct rt6_info *rt = (struct rt6_info *)dst; daddr = choose_neigh_daddr(rt, NULL, daddr); if (!daddr) return; if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) return; if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) return; __ipv6_confirm_neigh(dev, daddr); }

Contributors

PersonTokensPropCommitsCommitProp
Julian Anastasov89100.00%1100.00%
Total89100.00%1100.00%

static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, .gc = ip6_dst_gc, .gc_thresh = 1024, .check = ip6_dst_check, .default_advmss = ip6_default_advmss, .mtu = ip6_mtu, .cow_metrics = ipv6_cow_metrics, .destroy = ip6_dst_destroy, .ifdown = ip6_dst_ifdown, .negative_advice = ip6_negative_advice, .link_failure = ip6_link_failure, .update_pmtu = ip6_rt_update_pmtu, .redirect = rt6_do_redirect, .local_out = __ip6_local_out, .neigh_lookup = ip6_neigh_lookup, .confirm_neigh = ip6_confirm_neigh, };
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst) { unsigned int mtu = dst_metric_raw(dst, RTAX_MTU); return mtu ? : dst->dev->mtu; }

Contributors

PersonTokensPropCommitsCommitProp
Steffen Klassert2058.82%375.00%
Roland Dreier1441.18%125.00%
Total34100.00%4100.00%


static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb, u32 mtu) { }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller23100.00%2100.00%
Total23100.00%2100.00%


static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb) { }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller20100.00%2100.00%
Total20100.00%2100.00%

static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, .destroy = ip6_dst_destroy, .check = ip6_dst_check, .mtu = ip6_blackhole_mtu, .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, .redirect = ip6_rt_blackhole_redirect, .cow_metrics = dst_cow_metrics_generic, .neigh_lookup = ip6_neigh_lookup, }; static const u32 ip6_template_metrics[RTAX_MAX] = { [RTAX_HOPLIMIT - 1] = 0, }; static const struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -ENETUNREACH, .input = ip6_pkt_discard, .output = ip6_pkt_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; #ifdef CONFIG_IPV6_MULTIPLE_TABLES static const struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EACCES, .input = ip6_pkt_prohibit, .output = ip6_pkt_prohibit_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; static const struct rt6_info ip6_blk_hole_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, .output = dst_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; #endif
static void rt6_info_init(struct rt6_info *rt) { struct dst_entry *dst = &rt->dst; memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst)); INIT_LIST_HEAD(&rt->rt6i_siblings); INIT_LIST_HEAD(&rt->rt6i_uncached); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau58100.00%1100.00%
Total58100.00%1100.00%

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net, struct net_device *dev, int flags) { struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, flags); if (rt) { rt6_info_init(rt); atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc); } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller2941.43%529.41%
Wei Wang1521.43%211.76%
Kazunori Miyazawa710.00%15.88%
Benjamin Thery68.57%15.88%
Madalin Bucur45.71%15.88%
Martin KaFai Lau45.71%317.65%
Nicolas Dichtel34.29%211.76%
Hideaki Yoshifuji / 吉藤英明22.86%211.76%
Total70100.00%17100.00%


struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags) { struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); if (rt) { rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); if (!rt->rt6i_pcpu) { dst_release_immediate(&rt->dst); return NULL; } } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau6788.16%125.00%
Wei Wang810.53%250.00%
Eric Dumazet11.32%125.00%
Total76100.00%4100.00%

EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *)dst; struct rt6_exception_bucket *bucket; struct dst_entry *from = dst->from; struct inet6_dev *idev; dst_destroy_metrics_generic(dst); free_percpu(rt->rt6i_pcpu); rt6_uncached_list_del(rt); idev = rt->rt6i_idev; if (idev) { rt->rt6i_idev = NULL; in6_dev_put(idev); } bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1); if (bucket) { rt->rt6i_exception_bucket = NULL; kfree(bucket); } dst->from = NULL; dst_release(from); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明4940.50%333.33%
Wei Wang3327.27%111.11%
Martin KaFai Lau2319.01%222.22%
Gao Feng86.61%111.11%
Zheng Yan75.79%111.11%
David S. Miller10.83%111.11%
Total121100.00%9100.00%


static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) { struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; struct net_device *loopback_dev = dev_net(dev)->loopback_dev; if (idev && idev->dev != loopback_dev) { struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev); if (loopback_idev) { rt->rt6i_idev = loopback_idev; in6_dev_put(idev); } } }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7481.32%450.00%
Denis V. Lunev99.89%112.50%
Herbert Xu55.49%112.50%
Wei Wang22.20%112.50%
David S. Miller11.10%112.50%
Total91100.00%8100.00%


static bool __rt6_check_expired(const struct rt6_info *rt) { if (rt->rt6i_flags & RTF_EXPIRES) return time_after(jiffies, rt->dst.expires); else return false; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau36100.00%1100.00%
Total36100.00%1100.00%


static bool rt6_check_expired(const struct rt6_info *rt) { if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) return true; } else if (rt->dst.from) { return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || rt6_check_expired((struct rt6_info *)rt->dst.from); } return false; }

Contributors

PersonTokensPropCommitsCommitProp
Gao Feng4458.67%120.00%
Hideaki Yoshifuji / 吉藤英明1722.67%120.00%
Xin Long810.67%120.00%
Eric Dumazet34.00%120.00%
Li RongQing34.00%120.00%
Total75100.00%5100.00%


static struct rt6_info *rt6_multipath_select(struct rt6_info *match, struct flowi6 *fl6, int oif, int strict) { struct rt6_info *sibling, *next_sibling; int route_choosen; /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. */ if (!fl6->mp_hash) fl6->mp_hash = rt6_multipath_hash(fl6, NULL); route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1); /* Don't change the route, if route_choosen == 0 * (siblings does not include ourself) */ if (route_choosen) list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, rt6i_siblings) { route_choosen--; if (route_choosen == 0) { struct inet6_dev *idev = sibling->rt6i_idev; if (!netif_carrier_ok(sibling->dst.dev) && idev->cnf.ignore_routes_with_linkdown) break; if (rt6_score_route(sibling, oif, strict) < 0) break; match = sibling; break; } } return match; }

Contributors

PersonTokensPropCommitsCommitProp
Nicolas Dichtel9363.70%240.00%
Ido Schimmel2819.18%120.00%
Jakub Sitnicki2517.12%240.00%
Total146100.00%5100.00%

/*
 *	Route lookup. rcu_read_lock() should be held.
 */
static inline struct rt6_info *rt6_device_match(struct net *net, struct rt6_info *rt, const struct in6_addr *saddr, int oif, int flags) { struct rt6_info *local = NULL; struct rt6_info *sprt; if (!oif && ipv6_addr_any(saddr)) goto out; for (sprt = rt; sprt; sprt = rcu_dereference(sprt->dst.rt6_next)) { struct net_device *dev = sprt->dst.dev; if (oif) { if (dev->ifindex == oif) return sprt; if (dev->flags & IFF_LOOPBACK) { if (!sprt->rt6i_idev || sprt->rt6i_idev->dev->ifindex != oif) { if (flags & RT6_LOOKUP_F_IFACE) continue; if (local && local->rt6i_idev->dev->ifindex == oif) continue; } local = sprt; } } else { if (ipv6_chk_addr(net, saddr, dev, flags & RT6_LOOKUP_F_IFACE)) return sprt; } } if (oif) { if (local) return local; if (flags & RT6_LOOKUP_F_IFACE) return net->ipv6.ip6_null_entry; } out: return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明9343.87%428.57%
Linus Torvalds (pre-git)7133.49%428.57%
Daniel Lezcano3717.45%17.14%
David S. Miller41.89%214.29%
Eric Dumazet41.89%214.29%
Wei Wang31.42%17.14%
Total212100.00%14100.00%

#ifdef CONFIG_IPV6_ROUTER_PREF struct __rt6_probe_work { struct work_struct work; struct in6_addr target; struct net_device *dev; };
static void rt6_probe_deferred(struct work_struct *w) { struct in6_addr mcaddr; struct __rt6_probe_work *work = container_of(w, struct __rt6_probe_work, work); addrconf_addr_solict_mult(&work->target, &mcaddr); ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); dev_put(work->dev); kfree(work); }

Contributors

PersonTokensPropCommitsCommitProp
Hannes Frederic Sowa6894.44%125.00%
Erik Nordmark22.78%125.00%
Jiri Benc11.39%125.00%
Michael Büsch11.39%125.00%
Total72100.00%4100.00%


static void rt6_probe(struct rt6_info *rt) { struct __rt6_probe_work *work; struct neighbour *neigh; /* * Okay, this does not seem to be appropriate * for now, however, we need to check if it * is really so; aka Router Reachability Probing. * * Router Reachability Probe MUST be rate-limited * to no more than one per minute. */ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) return; rcu_read_lock_bh(); neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); if (neigh) { if (neigh->nud_state & NUD_VALID) goto out; work = NULL; write_lock(&neigh->lock); if (!(neigh->nud_state & NUD_VALID) && time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) __neigh_set_probe_once(neigh); } write_unlock(&neigh->lock); } else { work = kmalloc(sizeof(*work), GFP_ATOMIC); } if (work) { INIT_WORK(&work->work, rt6_probe_deferred); work->target = rt->rt6i_gateway; dev_hold(rt->dst.dev); work->dev = rt->dst.dev; schedule_work(&work->work); } out: rcu_read_unlock_bh(); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明11352.07%541.67%
Hannes Frederic Sowa4721.66%18.33%
Martin KaFai Lau4420.28%216.67%
Eric Dumazet62.76%18.33%
David S. Miller31.38%18.33%
Jiri Benc31.38%18.33%
Daniel Lezcano10.46%18.33%
Total217100.00%12100.00%

#else
static inline void rt6_probe(struct rt6_info *rt) { }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明1090.91%150.00%
Joe Perches19.09%150.00%
Total11100.00%2100.00%

#endif /* * Default Router Selection (RFC 2461 6.3.6) */
static inline int rt6_check_dev(struct rt6_info *rt, int oif) { struct net_device *dev = rt->dst.dev; if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明5578.57%457.14%
David S. Miller1420.00%228.57%
Dave Jones11.43%114.29%
Total70100.00%7100.00%


static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) { struct neighbour *neigh; enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) return RT6_NUD_SUCCEED; rcu_read_lock_bh(); neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); if (neigh) { read_lock(&neigh->lock); if (neigh->nud_state & NUD_VALID) ret = RT6_NUD_SUCCEED; #ifdef CONFIG_IPV6_ROUTER_PREF else if (!(neigh->nud_state & NUD_FAILED)) ret = RT6_NUD_SUCCEED; else ret = RT6_NUD_FAIL_PROBE; #endif read_unlock(&neigh->lock); } else { ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; } rcu_read_unlock_bh(); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明8559.44%642.86%
Linus Torvalds (pre-git)2114.69%321.43%
Hannes Frederic Sowa2114.69%214.29%
Paul Marks85.59%17.14%
Jiri Benc64.20%17.14%
Eric Dumazet21.40%17.14%
Total143100.00%14100.00%


static int rt6_score_route(struct rt6_info *rt, int oif, int strict) { int m; m = rt6_check_dev(rt, oif); if (!m && (strict & RT6_LOOKUP_F_IFACE)) return RT6_NUD_FAIL_HARD; #ifdef CONFIG_IPV6_ROUTER_PREF m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; #endif if (strict & RT6_LOOKUP_F_REACHABLE) { int n = rt6_check_neigh(rt); if (n < 0) return n; } return m; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6572.22%562.50%
Hannes Frederic Sowa1718.89%112.50%
Linus Torvalds (pre-git)66.67%112.50%
Paul Marks22.22%112.50%
Total90100.00%8100.00%


static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, int *mpri, struct rt6_info *match, bool *do_rr) { int m; bool match_do_rr = false; struct inet6_dev *idev = rt->rt6i_idev; struct net_device *dev = rt->dst.dev; if (dev && !netif_carrier_ok(dev) && idev->cnf.ignore_routes_with_linkdown && !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) goto out; if (rt6_check_expired(rt)) goto out; m = rt6_score_route(rt, oif, strict); if (m == RT6_NUD_FAIL_DO_RR) { match_do_rr = true; m = 0; /* lowest valid score */ } else if (m == RT6_NUD_FAIL_HARD) { goto out; } if (strict & RT6_LOOKUP_F_REACHABLE) rt6_probe(rt); /* note that m can be RT6_NUD_FAIL_PROBE at this point */ if (m > *mpri) { *do_rr = match_do_rr; *mpri = m; match = rt; } out: return match; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6235.23%646.15%
Hannes Frederic Sowa4022.73%17.69%
Andy Gospodarek3922.16%17.69%
David S. Miller2111.93%17.69%
David Ahern73.98%17.69%
Jiri Benc42.27%17.69%
Linus Torvalds (pre-git)31.70%215.38%
Total176100.00%13100.00%


static struct rt6_info *find_rr_leaf(struct fib6_node *fn, struct rt6_info *leaf, struct rt6_info *rr_head, u32 metric, int oif, int strict, bool *do_rr) { struct rt6_info *rt, *match, *cont; int mpri = -1; match = NULL; cont = NULL; for (rt = rr_head; rt; rt = rcu_dereference(rt->dst.rt6_next)) { if (rt->rt6i_metric != metric) { cont = rt; break; } match = find_match(rt, oif, strict, &mpri, match, do_rr); } for (rt = leaf; rt && rt != rr_head; rt = rcu_dereference(rt->dst.rt6_next)) { if (rt->rt6i_metric != metric) { cont = rt; break; } match = find_match(rt, oif, strict, &mpri, match, do_rr); } if (match || !cont) return match; for (rt = cont; rt; rt = rcu_dereference(rt->dst.rt6_next)) match = find_match(rt, oif, strict, &mpri, match, do_rr); return match; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10647.53%111.11%
Steffen Klassert8136.32%111.11%
Wei Wang146.28%222.22%
Hannes Frederic Sowa83.59%111.11%
Hideaki Yoshifuji / 吉藤英明83.59%333.33%
Eric Dumazet62.69%111.11%
Total223100.00%9100.00%


static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn, int oif, int strict) { struct rt6_info *leaf = rcu_dereference(fn->leaf); struct rt6_info *match, *rt0; bool do_rr = false; int key_plen; if (!leaf || leaf == net->ipv6.ip6_null_entry) return net->ipv6.ip6_null_entry; rt0 = rcu_dereference(fn->rr_ptr); if (!rt0) rt0 = leaf; /* Double check to make sure fn is not an intermediate node * and fn->leaf does not points to its child's leaf * (This might happen if all routes under fn are deleted from * the tree and fib6_repair_tree() is called on the node.) */ key_plen = rt0->rt6i_dst.plen; #ifdef CONFIG_IPV6_SUBTREES if (rt0->rt6i_src.plen) key_plen = rt0->rt6i_src.plen; #endif if (fn->fn_bit != key_plen) return net->ipv6.ip6_null_entry; match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict, &do_rr); if (do_rr) { struct rt6_info *next = rcu_dereference(rt0->dst.rt6_next); /* no entries matched; do round-robin */ if (!next || next->rt6i_metric != rt0->rt6i_metric) next = leaf; if (next != rt0) { spin_lock_bh(&leaf->rt6i_table->tb6_lock); /* make sure next is not being deleted from the tree */ if (next->rt6i_node) rcu_assign_pointer(fn->rr_ptr, next); spin_unlock_bh(&leaf->rt6i_table->tb6_lock); } } return match ? match : net->ipv6.ip6_null_entry; }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang12650.40%430.77%
David S. Miller9538.00%17.69%
Hideaki Yoshifuji / 吉藤英明104.00%323.08%
Hannes Frederic Sowa93.60%17.69%
Daniel Lezcano41.60%17.69%
Linus Torvalds (pre-git)31.20%215.38%
Eric Dumazet31.20%17.69%
Total250100.00%13100.00%


static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt) { return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY)); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau25100.00%1100.00%
Total25100.00%1100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, const struct in6_addr *gwaddr) { struct net *net = dev_net(dev); struct route_info *rinfo = (struct route_info *) opt; struct in6_addr prefix_buf, *prefix; unsigned int pref; unsigned long lifetime; struct rt6_info *rt; if (len < sizeof(struct route_info)) { return -EINVAL; } /* Sanity check for prefix_len and length */ if (rinfo->length > 3) { return -EINVAL; } else if (rinfo->prefix_len > 128) { return -EINVAL; } else if (rinfo->prefix_len > 64) { if (rinfo->length < 2) { return -EINVAL; } } else if (rinfo->prefix_len > 0) { if (rinfo->length < 1) { return -EINVAL; } } pref = rinfo->route_pref; if (pref == ICMPV6_ROUTER_PREF_INVALID) return -EINVAL; lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ); if (rinfo->length == 3) prefix = (struct in6_addr *)rinfo->prefix; else { /* this function is safe */ ipv6_addr_prefix(&prefix_buf, (struct in6_addr *)rinfo->prefix, rinfo->prefix_len); prefix = &prefix_buf; } if (rinfo->prefix_len == 0) rt = rt6_get_dflt_router(gwaddr, dev); else rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev); if (rt && !lifetime) { ip6_del_rt(rt); rt = NULL; } if (!rt && lifetime) rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev, pref); else if (rt) rt->rt6i_flags = RTF_ROUTEINFO | (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); if (rt) { if (!addrconf_finite_timeout(lifetime)) rt6_clean_expires(rt); else rt6_set_expires(rt, jiffies + HZ * lifetime); ip6_rt_put(rt); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明30479.79%320.00%
Linus Torvalds (pre-git)348.92%533.33%
Duan Jiong184.72%16.67%
Daniel Lezcano112.89%16.67%
Gao Feng71.84%16.67%
Jens Rosenboom41.05%16.67%
Eric Dumazet10.26%16.67%
Américo Wang10.26%16.67%
Al Viro10.26%16.67%
Total381100.00%15100.00%

#endif
static struct fib6_node* fib6_backtrack(struct fib6_node *fn, struct in6_addr *saddr) { struct fib6_node *pn, *sn; while (1) { if (fn->fn_flags & RTN_TL_ROOT) return NULL; pn = rcu_dereference(fn->parent); sn = FIB6_SUBTREE(pn); if (sn && sn != fn) fn = fib6_lookup(sn, NULL, saddr); else fn = pn; if (fn->fn_flags & RTN_RTINFO) return fn; } }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7680.85%125.00%
Wei Wang1414.89%125.00%
Hideaki Yoshifuji / 吉藤英明33.19%125.00%
Daniel Lezcano11.06%125.00%
Total94100.00%4100.00%


static bool ip6_hold_safe(struct net *net, struct rt6_info **prt, bool null_fallback) { struct rt6_info *rt = *prt; if (dst_hold_safe(&rt->dst)) return true; if (null_fallback) { rt = net->ipv6.ip6_null_entry; dst_hold(&rt->dst); } else { rt = NULL; } *prt = rt; return false; }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang78100.00%1100.00%
Total78100.00%1100.00%


static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, int flags) { struct rt6_info *rt, *rt_cache; struct fib6_node *fn; rcu_read_lock(); fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: rt = rcu_dereference(fn->leaf); if (!rt) { rt = net->ipv6.ip6_null_entry; } else { rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); } if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) goto restart; } /* Search through exception table */ rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr); if (rt_cache) rt = rt_cache; if (ip6_hold_safe(net, &rt, true)) dst_use_noref(&rt->dst, jiffies); rcu_read_unlock(); trace_fib6_table_lookup(net, rt, table, fl6); return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang7130.34%317.65%
Hideaki Yoshifuji / 吉藤英明4519.23%317.65%
Nicolas Dichtel2711.54%211.76%
Thomas Graf2711.54%15.88%
Martin KaFai Lau239.83%15.88%
David S. Miller125.13%15.88%
David Ahern114.70%15.88%
Daniel Lezcano93.85%15.88%
Linus Torvalds (pre-git)72.99%317.65%
Pavel Emelyanov20.85%15.88%
Total234100.00%17100.00%


struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6, int flags) { return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup); }

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal32100.00%1100.00%
Total32100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr, int oif, int strict) { struct flowi6 fl6 = { .flowi6_oif = oif, .daddr = *daddr, }; struct dst_entry *dst; int flags = strict ? RT6_LOOKUP_F_IFACE : 0; if (saddr) { memcpy(&fl6.saddr, saddr, sizeof(*saddr)); flags |= RT6_LOOKUP_F_HAS_SADDR; } dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup); if (dst->error == 0) return (struct rt6_info *) dst; dst_release(dst); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf9273.60%220.00%
Linus Torvalds (pre-git)1512.00%330.00%
Daniel Lezcano86.40%220.00%
David S. Miller75.60%110.00%
Hideaki Yoshifuji / 吉藤英明32.40%220.00%
Total125100.00%10100.00%

EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes new route entry, the addition fails by any reason the
 * route is released.
 * Caller must hold dst before calling it.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, struct mx6_config *mxc, struct netlink_ext_ack *extack) { int err; struct fib6_table *table; table = rt->rt6i_table; spin_lock_bh(&table->tb6_lock); err = fib6_add(&table->tb6_root, rt, info, mxc, extack); spin_unlock_bh(&table->tb6_lock); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3241.56%430.77%
Thomas Graf2532.47%323.08%
David Ahern79.09%17.69%
Michal Kubeček45.19%17.69%
Florian Westphal33.90%17.69%
Mathew Richardson33.90%17.69%
Wei Wang22.60%17.69%
Jamal Hadi Salim11.30%17.69%
Total77100.00%13100.00%


int ip6_ins_rt(struct rt6_info *rt) { struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; struct mx6_config mxc = { .mx = NULL, }; /* Hold dst to account for the reference from the fib6 tree */ dst_hold(&rt->dst); return __ip6_ins_rt(rt, &info, &mxc, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1727.42%110.00%
Florian Westphal1422.58%110.00%
Denis V. Lunev1117.74%220.00%
Wei Wang914.52%110.00%
David S. Miller34.84%110.00%
Hideaki Yoshifuji / 吉藤英明34.84%110.00%
David Ahern23.23%110.00%
Daniel Lezcano23.23%110.00%
Michal Kubeček11.61%110.00%
Total62100.00%10100.00%

/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) { struct net_device *dev = rt->dst.dev; if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) { /* for copies of local routes, dst->dev needs to be the * device if it is a master device, the master device if * device is enslaved, and the loopback as the default */ if (netif_is_l3_slave(dev) && !rt6_need_strict(&rt->rt6i_dst.addr)) dev = l3mdev_master_dev_rcu(dev); else if (!netif_is_l3_master(dev)) dev = dev_net(dev)->loopback_dev; /* last case is netif_is_l3_master(dev) is true in which * case we want dev returned to be dev */ } return dev; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern86100.00%2100.00%
Total86100.00%2100.00%


/* Allocate an RTF_CACHE host-route clone of @ort for destination
 * @daddr (and, with subtrees, source @saddr).  Returns NULL on
 * allocation failure.
 */
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	/* A cache or pcpu copy is never used as the clone's parent;
	 * walk back to the originating fib6 entry first.
	 */
	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(ort);
	rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
	rcu_read_unlock();
	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	/* The clone is a /128 host route for this exact destination. */
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau9142.33%320.00%
Linus Torvalds (pre-git)8037.21%426.67%
Hideaki Yoshifuji / 吉藤英明198.84%320.00%
David Ahern188.37%16.67%
Eric Dumazet41.86%213.33%
Alexey Dobriyan20.93%16.67%
David S. Miller10.47%16.67%
Total215100.00%15100.00%


static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) { struct net_device *dev; struct rt6_info *pcpu_rt; rcu_read_lock(); dev = ip6_rt_get_dev_rcu(rt); pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags); rcu_read_unlock(); if (!pcpu_rt) return NULL; ip6_rt_copy_init(pcpu_rt, rt); pcpu_rt->rt6i_protocol = rt->rt6i_protocol; pcpu_rt->rt6i_flags |= RTF_PCPU; return pcpu_rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau6879.07%150.00%
David Ahern1820.93%150.00%
Total86100.00%2100.00%

/* It should be called with rcu_read_lock() acquired */
/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	/* Take a reference before handing the copy out; ip6_hold_safe()
	 * passes &pcpu_rt so it may update the pointer if the hold
	 * cannot be taken.
	 */
	if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
		rt6_dst_from_metrics_check(pcpu_rt);

	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5187.93%266.67%
Wei Wang712.07%133.33%
Total58100.00%3100.00%


/* Create and publish the per-cpu copy of @rt for this cpu.  Falls
 * back to the (held) null entry on allocation failure.
 */
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(rt->rt6i_pcpu);
	/* Publish into the per-cpu slot; the slot must have been empty,
	 * hence the BUG_ON on a non-NULL previous value.
	 */
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	rt6_dst_from_metrics_check(pcpu_rt);
	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau10693.81%360.00%
Wei Wang65.31%120.00%
Eric Dumazet10.88%120.00%
Total113100.00%5100.00%

/* exception hash table implementation */ static DEFINE_SPINLOCK(rt6_exception_lock); /* Remove rt6_ex from hash table and free the memory * Caller must hold rt6_exception_lock */
/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	/* Fetch net before the route may lose its last reference below. */
	net = dev_net(rt6_ex->rt6i->dst.dev);
	rt6_ex->rt6i->rt6i_node = NULL;
	hlist_del_rcu(&rt6_ex->hlist);
	rt6_release(rt6_ex->rt6i);
	/* The entry itself is freed after a grace period; RCU walkers may
	 * still be traversing it.
	 */
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
	net->ipv6.rt6_stats->fib_rt_cache--;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang8286.32%266.67%
Colin Ian King1313.68%133.33%
Total95100.00%3100.00%

/* Remove oldest rt6_ex in bucket and free the memory * Caller must hold rt6_exception_lock */
/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *victim = NULL;
	struct rt6_exception *iter;

	if (!bucket)
		return;

	/* Scan the chain for the entry with the oldest timestamp. */
	hlist_for_each_entry(iter, &bucket->chain, hlist) {
		if (!victim || time_before(iter->stamp, victim->stamp))
			victim = iter;
	}

	rt6_remove_exception(bucket, victim);
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang64100.00%1100.00%
Total64100.00%1100.00%


static u32 rt6_exception_hash(const struct in6_addr *dst, const struct in6_addr *src) { static u32 seed __read_mostly; u32 val; net_get_random_once(&seed, sizeof(seed)); val = jhash(dst, sizeof(*dst), seed); #ifdef CONFIG_IPV6_SUBTREES if (src) val = jhash(src, sizeof(*src), val); #endif return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang84100.00%1100.00%
Total84100.00%1100.00%

/* Helper function to find the cached rt in the hash table * and update bucket pointer to point to the bucket for this * (daddr, saddr) pair * Caller must hold rt6_exception_lock */
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	/* Advance the caller's bucket pointer to the hashed slot. */
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		/* With subtrees the source address must match too. */
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang131100.00%1100.00%
Total131100.00%1100.00%

/* Helper function to find the cached rt in the hash table * and update bucket pointer to point to the bucket for this * (daddr, saddr) pair * Caller must hold rcu_read_lock() */
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	/* Advance the caller's bucket pointer to the hashed slot. */
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		/* With subtrees the source address must match too. */
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang138100.00%1100.00%
Total138100.00%1100.00%


/* Insert the cached clone @nrt into the exception table of its parent
 * route @ort, replacing any existing entry for the same address pair.
 * Returns 0 on success or a negative errno.
 */
static int rt6_insert_exception(struct rt6_info *nrt,
				struct rt6_info *ort)
{
	struct net *net = dev_net(ort->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err = 0;

	/* ort can't be a cache or pcpu route */
	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;
	WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

	spin_lock_bh(&rt6_exception_lock);

	/* A flushed parent route must not grow a new bucket list. */
	if (ort->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates ort is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (ort->rt6i_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif

	/* Update rt6i_prefsrc as it could be changed
	 * in rt6_remove_prefsrc()
	 */
	nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
	/* rt6_mtu_change() might lower mtu on ort.
	 * Only insert this exception route if its mtu
	 * is less than ort's mtu value.
	 */
	if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
		err = -EINVAL;
		goto out;
	}

	/* Replace any existing exception for the same (daddr, saddr). */
	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	atomic_inc(&nrt->rt6i_ref);
	nrt->rt6i_node = ort->rt6i_node;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	/* Evict the least recently used entry when over capacity. */
	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		fib6_update_sernum(ort);
		fib6_force_start_gc(net);
	}

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang37798.18%480.00%
Paolo Abeni71.82%120.00%
Total384100.00%5100.00%


/* Remove every exception entry of @rt and prevent the bucket list
 * from being recreated afterwards.
 */
void rt6_flush_exceptions(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);
	/* Prevent rt6_insert_exception() to recreate the bucket list */
	rt->exception_bucket_flushed = 1;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
	if (!bucket)
		goto out;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
			rt6_remove_exception(bucket, rt6_ex);
		/* Every chain must be empty once flushed. */
		WARN_ON_ONCE(bucket->depth);
		bucket++;
	}

out:
	spin_unlock_bh(&rt6_exception_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang114100.00%1100.00%
Total114100.00%1100.00%

/* Find cached rt in the hash table inside passed in rt * Caller has to hold rcu_read_lock() */
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct rt6_info *res = NULL;

	bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates rt is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (rt->rt6i_src.plen)
		src_key = saddr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	/* Expired entries are skipped; GC will reap them later. */
	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		res = rt6_ex->rt6i;

	return res;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang107100.00%1100.00%
Total107100.00%1100.00%

/* Remove the passed in cached rt from the hash table that contains it */
/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err;

	/* Only RTF_CACHE clones with a parent can live in a table. */
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang17299.42%150.00%
Colin Ian King10.58%150.00%
Total173100.00%2100.00%

/* Find rt6_ex which contains the passed in rt cache and * refresh its stamp */
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_info *from = (struct rt6_info *)rt->dst.from;
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;

	/* Nothing to refresh unless rt is a cached clone with a parent. */
	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return;

	rcu_read_lock();
	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang12399.19%150.00%
Colin Ian King10.81%150.00%
Total124100.00%2100.00%


static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt) { struct rt6_exception_bucket *bucket; struct rt6_exception *rt6_ex; int i; bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, lockdep_is_held(&rt6_exception_lock)); if (bucket) { for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { rt6_ex->rt6i->rt6i_prefsrc.plen = 0; } bucket++; } } }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang83100.00%1100.00%
Total83100.00%1100.00%


static void rt6_exceptions_update_pmtu(struct rt6_info *rt, int mtu) { struct rt6_exception_bucket *bucket; struct rt6_exception *rt6_ex; int i; bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, lockdep_is_held(&rt6_exception_lock)); if (bucket) { for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) { hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) { struct rt6_info *entry = rt6_ex->rt6i; /* For RTF_CACHE with rt6i_pmtu == 0 * (i.e. a redirected route), * the metrics of its rt->dst.from has already * been updated. */ if (entry->rt6i_pmtu && entry->rt6i_pmtu > mtu) entry->rt6i_pmtu = mtu; } bucket++; } } }

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang104100.00%1100.00%
Total104100.00%1100.00%

#define RTF_CACHE_GATEWAY (RTF_GATEWAY | RTF_CACHE)
/* Remove cached gateway exceptions of @rt that point at the given
 * (now stale) gateway address.
 */
static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
					struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				     lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				/* Drop only cached routes that actually go
				 * through this gateway.
				 */
				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang148100.00%1100.00%
Total148100.00%1100.00%


/* Decide the fate of one exception entry during garbage collection:
 * remove it if it aged out, lost its router, or expired; otherwise
 * count it as a surviving entry in gc_args->more.
 * Caller must hold rt6_exception_lock.
 */
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES) &&
	    time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
		RT6_TRACE("aging clone %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	} else if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
		if (neigh) {
			neigh_flags = neigh->flags;
			neigh_release(neigh);
		}
		/* A gateway clone is only kept while its next hop is still
		 * known to be a router.
		 */
		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (__rt6_check_expired(rt)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}
	gc_args->more++;
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang15582.89%150.00%
Paolo Abeni3217.11%150.00%
Total187100.00%2100.00%


/* Run garbage collection over the whole exception table of @rt. */
void rt6_age_exceptions(struct rt6_info *rt,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock_bh(&rt6_exception_lock);
}

Contributors

PersonTokensPropCommitsCommitProp
Wei Wang122100.00%1100.00%
Total122100.00%1100.00%


/* Main policy-routing lookup.  Resolves @fl6 in @table and returns a
 * held route: either a cached/uncached RTF_CACHE clone, a per-cpu
 * copy, or the null entry when nothing matches.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt, *rt_cache;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	/* Remember the starting node so the REACHABLE-relaxed retry can
	 * restart from it.
	 */
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(net, fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	/*Search through exception table */
	rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
	if (rt_cache)
		rt = rt_cache;

	if (rt == net->ipv6.ip6_null_entry) {
		rcu_read_unlock();
		dst_hold(&rt->dst);
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		if (ip6_hold_safe(net, &rt, true)) {
			dst_use_noref(&rt->dst, jiffies);
			rt6_dst_from_metrics_check(rt);
		}
		rcu_read_unlock();
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */
		struct rt6_info *uncached_rt;

		if (ip6_hold_safe(net, &rt, true)) {
			dst_use_noref(&rt->dst, jiffies);
		} else {
			/* rt was freed under us; return what ip6_hold_safe
			 * left in rt.
			 */
			rcu_read_unlock();
			uncached_rt = rt;
			goto uncached_rt_out;
		}
		rcu_read_unlock();

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

uncached_rt_out:
		trace_fib6_table_lookup(net, uncached_rt, table, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */
		struct rt6_info *pcpu_rt;

		dst_use_noref(&rt->dst, jiffies);
		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (!pcpu_rt) {
			/* atomic_inc_not_zero() is needed when using rcu */
			if (atomic_inc_not_zero(&rt->rt6i_ref)) {
				/* No dst_hold() on rt is needed because grabbing
				 * rt->rt6i_ref makes sure rt can't be released.
				 */
				pcpu_rt = rt6_make_pcpu_route(rt);
				rt6_release(rt);
			} else {
				/* rt is already removed from tree */
				pcpu_rt = net->ipv6.ip6_null_entry;
				dst_hold(&pcpu_rt->dst);
			}
		}
		local_bh_enable();
		rcu_read_unlock();
		trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
		return pcpu_rt;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau21437.09%822.22%
Wei Wang16728.94%719.44%
David Ahern518.84%38.33%
Linus Torvalds (pre-git)457.80%411.11%
Hideaki Yoshifuji / 吉藤英明315.37%719.44%
Nicolas Dichtel193.29%25.56%
Thomas Graf193.29%12.78%
Eric Dumazet111.91%12.78%
David S. Miller81.39%12.78%
Daniel Lezcano71.21%12.78%
Pavel Emelyanov50.87%12.78%
Total577100.00%36100.00%

EXPORT_SYMBOL_GPL(ip6_pol_route);
/* Input-path policy lookup: resolve using the incoming interface. */
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	int iif = fl6->flowi6_iif;

	return ip6_pol_route(net, table, iif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Pavel Emelyanov3071.43%133.33%
Daniel Lezcano716.67%133.33%
David S. Miller511.90%133.33%
Total42100.00%3100.00%


struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, struct flowi6 *fl6, int flags) { if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE; return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); }

Contributors

PersonTokensPropCommitsCommitProp
Shmulik Ladkani57100.00%1100.00%
Total57100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
/* Derive the L3 flow keys used for multipath hashing.  For ICMPv6
 * errors the keys come from the embedded (inner) offending header so
 * the error follows the same path as the flow that triggered it.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = icmp6_hdr(skb);
	/* Only ICMPv6 error types embed the original packet's header. */
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) +
				       sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	memset(keys, 0, sizeof(*keys));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->addrs.v6addrs.src = key_iph->saddr;
	keys->addrs.v6addrs.dst = key_iph->daddr;
	keys->tags.flow_label = ip6_flowinfo(key_iph);
	keys->basic.ip_proto = key_iph->nexthdr;
}

Contributors

PersonTokensPropCommitsCommitProp
Jakub Sitnicki208100.00%1100.00%
Total208100.00%1100.00%

/* if skb is set it will be used and fl6 can be NULL */
/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb)
{
	struct flow_keys hash_keys;

	/* Without an skb, hash the flow descriptor directly. */
	if (!skb)
		return get_hash_from_flowi6(fl6);

	ip6_multipath_l3_keys(skb, &hash_keys);
	return flow_hash_from_keys(&hash_keys);
}

Contributors

PersonTokensPropCommitsCommitProp
Jakub Sitnicki48100.00%1100.00%
Total48100.00%1100.00%


/* Route an incoming skb and attach the result as its dst. */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	/* Collect-metadata (rx) tunnels key the lookup by tunnel id. */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	/* ICMPv6 gets a multipath hash so errors follow their flow. */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf6135.26%315.00%
Jiri Benc4224.28%315.00%
Jakub Sitnicki2313.29%15.00%
Daniel Lezcano126.94%210.00%
David S. Miller95.20%15.00%
Hideaki Yoshifuji / 吉藤英明74.05%315.00%
Shmulik Ladkani52.89%15.00%
Eric Dumazet52.89%210.00%
Linus Torvalds (pre-git)42.31%210.00%
Arnaldo Carvalho de Melo31.73%15.00%
David Ahern21.16%15.00%
Total173100.00%20100.00%


/* Output-path policy lookup: resolve using the outgoing interface. */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	int oif = fl6->flowi6_oif;

	return ip6_pol_route(net, table, oif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1228.57%225.00%
Thomas Graf1126.19%112.50%
Daniel Lezcano716.67%112.50%
David S. Miller511.90%112.50%
Hideaki Yoshifuji / 吉藤英明49.52%225.00%
Pavel Emelyanov37.14%112.50%
Total42100.00%8100.00%


/* Resolve an output route for a locally generated flow, deriving
 * strict-binding and source-address-preference flags from the socket
 * and flow before delegating to the policy lookup.
 */
struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	/* Scope-sensitive destinations on an l3mdev are resolved in the
	 * master device's scope.
	 */
	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern5537.67%420.00%
Thomas Graf3926.71%210.00%
Hideaki Yoshifuji / 吉藤英明1812.33%315.00%
Daniel Lezcano74.79%210.00%
Brian Haley64.11%15.00%
David S. Miller53.42%15.00%
David McCullough53.42%15.00%
Paolo Abeni42.74%15.00%
Linus Torvalds (pre-git)32.05%210.00%
Jiri Olsa21.37%15.00%
Pavel Emelyanov10.68%15.00%
Florian Westphal10.68%15.00%
Total146100.00%20100.00%

EXPORT_SYMBOL_GPL(ip6_route_output_flags);
/* Build a standalone blackhole copy of @dst_orig that discards all
 * traffic.  Consumes the caller's reference on @dst_orig; returns the
 * new dst or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		/* All input/output through this dst is discarded. */
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* The copy is not a per-cpu clone even if the original was. */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller17380.47%637.50%
Wei Wang2511.63%318.75%
Martin KaFai Lau83.72%16.25%
Steffen Klassert52.33%212.50%
Herbert Xu10.47%16.25%
Eric W. Biedermann10.47%16.25%
Gao Feng10.47%16.25%
Alexey Dobriyan10.47%16.25%
Total215100.00%16100.00%

/* * Destination cache support functions */
/*
 *	Destination cache support functions
 */

/* Re-point our metrics at the parent route's metrics block if the
 * parent's block has been replaced since we copied it.
 */
static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	struct dst_entry *from = rt->dst.from;

	if (from && dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(from), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau55100.00%1100.00%
Total55100.00%1100.00%


static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie) { u32 rt_cookie = 0; if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie) return NULL; if (rt6_check_expired(rt)) return NULL; return &rt->dst; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2036.36%444.44%
Hannes Frederic Sowa1120.00%111.11%
Wei Wang1018.18%111.11%
Martin KaFai Lau1018.18%111.11%
Steffen Klassert23.64%111.11%
Nicolas Dichtel23.64%111.11%
Total55100.00%9100.00%


static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie) { if (!__rt6_check_expired(rt) && rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK && rt6_check((struct rt6_info *)(rt->dst.from), cookie)) return &rt->dst; else return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau60100.00%2100.00%
Total60100.00%2100.00%


/* dst_ops->check implementation. */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	bool check_from;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */
	rt6_dst_from_metrics_check(rt);

	/* Per-cpu copies and uncached clones with a parent are validated
	 * against that parent; everything else against itself.
	 */
	check_from = rt->rt6i_flags & RTF_PCPU ||
		     (unlikely(!list_empty(&rt->rt6i_uncached)) &&
		      rt->dst.from);

	return check_from ? rt6_dst_from_check(rt, cookie)
			  : rt6_check(rt, cookie);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7288.89%457.14%
Wei Wang78.64%114.29%
Linus Torvalds (pre-git)11.23%114.29%
Hannes Frederic Sowa11.23%114.29%
Total81100.00%7100.00%


/* dst_ops->negative_advice: an upper layer reported the route is
 * misbehaving.  Expired cached clones are deleted; other routes just
 * lose the caller's reference.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5270.27%375.00%
Hideaki Yoshifuji / 吉藤英明2229.73%125.00%
Total74100.00%4100.00%


/* dst_ops->link_failure: the packet could not be delivered.  Report
 * the failure via ICMPv6 and stop reusing the offending route: cached
 * clones are deleted, default routes invalidate their fib node's
 * serial number so lookups are redone.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* Only delete if we still managed to hold the dst. */
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(rt);
		} else {
			struct fib6_node *fn;

			rcu_read_lock();
			fn = rcu_dereference(rt->rt6i_node);
			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
				fn->fn_sernum = -1;
			rcu_read_unlock();
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7163.39%233.33%
Wei Wang2724.11%233.33%
Hannes Frederic Sowa119.82%116.67%
Eric Dumazet32.68%116.67%
Total112100.00%6100.00%


/* Record a newly learned path MTU on @rt and arm its expiry timer. */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_pmtu = mtu;
	rt->rt6i_flags |= RTF_MODIFIED;
	/* Learned MTUs age out after the configured interval. */
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5196.23%150.00%
Alexey Kuznetsov23.77%150.00%
Total53100.00%2100.00%


static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) { return !(rt->rt6i_flags & RTF_CACHE) && (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->rt6i_node)); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau3491.89%150.00%
Wei Wang38.11%150.00%
Total37100.00%2100.00%


/* Core PMTU update: either lower the mtu on the route itself (when it
 * is already cacheable) or create a new RTF_CACHE exception carrying
 * the learned mtu.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	/* Prefer addresses from the packet; fall back to the socket. */
	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	/* Never go below the IPv6 minimum MTU. */
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			/* Drop the clone if it could not be inserted. */
			if (rt6_insert_exception(nrt6, rt6))
				dst_release_immediate(&nrt6->dst);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau8034.33%220.00%
Julian Anastasov7230.90%110.00%
Wei Wang2812.02%220.00%
Alexey Kuznetsov2510.73%110.00%
David S. Miller146.01%220.00%
Xin Long104.29%110.00%
Shirley Ma41.72%110.00%
Total233100.00%10100.00%


/* dst_ops->update_pmtu: extract the IPv6 header (if any) and defer
 * to the core updater.
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	const struct ipv6hdr *iph = NULL;

	if (skb)
		iph = ipv6_hdr(skb);

	__ip6_rt_update_pmtu(dst, sk, iph, mtu);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau42100.00%1100.00%
Total42100.00%1100.00%


/* Apply a PMTU update (e.g. from a Packet Too Big message carried in
 * @skb) to the route matching the packet's addresses.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark),
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller13083.33%337.50%
Lorenzo Colitti2012.82%225.00%
Hideaki Yoshifuji / 吉藤英明31.92%112.50%
Martin KaFai Lau21.28%112.50%
Alexey Kuznetsov10.64%112.50%
Total156100.00%8100.00%

EXPORT_SYMBOL_GPL(ip6_update_pmtu);
/* Socket-aware PMTU update: adjust the route MTU for the packet in @skb
 * and, if the socket's cached dst has become obsolete as a result,
 * refresh the connected datagram socket's route.
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu, sk->sk_bound_dev_if,
			sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	/* Only re-route when we own the socket and this is a pure IPv6
	 * flow (v4-mapped destinations are handled by the IPv4 path).
	 */
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7363.48%125.00%
David S. Miller3732.17%125.00%
Lorenzo Colitti43.48%125.00%
Alexey Kuznetsov10.87%125.00%
Total115100.00%4100.00%

EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */

/* Flow descriptor for redirect validation: the ordinary flowi6 plus the
 * address of the router that sent the redirect.
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
/* Lookup used when validating a received redirect: find the route the
 * redirect applies to, i.e. a gateway route via the arrival interface
 * whose nexthop is the redirecting router (rdfl->gateway). Runs under
 * RCU; the returned entry is reference-held via ip6_hold_safe().
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt, *rt_cache;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */
	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		/* rt_cache's gateway might be different from its 'parent'
		 * in the case of an ip redirect.
		 * So we keep searching in the exception table if the gateway
		 * is different.
		 */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
			rt_cache = rt6_find_cached_rt(rt, &fl6->daddr,
						      &fl6->saddr);
			if (rt_cache &&
			    ipv6_addr_equal(&rdfl->gateway,
					    &rt_cache->rt6i_gateway)) {
				rt = rt_cache;
				break;
			}
			continue;
		}
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	/* No match in this node: retry from the parent (saddr subtree
	 * backtracking) before giving up.
	 */
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	ip6_hold_safe(net, &rt, true);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong18464.79%112.50%
Wei Wang6021.13%337.50%
Martin KaFai Lau289.86%225.00%
David Ahern113.87%112.50%
Alexander Alemayhu10.35%112.50%
Total284100.00%8100.00%

;
static struct dst_entry *ip6_route_redirect(struct net *net, const struct flowi6 *fl6, const struct in6_addr *gateway) { int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip6rd_flowi rdfl; rdfl.fl6 = *fl6; rdfl.gateway = *gateway; return fib6_rule_lookup(net, &rdfl.fl6, flags, __ip6_route_redirect); }

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong63100.00%1100.00%
Total63100.00%1100.00%


/* Process a received redirect for the packet in @skb (IPv6 header at
 * skb->data): look up the route it affects and apply the redirect.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_uid = uid,
	};

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller11580.99%233.33%
Duan Jiong96.34%116.67%
Lorenzo Colitti96.34%116.67%
Julian Anastasov64.23%116.67%
Hideaki Yoshifuji / 吉藤英明32.11%116.67%
Total142100.00%6100.00%

EXPORT_SYMBOL_GPL(ip6_redirect);
/* Redirect handling variant used when only the ICMPv6 redirect message is
 * available (no embedded IPv6 header of the triggering packet): the flow
 * is reconstructed from the redirect message itself.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.flowi6_iif = LOOPBACK_IFINDEX,
		.flowi6_oif = oif,
		.flowi6_mark = mark,
		.daddr = msg->dest,
		/* our own address (the redirect's destination) is used as
		 * the lookup source
		 */
		.saddr = iph->daddr,
		.flowi6_uid = sock_net_uid(net, NULL),
	};

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong12788.19%250.00%
Lorenzo Colitti117.64%125.00%
Julian Anastasov64.17%125.00%
Total144100.00%4100.00%


/* Socket convenience wrapper around ip6_redirect(). */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct net *net = sock_net(sk);

	ip6_redirect(skb, net, sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3389.19%150.00%
Lorenzo Colitti410.81%150.00%
Total37100.00%2100.00%

EXPORT_SYMBOL_GPL(ip6_sk_redirect);
/* dst_ops .default_advmss hook: advertised MSS derived from the path MTU,
 * clamped between the ip6_rt_min_advmss sysctl and IPV6_MAXPLEN.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	struct net *net = dev_net(dev);
	unsigned int mtu;

	mtu = dst_mtu(dst) - sizeof(struct ipv6hdr) - sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;

	return mtu;
}

Contributors

PersonTokensPropCommitsCommitProp
Al Viro4445.36%116.67%
David S. Miller3131.96%116.67%
Daniel Lezcano1414.43%233.33%
Shirley Ma77.22%116.67%
Hideaki Yoshifuji / 吉藤英明11.03%116.67%
Total97100.00%6100.00%


/* dst_ops .mtu hook: report the effective MTU for @dst.
 *
 * Precedence: per-route discovered PMTU (rt6i_pmtu), then the RTAX_MTU
 * metric, then the egress device's IPv6 MTU (IPV6_MIN_MTU if the device
 * has no inet6_dev). The result is capped at IP6_MAX_MTU and reduced by
 * any lwtunnel encapsulation headroom.
 */
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller4336.13%116.67%
Martin KaFai Lau3025.21%116.67%
Steffen Klassert1915.97%233.33%
Roopa Prabhu1411.76%116.67%
Eric Dumazet1310.92%116.67%
Total119100.00%6100.00%


/* Allocate an uncached (never FIB-inserted) host route for sending
 * ICMPv6/NDISC packets to fl6->daddr via @dev.
 *
 * The route joins the uncached list so device teardown can release its
 * net_device reference. Returns the dst (possibly transformed by
 * xfrm_lookup()) or an ERR_PTR on failure.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	/* HOPLIMIT 0 means "use the per-interface default". */
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_ifdown() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller8640.76%520.00%
Hideaki Yoshifuji / 吉藤英明3114.69%520.00%
Zheng Yan2411.37%14.00%
Wei Wang146.64%28.00%
Gao Feng115.21%14.00%
Brendan McGrath83.79%14.00%
Daniel Lezcano83.79%28.00%
Julian Anastasov83.79%14.00%
Patrick McHardy73.32%14.00%
Eric Dumazet52.37%14.00%
Benjamin Thery31.42%14.00%
Shirley Ma31.42%14.00%
Kazunori Miyazawa10.47%14.00%
Li RongQing10.47%14.00%
Alexey Dobriyan10.47%14.00%
Total211100.00%25100.00%


/* dst_ops .gc hook: garbage-collect IPv6 routing cache entries.
 *
 * Work is skipped while the last GC ran less than ip6_rt_gc_min_interval
 * ago and the entry count is within ip6_rt_max_size. ip6_rt_gc_expire is
 * an adaptive aggressiveness value: it grows by one on each forced run
 * and decays by 1/2^elasticity on every call.
 *
 * Returns non-zero when the cache is still over rt_max_size, signalling
 * the allocator to fail the new entry.
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano6838.64%531.25%
Linus Torvalds (pre-git)3821.59%318.75%
Benjamin Thery3620.45%16.25%
Eric Dumazet179.66%16.25%
Alexey Dobriyan105.68%16.25%
Michal Kubeček21.14%212.50%
Randy Dunlap21.14%16.25%
Arnaldo Carvalho de Melo21.14%16.25%
Li RongQing10.57%16.25%
Total176100.00%16100.00%


static int ip6_convert_metrics(struct mx6_config *mxc, const struct fib6_config *cfg) { struct net *net = cfg->fc_nlinfo.nl_net; bool ecn_ca = false; struct nlattr *nla; int remaining; u32 *mp; if (!cfg->fc_mx) return 0; mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); if (unlikely(!mp)) return -ENOMEM; nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { int type = nla_type(nla); u32 val; if (!type) continue; if (unlikely(type > RTAX_MAX)) goto err; if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; nla_strlcpy(tmp, nla, sizeof(tmp)); val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca); if (val == TCP_CA_UNSPEC) goto err; } else { val = nla_get_u32(nla); } if (type == RTAX_HOPLIMIT && val > 255) val = 255; if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) goto err; mp[type - 1] = val; __set_bit(type - 1, mxc->mx_valid); } if (ecn_ca) { __set_bit(RTAX_FEATURES - 1, mxc->mx_valid); mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; } mxc->mx = mp; return 0; err: kfree(mp); return -EINVAL; }

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal13950.55%112.50%
Daniel Borkmann10839.27%450.00%
Paolo Abeni145.09%112.50%
Stephen Hemminger134.73%112.50%
Ian Morris10.36%112.50%
Total275100.00%8100.00%


static struct rt6_info *ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg, const struct in6_addr *gw_addr) { struct flowi6 fl6 = { .flowi6_oif = cfg->fc_ifindex, .daddr = *gw_addr, .saddr = cfg->fc_prefsrc, }; struct fib6_table *table; struct rt6_info *rt; int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE; table = fib6_get_table(net, cfg->fc_table); if (!table) return NULL; if (!ipv6_addr_any(&cfg->fc_prefsrc)) flags |= RT6_LOOKUP_F_HAS_SADDR; rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); /* if table lookup failed, fall back to full lookup */ if (rt == net->ipv6.ip6_null_entry) { ip6_rt_put(rt); rt = NULL; } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern14399.31%266.67%
Paolo Abeni10.69%133.33%
Total144100.00%3100.00%


/* Build (but do not insert) a rt6_info from the netlink/ioctl route
 * description in @cfg. On success the route holds references on its
 * device and inet6_dev; on failure an ERR_PTR is returned and all
 * acquired references are dropped. Validation failures are reported via
 * @extack.
 */
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	/* RTF_CACHE is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_CACHE) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	/* Without NLM_F_CREATE, prefer an existing table but still fall
	 * back to creating one (with a warning) for compatibility.
	 */
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	/* Pick the input handler from the destination's address type. */
	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here,
	   they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}
		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly inhibits using not link-local
			   addresses as nexthop address.
			   Otherwise, router will not able to send redirects.
			   It is very good, but in some (rare!) circumstances
			   (SIT, PtP, NBMA NOARP links) it is handy to allow
			   some exceptions. --ANK
			   We allow IPv4-mapped nexthops to support RFC4798-type
			   addressing
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED))) {
				NL_SET_ERR_MSG(extack,
					       "Invalid gateway address");
				goto out;
			}

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				/* Inherit device and idev from the nexthop
				 * route, taking our own references.
				 */
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Egress device not specified");
			goto out;
		} else if (dev->flags & IFF_LOOPBACK) {
			NL_SET_ERR_MSG(extack,
				       "Egress device can not be loopback device for this route");
			goto out;
		}
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)41529.39%1317.81%
David Ahern1359.56%56.85%
Hideaki Yoshifuji / 吉藤英明1268.92%810.96%
Roopa Prabhu997.01%45.48%
Thomas Graf856.02%22.74%
Daniel Walter644.53%11.37%
Nicolas Dichtel604.25%45.48%
Matti Vaittinen604.25%11.37%
Kamala R533.75%11.37%
Tom Herbert453.19%22.74%
Vincent Bernat392.76%11.37%
Florian Westphal372.62%22.74%
Wei Wang292.05%34.11%
Daniel Lezcano281.98%45.48%
Maciej Żenczykowski261.84%11.37%
David S. Miller181.27%68.22%
Jiri Benc171.20%11.37%
Nikola Forró140.99%11.37%
Gao Feng140.99%11.37%
Mathew Richardson110.78%11.37%
Sabrina Dubroca100.71%11.37%
Patrick McHardy80.57%11.37%
Erik Nordmark50.35%11.37%
Benjamin Thery30.21%11.37%
Alexey Dobriyan30.21%11.37%
Eric W. Biedermann20.14%22.74%
Américo Wang20.14%11.37%
Joe Perches20.14%11.37%
Eric Dumazet10.07%11.37%
Jamal Hadi Salim10.07%11.37%
Total1412100.00%73100.00%


int ip6_route_add(struct fib6_config *cfg, struct netlink_ext_ack *extack) { struct mx6_config mxc = { .mx = NULL, }; struct rt6_info *rt; int err; rt = ip6_route_info_create(cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto out; } err = ip6_convert_metrics(&mxc, cfg); if (err) goto out; err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack); kfree(mxc.mx); return err; out: if (rt) dst_release_immediate(&rt->dst); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu5845.67%213.33%
Florian Westphal3124.41%16.67%
Linus Torvalds (pre-git)129.45%426.67%
David Ahern97.09%16.67%
Wei Wang86.30%213.33%
Thomas Graf53.94%213.33%
Patrick McHardy21.57%16.67%
Michal Kubeček10.79%16.67%
Mathew Richardson10.79%16.67%
Total127100.00%15100.00%


/* Remove @rt from its FIB table under the table lock.
 *
 * Always consumes the caller's reference on @rt. Returns -ENOENT when
 * asked to delete the null entry, otherwise the fib6_del() result.
 */
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
	int err;
	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	spin_unlock_bh(&table->tb6_lock);

out:
	ip6_rt_put(rt);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2929.29%423.53%
Thomas Graf1919.19%317.65%
Gao Feng1313.13%15.88%
Daniel Lezcano1212.12%15.88%
Patrick McHardy88.08%15.88%
Herbert Xu55.05%15.88%
Mathew Richardson33.03%15.88%
Hideaki Yoshifuji / 吉藤英明33.03%15.88%
David S. Miller33.03%15.88%
Wei Wang22.02%15.88%
Jamal Hadi Salim11.01%15.88%
Américo Wang11.01%15.88%
Total99100.00%17100.00%


/* Delete @rt with default netlink notification info (consumes the
 * caller's reference).
 */
int ip6_del_rt(struct rt6_info *rt)
{
	struct net *net = dev_net(rt->dst.dev);
	struct nl_info info = {
		.nl_net = net,
	};

	return __ip6_del_rt(rt, &info);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1745.95%116.67%
Denis V. Lunev1232.43%233.33%
David S. Miller38.11%116.67%
Hideaki Yoshifuji / 吉藤英明38.11%116.67%
Daniel Lezcano25.41%116.67%
Total37100.00%6100.00%


/* Delete @rt and, when fc_delete_all_nh is set, all of its ECMP siblings
 * in one pass, emitting a single netlink notification that covers every
 * hop. Consumes the caller's reference on @rt.
 */
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.ip6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				/* Suppress the per-route notifications;
				 * the batched skb is sent below instead.
				 */
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	spin_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern23287.55%250.00%
Américo Wang3111.70%125.00%
Wei Wang20.75%125.00%
Total265100.00%4100.00%


/* Delete the route(s) matching @cfg. With RTF_CACHE set the match is
 * performed against the exception (cached-clone) table instead of the
 * main tree. Returns -ESRCH when nothing matches.
 */
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct rt6_info *rt, *rt_cache;
	struct fib6_table *table;
	struct fib6_node *fn;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	rcu_read_lock();

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len,
			 !(cfg->fc_flags & RTF_CACHE));

	if (fn) {
		for_each_fib6_node_rt_rcu(fn) {
			if (cfg->fc_flags & RTF_CACHE) {
				rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
							      &cfg->fc_src);
				if (!rt_cache)
					continue;
				rt = rt_cache;
			}
			if (cfg->fc_ifindex &&
			    (!rt->dst.dev ||
			     rt->dst.dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			/* Take a reference before leaving the RCU section;
			 * a failed dst_hold_safe means the entry is being
			 * freed, so stop.
			 */
			if (!dst_hold_safe(&rt->dst))
				break;
			rcu_read_unlock();

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	rcu_read_unlock();

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)10536.08%828.57%
Wei Wang6020.62%414.29%
Thomas Graf5117.53%310.71%
David Ahern3110.65%310.71%
Mantas M155.15%13.57%
Martin KaFai Lau103.44%13.57%
David S. Miller82.75%27.14%
Daniel Lezcano62.06%27.14%
Hideaki Yoshifuji / 吉藤英明20.69%13.57%
Jamal Hadi Salim10.34%13.57%
Mathew Richardson10.34%13.57%
Stephen Rothwell10.34%13.57%
Total291100.00%28100.00%


/* Process a validated NDISC Redirect carried in @skb against route @dst:
 * verify the message per RFC 4861, update the neighbour cache with the
 * new nexthop, and install a cached clone pointing at the new gateway.
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* target == dest means "destination is on-link"; otherwise the
	 * target must be a link-local router address.
	 */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* Routers must not accept redirects; hosts may opt out. */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * takes care of it
	 */
	if (rt6_insert_exception(nrt, rt)) {
		dst_release_immediate(&nrt->dst);
		goto out;
	}

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

out:
	neigh_release(neigh);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller28856.25%520.00%
Hideaki Yoshifuji / 吉藤英明7614.84%520.00%
Linus Torvalds (pre-git)5911.52%416.00%
Tom Tucker265.08%14.00%
Wei Wang152.93%14.00%
Alexander Aring142.73%14.00%
Julian Anastasov91.76%14.00%
Xin Long61.17%14.00%
Simon Horman61.17%14.00%
Matthias Schiffer30.59%14.00%
Martin KaFai Lau30.59%14.00%
Daniel Lezcano30.59%14.00%
Eric Dumazet20.39%14.00%
Alexey Dobriyan20.39%14.00%
Total512100.00%25100.00%

/* * Misc support functions */
/* Link clone @rt to its parent @from: share @from's metrics (read-only)
 * and pin @from via dst.from. The clone must not expire on its own, so
 * RTF_EXPIRES is cleared.
 */
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	/* A route may only have one "from" parent. */
	BUG_ON(from->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau69100.00%1100.00%
Total69100.00%1100.00%


/* Initialise @rt as a copy of @ort: duplicate handlers, addressing and
 * flags, take references on the idev and lwtunnel state, and link the
 * copy to @ort via rt6_set_from().
 */
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
{
	rt->dst.input = ort->dst.input;
	rt->dst.output = ort->dst.output;
	rt->rt6i_dst = ort->rt6i_dst;
	rt->dst.error = ort->dst.error;
	rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
	rt->dst.lastuse = jiffies;
	rt->rt6i_gateway = ort->rt6i_gateway;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7143.29%526.32%
Hideaki Yoshifuji / 吉藤英明2112.80%15.26%
Ville Nuorvala127.32%15.26%
Nicolas Dichtel106.10%210.53%
Martin KaFai Lau106.10%15.26%
Gao Feng84.88%15.26%
Thomas Graf84.88%15.26%
Florian Westphal74.27%15.26%
Jiri Benc63.66%15.26%
Eric Dumazet53.05%15.26%
Alexey Dobriyan21.22%15.26%
Zheng Yan21.22%15.26%
David S. Miller10.61%15.26%
Benjamin Thery10.61%15.26%
Total164100.00%19100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an existing RA-learned (RTF_ROUTEINFO) gateway route for
 * @prefix/@prefixlen via @gwaddr on @dev. Returns a reference-held route
 * or NULL.
 */
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	/* Route-information routes live in the l3mdev table if the device
	 * is enslaved, otherwise in RT6_TABLE_INFO.
	 */
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
	if (!fn)
		goto out;

	for_each_fib6_node_rt_rcu(fn) {
		if (rt->dst.dev->ifindex != ifindex)
			continue;
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
			continue;
		ip6_hold_safe(NULL, &rt, false);
		break;
	}
out:
	rcu_read_unlock();
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明11761.90%19.09%
David Ahern2312.17%19.09%
Thomas Graf2111.11%19.09%
Wei Wang157.94%327.27%
Daniel Lezcano73.70%218.18%
David S. Miller42.12%218.18%
Eric Dumazet21.06%19.09%
Total189100.00%11100.00%


static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref) { struct fib6_config cfg = { .fc_metric = IP6_RT_PRIO_USER, .fc_ifindex = dev->ifindex, .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), .fc_protocol = RTPROT_RA, .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, }; cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, cfg.fc_dst = *prefix; cfg.fc_gateway = *gwaddr; /* We should treat it as a default route if prefix length is 0. */ if (!prefixlen) cfg.fc_flags |= RTF_DEFAULT; ip6_route_add(&cfg, NULL); return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7244.44%215.38%
Thomas Graf2817.28%17.69%
Daniel Lezcano2716.67%17.69%
David Ahern2112.96%323.08%
Xin Long53.09%17.69%
Alexey Dobriyan42.47%17.69%
Eric Dumazet31.85%215.38%
Eric W. Biedermann10.62%17.69%
Rami Rosen10.62%17.69%
Total162100.00%13100.00%

#endif
/* Find the autoconf default-router entry for gateway @addr on @dev.
 * Returns a reference-held route or NULL.
 */
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), tb_id);
	if (!table)
		return NULL;

	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (dev == rt->dst.dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
			break;
	}
	/* rt is NULL when the walk exhausted the root node's route list. */
	if (rt)
		ip6_hold_safe(NULL, &rt, false);
	rcu_read_unlock();
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6147.29%633.33%
Hideaki Yoshifuji / 吉藤英明2418.60%316.67%
Thomas Graf1310.08%15.56%
David Ahern129.30%15.56%
Wei Wang129.30%211.11%
David S. Miller43.10%211.11%
Daniel Lezcano21.55%211.11%
Eric Dumazet10.78%15.56%
Total129100.00%18100.00%


/* Install an autoconf default route via @gwaddr on @dev (router
 * preference @pref) and return the resulting entry, reference held.
 */
struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol	= RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg, NULL)) {
		struct fib6_table *table;

		/* Mark the table so rt6_purge_dflt_routers() can skip
		 * tables without default routers.
		 */
		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(gwaddr, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4629.68%423.53%
David Ahern4227.10%317.65%
Thomas Graf2314.84%15.88%
Daniel Lezcano2012.90%15.88%
Hideaki Yoshifuji / 吉藤英明149.03%317.65%
Xin Long53.23%15.88%
Alexey Dobriyan21.29%15.88%
Rami Rosen10.65%15.88%
Eric W. Biedermann10.65%15.88%
Eric Dumazet10.65%15.88%
Total155100.00%17100.00%


/* __rt6_purge_dflt_routers - delete every RA/default route from @table.
 *
 * Routes whose interface has accept_ra == 2 ("accept RAs even when
 * forwarding") are kept.  Deleting a route requires dropping the RCU read
 * lock, so the walk restarts from the top after each removal attempt.
 */
static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
	struct rt6_info *rt;

restart:
	rcu_read_lock();
	for_each_fib6_node_rt_rcu(&table->tb6_root) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
			/* Only call ip6_del_rt() when a reference could be taken. */
			if (dst_hold_safe(&rt->dst)) {
				rcu_read_unlock();
				ip6_del_rt(rt);
			} else {
				rcu_read_unlock();
			}
			goto restart;
		}
	}
	rcu_read_unlock();

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4041.24%440.00%
Wei Wang2121.65%220.00%
Lorenzo Colitti1717.53%110.00%
David Ahern1212.37%110.00%
Hideaki Yoshifuji / 吉藤英明55.15%110.00%
Daniel Lezcano22.06%110.00%
Total97100.00%10100.00%


/* rt6_purge_dflt_routers - purge RA/default routes from every FIB table in
 * @net that is flagged as holding one (RT6_TABLE_HAS_DFLT_ROUTER).
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();
	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(table);
		}
	}
	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern7998.75%150.00%
Linus Torvalds (pre-git)11.25%150.00%
Total80100.00%2100.00%


static void rtmsg_to_fib6_config(struct net *net, struct in6_rtmsg *rtmsg, struct fib6_config *cfg) { memset(cfg, 0, sizeof(*cfg)); cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? : RT6_TABLE_MAIN; cfg->fc_ifindex = rtmsg->rtmsg_ifindex; cfg->fc_metric = rtmsg->rtmsg_metric; cfg->fc_expires = rtmsg->rtmsg_info; cfg->fc_dst_len = rtmsg->rtmsg_dst_len; cfg->fc_src_len = rtmsg->rtmsg_src_len; cfg->fc_flags = rtmsg->rtmsg_flags; cfg->fc_nlinfo.nl_net = net; cfg->fc_dst = rtmsg->rtmsg_dst; cfg->fc_src = rtmsg->rtmsg_src; cfg->fc_gateway = rtmsg->rtmsg_gateway; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf10480.00%120.00%
David Ahern107.69%120.00%
Benjamin Thery75.38%120.00%
Daniel Lezcano64.62%120.00%
Alexey Dobriyan32.31%120.00%
Total130100.00%5100.00%


/* ipv6_route_ioctl - handle the legacy SIOCADDRT/SIOCDELRT route ioctls.
 *
 * Requires CAP_NET_ADMIN in the net's user namespace.  Returns 0 on
 * success, -EINVAL for unknown commands, -EPERM, -EFAULT, or the error
 * from ip6_route_add()/ip6_route_del().
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct in6_rtmsg rtmsg;
	struct fib6_config cfg;
	int err;

	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
		return -EINVAL;

	if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (copy_from_user(&rtmsg, arg, sizeof(struct in6_rtmsg)))
		return -EFAULT;

	rtmsg_to_fib6_config(net, &rtmsg, &cfg);

	rtnl_lock();
	if (cmd == SIOCADDRT)
		err = ip6_route_add(&cfg, NULL);
	else
		err = ip6_route_del(&cfg, NULL);
	rtnl_unlock();

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)11577.70%545.45%
Thomas Graf1510.14%19.09%
Daniel Lezcano74.73%19.09%
Eric W. Biedermann53.38%19.09%
David Ahern42.70%19.09%
David S. Miller10.68%19.09%
Al Viro10.68%19.09%
Total148100.00%11100.00%

/*
 *	Drop the packet on the floor
 */

/* ip6_pkt_drop - drop @skb and emit an ICMPv6 destination-unreachable with
 * @code, bumping the matching "no route" SNMP counter.
 *
 * @ipstats_mib_noroutes selects the input or output counter; an
 * unspecified destination on the input path counts as an address error
 * instead.  Always returns 0.
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst), ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明3529.41%215.38%
Linus Torvalds (pre-git)3126.05%430.77%
Denis V. Lunev2117.65%17.69%
Lv Liangying2016.81%17.69%
Thomas Graf43.36%17.69%
Arnaldo Carvalho de Melo43.36%215.38%
Eric Dumazet32.52%17.69%
Brian Haley10.84%17.69%
Total119100.00%13100.00%


/* dst.input handler for reject routes: drop with ICMPv6 "no route",
 * counted against the input statistics.
 */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


/* dst.output counterpart of ip6_pkt_discard (output-path signature). */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
James Morris1330.95%112.50%
Eric Dumazet819.05%225.00%
Dave Craig614.29%112.50%
Hideaki Yoshifuji / 吉藤英明511.90%112.50%
Eric W. Biedermann511.90%112.50%
Herbert Xu49.52%112.50%
Arnaldo Carvalho de Melo12.38%112.50%
Total42100.00%8100.00%


/* dst.input handler for prohibit routes: drop with ICMPv6
 * "administratively prohibited", counted against the input statistics.
 */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


/* dst.output counterpart of ip6_pkt_prohibit (output-path signature). */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2457.14%120.00%
Eric Dumazet819.05%240.00%
Eric W. Biedermann511.90%120.00%
Hideaki Yoshifuji / 吉藤英明511.90%120.00%
Total42100.00%5100.00%

/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

/* addrconf_dst_alloc - build the route for a local unicast or anycast
 * address on @idev (RTF_LOCAL or RTF_ANYCAST respectively).
 *
 * The dst is created with DST_NOCOUNT and holds a reference on @idev.
 * Returns the new route or ERR_PTR(-ENOMEM).
 */
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, const struct in6_addr *addr, bool anycast)
{
	u32 tb_id;
	struct net *net = dev_net(idev->dev);
	struct net_device *dev = idev->dev;
	struct rt6_info *rt;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;

	rt->rt6i_gateway = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	/* local routes live in the (possibly l3mdev-specific) local table */
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7137.57%414.81%
David Ahern3619.05%414.81%
Hideaki Yoshifuji / 吉藤英明3216.93%518.52%
Alexey Kuznetsov126.35%13.70%
Daniel Lezcano115.82%27.41%
Thomas Graf84.23%13.70%
Julian Anastasov73.70%13.70%
David S. Miller63.17%622.22%
Benjamin Thery31.59%13.70%
Alexey Dobriyan21.06%13.70%
Hannes Frederic Sowa10.53%13.70%
Total189100.00%27100.00%

/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* device the address lived on; NULL matches any */
	struct net *net;
	struct in6_addr *addr;	/* the address being removed */
};
static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg) { struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev; struct net *net = ((struct arg_dev_net_ip *)arg)->net; struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr; if (((void *)rt->dst.dev == dev || !dev) && rt != net->ipv6.ip6_null_entry && ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) { spin_lock_bh(&rt6_exception_lock); /* remove prefsrc entry */ rt->rt6i_prefsrc.plen = 0; /* need to update cache as well */ rt6_exceptions_remove_prefsrc(rt); spin_unlock_bh(&rt6_exception_lock); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter11283.58%125.00%
Wei Wang1813.43%125.00%
David S. Miller42.99%250.00%
Total134100.00%4100.00%


/* rt6_remove_prefsrc - scrub the address @ifp from all prefsrc entries in
 * its network namespace (walker callback: fib6_remove_prefsrc).
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni;

	adni.dev = ifp->idev->dev;
	adni.net = net;
	adni.addr = &ifp->addr;

	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter62100.00%1100.00%
Total62100.00%1100.00%

#define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY) /* Remove routers and update dst entries when gateway turn into host. */
/* fib6 walker callback: request deletion (-1) of RA-learned router routes
 * whose gateway matches @arg, and clean matching cached exceptions.
 */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
{
	struct in6_addr *gateway = arg;

	if ((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER &&
	    ipv6_addr_equal(gateway, &rt->rt6i_gateway))
		return -1;

	/* Also sweep the exception table: cached clones may carry a
	 * different gateway than their parent after an ICMP redirect.
	 */
	rt6_exceptions_clean_tohost(rt, gateway);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong6088.24%150.00%
Wei Wang811.76%150.00%
Total68100.00%2100.00%


/* rt6_clean_tohost - remove RA-learned router routes via @gateway across
 * all FIB tables in @net (walker callback: fib6_clean_tohost).
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong24100.00%1100.00%
Total24100.00%1100.00%

struct arg_dev_net {
	struct net_device *dev;	/* device going down, or NULL for "any" */
	struct net *net;
};

/* called with write lock held for table with rt */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	/* Request removal (-1) of routes using the dying device (or any
	 * device when dev == NULL), except the null entry.  A multipath
	 * sibling survives when the device is merely down rather than
	 * unregistering and ignore_routes_with_linkdown is set.
	 */
	if ((rt->dst.dev == dev || !dev) &&
	    rt != adn->net->ipv6.ip6_null_entry &&
	    (rt->rt6i_nsiblings == 0 ||
	     (dev && netdev_unregistering(dev)) ||
	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
		return -1;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3538.46%114.29%
David Ahern2628.57%228.57%
Daniel Lezcano1617.58%114.29%
Stephen Hemminger1010.99%114.29%
David S. Miller44.40%228.57%
Total91100.00%7100.00%


/* rt6_ifdown - drop routes referencing @dev from @net's tables (walker
 * callback: fib6_ifdown) and flush its uncached route list.
 */
void rt6_ifdown(struct net *net, struct net_device *dev)
{
	struct arg_dev_net adn;

	adn.dev = dev;
	adn.net = net;

	fib6_clean_all(net, fib6_ifdown, &adn);
	if (dev)
		rt6_uncached_list_flush_dev(net, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano2548.08%228.57%
Linus Torvalds (pre-git)1528.85%228.57%
Martin KaFai Lau713.46%114.29%
Eric W. Biedermann47.69%114.29%
Thomas Graf11.92%114.29%
Total52100.00%7100.00%

struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* new device MTU */
};

/* fib6 walker callback: refresh the route's RTAX_MTU metric after a
 * device MTU change (see rt6_mtu_change).  Always returns 0.
 */
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6 pmtu discovery is not optional,
	   so that RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/
	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;

	/* For administrative MTU increase, there is no way to discover
	   IPv6 PMTU increase, so PMTU increase should be updated here.
	   Since RFC 1981 doesn't include administrative MTU increase
	   update PMTU increase is a MUST. (i.e. jumbo frame)
	 */
	/* If new MTU is less than route PMTU, this new MTU will be the
	   lowest MTU in the path, update the route PMTU to reflect PMTU
	   decreases; if new MTU is greater than route PMTU, and the
	   old MTU is the lowest MTU in the path, update the route PMTU
	   to reflect the increase. In this case if the other nodes' MTU
	   also have the lowest MTU, TOO BIG MESSAGE will be lead to
	   PMTU discovery.
	 */
	if (rt->dst.dev == arg->dev &&
	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
		spin_lock_bh(&rt6_exception_lock);
		if (dst_mtu(&rt->dst) >= arg->mtu ||
		    (dst_mtu(&rt->dst) < arg->mtu &&
		     dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
		}
		/* Cached exception routes keep their own PMTU; update them too. */
		rt6_exceptions_update_pmtu(rt, arg->mtu);
		spin_unlock_bh(&rt6_exception_lock);
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5130.18%526.32%
Shirley Ma4627.22%15.26%
Wei Wang2112.43%15.26%
Alexey Kuznetsov127.10%210.53%
Maciej Żenczykowski105.92%15.26%
David S. Miller105.92%315.79%
Linus Torvalds74.14%15.26%
Martin KaFai Lau52.96%15.26%
Herbert Xu31.78%15.26%
Simon Arlott21.18%15.26%
Jim Paris10.59%15.26%
Alexander Alemayhu10.59%15.26%
Total169100.00%19100.00%


/* rt6_mtu_change - propagate an administrative MTU change on @dev into the
 * IPv6 routing tables (walker callback: rt6_mtu_change_route).
 */
void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg;

	arg.dev = dev;
	arg.mtu = mtu;

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3172.09%555.56%
Thomas Graf613.95%111.11%
Hideaki Yoshifuji / 吉藤英明36.98%111.11%
Daniel Lezcano24.65%111.11%
Eric Dumazet12.33%111.11%
Total43100.00%9100.00%

/* Netlink attribute validation policy for RTM_{NEW,DEL,GET}ROUTE. */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
/* rtm_to_fib6_config - translate an RTM_{NEW,DEL}ROUTE netlink message into
 * a fib6_config.
 *
 * Parses and validates the rtmsg header plus the RTA_* attributes
 * (destination/source prefixes, gateway, prefsrc, oif, priority, metrics,
 * table, multipath, preference, lwtunnel encap, expiry).  fc_mx/fc_mp point
 * INTO the parsed attribute data of @nlh, so @skb must outlive @cfg.
 * Returns 0 on success or a negative errno; validation details go to @extack.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh, struct fib6_config *cfg, struct netlink_ext_ack *extack) { struct rtmsg *rtm; struct nlattr *tb[RTA_MAX+1]; unsigned int pref; int err; err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy, NULL); if (err < 0) goto errout; err = -EINVAL; rtm = nlmsg_data(nlh); memset(cfg, 0, sizeof(*cfg)); cfg->fc_table = rtm->rtm_table; cfg->fc_dst_len = rtm->rtm_dst_len; cfg->fc_src_len = rtm->rtm_src_len; cfg->fc_flags = RTF_UP; cfg->fc_protocol = rtm->rtm_protocol; cfg->fc_type = rtm->rtm_type; if (rtm->rtm_type == RTN_UNREACHABLE || rtm->rtm_type == RTN_BLACKHOLE || rtm->rtm_type == RTN_PROHIBIT || rtm->rtm_type == RTN_THROW) cfg->fc_flags |= RTF_REJECT; if (rtm->rtm_type == RTN_LOCAL) cfg->fc_flags |= RTF_LOCAL; if (rtm->rtm_flags & RTM_F_CLONED) cfg->fc_flags |= RTF_CACHE; cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid; cfg->fc_nlinfo.nlh = nlh; cfg->fc_nlinfo.nl_net = sock_net(skb->sk); if (tb[RTA_GATEWAY]) { cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]); cfg->fc_flags |= RTF_GATEWAY; } if (tb[RTA_DST]) { int plen = (rtm->rtm_dst_len + 7) >> 3; if (nla_len(tb[RTA_DST]) < plen) goto errout; nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen); } if (tb[RTA_SRC]) { int plen = (rtm->rtm_src_len + 7) >> 3; if (nla_len(tb[RTA_SRC]) < plen) goto errout; nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen); } if (tb[RTA_PREFSRC]) cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]); if (tb[RTA_OIF]) cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]); if (tb[RTA_PRIORITY]) cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]); if (tb[RTA_METRICS]) { cfg->fc_mx = nla_data(tb[RTA_METRICS]); cfg->fc_mx_len = nla_len(tb[RTA_METRICS]); } if (tb[RTA_TABLE]) cfg->fc_table = nla_get_u32(tb[RTA_TABLE]); if (tb[RTA_MULTIPATH]) { cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]); cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]); err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len, extack); if (err < 0) goto errout; } if (tb[RTA_PREF]) { 
pref = nla_get_u8(tb[RTA_PREF]); if (pref != ICMPV6_ROUTER_PREF_LOW && pref != ICMPV6_ROUTER_PREF_HIGH) pref = ICMPV6_ROUTER_PREF_MEDIUM; cfg->fc_flags |= RTF_PREF(pref); } if (tb[RTA_ENCAP]) cfg->fc_encap = tb[RTA_ENCAP]; if (tb[RTA_ENCAP_TYPE]) { cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]); err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack); if (err < 0) goto errout; } if (tb[RTA_EXPIRES]) { unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ); if (addrconf_finite_timeout(timeout)) { cfg->fc_expires = jiffies_to_clock_t(timeout * HZ); cfg->fc_flags |= RTF_EXPIRES; } } err = 0; errout: return err; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf23433.43%14.76%
Linus Torvalds (pre-git)15622.29%314.29%
Nicolas Dichtel598.43%314.29%
Xin Long527.43%14.76%
David Ahern517.29%314.29%
Lubomir Rintel466.57%14.76%
Roopa Prabhu355.00%14.76%
Daniel Walter162.29%14.76%
Maciej Żenczykowski142.00%14.76%
Martin KaFai Lau142.00%14.76%
Benjamin Thery101.43%14.76%
Jiri Benc60.86%14.76%
Hideaki Yoshifuji / 吉藤英明30.43%14.76%
Johannes Berg20.29%14.76%
Eric W. Biedermann20.29%14.76%
Total700100.00%21100.00%

/* Per-nexthop bookkeeping while building a multipath route. */
struct rt6_nh {
	struct rt6_info *rt6_info;
	struct fib6_config r_cfg;
	struct mx6_config mxc;
	struct list_head next;
};

/* Log every nexthop of a failed multipath replace so the admin can audit
 * what actually got installed.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu3466.67%133.33%
Nicolas Dichtel1631.37%133.33%
David Ahern11.96%133.33%
Total51100.00%3100.00%


/* ip6_route_info_append - queue @rt on @rt6_nh_list for a later batched
 * insert, rejecting duplicate nexthops.
 *
 * Returns 0 on success, -EEXIST if an equivalent nexthop is already
 * queued, -ENOMEM on allocation failure, or the error from
 * ip6_convert_metrics().  On success the rt6_nh takes over @rt.
 */
static int ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err = -EEXIST;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
			return err;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(&nh->mxc, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu13797.86%150.00%
David Ahern32.14%150.00%
Total140100.00%2100.00%


/* Send the RTM_NEWROUTE notification for a completed multipath operation. */
static void ip6_route_mpath_notify(struct rt6_info *rt, struct rt6_info *rt_last, struct nl_info *info, __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
		rt = list_first_entry(&rt_last->rt6i_siblings, struct rt6_info, rt6i_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern71100.00%1100.00%
Total71100.00%1100.00%


/* ip6_route_multipath_add - parse RTA_MULTIPATH from @cfg and install each
 * nexthop as an individual route, then emit one notification for the whole
 * route.
 *
 * Phase 1 builds rt6_nh_list from the rtnexthop entries; phase 2 inserts
 * them with notifications suppressed (info->skip_notify).  On a partial
 * failure the nexthops already installed are deleted again (add_errout);
 * the cleanup label releases anything still queued.  Returns 0 or a
 * negative errno.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg, struct netlink_ext_ack *extack) { struct rt6_info *rt_notif = NULL, *rt_last = NULL; struct nl_info *info = &cfg->fc_nlinfo; struct fib6_config r_cfg; struct rtnexthop *rtnh; struct rt6_info *rt; struct rt6_nh *err_nh; struct rt6_nh *nh, *nh_safe; __u16 nlflags; int remaining; int attrlen; int err = 1; int nhn = 0; int replace = (cfg->fc_nlinfo.nlh && (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE)); LIST_HEAD(rt6_nh_list); nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE; if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND) nlflags |= NLM_F_APPEND; remaining = cfg->fc_mp_len; rtnh = (struct rtnexthop *)cfg->fc_mp; /* Parse a Multipath Entry and build a list (rt6_nh_list) of * rt6_info structs per nexthop */ while (rtnh_ok(rtnh, remaining)) { memcpy(&r_cfg, cfg, sizeof(*cfg)); if (rtnh->rtnh_ifindex) r_cfg.fc_ifindex = rtnh->rtnh_ifindex; attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *attrs = rtnh_attrs(rtnh); nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla) { r_cfg.fc_gateway = nla_get_in6_addr(nla); r_cfg.fc_flags |= RTF_GATEWAY; } r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP); nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE); if (nla) r_cfg.fc_encap_type = nla_get_u16(nla); } rt = ip6_route_info_create(&r_cfg, extack); if (IS_ERR(rt)) { err = PTR_ERR(rt); rt = NULL; goto cleanup; } err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg); if (err) { dst_release_immediate(&rt->dst); goto cleanup; } rtnh = rtnh_next(rtnh, &remaining); } /* for add and replace send one notification with all nexthops. 
* Skip the notification in fib6_add_rt2node and send one with * the full route when done */ info->skip_notify = 1; err_nh = NULL; list_for_each_entry(nh, &rt6_nh_list, next) { rt_last = nh->rt6_info; err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack); /* save reference to first route for notification */ if (!rt_notif && !err) rt_notif = nh->rt6_info; /* nh->rt6_info is used or freed at this point, reset to NULL*/ nh->rt6_info = NULL; if (err) { if (replace && nhn) ip6_print_replace_route_err(&rt6_nh_list); err_nh = nh; goto add_errout; } /* Because each route is added like a single route we remove * these flags after the first nexthop: if there is a collision, * we have already failed to add the first nexthop: * fib6_add_rt2node() has rejected it; when replacing, old * nexthops have been replaced by first new, the rest should * be added to it. */ cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL | NLM_F_REPLACE); nhn++; } /* success ... tell user about new route */ ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); goto cleanup; add_errout: /* send notification for routes that were added so that * the delete notifications sent by ip6_route_del are * coherent */ if (rt_notif) ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags); /* Delete routes that were already added */ list_for_each_entry(nh, &rt6_nh_list, next) { if (err_nh == nh) break; ip6_route_del(&nh->r_cfg, extack); } cleanup: list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) { if (nh->rt6_info) dst_release_immediate(&nh->rt6_info->dst); kfree(nh->mxc.mx); list_del(&nh->next); kfree(nh); } return err; }

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu45276.35%228.57%
David Ahern11920.10%228.57%
Nicolas Dichtel183.04%114.29%
Wei Wang20.34%114.29%
Jiri Benc10.17%114.29%
Total592100.00%7100.00%


/* ip6_route_multipath_del - delete each nexthop encoded in RTA_MULTIPATH.
 *
 * Every nexthop is attempted even when an earlier delete fails; the last
 * error (if any) is returned, 0 otherwise.
 */
static int ip6_route_multipath_del(struct fib6_config *cfg, struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu15279.58%250.00%
Nicolas Dichtel3216.75%125.00%
David Ahern73.66%125.00%
Total191100.00%4100.00%


/* RTM_DELROUTE handler: translate the netlink request and delete either a
 * multipath route (per nexthop) or a single route (all nexthops).
 */
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_del(&cfg, extack);

	cfg.fc_delete_all_nh = 1;
	return ip6_route_del(&cfg, extack);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3136.90%220.00%
Thomas Graf1922.62%220.00%
David Ahern1922.62%330.00%
Nicolas Dichtel1315.48%110.00%
Roopa Prabhu11.19%110.00%
Patrick McHardy11.19%110.00%
Total84100.00%10100.00%


/* RTM_NEWROUTE handler: translate the netlink request and add either a
 * multipath route or a single route.
 */
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_add(&cfg, extack);

	return ip6_route_add(&cfg, extack);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3140.79%330.00%
Thomas Graf1925.00%220.00%
Nicolas Dichtel1317.11%110.00%
David Ahern1114.47%220.00%
Roopa Prabhu11.32%110.00%
Patrick McHardy11.32%110.00%
Total76100.00%10100.00%


/* Upper bound on the netlink message size rt6_fill_node() needs for @rt,
 * including an RTA_MULTIPATH attribute covering all of its siblings.
 */
static size_t rt6_nlmsg_size(struct rt6_info *rt)
{
	int nexthop_len = 0;

	if (rt->rt6i_nsiblings) {
		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
			    + NLA_ALIGN(sizeof(struct rtnexthop))
			    + nla_total_size(16) /* RTA_GATEWAY */
			    + lwtunnel_get_encap_size(rt->dst.lwtstate);

		nexthop_len *= rt->rt6i_nsiblings;
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate)
	       + nexthop_len;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf7245.00%114.29%
David Ahern5534.38%114.29%
Roopa Prabhu106.25%114.29%
Noriaki Takamiya85.00%114.29%
Daniel Borkmann63.75%114.29%
Lubomir Rintel63.75%114.29%
Jiri Benc31.88%114.29%
Total160100.00%7100.00%


static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, unsigned int *flags, bool skip_oif) { if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { *flags |= RTNH_F_LINKDOWN; if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) *flags |= RTNH_F_DEAD; } if (rt->rt6i_flags & RTF_GATEWAY) { if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) goto nla_put_failure; } if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD) *flags |= RTNH_F_OFFLOAD; /* not needed for multipath encoding b/c it has a rtnexthop struct */ if (!skip_oif && rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) goto nla_put_failure; if (rt->dst.lwtstate && lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern16092.49%250.00%
Ido Schimmel137.51%250.00%
Total173100.00%4100.00%

/* add multipath next hop */

/* Append one rtnexthop entry (header + nested attributes) for @rt to the
 * RTA_MULTIPATH attribute being built in @skb.
 * Returns 0 or -EMSGSIZE.
 */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	rtnh->rtnh_hops = 0;
	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;

	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern12298.39%250.00%
Thomas Graf10.81%125.00%
Roopa Prabhu10.81%125.00%
Total124100.00%4100.00%


/* rt6_fill_node - serialize @rt into a netlink RTM_* message on @skb.
 *
 * Fills the rtmsg header and all route attributes: table, type (derived
 * from RTF_REJECT's dst.error, RTF_LOCAL, RTF_ANYCAST or the device flags),
 * dst/src prefixes (or the exact @dst/@src when a specific lookup is being
 * answered), iif/oif, prefsrc, metrics (with rt6i_pmtu overriding RTAX_MTU),
 * priority, cacheinfo and preference.  Multipath siblings are encoded as an
 * RTA_MULTIPATH attribute via rt6_add_nexthop(); multicast destinations on
 * the input path are delegated to ip6mr_get_route().
 * Returns 0 on success or -EMSGSIZE, cancelling the partial message.
 */
static int rt6_fill_node(struct net *net, struct sk_buff *skb, struct rt6_info *rt, struct in6_addr *dst, struct in6_addr *src, int iif, int type, u32 portid, u32 seq, unsigned int flags) { u32 metrics[RTAX_MAX]; struct rtmsg *rtm; struct nlmsghdr *nlh; long expires; u32 table; nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags); if (!nlh) return -EMSGSIZE; rtm = nlmsg_data(nlh); rtm->rtm_family = AF_INET6; rtm->rtm_dst_len = rt->rt6i_dst.plen; rtm->rtm_src_len = rt->rt6i_src.plen; rtm->rtm_tos = 0; if (rt->rt6i_table) table = rt->rt6i_table->tb6_id; else table = RT6_TABLE_UNSPEC; rtm->rtm_table = table; if (nla_put_u32(skb, RTA_TABLE, table)) goto nla_put_failure; if (rt->rt6i_flags & RTF_REJECT) { switch (rt->dst.error) { case -EINVAL: rtm->rtm_type = RTN_BLACKHOLE; break; case -EACCES: rtm->rtm_type = RTN_PROHIBIT; break; case -EAGAIN: rtm->rtm_type = RTN_THROW; break; default: rtm->rtm_type = RTN_UNREACHABLE; break; } } else if (rt->rt6i_flags & RTF_LOCAL) rtm->rtm_type = RTN_LOCAL; else if (rt->rt6i_flags & RTF_ANYCAST) rtm->rtm_type = RTN_ANYCAST; else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK)) rtm->rtm_type = RTN_LOCAL; else rtm->rtm_type = RTN_UNICAST; rtm->rtm_flags = 0; rtm->rtm_scope = RT_SCOPE_UNIVERSE; rtm->rtm_protocol = rt->rt6i_protocol; if (rt->rt6i_flags & RTF_CACHE) rtm->rtm_flags |= RTM_F_CLONED; if (dst) { if (nla_put_in6_addr(skb, RTA_DST, dst)) goto nla_put_failure; rtm->rtm_dst_len = 128; } else if (rtm->rtm_dst_len) if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr)) goto nla_put_failure; #ifdef CONFIG_IPV6_SUBTREES if (src) { if (nla_put_in6_addr(skb, RTA_SRC, src)) goto nla_put_failure; rtm->rtm_src_len = 128; } else if (rtm->rtm_src_len && nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr)) goto nla_put_failure; #endif if (iif) { #ifdef CONFIG_IPV6_MROUTE if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) { int err = ip6mr_get_route(net, skb, rtm, portid); if (err == 0) return 0; if (err < 0) goto nla_put_failure; 
} else #endif if (nla_put_u32(skb, RTA_IIF, iif)) goto nla_put_failure; } else if (dst) { struct in6_addr saddr_buf; if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 && nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) goto nla_put_failure; } if (rt->rt6i_prefsrc.plen) { struct in6_addr saddr_buf; saddr_buf = rt->rt6i_prefsrc.addr; if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf)) goto nla_put_failure; } memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics)); if (rt->rt6i_pmtu) metrics[RTAX_MTU - 1] = rt->rt6i_pmtu; if (rtnetlink_put_metrics(skb, metrics) < 0) goto nla_put_failure; if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric)) goto nla_put_failure; /* For multipath routes, walk the siblings list and add * each as a nexthop within RTA_MULTIPATH. */ if (rt->rt6i_nsiblings) { struct rt6_info *sibling, *next_sibling; struct nlattr *mp; mp = nla_nest_start(skb, RTA_MULTIPATH); if (!mp) goto nla_put_failure; if (rt6_add_nexthop(skb, rt) < 0) goto nla_put_failure; list_for_each_entry_safe(sibling, next_sibling, &rt->rt6i_siblings, rt6i_siblings) { if (rt6_add_nexthop(skb, sibling) < 0) goto nla_put_failure; } nla_nest_end(skb, mp); } else { if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0) goto nla_put_failure; } expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0; if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0) goto nla_put_failure; if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags))) goto nla_put_failure; nlmsg_end(skb, nlh); return 0; nla_put_failure: nlmsg_cancel(skb, nlh); return -EMSGSIZE; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)34139.74%715.91%
David Ahern10211.89%49.09%
David S. Miller738.51%49.09%
Hideaki Yoshifuji / 吉藤英明627.23%511.36%
Thomas Graf546.29%36.82%
Nicolas Dichtel475.48%24.55%
Martin KaFai Lau394.55%12.27%
Daniel Walter313.61%12.27%
Patrick McHardy222.56%24.55%
Lubomir Rintel192.21%12.27%
Maciej Żenczykowski151.75%12.27%
Brian Haley131.52%24.55%
Eric Dumazet111.28%24.55%
Jiri Benc60.70%12.27%
Li Wei50.58%12.27%
Jamal Hadi Salim50.58%12.27%
Mathew Richardson40.47%12.27%
Johannes Berg30.35%12.27%
Eric W. Biedermann20.23%12.27%
Benjamin Thery20.23%12.27%
Nikolay Aleksandrov10.12%12.27%
Alexey Dobriyan10.12%12.27%
Total858100.00%44100.00%


/* rt6_dump_route - fib6 walker callback for the RTM_GETROUTE dump path.
 *
 * Skips the null entry, and skips non-prefix routes when userspace asked
 * for prefix routes only (RTM_F_PREFIX; returning 1 means "handled").
 */
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct net *net = arg->net;

	if (rt == net->ipv6.ip6_null_entry)
		return 0;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);

		/* user wants prefix routes only */
		if (rtm->rtm_flags & RTM_F_PREFIX &&
		    !(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	return rt6_fill_node(net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
			     NETLINK_CB(arg->cb->skb).portid,
			     arg->cb->nlh->nlmsg_seq, NLM_F_MULTI);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6041.10%325.00%
David Ahern4027.40%216.67%
Ville Nuorvala2114.38%18.33%
Krishna Kumar149.59%18.33%
Thomas Graf42.74%18.33%
Brian Haley42.74%18.33%
Eric W. Biedermann10.68%18.33%
Mathew Richardson10.68%18.33%
Jamal Hadi Salim10.68%18.33%
Total146100.00%12100.00%


/* RTM_GETROUTE doit handler: resolve the route described by the netlink
 * request and unicast one RTM_NEWROUTE answer back to the requester.
 *
 * When RTM_F_FIB_MATCH is set, the FIB entry the lookup result was derived
 * from (rt->dst.from, if present) is reported instead of the dst itself.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	/* Optional flow selectors; addresses must be full 128-bit in6_addrs. */
	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (iif) {
		/* Input-path lookup: resolve as if received on @iif.  The
		 * device lookup must stay under rcu_read_lock().
		 */
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		dst = ip6_route_input_lookup(net, dev, &fl6, flags);

		rcu_read_unlock();
	} else {
		/* Output-path lookup. */
		fl6.flowi6_oif = oif;

		dst = ip6_route_output(net, NULL, &fl6);
	}

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (fibmatch && rt->dst.from) {
		/* Swap the cached/cloned result for the FIB entry it was
		 * derived from, transferring the reference.
		 */
		struct rt6_info *ort = container_of(rt->dst.from,
						    struct rt6_info, dst);

		dst_hold(&ort->dst);
		ip6_rt_put(rt);
		rt = ort;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* skb_dst_set() takes over the rt reference; kfree_skb() on the
	 * error path below releases it.
	 */
	skb_dst_set(skb, &rt->dst);
	if (fibmatch)
		err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)16022.44%615.79%
Thomas Graf13018.23%37.89%
Roopa Prabhu10915.29%12.63%
Shmulik Ladkani648.98%25.26%
Lorenzo Colitti567.85%25.26%
Ido Schimmel486.73%12.63%
Américo Wang273.79%25.26%
David S. Miller243.37%410.53%
Hannes Frederic Sowa202.81%12.63%
James Morris172.38%12.63%
Alexey Dobriyan141.96%12.63%
Denis V. Lunev101.40%25.26%
Florian Westphal101.40%12.63%
David Ahern60.84%12.63%
Eric Dumazet40.56%12.63%
Hideaki Yoshifuji / 吉藤英明30.42%12.63%
Eric W. Biedermann30.42%25.26%
Brian Haley20.28%12.63%
Daniel Lezcano20.28%12.63%
Krishna Kumar10.14%12.63%
Mathew Richardson10.14%12.63%
Arnd Bergmann10.14%12.63%
Johannes Berg10.14%12.63%
Total713100.00%38100.00%


/* Multicast an @event notification (a route message type) for @rt to
 * RTNLGRP_IPV6_ROUTE listeners.  @info carries the originating request's
 * netlink header/portid (if any) so seq/portid are echoed back.  On
 * failure, listeners are informed via rtnl_set_sk_err().
 */
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5833.72%419.05%
Thomas Graf4425.58%314.29%
Patrick McHardy2112.21%29.52%
Denis V. Lunev169.30%29.52%
Daniel Lezcano116.40%14.76%
Roopa Prabhu84.65%29.52%
Jamal Hadi Salim42.33%14.76%
Mathew Richardson31.74%14.76%
Brian Haley21.16%14.76%
Eric W. Biedermann21.16%14.76%
Krishna Kumar10.58%14.76%
David S. Miller10.58%14.76%
Pablo Neira Ayuso10.58%14.76%
Total172100.00%21100.00%


/* Netdevice notifier: keep the per-netns template routes' device and
 * inet6_dev references tied to the netns loopback device's lifetime.
 * On NETDEV_REGISTER of a loopback, anchor ip6_null_entry (and, with
 * CONFIG_IPV6_MULTIPLE_TABLES, the prohibit/blackhole entries) on it;
 * on NETDEV_UNREGISTER drop those idev references exactly once.
 */
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	/* Only the loopback device anchors the template routes. */
	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired for multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano12662.69%116.67%
Américo Wang6130.35%233.33%
Eric Dumazet62.99%116.67%
Jiri Pirko52.49%116.67%
Hideaki Yoshifuji / 吉藤英明31.49%116.67%
Total201100.00%6100.00%

/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS

/* File operations for /proc/net/ipv6_route (netns-aware seq_file). */
static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};
/* seq_file show handler for /proc/net/rt6_stats: one line of seven
 * %04x-formatted per-netns FIB statistics (fib nodes, route nodes,
 * rt allocations, rt entries, cached entries, live dst entries,
 * discarded routes).
 */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3332.67%327.27%
Daniel Lezcano3231.68%218.18%
Benjamin Thery1615.84%218.18%
Randy Dunlap1514.85%218.18%
Wei Wang43.96%19.09%
Eric Dumazet10.99%19.09%
Total101100.00%11100.00%


/* open() for /proc/net/rt6_stats: single-record, netns-aware seq file. */
static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

Contributors

PersonTokensPropCommitsCommitProp
Randy Dunlap1973.08%120.00%
Pavel Emelyanov415.38%240.00%
Daniel Lezcano27.69%120.00%
Linus Torvalds (pre-git)13.85%120.00%
Total26100.00%5100.00%

/* File operations for /proc/net/rt6_stats. */
static const struct file_operations rt6_stats_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = rt6_stats_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = single_release_net,
};
#endif	/* CONFIG_PROC_FS */

#ifdef CONFIG_SYSCTL
/* Handler for the write-only net.ipv6.route.flush sysctl: force an
 * immediate fib6 garbage-collection run for the owning netns.  Reads are
 * rejected with -EINVAL (the table entry is mode 0200).
 *
 * @delay comes from the netns flush_delay storage: <= 0 flushes
 * everything now, > 0 expires entries older than @delay.
 * NOTE(review): @delay is sampled before proc_dointvec() stores the
 * newly written value, so this run uses the previous setting — confirm
 * this ordering is intended (it matches mainline).
 */
static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
				     void __user *buffer, size_t *lenp,
				     loff_t *ppos)
{
	struct net *net;
	int delay;
	int ret;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	/* Fix: propagate parse/copy errors from proc_dointvec() instead of
	 * discarding them and unconditionally reporting success.
	 */
	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
	if (ret)
		return ret;

	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4342.16%111.11%
Lucian Adrian Grijincu2423.53%111.11%
Daniel Lezcano1817.65%222.22%
Linus Torvalds65.88%111.11%
Michal Kubeček54.90%111.11%
Hideaki Yoshifuji / 吉藤英明43.92%111.11%
Joe Perches10.98%111.11%
Al Viro10.98%111.11%
Total102100.00%9100.00%

/* Template for the per-netns net.ipv6.route.* sysctl table.  Cloned by
 * ipv6_route_sysctl_init() below, which repoints each .data at the
 * netns-local storage; the entry order here must match the numeric
 * indices used there.
 */
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	=	"flush",
		.data		=	&init_net.ipv6.sysctl.flush_delay,
		.maxlen		=	sizeof(int),
		.mode		=	0200,	/* write-only GC trigger */
		.proc_handler	=	ipv6_sysctl_rtcache_flush
	},
	{
		.procname	=	"gc_thresh",
		.data		=	&ip6_dst_ops_template.gc_thresh,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"max_size",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_timeout",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_interval",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"gc_elasticity",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"mtu_expires",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_jiffies,
	},
	{
		.procname	=	"min_adv_mss",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec,
	},
	{
		.procname	=	"gc_min_interval_ms",
		.data		=	&init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		=	sizeof(int),
		.mode		=	0644,
		.proc_handler	=	proc_dointvec_ms_jiffies,
	},
	{ }	/* sentinel */
};
/* Duplicate ipv6_route_table_template for a new netns and repoint each
 * entry's .data at that netns's storage.  Index positions must stay in
 * sync with the template's entry order.  Returns NULL on allocation
 * failure.
 */
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;	/* handler needs its netns */
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明13458.52%111.11%
Daniel Lezcano4519.65%222.22%
Eric W. Biedermann198.30%111.11%
Alexey Dobriyan187.86%333.33%
Lucian Adrian Grijincu93.93%111.11%
Benjamin Thery41.75%111.11%
Total229100.00%9100.00%

#endif
/* Per-netns init: copy the dst_ops template, clone the null (and, with
 * multiple tables, prohibit/blackhole) template routes — each pointing
 * dst.path at itself — and seed the routing sysctl defaults.  Failure
 * paths unwind in reverse via the goto chain at the bottom.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* GC / PMTU tuning defaults; adjustable via the sysctls above. */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano23447.08%320.00%
Peter Zijlstra12324.75%213.33%
David S. Miller489.66%213.33%
Benjamin Thery448.85%213.33%
Eric Dumazet306.04%16.67%
Vincent Bernat81.61%16.67%
Alexey Dobriyan81.61%213.33%
Denis V. Lunev10.20%16.67%
Pavel Emelyanov10.20%16.67%
Total497100.00%15100.00%


/* Per-netns teardown: release the template routes allocated by
 * ip6_route_net_init() and the netns dst entry counter.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_blk_hole_entry);
	kfree(net->ipv6.ip6_prohibit_entry);
#endif
	kfree(net->ipv6.ip6_null_entry);
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano4379.63%360.00%
Xiaotian Feng1018.52%120.00%
Alexey Dobriyan11.85%120.00%
Total54100.00%5100.00%


/* Late per-netns init: register the /proc/net entries for this netns.
 * Registration results are not checked here (matching the original);
 * always reports success.
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pnet = net->proc_net;

	proc_create("rt6_stats", S_IRUGO, pnet, &rt6_stats_seq_fops);
	proc_create("ipv6_route", 0, pnet, &ipv6_route_proc_fops);
#endif
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf3879.17%150.00%
Gao Feng1020.83%150.00%
Total48100.00%2100.00%


/* Late per-netns teardown: remove the /proc/net entries registered by
 * ip6_route_net_init_late().
 */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("rt6_stats", net->proc_net);
	remove_proc_entry("ipv6_route", net->proc_net);
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2571.43%150.00%
Gao Feng1028.57%150.00%
Total35100.00%2100.00%

/* Per-netns lifecycle hooks for the routing tables; see the
 * ip6_route_net_init()/ip6_route_net_exit() pair above.
 */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
/* Per-netns init for the IPv6 inetpeer storage: allocate and initialize
 * the peer base.  Returns -ENOMEM on allocation failure.
 */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *base;

	base = kmalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	inet_peer_base_init(base);
	net->ipv6.peers = base;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller53100.00%1100.00%
Total53100.00%1100.00%


/* Per-netns teardown for the inetpeer storage: detach the base from the
 * netns first, then invalidate its tree and free it.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *base = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(base);
	kfree(base);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller41100.00%2100.00%
Total41100.00%2100.00%

/* Per-netns lifecycle hooks for the inetpeer base. */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

/* Late-registered per-netns hooks (/proc entries); registered after the
 * main subsystems in ip6_route_init().
 */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

/* Netdevice notifier, priority offset relative to addrconf's notifier
 * (presumably so addrconf runs first — confirm against
 * ADDRCONF_NOTIFY_PRIORITY users).
 */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
/* Anchor init_net's template routes on its loopback device.  Boot-time
 * only: for init_net the loopback registers before ip6_route_dev_notify()
 * is installed, so the rt6_info loopback references must be taken
 * manually here instead of by the notifier.
 */
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Américo Wang101100.00%1100.00%
Total101100.00%1100.00%


/* Subsystem init: dst cache, blackhole dst counters, pernet subsystems,
 * fib6/xfrm6/policy rules, rtnetlink route handlers, the device notifier
 * and the per-cpu uncached-route lists.  Error paths unwind everything
 * registered so far via the goto chain (reverse order of registration).
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* Blackhole dsts share the main dst slab. */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
			    RTNL_FLAG_DOIT_UNLOCKED))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano11937.54%625.00%
Thomas Graf5918.61%312.50%
Martin KaFai Lau3811.99%14.17%
Linus Torvalds (pre-git)299.15%416.67%
David S. Miller299.15%312.50%
Eric Dumazet237.26%14.17%
Arnaud Ebalard82.52%14.17%
Benjamin Thery30.95%14.17%
Florian Westphal30.95%28.33%
Greg Rose30.95%14.17%
Hideaki Yoshifuji / 吉藤英明30.95%14.17%
Total317100.00%24100.00%


/* Subsystem teardown: unwind ip6_route_init() in reverse registration
 * order (notifier first, slab cache last).
 */
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1324.53%428.57%
Daniel Lezcano1120.75%214.29%
Xiaotian Feng611.32%17.14%
David S. Miller611.32%17.14%
Thomas Graf611.32%17.14%
Daniel McNeil59.43%17.14%
Hideaki Yoshifuji / 吉藤英明23.77%17.14%
Benjamin Thery23.77%17.14%
Kazunori Miyazawa11.89%17.14%
Eric W. Biedermann11.89%17.14%
Total53100.00%14100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Wei Wang302212.63%244.41%
Linus Torvalds (pre-git)300912.57%264.78%
Hideaki Yoshifuji / 吉藤英明21949.17%8114.89%
David S. Miller20558.59%499.01%
Martin KaFai Lau19728.24%213.86%
David Ahern19338.08%356.43%
Thomas Graf16987.09%173.12%
Daniel Lezcano11514.81%213.86%
Roopa Prabhu11494.80%71.29%
Duan Jiong5052.11%40.74%
Nicolas Dichtel4581.91%91.65%
Daniel Walter3061.28%10.18%
Jakub Sitnicki3051.27%30.55%
Hannes Frederic Sowa2901.21%81.47%
Florian Westphal2821.18%91.65%
Américo Wang2270.95%61.10%
Eric Dumazet2220.93%173.12%
Julian Anastasov2020.84%40.74%
Benjamin Thery1400.58%61.10%
Lorenzo Colitti1320.55%50.92%
Steffen Klassert1320.55%71.29%
Gao Feng1260.53%50.92%
Shmulik Ladkani1260.53%20.37%
Peter Zijlstra1230.51%20.37%
Daniel Borkmann1140.48%40.74%
Jiri Benc1070.45%81.47%
Xin Long970.41%40.74%
Alexey Kuznetsov920.38%30.55%
Alexey Dobriyan910.38%50.92%
Ido Schimmel890.37%40.74%
Lubomir Rintel820.34%10.18%
Denis V. Lunev800.33%71.29%
Eric W. Biedermann770.32%91.65%
Kamala R730.30%10.18%
Patrick McHardy710.30%71.29%
Randy Dunlap680.28%40.74%
Maciej Żenczykowski650.27%20.37%
Paolo Abeni600.25%50.92%
Shirley Ma600.25%30.55%
Matti Vaittinen600.25%10.18%
Pavel Emelyanov470.20%71.29%
Al Viro470.20%30.55%
Vincent Bernat470.20%20.37%
Tom Herbert450.19%20.37%
James Morris400.17%20.37%
Andy Gospodarek390.16%10.18%
Zheng Yan390.16%10.18%
Lucian Adrian Grijincu330.14%10.18%
Ville Nuorvala330.14%20.37%
Tom Tucker290.12%10.18%
Mathew Richardson280.12%10.18%
Brian Haley280.12%40.74%
Herbert Xu270.11%61.10%
Stephen Hemminger230.10%20.37%
Lv Liangying200.08%10.18%
Krishna Kumar200.08%20.37%
Roland Dreier170.07%10.18%
Xiaotian Feng160.07%10.18%
Jean-Mickael Guerin150.06%10.18%
Mantas M150.06%10.18%
Kazunori Miyazawa150.06%20.37%
Colin Ian King150.06%20.37%
Alexander Aring140.06%10.18%
Jamal Hadi Salim140.06%20.37%
Linus Torvalds140.06%30.55%
Nikola Forró140.06%10.18%
Michal Kubeček130.05%30.55%
Joe Perches120.05%30.55%
Liping Zhang110.05%10.18%
Sabrina Dubroca100.04%10.18%
Paul Marks100.04%10.18%
Arnaldo Carvalho de Melo100.04%30.55%
Brendan McGrath80.03%10.18%
Noriaki Takamiya80.03%10.18%
Arnaud Ebalard80.03%10.18%
Erik Nordmark70.03%20.37%
Dave Craig60.03%10.18%
Johannes Berg60.03%20.37%
Li RongQing60.03%30.55%
Simon Horman60.03%10.18%
Jiri Pirko50.02%10.18%
Daniel McNeil50.02%10.18%
Mahesh Bandewar50.02%10.18%
David McCullough50.02%10.18%
Li Wei50.02%10.18%
Held Bernhard40.02%10.18%
Jens Rosenboom40.02%10.18%
Madalin Bucur40.02%10.18%
Paul Gortmaker30.01%10.18%
Changli Gao30.01%10.18%
Greg Rose30.01%10.18%
Matthias Schiffer30.01%10.18%
Tejun Heo30.01%10.18%
Stephen Rothwell20.01%10.18%
Alexander Alemayhu20.01%10.18%
Adrian Bunk20.01%20.37%
Jiri Olsa20.01%10.18%
Rami Rosen20.01%10.18%
Dave Jones20.01%20.37%
Min Zhang20.01%10.18%
Simon Arlott20.01%10.18%
Michael Büsch10.00%10.18%
Nikolay Aleksandrov10.00%10.18%
Arjan van de Ven10.00%10.18%
Jim Paris10.00%10.18%
Pablo Neira Ayuso10.00%10.18%
Arnd Bergmann10.00%10.18%
Ian Morris10.00%10.18%
Total23935100.00%544100.00%
Directory: net/ipv6
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.