cregit-Linux how code gets into the kernel

Release 4.14 net/ipv6/route.c

Directory: net/ipv6
/*
 *      Linux INET6 implementation
 *      FIB front-end.
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */

/*      Changes:
 *
 *      YOSHIFUJI Hideaki @USAGI
 *              reworked default router selection.
 *              - respect outgoing interface
 *              - select from (probably) reachable routers (i.e.
 *              routers in REACHABLE, STALE, DELAY or PROBE states).
 *              - always select the same router if it is (probably)
 *              reachable.  otherwise, round-robin the list.
 *      Ville Nuorvala
 *              Fixed routing subtrees.
 */


#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <net/tcp.h>
#include <linux/rtnetlink.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/xfrm.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>

#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif


/*
 * Outcome of a next-hop reachability check (see rt6_check_neigh()).
 * Negative values are failures; RT6_NUD_FAIL_DO_RR additionally tells
 * the caller (find_match()) to round-robin to a sibling route.
 */
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif


/* Per-CPU list of rt6_info entries not owned by the fib6 tree. */
struct uncached_list {
	spinlock_t		lock;	/* protects @head; taken with BHs off */
	struct list_head	head;
};

/* One uncached_list per possible CPU; entries are added on the local CPU. */
static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);


/*
 * Link @rt onto the current CPU's uncached list under the BH-safe spinlock.
 * The chosen list is remembered in rt->rt6i_uncached_list so that
 * rt6_uncached_list_del() can find it from any CPU later.
 */
static void rt6_uncached_list_add(struct rt6_info *rt) { struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list); rt->rt6i_uncached_list = ul; spin_lock_bh(&ul->lock); list_add_tail(&rt->rt6i_uncached, &ul->head); spin_unlock_bh(&ul->lock); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau57100.00%1100.00%
Total57100.00%1100.00%


/*
 * Unlink @rt from the uncached list it was added to, if any.
 * The list_empty() check makes this safe for routes that were never added.
 */
static void rt6_uncached_list_del(struct rt6_info *rt) { if (!list_empty(&rt->rt6i_uncached)) { struct uncached_list *ul = rt->rt6i_uncached_list; spin_lock_bh(&ul->lock); list_del(&rt->rt6i_uncached); spin_unlock_bh(&ul->lock); } }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau57100.00%1100.00%
Total57100.00%1100.00%


/*
 * Device-teardown helper: walk every CPU's uncached list and re-point any
 * entry still referencing @dev (its inet6_dev or its dst.dev) at the
 * namespace loopback device, moving the device/idev references accordingly.
 * A no-op when @dev is the loopback device itself.
 */
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev) { struct net_device *loopback_dev = net->loopback_dev; int cpu; if (dev == loopback_dev) return; for_each_possible_cpu(cpu) { struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu); struct rt6_info *rt; spin_lock_bh(&ul->lock); list_for_each_entry(rt, &ul->head, rt6i_uncached) { struct inet6_dev *rt_idev = rt->rt6i_idev; struct net_device *rt_dev = rt->dst.dev; if (rt_idev->dev == dev) { rt->rt6i_idev = in6_dev_get(loopback_dev); in6_dev_put(rt_idev); } if (rt_dev == dev) { rt->dst.dev = loopback_dev; dev_hold(rt->dst.dev); dev_put(rt_dev); } } spin_unlock_bh(&ul->lock); } }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau15295.60%150.00%
Eric W. Biedermann74.40%150.00%
Total159100.00%2100.00%


/*
 * For a per-CPU clone, metrics live in the entry it was cloned from
 * (dst.from); hand back a writable pointer into that parent's metrics.
 */
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	struct dst_entry *parent = rt->dst.from;

	return dst_metrics_write_ptr(parent);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau22100.00%1100.00%
Total22100.00%1100.00%


/*
 * dst_ops->cow_metrics hook: pick the copy-on-write strategy by route kind.
 * Per-CPU clones write through their parent, RTF_CACHE clones refuse a
 * writable pointer, everything else uses the generic COW path.
 */
static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	return dst_cow_metrics_generic(dst, old);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3657.14%125.00%
Martin KaFai Lau2133.33%250.00%
Zheng Yan69.52%125.00%
Total63100.00%4100.00%


/*
 * Pick the address used for the neighbour lookup: the route's gateway if
 * one is set, else the packet's destination when a skb is available, else
 * the caller-supplied @daddr.
 */
static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	const struct in6_addr *gw = &rt->rt6i_gateway;

	if (!ipv6_addr_any(gw))
		return gw;
	if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller67100.00%3100.00%
Total67100.00%3100.00%


/*
 * dst_ops->neigh_lookup hook: resolve the neighbour entry for this route,
 * using choose_neigh_daddr() to decide the lookup key, and create one in
 * the ND table if none exists yet.
 */
static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct rt6_info *rt = (struct rt6_info *) dst; struct neighbour *n; daddr = choose_neigh_daddr(rt, skb, daddr); n = __ipv6_neigh_lookup(dst->dev, daddr); if (n) return n; return neigh_create(&nd_tbl, daddr, dst->dev); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller83100.00%4100.00%
Total83100.00%4100.00%


/*
 * dst_ops->confirm_neigh hook: mark the neighbour for @daddr as recently
 * confirmed. Skipped for devices that do no address resolution
 * (IFF_NOARP/IFF_LOOPBACK) and for multicast destinations.
 */
static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr) { struct net_device *dev = dst->dev; struct rt6_info *rt = (struct rt6_info *)dst; daddr = choose_neigh_daddr(rt, NULL, daddr); if (!daddr) return; if (dev->flags & (IFF_NOARP | IFF_LOOPBACK)) return; if (ipv6_addr_is_multicast((const struct in6_addr *)daddr)) return; __ipv6_confirm_neigh(dev, daddr); }

Contributors

PersonTokensPropCommitsCommitProp
Julian Anastasov89100.00%1100.00%
Total89100.00%1100.00%

static struct dst_ops ip6_dst_ops_template = { .family = AF_INET6, .gc = ip6_dst_gc, .gc_thresh = 1024, .check = ip6_dst_check, .default_advmss = ip6_default_advmss, .mtu = ip6_mtu, .cow_metrics = ipv6_cow_metrics, .destroy = ip6_dst_destroy, .ifdown = ip6_dst_ifdown, .negative_advice = ip6_negative_advice, .link_failure = ip6_link_failure, .update_pmtu = ip6_rt_update_pmtu, .redirect = rt6_do_redirect, .local_out = __ip6_local_out, .neigh_lookup = ip6_neigh_lookup, .confirm_neigh = ip6_confirm_neigh, };
/* MTU for a blackhole dst: the raw RTAX_MTU metric, or the device MTU. */
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		return mtu;
	return dst->dev->mtu;
}

Contributors

PersonTokensPropCommitsCommitProp
Steffen Klassert2058.82%375.00%
Roland Dreier1441.18%125.00%
Total34100.00%4100.00%


/* Blackhole dsts deliberately ignore PMTU updates. */
static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller23100.00%2100.00%
Total23100.00%2100.00%


/* Blackhole dsts deliberately ignore redirects. */
static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller20100.00%2100.00%
Total20100.00%2100.00%

static struct dst_ops ip6_dst_blackhole_ops = { .family = AF_INET6, .destroy = ip6_dst_destroy, .check = ip6_dst_check, .mtu = ip6_blackhole_mtu, .default_advmss = ip6_default_advmss, .update_pmtu = ip6_rt_blackhole_update_pmtu, .redirect = ip6_rt_blackhole_redirect, .cow_metrics = dst_cow_metrics_generic, .neigh_lookup = ip6_neigh_lookup, }; static const u32 ip6_template_metrics[RTAX_MAX] = { [RTAX_HOPLIMIT - 1] = 0, }; static const struct rt6_info ip6_null_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -ENETUNREACH, .input = ip6_pkt_discard, .output = ip6_pkt_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; #ifdef CONFIG_IPV6_MULTIPLE_TABLES static const struct rt6_info ip6_prohibit_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EACCES, .input = ip6_pkt_prohibit, .output = ip6_pkt_prohibit_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; static const struct rt6_info ip6_blk_hole_entry_template = { .dst = { .__refcnt = ATOMIC_INIT(1), .__use = 1, .obsolete = DST_OBSOLETE_FORCE_CHK, .error = -EINVAL, .input = dst_discard, .output = dst_discard_out, }, .rt6i_flags = (RTF_REJECT | RTF_NONEXTHOP), .rt6i_protocol = RTPROT_KERNEL, .rt6i_metric = ~(u32) 0, .rt6i_ref = ATOMIC_INIT(1), }; #endif
/*
 * Initialize the rt6_info-specific tail of a freshly allocated entry:
 * zero everything after the embedded dst_entry, then set up the list heads.
 */
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *base = &rt->dst;

	/* Clear only the part of *rt that follows the dst_entry header. */
	memset(base + 1, 0, sizeof(*rt) - sizeof(*base));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau58100.00%1100.00%
Total58100.00%1100.00%

/* allocate dst with ip6_dst_ops */
/* Allocate a dst with this netns' ip6_dst_ops and give it rt6 init. */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt;

	rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev, 1,
		       DST_OBSOLETE_FORCE_CHK, flags);
	if (rt)
		rt6_info_init(rt);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller2951.79%531.25%
Kazunori Miyazawa712.50%16.25%
Benjamin Thery610.71%16.25%
Martin KaFai Lau47.14%318.75%
Madalin Bucur47.14%16.25%
Nicolas Dichtel35.36%212.50%
Hideaki Yoshifuji / 吉藤英明23.57%212.50%
Wei Wang11.79%16.25%
Total56100.00%16100.00%


/*
 * Allocate a rt6_info plus its per-CPU clone-pointer array (each slot
 * starts NULL). On percpu allocation failure the dst is released
 * immediately and NULL is returned.
 */
struct rt6_info *ip6_dst_alloc(struct net *net, struct net_device *dev, int flags) { struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags); if (rt) { rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC); if (rt->rt6i_pcpu) { int cpu; for_each_possible_cpu(cpu) { struct rt6_info **p; p = per_cpu_ptr(rt->rt6i_pcpu, cpu); /* no one shares rt */ *p = NULL; } } else { dst_release_immediate(&rt->dst); return NULL; } } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau10292.73%133.33%
Wei Wang87.27%266.67%
Total110100.00%3100.00%

EXPORT_SYMBOL(ip6_dst_alloc);
/*
 * dst_ops->destroy hook: release everything a rt6_info owns — generic
 * metrics, the per-CPU clone array, uncached-list membership, the idev
 * reference, and finally the reference on the dst it was cloned from.
 */
static void ip6_dst_destroy(struct dst_entry *dst) { struct rt6_info *rt = (struct rt6_info *)dst; struct dst_entry *from = dst->from; struct inet6_dev *idev; dst_destroy_metrics_generic(dst); free_percpu(rt->rt6i_pcpu); rt6_uncached_list_del(rt); idev = rt->rt6i_idev; if (idev) { rt->rt6i_idev = NULL; in6_dev_put(idev); } dst->from = NULL; dst_release(from); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明4955.68%337.50%
Martin KaFai Lau2326.14%225.00%
Gao Feng89.09%112.50%
Zheng Yan77.95%112.50%
David S. Miller11.14%112.50%
Total88100.00%8100.00%


/*
 * dst_ops->ifdown hook: when @dev goes away, migrate the route's inet6_dev
 * reference to the namespace loopback device (unless it already points
 * there), dropping the old idev reference.
 */
static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how) { struct rt6_info *rt = (struct rt6_info *)dst; struct inet6_dev *idev = rt->rt6i_idev; struct net_device *loopback_dev = dev_net(dev)->loopback_dev; if (idev && idev->dev != loopback_dev) { struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev); if (loopback_idev) { rt->rt6i_idev = loopback_idev; in6_dev_put(idev); } } }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7481.32%450.00%
Denis V. Lunev99.89%112.50%
Herbert Xu55.49%112.50%
Wei Wang22.20%112.50%
David S. Miller11.10%112.50%
Total91100.00%8100.00%


/* True when the route carries RTF_EXPIRES and its expiry time has passed. */
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & RTF_EXPIRES) &&
	       time_after(jiffies, rt->dst.expires);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau36100.00%1100.00%
Total36100.00%1100.00%


/*
 * Full expiry check: an RTF_EXPIRES route is expired when its timer has
 * fired; a clone (dst.from set) is expired when its obsolete state is no
 * longer DST_OBSOLETE_FORCE_CHK or when its parent route has expired
 * (checked recursively).
 */
static bool rt6_check_expired(const struct rt6_info *rt) { if (rt->rt6i_flags & RTF_EXPIRES) { if (time_after(jiffies, rt->dst.expires)) return true; } else if (rt->dst.from) { return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK || rt6_check_expired((struct rt6_info *)rt->dst.from); } return false; }

Contributors

PersonTokensPropCommitsCommitProp
Gao Feng4458.67%120.00%
Hideaki Yoshifuji / 吉藤英明1722.67%120.00%
Xin Long810.67%120.00%
Eric Dumazet34.00%120.00%
Li RongQing34.00%120.00%
Total75100.00%5100.00%


/*
 * Pick one route among @match and its ECMP siblings using the flow hash
 * (computed here if not already cached in fl6->mp_hash). Index 0 keeps
 * @match itself; otherwise walk the sibling list to the chosen slot,
 * falling back to @match when that sibling scores negatively.
 * (Local name "route_choosen" is a historical misspelling, kept as-is.)
 */
static struct rt6_info *rt6_multipath_select(struct rt6_info *match, struct flowi6 *fl6, int oif, int strict) { struct rt6_info *sibling, *next_sibling; int route_choosen; /* We might have already computed the hash for ICMPv6 errors. In such * case it will always be non-zero. Otherwise now is the time to do it. */ if (!fl6->mp_hash) fl6->mp_hash = rt6_multipath_hash(fl6, NULL); route_choosen = fl6->mp_hash % (match->rt6i_nsiblings + 1); /* Don't change the route, if route_choosen == 0 * (siblings does not include ourself) */ if (route_choosen) list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings, rt6i_siblings) { route_choosen--; if (route_choosen == 0) { if (rt6_score_route(sibling, oif, strict) < 0) break; match = sibling; break; } } return match; }

Contributors

PersonTokensPropCommitsCommitProp
Nicolas Dichtel9378.81%250.00%
Jakub Sitnicki2521.19%250.00%
Total118100.00%4100.00%

/*
 * Route lookup. Any table->tb6_lock is implied.
 */
/*
 * Walk the fib6 leaf chain starting at @rt and return the first entry that
 * matches the requested output interface (@oif) or, with no oif, the first
 * whose device owns @saddr. Loopback entries are remembered as a fallback;
 * with RT6_LOOKUP_F_IFACE set and no match, the null entry is returned
 * instead of a wrong-interface route. Caller holds table->tb6_lock.
 */
static inline struct rt6_info *rt6_device_match(struct net *net, struct rt6_info *rt, const struct in6_addr *saddr, int oif, int flags) { struct rt6_info *local = NULL; struct rt6_info *sprt; if (!oif && ipv6_addr_any(saddr)) goto out; for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) { struct net_device *dev = sprt->dst.dev; if (oif) { if (dev->ifindex == oif) return sprt; if (dev->flags & IFF_LOOPBACK) { if (!sprt->rt6i_idev || sprt->rt6i_idev->dev->ifindex != oif) { if (flags & RT6_LOOKUP_F_IFACE) continue; if (local && local->rt6i_idev->dev->ifindex == oif) continue; } local = sprt; } } else { if (ipv6_chk_addr(net, saddr, dev, flags & RT6_LOOKUP_F_IFACE)) return sprt; } } if (oif) { if (local) return local; if (flags & RT6_LOOKUP_F_IFACE) return net->ipv6.ip6_null_entry; } out: return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明9344.50%430.77%
Linus Torvalds (pre-git)7133.97%430.77%
Daniel Lezcano3717.70%17.69%
Eric Dumazet41.91%215.38%
David S. Miller41.91%215.38%
Total209100.00%13100.00%

#ifdef CONFIG_IPV6_ROUTER_PREF
/* Deferred router-reachability probe: carries the target address and the
 * device (held) from rt6_probe() to the workqueue handler.
 */
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};
/*
 * Workqueue handler for a deferred router probe: send a neighbour
 * solicitation to the target's solicited-node multicast address, then
 * drop the device reference and free the work item.
 */
static void rt6_probe_deferred(struct work_struct *w) { struct in6_addr mcaddr; struct __rt6_probe_work *work = container_of(w, struct __rt6_probe_work, work); addrconf_addr_solict_mult(&work->target, &mcaddr); ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0); dev_put(work->dev); kfree(work); }

Contributors

PersonTokensPropCommitsCommitProp
Hannes Frederic Sowa6894.44%125.00%
Erik Nordmark22.78%125.00%
Michael Büsch11.39%125.00%
Jiri Benc11.39%125.00%
Total72100.00%4100.00%


/*
 * Schedule a reachability probe toward the route's gateway unless its
 * neighbour entry is already NUD_VALID. Rate-limited via rtr_probe_interval
 * (checked under neigh->lock with __neigh_set_probe_once()); the actual
 * solicitation is sent from rt6_probe_deferred() on the workqueue.
 */
static void rt6_probe(struct rt6_info *rt) { struct __rt6_probe_work *work; struct neighbour *neigh; /* * Okay, this does not seem to be appropriate * for now, however, we need to check if it * is really so; aka Router Reachability Probing. * * Router Reachability Probe MUST be rate-limited * to no more than one per minute. */ if (!rt || !(rt->rt6i_flags & RTF_GATEWAY)) return; rcu_read_lock_bh(); neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); if (neigh) { if (neigh->nud_state & NUD_VALID) goto out; work = NULL; write_lock(&neigh->lock); if (!(neigh->nud_state & NUD_VALID) && time_after(jiffies, neigh->updated + rt->rt6i_idev->cnf.rtr_probe_interval)) { work = kmalloc(sizeof(*work), GFP_ATOMIC); if (work) __neigh_set_probe_once(neigh); } write_unlock(&neigh->lock); } else { work = kmalloc(sizeof(*work), GFP_ATOMIC); } if (work) { INIT_WORK(&work->work, rt6_probe_deferred); work->target = rt->rt6i_gateway; dev_hold(rt->dst.dev); work->dev = rt->dst.dev; schedule_work(&work->work); } out: rcu_read_unlock_bh(); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明11352.07%541.67%
Hannes Frederic Sowa4721.66%18.33%
Martin KaFai Lau4420.28%216.67%
Eric Dumazet62.76%18.33%
Jiri Benc31.38%18.33%
David S. Miller31.38%18.33%
Daniel Lezcano10.46%18.33%
Total217100.00%12100.00%

#else
/* No router-preference support: probing is compiled out. */
static inline void rt6_probe(struct rt6_info *rt)
{
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明1090.91%150.00%
Joe Perches19.09%150.00%
Total11100.00%2100.00%

#endif
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif) { struct net_device *dev = rt->dst.dev; if (!oif || dev->ifindex == oif) return 2; if ((dev->flags & IFF_LOOPBACK) && rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif) return 1; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明5578.57%457.14%
David S. Miller1420.00%228.57%
Dave Jones11.43%114.29%
Total70100.00%7100.00%


/*
 * Classify next-hop reachability from the neighbour cache (under RCU-BH).
 * Non-gateway routes trivially succeed. With CONFIG_IPV6_ROUTER_PREF, a
 * not-yet-NUD_FAILED neighbour still counts as reachable and a failed one
 * yields RT6_NUD_FAIL_PROBE; a missing neighbour yields round-robin
 * (RT6_NUD_FAIL_DO_RR) unless router preference is enabled.
 */
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt) { struct neighbour *neigh; enum rt6_nud_state ret = RT6_NUD_FAIL_HARD; if (rt->rt6i_flags & RTF_NONEXTHOP || !(rt->rt6i_flags & RTF_GATEWAY)) return RT6_NUD_SUCCEED; rcu_read_lock_bh(); neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway); if (neigh) { read_lock(&neigh->lock); if (neigh->nud_state & NUD_VALID) ret = RT6_NUD_SUCCEED; #ifdef CONFIG_IPV6_ROUTER_PREF else if (!(neigh->nud_state & NUD_FAILED)) ret = RT6_NUD_SUCCEED; else ret = RT6_NUD_FAIL_PROBE; #endif read_unlock(&neigh->lock); } else { ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ? RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR; } rcu_read_unlock_bh(); return ret; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明8559.44%642.86%
Hannes Frederic Sowa2114.69%214.29%
Linus Torvalds (pre-git)2114.69%321.43%
Paul Marks85.59%17.14%
Jiri Benc64.20%17.14%
Eric Dumazet21.40%17.14%
Total143100.00%14100.00%


/*
 * Compute a route's selection score: device match in the low bits, decoded
 * router preference (when configured) shifted in above, and — under
 * RT6_LOOKUP_F_REACHABLE — a negative rt6_nud_state is propagated as-is.
 */
static int rt6_score_route(struct rt6_info *rt, int oif, int strict) { int m; m = rt6_check_dev(rt, oif); if (!m && (strict & RT6_LOOKUP_F_IFACE)) return RT6_NUD_FAIL_HARD; #ifdef CONFIG_IPV6_ROUTER_PREF m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2; #endif if (strict & RT6_LOOKUP_F_REACHABLE) { int n = rt6_check_neigh(rt); if (n < 0) return n; } return m; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6572.22%562.50%
Hannes Frederic Sowa1718.89%112.50%
Linus Torvalds (pre-git)66.67%112.50%
Paul Marks22.22%112.50%
Total90100.00%8100.00%


/*
 * Compare @rt against the current best (@match, score *mpri): skip expired
 * routes and (unless RT6_LOOKUP_F_IGNORE_LINKSTATE) carrier-down links,
 * score the rest, translate RT6_NUD_FAIL_DO_RR into "lowest valid score
 * plus request round-robin" via *do_rr, and probe reachability when asked.
 */
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict, int *mpri, struct rt6_info *match, bool *do_rr) { int m; bool match_do_rr = false; struct inet6_dev *idev = rt->rt6i_idev; struct net_device *dev = rt->dst.dev; if (dev && !netif_carrier_ok(dev) && idev->cnf.ignore_routes_with_linkdown && !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE)) goto out; if (rt6_check_expired(rt)) goto out; m = rt6_score_route(rt, oif, strict); if (m == RT6_NUD_FAIL_DO_RR) { match_do_rr = true; m = 0; /* lowest valid score */ } else if (m == RT6_NUD_FAIL_HARD) { goto out; } if (strict & RT6_LOOKUP_F_REACHABLE) rt6_probe(rt); /* note that m can be RT6_NUD_FAIL_PROBE at this point */ if (m > *mpri) { *do_rr = match_do_rr; *mpri = m; match = rt; } out: return match; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明6235.23%646.15%
Hannes Frederic Sowa4022.73%17.69%
Andy Gospodarek3922.16%17.69%
David S. Miller2111.93%17.69%
David Ahern73.98%17.69%
Jiri Benc42.27%17.69%
Linus Torvalds (pre-git)31.70%215.38%
Total176100.00%13100.00%


/*
 * Scan the leaf chain for the best route at @metric, starting at the
 * round-robin head and wrapping to the front of the chain; @cont marks
 * where routes of a different metric begin, and those are only scanned
 * when nothing matched at the primary metric.
 */
static struct rt6_info *find_rr_leaf(struct fib6_node *fn, struct rt6_info *rr_head, u32 metric, int oif, int strict, bool *do_rr) { struct rt6_info *rt, *match, *cont; int mpri = -1; match = NULL; cont = NULL; for (rt = rr_head; rt; rt = rt->dst.rt6_next) { if (rt->rt6i_metric != metric) { cont = rt; break; } match = find_match(rt, oif, strict, &mpri, match, do_rr); } for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) { if (rt->rt6i_metric != metric) { cont = rt; break; } match = find_match(rt, oif, strict, &mpri, match, do_rr); } if (match || !cont) return match; for (rt = cont; rt; rt = rt->dst.rt6_next) match = find_match(rt, oif, strict, &mpri, match, do_rr); return match; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10750.71%114.29%
Steffen Klassert8138.39%114.29%
Hideaki Yoshifuji / 吉藤英明94.27%342.86%
Hannes Frederic Sowa83.79%114.29%
Eric Dumazet62.84%114.29%
Total211100.00%7100.00%


/*
 * Select the best route in @fn via find_rr_leaf(), advancing the node's
 * round-robin pointer (fn->rr_ptr) to the next same-metric entry when
 * round-robin was requested. Falls back to the netns null entry when
 * nothing matches.
 */
static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict) { struct rt6_info *match, *rt0; struct net *net; bool do_rr = false; rt0 = fn->rr_ptr; if (!rt0) fn->rr_ptr = rt0 = fn->leaf; match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict, &do_rr); if (do_rr) { struct rt6_info *next = rt0->dst.rt6_next; /* no entries matched; do round-robin */ if (!next || next->rt6i_metric != rt0->rt6i_metric) next = fn->leaf; if (next != rt0) fn->rr_ptr = next; } net = dev_net(rt0->dst.dev); return match ? match : net->ipv6.ip6_null_entry; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10771.81%218.18%
Daniel Lezcano149.40%19.09%
Hideaki Yoshifuji / 吉藤英明138.72%436.36%
Hannes Frederic Sowa96.04%19.09%
Eric Dumazet32.01%19.09%
Linus Torvalds (pre-git)32.01%218.18%
Total149100.00%11100.00%


/* True when the route has a gateway or is flagged RTF_NONEXTHOP. */
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau25100.00%1100.00%
Total25100.00%1100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
/*
 * Process a received RA Route Information option (RFC 4191): validate the
 * option length against prefix_len, decode preference and lifetime, look
 * up an existing RTF_ROUTEINFO route (or the default router for a /0),
 * then delete it (lifetime 0), add it, or refresh its flags/expiry.
 * Returns 0 on success or -EINVAL on a malformed option.
 */
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len, const struct in6_addr *gwaddr) { struct net *net = dev_net(dev); struct route_info *rinfo = (struct route_info *) opt; struct in6_addr prefix_buf, *prefix; unsigned int pref; unsigned long lifetime; struct rt6_info *rt; if (len < sizeof(struct route_info)) { return -EINVAL; } /* Sanity check for prefix_len and length */ if (rinfo->length > 3) { return -EINVAL; } else if (rinfo->prefix_len > 128) { return -EINVAL; } else if (rinfo->prefix_len > 64) { if (rinfo->length < 2) { return -EINVAL; } } else if (rinfo->prefix_len > 0) { if (rinfo->length < 1) { return -EINVAL; } } pref = rinfo->route_pref; if (pref == ICMPV6_ROUTER_PREF_INVALID) return -EINVAL; lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ); if (rinfo->length == 3) prefix = (struct in6_addr *)rinfo->prefix; else { /* this function is safe */ ipv6_addr_prefix(&prefix_buf, (struct in6_addr *)rinfo->prefix, rinfo->prefix_len); prefix = &prefix_buf; } if (rinfo->prefix_len == 0) rt = rt6_get_dflt_router(gwaddr, dev); else rt = rt6_get_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev); if (rt && !lifetime) { ip6_del_rt(rt); rt = NULL; } if (!rt && lifetime) rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev, pref); else if (rt) rt->rt6i_flags = RTF_ROUTEINFO | (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref); if (rt) { if (!addrconf_finite_timeout(lifetime)) rt6_clean_expires(rt); else rt6_set_expires(rt, jiffies + HZ * lifetime); ip6_rt_put(rt); } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明30479.79%320.00%
Linus Torvalds (pre-git)348.92%533.33%
Duan Jiong184.72%16.67%
Daniel Lezcano112.89%16.67%
Gao Feng71.84%16.67%
Jens Rosenboom41.05%16.67%
Al Viro10.26%16.67%
Eric Dumazet10.26%16.67%
Américo Wang10.26%16.67%
Total381100.00%15100.00%

#endif
/*
 * Walk back up the fib6 tree from @fn until a node carrying route info
 * (RTN_RTINFO) is found, descending into a parent's source-address subtree
 * when one exists and @fn did not come from it. Returns NULL at the root.
 */
static struct fib6_node *fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	for (;;) {
		struct fib6_node *parent;

		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		parent = fn->parent;
		if (FIB6_SUBTREE(parent) && FIB6_SUBTREE(parent) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(parent), NULL, saddr);
		else
			fn = parent;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau8695.56%133.33%
Hideaki Yoshifuji / 吉藤英明33.33%133.33%
Daniel Lezcano11.11%133.33%
Total90100.00%3100.00%


/*
 * Simple policy-routing lookup (no cloning): find the fib6 node for the
 * flow, match by device, optionally pick an ECMP sibling, and backtrack
 * toward the root while only the null entry matches. Takes a use-count
 * reference (dst_use) under table->tb6_lock before returning.
 */
static struct rt6_info *ip6_pol_route_lookup(struct net *net, struct fib6_table *table, struct flowi6 *fl6, int flags) { struct fib6_node *fn; struct rt6_info *rt; read_lock_bh(&table->tb6_lock); fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr); restart: rt = fn->leaf; rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags); if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0) rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags); if (rt == net->ipv6.ip6_null_entry) { fn = fib6_backtrack(fn, &fl6->saddr); if (fn) goto restart; } dst_use(&rt->dst, jiffies); read_unlock_bh(&table->tb6_lock); trace_fib6_table_lookup(net, rt, table->tb6_id, fl6); return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明5328.80%320.00%
Thomas Graf3317.93%16.67%
Nicolas Dichtel2714.67%213.33%
Martin KaFai Lau2312.50%16.67%
David Ahern137.07%16.67%
David S. Miller126.52%16.67%
Linus Torvalds (pre-git)115.98%426.67%
Daniel Lezcano94.89%16.67%
Pavel Emelyanov31.63%16.67%
Total184100.00%15100.00%


/* Public wrapper: run ip6_pol_route_lookup() through the policy rules. */
struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   int flags)
{
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal32100.00%1100.00%
Total32100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_lookup);
/*
 * Convenience lookup by address pair: build a flowi6 from @daddr/@saddr
 * and @oif (@strict maps to RT6_LOOKUP_F_IFACE), run the rule lookup, and
 * return the route on success or NULL (releasing the error dst) on failure.
 */
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr, const struct in6_addr *saddr, int oif, int strict) { struct flowi6 fl6 = { .flowi6_oif = oif, .daddr = *daddr, }; struct dst_entry *dst; int flags = strict ? RT6_LOOKUP_F_IFACE : 0; if (saddr) { memcpy(&fl6.saddr, saddr, sizeof(*saddr)); flags |= RT6_LOOKUP_F_HAS_SADDR; } dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup); if (dst->error == 0) return (struct rt6_info *) dst; dst_release(dst); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf9273.60%220.00%
Linus Torvalds (pre-git)1512.00%330.00%
Daniel Lezcano86.40%220.00%
David S. Miller75.60%110.00%
Hideaki Yoshifuji / 吉藤英明32.40%220.00%
Total125100.00%10100.00%

EXPORT_SYMBOL(rt6_lookup); /* ip6_ins_rt is called with FREE table->tb6_lock. * It takes new route entry, the addition fails by any reason the * route is released. * Caller must hold dst before calling it. */
/*
 * Insert @rt into its fib6 table under the table write lock.
 * Returns the fib6_add() result; per the comment above, the caller must
 * hold a dst reference which is consumed on failure.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info, struct mx6_config *mxc, struct netlink_ext_ack *extack) { int err; struct fib6_table *table; table = rt->rt6i_table; write_lock_bh(&table->tb6_lock); err = fib6_add(&table->tb6_root, rt, info, mxc, extack); write_unlock_bh(&table->tb6_lock); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3444.16%433.33%
Thomas Graf2532.47%325.00%
David Ahern79.09%18.33%
Michal Kubeček45.19%18.33%
Florian Westphal33.90%18.33%
Mathew Richardson33.90%18.33%
Jamal Hadi Salim11.30%18.33%
Total77100.00%12100.00%


/*
 * Insert @rt with default netlink info and no metrics, taking the dst
 * reference that the fib6 tree will own.
 */
int ip6_ins_rt(struct rt6_info *rt) { struct nl_info info = { .nl_net = dev_net(rt->dst.dev), }; struct mx6_config mxc = { .mx = NULL, }; /* Hold dst to account for the reference from the fib6 tree */ dst_hold(&rt->dst); return __ip6_ins_rt(rt, &info, &mxc, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1727.42%110.00%
Florian Westphal1422.58%110.00%
Denis V. Lunev1117.74%220.00%
Wei Wang914.52%110.00%
Hideaki Yoshifuji / 吉藤英明34.84%110.00%
David S. Miller34.84%110.00%
David Ahern23.23%110.00%
Daniel Lezcano23.23%110.00%
Michal Kubeček11.61%110.00%
Total62100.00%10100.00%

/* called with rcu_lock held */
/*
 * Pick the device to use when copying a local route (RTF_LOCAL): the L3
 * master for an enslaved device (unless the address needs strict routing),
 * the loopback device otherwise, or the device itself when it is already
 * an L3 master. Runs under rcu_read_lock (see comment above).
 */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt) { struct net_device *dev = rt->dst.dev; if (rt->rt6i_flags & RTF_LOCAL) { /* for copies of local routes, dst->dev needs to be the * device if it is a master device, the master device if * device is enslaved, and the loopback as the default */ if (netif_is_l3_slave(dev) && !rt6_need_strict(&rt->rt6i_dst.addr)) dev = l3mdev_master_dev_rcu(dev); else if (!netif_is_l3_master(dev)) dev = dev_net(dev)->loopback_dev; /* last case is netif_is_l3_master(dev) is true in which * case we want dev returned to be dev */ } return dev; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern82100.00%1100.00%
Total82100.00%1100.00%


/*
 * Create an RTF_CACHE clone of @ort keyed to the exact destination @daddr
 * (dst plen forced to 128). Clones of clones are based on the original
 * (ort->dst.from). For non-gateway routes, RTF_ANYCAST is set when the
 * destination equals the (non-/128) route prefix address, and with subtrees
 * the source is pinned to @saddr/128. Returns NULL on allocation failure.
 */
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort, const struct in6_addr *daddr, const struct in6_addr *saddr) { struct net_device *dev; struct rt6_info *rt; /* * Clone the route. */ if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU)) ort = (struct rt6_info *)ort->dst.from; rcu_read_lock(); dev = ip6_rt_get_dev_rcu(ort); rt = __ip6_dst_alloc(dev_net(dev), dev, 0); rcu_read_unlock(); if (!rt) return NULL; ip6_rt_copy_init(rt, ort); rt->rt6i_flags |= RTF_CACHE; rt->rt6i_metric = 0; rt->dst.flags |= DST_HOST; rt->rt6i_dst.addr = *daddr; rt->rt6i_dst.plen = 128; if (!rt6_is_gw_or_nonexthop(ort)) { if (ort->rt6i_dst.plen != 128 && ipv6_addr_equal(&ort->rt6i_dst.addr, daddr)) rt->rt6i_flags |= RTF_ANYCAST; #ifdef CONFIG_IPV6_SUBTREES if (rt->rt6i_src.plen && saddr) { rt->rt6i_src.addr = *saddr; rt->rt6i_src.plen = 128; } #endif } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau9142.33%320.00%
Linus Torvalds (pre-git)8037.21%426.67%
Hideaki Yoshifuji / 吉藤英明198.84%320.00%
David Ahern188.37%16.67%
Eric Dumazet41.86%213.33%
Alexey Dobriyan20.93%16.67%
David S. Miller10.47%16.67%
Total215100.00%15100.00%


/*
 * Allocate a per-CPU (RTF_PCPU) clone of @rt, copying its init state and
 * protocol. Returns NULL on allocation failure.
 */
static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt) { struct net_device *dev; struct rt6_info *pcpu_rt; rcu_read_lock(); dev = ip6_rt_get_dev_rcu(rt); pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags); rcu_read_unlock(); if (!pcpu_rt) return NULL; ip6_rt_copy_init(pcpu_rt, rt); pcpu_rt->rt6i_protocol = rt->rt6i_protocol; pcpu_rt->rt6i_flags |= RTF_PCPU; return pcpu_rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau6879.07%150.00%
David Ahern1820.93%150.00%
Total86100.00%2100.00%

/* It should be called with read_lock_bh(&tb6_lock) acquired */
/*
 * Fetch this CPU's cached clone of @rt, if one exists, taking a dst
 * reference and refreshing its inherited metrics. Per the comment above,
 * must be called with the table read lock held.
 */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt) { struct rt6_info *pcpu_rt, **p; p = this_cpu_ptr(rt->rt6i_pcpu); pcpu_rt = *p; if (pcpu_rt) { dst_hold(&pcpu_rt->dst); rt6_dst_from_metrics_check(pcpu_rt); } return pcpu_rt; }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau58100.00%2100.00%
Total58100.00%2100.00%


/* Create and install a per-cpu copy of @rt.  On allocation failure the
 * null entry is returned (with a hold).  Installation races with other
 * CPUs via cmpxchg and with fib6 tree removal via tb6_lock; the winner's
 * copy is returned with a reference held in all cases.
 */
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		/* Atomically install; only succeeds if the slot is empty. */
		prev = cmpxchg(p, NULL, pcpu_rt);
		if (prev) {
			/* If someone did it before us, return prev instead */
			dst_release_immediate(&pcpu_rt->dst);
			pcpu_rt = prev;
		}
	} else {
		/* rt has been removed from the fib6 tree
		 * before we have a chance to acquire the read_lock.
		 * In this case, don't bother to create a pcpu rt
		 * since rt is going away anyway.  The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_release_immediate(&pcpu_rt->dst);
		pcpu_rt = rt;
	}
	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
	return pcpu_rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau17498.86%375.00%
Wei Wang21.14%125.00%
Total176100.00%4100.00%


/* Core policy-routing lookup for @fl6 in @table.  Selects the best
 * route (considering multipath siblings), backtracking up the tree and
 * relaxing the reachability requirement if nothing matches.  Depending
 * on the result, returns the fib entry itself, an uncached RTF_CACHE
 * clone (FLOWI_FLAG_KNOWN_NH without a gateway), or a per-cpu copy.
 * Always returns an entry with a reference held; never NULL.
 */
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	/* Hosts (forwarding disabled) prefer reachable routers. */
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		/* Nothing here: walk back up the tree, then retry once
		 * more from the original node without REACHABLE.
		 */
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

		trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;
		rt->dst.__use++;
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (pcpu_rt) {
			read_unlock_bh(&table->tb6_lock);
		} else {
			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
		}

		trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
		return pcpu_rt;
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau26655.88%827.59%
David Ahern5711.97%310.34%
Linus Torvalds (pre-git)4910.29%413.79%
Hideaki Yoshifuji / 吉藤英明357.35%724.14%
Thomas Graf255.25%13.45%
Nicolas Dichtel193.99%26.90%
David S. Miller81.68%13.45%
Daniel Lezcano71.47%13.45%
Wei Wang51.05%13.45%
Pavel Emelyanov51.05%13.45%
Total476100.00%29100.00%

EXPORT_SYMBOL_GPL(ip6_pol_route);
/* fib6 rule lookup callback for the input path: resolve using the
 * incoming interface from the flow.
 */
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	int iif = fl6->flowi6_iif;

	return ip6_pol_route(net, table, iif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Pavel Emelyanov3071.43%133.33%
Daniel Lezcano716.67%133.33%
David S. Miller511.90%133.33%
Total42100.00%3100.00%


struct dst_entry *ip6_route_input_lookup(struct net *net, struct net_device *dev, struct flowi6 *fl6, int flags) { if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG) flags |= RT6_LOOKUP_F_IFACE; return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input); }

Contributors

PersonTokensPropCommitsCommitProp
Shmulik Ladkani57100.00%1100.00%
Total57100.00%1100.00%

EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
/* Extract L3 flow keys from @skb for multipath hashing.  For ICMPv6
 * error messages the keys are taken from the embedded (offending)
 * header so that errors hash onto the same path as the original flow.
 */
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	/* Only ICMPv6 errors carry an embedded offending header. */
	icmph = icmp6_hdr(skb);
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	/* The inner header may be non-linear; copy it out if needed. */
	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) +
				       sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	memset(keys, 0, sizeof(*keys));
	keys->control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
	keys->addrs.v6addrs.src = key_iph->saddr;
	keys->addrs.v6addrs.dst = key_iph->daddr;
	keys->tags.flow_label = ip6_flowinfo(key_iph);
	keys->basic.ip_proto = key_iph->nexthdr;
}

Contributors

PersonTokensPropCommitsCommitProp
Jakub Sitnicki208100.00%1100.00%
Total208100.00%1100.00%

/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct flowi6 *fl6, const struct sk_buff *skb) { struct flow_keys hash_keys; if (skb) { ip6_multipath_l3_keys(skb, &hash_keys); return flow_hash_from_keys(&hash_keys); } return get_hash_from_flowi6(fl6); }

Contributors

PersonTokensPropCommitsCommitProp
Jakub Sitnicki48100.00%1100.00%
Total48100.00%1100.00%


/* Route an incoming packet: build a flow key from its IPv6 header
 * (plus tunnel metadata and multipath hash where relevant) and attach
 * the resulting dst to the skb.
 */
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	/* Collected (RX-direction) tunnel metadata keys the lookup. */
	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	/* Pre-compute the multipath hash for ICMPv6 so errors follow
	 * the same path as the flow they refer to.
	 */
	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(&fl6, skb);
	skb_dst_drop(skb);
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf6135.26%315.00%
Jiri Benc4224.28%315.00%
Jakub Sitnicki2313.29%15.00%
Daniel Lezcano126.94%210.00%
David S. Miller95.20%15.00%
Hideaki Yoshifuji / 吉藤英明74.05%315.00%
Shmulik Ladkani52.89%15.00%
Eric Dumazet52.89%210.00%
Linus Torvalds (pre-git)42.31%210.00%
Arnaldo Carvalho de Melo31.73%15.00%
David Ahern21.16%15.00%
Total173100.00%20100.00%


/* fib6 rule lookup callback for the output path: resolve using the
 * outgoing interface from the flow.
 */
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	int oif = fl6->flowi6_oif;

	return ip6_pol_route(net, table, oif, fl6, flags);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1228.57%225.00%
Thomas Graf1126.19%112.50%
Daniel Lezcano716.67%112.50%
David S. Miller511.90%112.50%
Hideaki Yoshifuji / 吉藤英明49.52%225.00%
Pavel Emelyanov37.14%112.50%
Total42100.00%8100.00%


struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk, struct flowi6 *fl6, int flags) { bool any_src; if (rt6_need_strict(&fl6->daddr)) { struct dst_entry *dst; dst = l3mdev_link_scope_lookup(net, fl6); if (dst) return dst; } fl6->flowi6_iif = LOOPBACK_IFINDEX; any_src = ipv6_addr_any(&fl6->saddr); if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) || (fl6->flowi6_oif && any_src)) flags |= RT6_LOOKUP_F_IFACE; if (!any_src) flags |= RT6_LOOKUP_F_HAS_SADDR; else if (sk) flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs); return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output); }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern5537.67%420.00%
Thomas Graf3926.71%210.00%
Hideaki Yoshifuji / 吉藤英明1812.33%315.00%
Daniel Lezcano74.79%210.00%
Brian Haley64.11%15.00%
David McCullough53.42%15.00%
David S. Miller53.42%15.00%
Paolo Abeni42.74%15.00%
Linus Torvalds (pre-git)32.05%210.00%
Jiri Olsa21.37%15.00%
Florian Westphal10.68%15.00%
Pavel Emelyanov10.68%15.00%
Total146100.00%20100.00%

EXPORT_SYMBOL_GPL(ip6_route_output_flags);
/* Convert @dst_orig into a blackhole dst (used e.g. by xfrm when a
 * policy says "hold packets"): same route data, but input/output both
 * discard.  Consumes the reference on @dst_orig.  Returns the new dst
 * or ERR_PTR(-ENOMEM).
 */
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	/* DST_OBSOLETE_DEAD: never cached, always re-checked. */
	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);
		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		/* RTF_PCPU must not leak into a non-percpu copy. */
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller17385.22%640.00%
Wei Wang136.40%213.33%
Martin KaFai Lau83.94%16.67%
Steffen Klassert52.46%213.33%
Eric W. Biedermann10.49%16.67%
Herbert Xu10.49%16.67%
Alexey Dobriyan10.49%16.67%
Gao Feng10.49%16.67%
Total203100.00%15100.00%

/* * Destination cache support functions */
/* Re-point this dst's metrics at its parent's ("from") metrics if the
 * parent's metrics block has been replaced since we last looked.
 */
static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	struct dst_entry *parent = rt->dst.from;

	if (!parent)
		return;
	if (dst_metrics_ptr(&rt->dst) == dst_metrics_ptr(parent))
		return;

	dst_init_metrics(&rt->dst, dst_metrics_ptr(parent), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau55100.00%1100.00%
Total55100.00%1100.00%


/* Validate a cached dst against the fib6 tree generation @cookie.
 * Returns the dst if still usable, NULL if stale or expired.
 */
static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	u32 current_cookie = 0;
	bool valid;

	valid = rt6_get_cookie_safe(rt, &current_cookie) &&
		current_cookie == cookie &&
		!rt6_check_expired(rt);

	return valid ? &rt->dst : NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)2036.36%444.44%
Hannes Frederic Sowa1120.00%111.11%
Martin KaFai Lau1018.18%111.11%
Wei Wang1018.18%111.11%
Nicolas Dichtel23.64%111.11%
Steffen Klassert23.64%111.11%
Total55100.00%9100.00%


/* Validate a clone (pcpu/uncached) dst by checking both its own expiry
 * and its parent route via rt6_check().
 */
static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (__rt6_check_expired(rt))
		return NULL;
	if (rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK)
		return NULL;
	if (!rt6_check((struct rt6_info *)(rt->dst.from), cookie))
		return NULL;

	return &rt->dst;
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau60100.00%2100.00%
Total60100.00%2100.00%


/* dst_ops->check hook: revalidate a cached IPv6 dst.  Clones (percpu
 * copies and uncached RTF_CACHE entries with a parent) are checked
 * through their parent; everything else directly.
 */
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7288.89%457.14%
Wei Wang78.64%114.29%
Linus Torvalds (pre-git)11.23%114.29%
Hannes Frederic Sowa11.23%114.29%
Total81100.00%7100.00%


/* dst_ops->negative_advice hook: the caller reports the dst performed
 * badly.  Expired cache entries are deleted; non-cache entries are
 * simply released so the caller re-looks-up.  Returns the dst to keep
 * using, or NULL to force a fresh lookup.
 */
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (!rt)
		return NULL;

	if (!(rt->rt6i_flags & RTF_CACHE)) {
		dst_release(dst);
		return NULL;
	}

	if (rt6_check_expired(rt)) {
		ip6_del_rt(rt);
		return NULL;
	}

	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5270.27%375.00%
Hideaki Yoshifuji / 吉藤英明2229.73%125.00%
Total74100.00%4100.00%


/* dst_ops->link_failure hook: sending on this dst failed.  Report
 * unreachability to the sender, drop an expired-style cache entry, or
 * bump the fib6 node serial for a default route so cached lookups are
 * invalidated.
 */
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* dst_hold_safe() guards against a concurrent free. */
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(rt);
		} else {
			struct fib6_node *fn;

			rcu_read_lock();
			fn = rcu_dereference(rt->rt6i_node);
			/* Invalidating the node serial forces re-lookup. */
			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
				fn->fn_sernum = -1;
			rcu_read_unlock();
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7163.39%233.33%
Wei Wang2724.11%233.33%
Hannes Frederic Sowa119.82%116.67%
Eric Dumazet32.68%116.67%
Total112100.00%6100.00%


/* Record a new path MTU on @rt and (re)arm its expiry timer using the
 * per-netns ip6_rt_mtu_expires interval.
 */
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt,
			   dev_net(rt->dst.dev)->ipv6.sysctl.ip6_rt_mtu_expires);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau5196.23%150.00%
Alexey Kuznetsov23.77%150.00%
Total53100.00%2100.00%


static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt) { return !(rt->rt6i_flags & RTF_CACHE) && (rt->rt6i_flags & RTF_PCPU || rcu_access_pointer(rt->rt6i_node)); }

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau3491.89%150.00%
Wei Wang38.11%150.00%
Total37100.00%2100.00%


/* Apply a learned path MTU @mtu to @dst.  The addresses are taken from
 * the packet header @iph when available, else from the socket @sk.
 * Fib-owned routes get an RTF_CACHE clone carrying the MTU; clones are
 * updated in place.  No-op for local routes, locked MTUs, or an MTU
 * that would not shrink the path.
 */
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	/* A PMTU message is proof the neighbour is reachable. */
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */
			ip6_ins_rt(nrt6);
			/* Release the reference taken in
			 * ip6_rt_cache_alloc()
			 */
			dst_release(&nrt6->dst);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau8237.79%222.22%
Julian Anastasov7233.18%111.11%
Alexey Kuznetsov2511.52%111.11%
David S. Miller156.91%222.22%
Xin Long104.61%111.11%
Wei Wang94.15%111.11%
Shirley Ma41.84%111.11%
Total217100.00%9100.00%


/* dst_ops->update_pmtu hook: thin wrapper extracting the IPv6 header
 * (when a packet is available) for __ip6_rt_update_pmtu().
 */
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	const struct ipv6hdr *iph = skb ? ipv6_hdr(skb) : NULL;

	__ip6_rt_update_pmtu(dst, sk, iph, mtu);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau42100.00%1100.00%
Total42100.00%1100.00%


/* Update the path MTU for the flow described by the packet in @skb
 * (typically from an ICMPv6 Packet Too Big): look up the matching
 * output route and apply @mtu (network byte order) to it.
 */
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	/* Fall back to a reply-direction mark when none was given. */
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller13083.33%337.50%
Lorenzo Colitti2012.82%225.00%
Hideaki Yoshifuji / 吉藤英明31.92%112.50%
Martin KaFai Lau21.28%112.50%
Alexey Kuznetsov10.64%112.50%
Total156100.00%8100.00%

EXPORT_SYMBOL_GPL(ip6_update_pmtu);
/* Socket-scoped PMTU update: apply the new MTU for @sk's flow, then,
 * if the socket's cached dst has become invalid, refresh it (datagram
 * sockets only, and only when we can take the socket lock).
 */
void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	/* Can't touch a user-owned socket; v4-mapped flows are IPv4's job. */
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau7363.48%125.00%
David S. Miller3732.17%125.00%
Lorenzo Colitti43.48%125.00%
Alexey Kuznetsov10.87%125.00%
Total115100.00%4100.00%

EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */

/* Flow key for redirect processing: the ordinary flowi6 plus the
 * address of the router that sent the ICMPv6 redirect, so
 * __ip6_route_redirect() can verify the redirect came from the
 * next hop actually in use.
 */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};
/* Lookup used when processing an ICMPv6 redirect: find the route whose
 * gateway matches the router that sent the redirect (per RFC 4861,
 * redirects are only valid from the current next hop).  Returns the
 * matched route, or the null entry, with a reference held.
 */
static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		/* The redirect must come from the route's own gateway. */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
			continue;
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	dst_hold(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong21983.91%120.00%
Martin KaFai Lau2810.73%240.00%
David Ahern134.98%120.00%
Alexander Alemayhu10.38%120.00%
Total261100.00%5100.00%

;
static struct dst_entry *ip6_route_redirect(struct net *net, const struct flowi6 *fl6, const struct in6_addr *gateway) { int flags = RT6_LOOKUP_F_HAS_SADDR; struct ip6rd_flowi rdfl; rdfl.fl6 = *fl6; rdfl.gateway = *gateway; return fib6_rule_lookup(net, &rdfl.fl6, flags, __ip6_route_redirect); }

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong63100.00%1100.00%
Total63100.00%1100.00%


/* Process an ICMPv6 redirect for the flow described by the packet in
 * @skb: find the affected route and update its next hop.
 */
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	/* The outer IPv6 source is the router that sent the redirect. */
	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller11580.99%233.33%
Duan Jiong96.34%116.67%
Lorenzo Colitti96.34%116.67%
Julian Anastasov64.23%116.67%
Hideaki Yoshifuji / 吉藤英明32.11%116.67%
Total142100.00%6100.00%

EXPORT_SYMBOL_GPL(ip6_redirect);
/* Variant of ip6_redirect() for redirect messages whose target/dest
 * come from the ND redirect message itself rather than an embedded
 * packet header.
 */
void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	/* Destination from the redirect message; our address as source. */
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong12788.19%250.00%
Lorenzo Colitti117.64%125.00%
Julian Anastasov64.17%125.00%
Total144100.00%4100.00%


/* Socket-scoped redirect handling: process the redirect in @skb using
 * the socket's netns, bound device, mark and uid.
 */
void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct net *net = sock_net(sk);

	ip6_redirect(skb, net, sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3389.19%150.00%
Lorenzo Colitti410.81%150.00%
Total37100.00%2100.00%

EXPORT_SYMBOL_GPL(ip6_sk_redirect);
/* dst_ops->default_advmss hook: derive the advertised MSS from the
 * route MTU, clamped between the per-netns minimum and IPV6_MAXPLEN.
 */
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net *net = dev_net(dst->dev);
	unsigned int advmss;

	advmss = dst_mtu(dst) - sizeof(struct ipv6hdr) -
		 sizeof(struct tcphdr);

	if (advmss < net->ipv6.sysctl.ip6_rt_min_advmss)
		advmss = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (advmss > IPV6_MAXPLEN - sizeof(struct tcphdr))
		advmss = IPV6_MAXPLEN;

	return advmss;
}

Contributors

PersonTokensPropCommitsCommitProp
Al Viro4445.36%116.67%
David S. Miller3131.96%116.67%
Daniel Lezcano1414.43%233.33%
Shirley Ma77.22%116.67%
Hideaki Yoshifuji / 吉藤英明11.03%116.67%
Total97100.00%6100.00%


static unsigned int ip6_mtu(const struct dst_entry *dst) { const struct rt6_info *rt = (const struct rt6_info *)dst; unsigned int mtu = rt->rt6i_pmtu; struct inet6_dev *idev; if (mtu) goto out; mtu = dst_metric_raw(dst, RTAX_MTU); if (mtu) goto out; mtu = IPV6_MIN_MTU; rcu_read_lock(); idev = __in6_dev_get(dst->dev); if (idev) mtu = idev->cnf.mtu6; rcu_read_unlock(); out: mtu = min_t(unsigned int, mtu, IP6_MAX_MTU); return mtu - lwtunnel_headroom(dst->lwtstate, mtu); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller4336.13%116.67%
Martin KaFai Lau3025.21%116.67%
Steffen Klassert1915.97%233.33%
Roopa Prabhu1411.76%116.67%
Eric Dumazet1310.92%116.67%
Total119100.00%6100.00%


/* Allocate a standalone dst for sending an ICMPv6 message on @dev for
 * flow @fl6, bypassing the routing tables.  The result is passed
 * through xfrm_lookup().  Returns the dst or an ERR_PTR.
 */
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.output  = ip6_output;
	rt->rt6i_gateway  = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	/* idev reference is transferred to the rt here. */
	rt->rt6i_idev     = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_ifdown() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller8645.03%521.74%
Hideaki Yoshifuji / 吉藤英明3116.23%521.74%
Zheng Yan2412.57%14.35%
Gao Feng115.76%14.35%
Daniel Lezcano84.19%28.70%
Julian Anastasov84.19%14.35%
Patrick McHardy73.66%14.35%
Eric Dumazet52.62%14.35%
Benjamin Thery31.57%14.35%
Shirley Ma31.57%14.35%
Wei Wang21.05%14.35%
Alexey Dobriyan10.52%14.35%
Li RongQing10.52%14.35%
Kazunori Miyazawa10.52%14.35%
Total191100.00%23100.00%


/* dst_ops->gc hook: garbage-collect IPv6 routing cache entries for the
 * netns owning @ops.  Runs the fib6 GC unless the entry count is under
 * the limit and the minimum interval has not elapsed.  The GC "expire"
 * interval decays exponentially between runs.  Returns nonzero if the
 * table is still over rt_max_size (allocation should fail).
 */
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	/* GC made good progress: reset the aggressiveness. */
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	/* Exponential decay of the expire interval between GC runs. */
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano6838.64%531.25%
Linus Torvalds (pre-git)3821.59%318.75%
Benjamin Thery3620.45%16.25%
Eric Dumazet179.66%16.25%
Alexey Dobriyan105.68%16.25%
Randy Dunlap21.14%16.25%
Michal Kubeček21.14%212.50%
Arnaldo Carvalho de Melo21.14%16.25%
Li RongQing10.57%16.25%
Total176100.00%16100.00%


static int ip6_convert_metrics(struct mx6_config *mxc, const struct fib6_config *cfg) { bool ecn_ca = false; struct nlattr *nla; int remaining; u32 *mp; if (!cfg->fc_mx) return 0; mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL); if (unlikely(!mp)) return -ENOMEM; nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) { int type = nla_type(nla); u32 val; if (!type) continue; if (unlikely(type > RTAX_MAX)) goto err; if (type == RTAX_CC_ALGO) { char tmp[TCP_CA_NAME_MAX]; nla_strlcpy(tmp, nla, sizeof(tmp)); val = tcp_ca_get_key_by_name(tmp, &ecn_ca); if (val == TCP_CA_UNSPEC) goto err; } else { val = nla_get_u32(nla); } if (type == RTAX_HOPLIMIT && val > 255) val = 255; if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK)) goto err; mp[type - 1] = val; __set_bit(type - 1, mxc->mx_valid); } if (ecn_ca) { __set_bit(RTAX_FEATURES - 1, mxc->mx_valid); mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA; } mxc->mx = mp; return 0; err: kfree(mp); return -EINVAL; }

Contributors

PersonTokensPropCommitsCommitProp
Florian Westphal13953.05%114.29%
Daniel Borkmann10841.22%457.14%
Paolo Abeni145.34%114.29%
Ian Morris10.38%114.29%
Total262100.00%7100.00%


static struct rt6_info *ip6_nh_lookup_table(struct net *net, struct fib6_config *cfg, const struct in6_addr *gw_addr) { struct flowi6 fl6 = { .flowi6_oif = cfg->fc_ifindex, .daddr = *gw_addr, .saddr = cfg->fc_prefsrc, }; struct fib6_table *table; struct rt6_info *rt; int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE; table = fib6_get_table(net, cfg->fc_table); if (!table) return NULL; if (!ipv6_addr_any(&cfg->fc_prefsrc)) flags |= RT6_LOOKUP_F_HAS_SADDR; rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags); /* if table lookup failed, fall back to full lookup */ if (rt == net->ipv6.ip6_null_entry) { ip6_rt_put(rt); rt = NULL; } return rt; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern14399.31%266.67%
Paolo Abeni10.69%133.33%
Total144100.00%3100.00%


/* Build (but do not insert) a rt6_info from a fib6_config.
 *
 * Validates the config, resolves the egress device/idev (taking references
 * on them), sets up reject routes, lwtunnel state and gateway resolution.
 * On success returns the new route with dev/idev references transferred
 * into it; on failure returns ERR_PTR(err) with all references dropped.
 * Extended-ack messages are reported through @extack.
 */
static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	/* Pick the input handler from the destination type / flags. */
	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		/* Interpose lwtunnel handlers, preserving the originals. */
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}
		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly inhibits using not link-local
			 * addresses as nexthop address.
			 * Otherwise, router will not able to send redirects.
			 * It is very good, but in some (rare!) circumstances
			 * (SIT, PtP, NBMA NOARP links) it is handy to allow
			 * some exceptions. --ANK
			 * We allow IPv4-mapped nexthops to support RFC4798-type
			 * addressing
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED))) {
				NL_SET_ERR_MSG(extack,
					       "Invalid gateway address");
				goto out;
			}

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				/* Adopt device/idev from the gateway route. */
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Egress device not specified");
			goto out;
		} else if (dev->flags & IFF_LOOPBACK) {
			NL_SET_ERR_MSG(extack,
				       "Egress device can not be loopback device for this route");
			goto out;
		}
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		/* The preferred source must be an address on the device. */
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	/* Ownership of the dev/idev references moves into the route. */
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)41529.83%1318.06%
David Ahern1359.71%56.94%
Hideaki Yoshifuji / 吉藤英明1269.06%811.11%
Roopa Prabhu997.12%45.56%
Thomas Graf856.11%22.78%
Daniel Walter644.60%11.39%
Nicolas Dichtel604.31%45.56%
Matti Vaittinen604.31%11.39%
Kamala R533.81%11.39%
Tom Herbert453.24%22.78%
Vincent Bernat392.80%11.39%
Florian Westphal372.66%22.78%
Daniel Lezcano282.01%45.56%
Maciej Żenczykowski261.87%11.39%
David S. Miller181.29%68.33%
Jiri Benc171.22%11.39%
Gao Feng141.01%11.39%
Nikola Forró141.01%11.39%
Mathew Richardson110.79%11.39%
Sabrina Dubroca100.72%11.39%
Patrick McHardy80.58%11.39%
Wei Wang80.58%22.78%
Erik Nordmark50.36%11.39%
Benjamin Thery30.22%11.39%
Alexey Dobriyan30.22%11.39%
Eric W. Biedermann20.14%22.78%
Américo Wang20.14%11.39%
Joe Perches20.14%11.39%
Eric Dumazet10.07%11.39%
Jamal Hadi Salim10.07%11.39%
Total1391100.00%72100.00%


/* Create a route from @cfg and insert it into the FIB.
 * Returns 0 on success or a negative errno; on failure the partially
 * built route and any converted metrics are released here.
 */
int ip6_route_add(struct fib6_config *cfg,
		  struct netlink_ext_ack *extack)
{
	struct mx6_config mxc = { .mx = NULL, };
	struct rt6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, extack);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto out;
	}

	err = ip6_convert_metrics(&mxc, cfg);
	if (err)
		goto out;

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);

	/* __ip6_ins_rt() copies the metrics; free our scratch array. */
	kfree(mxc.mx);

	return err;
out:
	if (rt)
		dst_release_immediate(&rt->dst);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu5845.67%213.33%
Florian Westphal3124.41%16.67%
Linus Torvalds (pre-git)129.45%426.67%
David Ahern97.09%16.67%
Wei Wang86.30%213.33%
Thomas Graf53.94%213.33%
Patrick McHardy21.57%16.67%
Mathew Richardson10.79%16.67%
Michal Kubeček10.79%16.67%
Total127100.00%15100.00%


/* Delete @rt from its FIB table under the table lock and drop the
 * caller's reference.  Returns 0 or a negative errno (-ENOENT for the
 * undeletable null entry).  Consumes one reference on @rt in all cases.
 */
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
	int err;
	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	write_unlock_bh(&table->tb6_lock);

out:
	ip6_rt_put(rt);
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3030.30%425.00%
Thomas Graf1919.19%318.75%
Gao Feng1414.14%16.25%
Daniel Lezcano1212.12%16.25%
Patrick McHardy88.08%16.25%
Herbert Xu55.05%16.25%
Mathew Richardson33.03%16.25%
Hideaki Yoshifuji / 吉藤英明33.03%16.25%
David S. Miller33.03%16.25%
Américo Wang11.01%16.25%
Jamal Hadi Salim11.01%16.25%
Total99100.00%16100.00%


/* Convenience wrapper around __ip6_del_rt() with default netlink info
 * (notifications go to the route's own namespace).  Consumes a reference
 * on @rt.
 */
int ip6_del_rt(struct rt6_info *rt)
{
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};
	return __ip6_del_rt(rt, &info);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1745.95%116.67%
Denis V. Lunev1232.43%233.33%
Hideaki Yoshifuji / 吉藤英明38.11%116.67%
David S. Miller38.11%116.67%
Daniel Lezcano25.41%116.67%
Total37100.00%6100.00%


/* Delete @rt and, when cfg->fc_delete_all_nh is set, all of its multipath
 * siblings as well.  A single RTM_DELROUTE notification covering every hop
 * is built before deletion and sent after the table lock is released
 * (fib6_del()'s own per-route notification is suppressed in that case).
 * Consumes one reference on @rt.
 */
static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.ip6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				/* fall back to per-route notifications */
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	write_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern23488.30%266.67%
Américo Wang3111.70%133.33%
Total265100.00%3100.00%


/* Find and delete the route matching @cfg (destination/source prefix,
 * ifindex, gateway, metric, protocol).  With RTF_GATEWAY only the single
 * matching hop is removed; otherwise the whole multipath group may be.
 * Returns 0 on success or -ESRCH when nothing matched.
 */
static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct fib6_table *table;
	struct fib6_node *fn;
	struct rt6_info *rt;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	read_lock_bh(&table->tb6_lock);

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len);

	if (fn) {
		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
			/* Only delete cached clones when explicitly asked. */
			if ((rt->rt6i_flags & RTF_CACHE) &&
			    !(cfg->fc_flags & RTF_CACHE))
				continue;
			if (cfg->fc_ifindex &&
			    (!rt->dst.dev ||
			     rt->dst.dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			/* Hold the route; deletion retakes the write lock. */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	read_unlock_bh(&table->tb6_lock);

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)13848.08%933.33%
Thomas Graf6020.91%311.11%
David Ahern3110.80%311.11%
Martin KaFai Lau206.97%13.70%
Mantas M155.23%13.70%
David S. Miller82.79%27.41%
Daniel Lezcano62.09%27.41%
Eric Dumazet31.05%13.70%
Hideaki Yoshifuji / 吉藤英明20.70%13.70%
Stephen Rothwell10.35%13.70%
Jamal Hadi Salim10.35%13.70%
Mathew Richardson10.35%13.70%
James Morris10.35%13.70%
Total287100.00%27100.00%


/* Process a received NDISC Redirect (RFC 4861 section 8) for the route
 * @dst was looked up on: validate the message and its options, update the
 * neighbour cache for the new first hop, install a RTF_DYNAMIC cached
 * route toward the destination and fire a netevent for interested parties.
 * Silently drops the redirect on any validation failure.
 */
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	/* target == dest means the destination itself is on-link;
	 * otherwise the target must be a link-local router address.
	 */
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	/* Routers and interfaces configured to ignore redirects drop them. */
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_protocol = RTPROT_REDIRECT;
	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	if (ip6_ins_rt(nrt))
		goto out_release;

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

	/* A superseded cached route is removed in favour of the new one. */
	if (rt->rt6i_flags & RTF_CACHE) {
		rt = (struct rt6_info *) dst_clone(&rt->dst);
		ip6_del_rt(rt);
	}

out_release:
	/* Release the reference taken in
	 * ip6_rt_cache_alloc()
	 */
	dst_release(&nrt->dst);

out:
	neigh_release(neigh);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller30356.11%518.52%
Hideaki Yoshifuji / 吉藤英明7714.26%622.22%
Linus Torvalds (pre-git)7413.70%518.52%
Tom Tucker264.81%13.70%
Alexander Aring142.59%13.70%
Wei Wang122.22%13.70%
Julian Anastasov91.67%13.70%
Simon Horman61.11%13.70%
Xin Long61.11%13.70%
Matthias Schiffer30.56%13.70%
Martin KaFai Lau30.56%13.70%
Daniel Lezcano30.56%13.70%
Alexey Dobriyan20.37%13.70%
Eric Dumazet20.37%13.70%
Total540100.00%27100.00%

/*
 *	Misc support functions
 */
/* Link @rt to its parent route @from: take a reference on the parent's
 * dst, record it in rt->dst.from and share (read-only) its metrics.
 * RTF_EXPIRES is cleared because expiry now follows the parent.
 */
static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
{
	/* must not already have a parent */
	BUG_ON(from->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
}

Contributors

PersonTokensPropCommitsCommitProp
Martin KaFai Lau69100.00%1100.00%
Total69100.00%1100.00%


/* Initialize a freshly allocated route @rt as a copy/clone of @ort:
 * handlers, addresses, flags, table and lwtunnel state are duplicated,
 * references are taken on the shared idev and lwtstate, and the new route
 * is linked to @ort via rt6_set_from() so metrics are inherited.
 */
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
{
	rt->dst.input = ort->dst.input;
	rt->dst.output = ort->dst.output;
	rt->rt6i_dst = ort->rt6i_dst;
	rt->dst.error = ort->dst.error;
	rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
	rt->dst.lastuse = jiffies;
	rt->rt6i_gateway = ort->rt6i_gateway;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7143.29%526.32%
Hideaki Yoshifuji / 吉藤英明2112.80%15.26%
Ville Nuorvala127.32%15.26%
Martin KaFai Lau106.10%15.26%
Nicolas Dichtel106.10%210.53%
Thomas Graf84.88%15.26%
Gao Feng84.88%15.26%
Florian Westphal74.27%15.26%
Jiri Benc63.66%15.26%
Eric Dumazet53.05%15.26%
Alexey Dobriyan21.22%15.26%
Zheng Yan21.22%15.26%
David S. Miller10.61%15.26%
Benjamin Thery10.61%15.26%
Total164100.00%19100.00%

#ifdef CONFIG_IPV6_ROUTE_INFO
/* Find an existing RA route-information route for @prefix/@prefixlen via
 * gateway @gwaddr on @dev.  Looks in the device's l3mdev table if any,
 * otherwise RT6_TABLE_INFO.  Returns a held route or NULL.
 */
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
	int ifindex = dev->ifindex;
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, tb_id);
	if (!table)
		return NULL;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
	if (!fn)
		goto out;

	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->dst.dev->ifindex != ifindex)
			continue;
		/* only routes learned from RA route information options */
		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
			continue;
		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
			continue;
		dst_hold(&rt->dst);
		break;
	}
out:
	read_unlock_bh(&table->tb6_lock);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明14167.46%110.00%
Thomas Graf2712.92%110.00%
David Ahern2311.00%110.00%
Daniel Lezcano73.35%220.00%
Eric Dumazet52.39%220.00%
David S. Miller41.91%220.00%
Li RongQing20.96%110.00%
Total209100.00%10100.00%


static struct rt6_info *rt6_add_route_info(struct net *net, const struct in6_addr *prefix, int prefixlen, const struct in6_addr *gwaddr, struct net_device *dev, unsigned int pref) { struct fib6_config cfg = { .fc_metric = IP6_RT_PRIO_USER, .fc_ifindex = dev->ifindex, .fc_dst_len = prefixlen, .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO | RTF_UP | RTF_PREF(pref), .fc_protocol = RTPROT_RA, .fc_nlinfo.portid = 0, .fc_nlinfo.nlh = NULL, .fc_nlinfo.nl_net = net, }; cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, cfg.fc_dst = *prefix; cfg.fc_gateway = *gwaddr; /* We should treat it as a default route if prefix length is 0. */ if (!prefixlen) cfg.fc_flags |= RTF_DEFAULT; ip6_route_add(&cfg, NULL); return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev); }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明7244.44%215.38%
Thomas Graf2817.28%17.69%
Daniel Lezcano2716.67%17.69%
David Ahern2112.96%323.08%
Xin Long53.09%17.69%
Alexey Dobriyan42.47%17.69%
Eric Dumazet31.85%215.38%
Eric W. Biedermann10.62%17.69%
Rami Rosen10.62%17.69%
Total162100.00%13100.00%

#endif
/* Find the autoconf default route via router @addr on @dev (device's
 * l3mdev table if any, otherwise RT6_TABLE_DFLT).  Returns a held route
 * or NULL.
 */
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
{
	u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), tb_id);
	if (!table)
		return NULL;

	read_lock_bh(&table->tb6_lock);
	/* Default routes all live directly at the tree root. */
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (dev == rt->dst.dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))
			break;
	}
	if (rt)
		dst_hold(&rt->dst);
	read_unlock_bh(&table->tb6_lock);
	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)8253.59%631.58%
Hideaki Yoshifuji / 吉藤英明2415.69%315.79%
Thomas Graf2214.38%15.26%
David Ahern127.84%15.26%
David S. Miller42.61%210.53%
Eric Dumazet42.61%210.53%
Li RongQing21.31%15.26%
Daniel Lezcano21.31%210.53%
James Morris10.65%15.26%
Total153100.00%19100.00%


/* Install an autoconf default route via router @gwaddr on @dev with
 * preference @pref, marking the table as holding a default router.
 * Returns the (held) route as re-looked-up by rt6_get_dflt_router(),
 * or NULL if the insert failed.
 */
struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
{
	struct fib6_config cfg = {
		.fc_table	= l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
		.fc_metric	= IP6_RT_PRIO_USER,
		.fc_ifindex	= dev->ifindex,
		.fc_flags	= RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
				  RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_protocol = RTPROT_RA,
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
	};

	cfg.fc_gateway = *gwaddr;

	if (!ip6_route_add(&cfg, NULL)) {
		struct fib6_table *table;

		/* flag lets rt6_purge_dflt_routers() skip clean tables */
		table = fib6_get_table(dev_net(dev), cfg.fc_table);
		if (table)
			table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
	}

	return rt6_get_dflt_router(gwaddr, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4629.68%423.53%
David Ahern4227.10%317.65%
Thomas Graf2314.84%15.88%
Daniel Lezcano2012.90%15.88%
Hideaki Yoshifuji / 吉藤英明149.03%317.65%
Xin Long53.23%15.88%
Alexey Dobriyan21.29%15.88%
Rami Rosen10.65%15.88%
Eric Dumazet10.65%15.88%
Eric W. Biedermann10.65%15.88%
Total155100.00%17100.00%


/* Remove every RA-learned default/addrconf route from @table, except on
 * interfaces with accept_ra == 2 (accept RAs even when forwarding).
 * The read lock must be dropped around each deletion, so the scan
 * restarts from the root after every removal.
 */
static void __rt6_purge_dflt_routers(struct fib6_table *table)
{
	struct rt6_info *rt;

restart:
	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			ip6_del_rt(rt);
			goto restart;
		}
	}
	read_unlock_bh(&table->tb6_lock);

	table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6756.78%440.00%
Lorenzo Colitti1714.41%110.00%
Thomas Graf1210.17%110.00%
David Ahern1210.17%110.00%
Hideaki Yoshifuji / 吉藤英明54.24%110.00%
Eric Dumazet32.54%110.00%
Daniel Lezcano21.69%110.00%
Total118100.00%10100.00%


/* Purge RA-learned default routers from every FIB table in @net that is
 * flagged as containing one.  Tables are walked under RCU.
 */
void rt6_purge_dflt_routers(struct net *net)
{
	struct fib6_table *table;
	struct hlist_head *head;
	unsigned int h;

	rcu_read_lock();

	for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
		head = &net->ipv6.fib_table_hash[h];
		hlist_for_each_entry_rcu(table, head, tb6_hlist) {
			if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
				__rt6_purge_dflt_routers(table);
		}
	}

	rcu_read_unlock();
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern7998.75%150.00%
Linus Torvalds (pre-git)11.25%150.00%
Total80100.00%2100.00%


static void rtmsg_to_fib6_config(struct net *net, struct in6_rtmsg *rtmsg, struct fib6_config *cfg) { memset(cfg, 0, sizeof(*cfg)); cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ? : RT6_TABLE_MAIN; cfg->fc_ifindex = rtmsg->rtmsg_ifindex; cfg->fc_metric = rtmsg->rtmsg_metric; cfg->fc_expires = rtmsg->rtmsg_info; cfg->fc_dst_len = rtmsg->rtmsg_dst_len; cfg->fc_src_len = rtmsg->rtmsg_src_len; cfg->fc_flags = rtmsg->rtmsg_flags; cfg->fc_nlinfo.nl_net = net; cfg->fc_dst = rtmsg->rtmsg_dst; cfg->fc_src = rtmsg->rtmsg_src; cfg->fc_gateway = rtmsg->rtmsg_gateway; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf10480.00%120.00%
David Ahern107.69%120.00%
Benjamin Thery75.38%120.00%
Daniel Lezcano64.62%120.00%
Alexey Dobriyan32.31%120.00%
Total130100.00%5100.00%


/* Legacy SIOCADDRT/SIOCDELRT ioctl entry point: copy the in6_rtmsg from
 * userspace, translate it to a fib6_config and add/delete the route under
 * the RTNL lock.  Requires CAP_NET_ADMIN in the net namespace.
 */
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)11577.70%545.45%
Thomas Graf1510.14%19.09%
Daniel Lezcano74.73%19.09%
Eric W. Biedermann53.38%19.09%
David Ahern42.70%19.09%
Al Viro10.68%19.09%
David S. Miller10.68%19.09%
Total148100.00%11100.00%

/*
 *	Drop the packet on the floor
 */
/* Drop @skb with an ICMPv6 destination-unreachable of the given @code,
 * bumping the appropriate no-route MIB counter (input packets with an
 * unspecified destination count as address errors instead).
 */
static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明3529.41%215.38%
Linus Torvalds (pre-git)3126.05%430.77%
Denis V. Lunev2117.65%17.69%
Lv Liangying2016.81%17.69%
Thomas Graf43.36%17.69%
Arnaldo Carvalho de Melo43.36%215.38%
Eric Dumazet32.52%17.69%
Brian Haley10.84%17.69%
Total119100.00%13100.00%


/* dst input handler for unreachable routes: drop with ICMPv6 "no route". */
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


/* dst output handler for unreachable routes: drop with ICMPv6 "no route". */
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
James Morris1330.95%112.50%
Eric Dumazet819.05%225.00%
Dave Craig614.29%112.50%
Hideaki Yoshifuji / 吉藤英明511.90%112.50%
Eric W. Biedermann511.90%112.50%
Herbert Xu49.52%112.50%
Arnaldo Carvalho de Melo12.38%112.50%
Total42100.00%8100.00%


/* dst input handler for prohibit routes: drop with "administratively
 * prohibited".
 */
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf1990.48%150.00%
Hideaki Yoshifuji / 吉藤英明29.52%150.00%
Total21100.00%2100.00%


/* dst output handler for prohibit routes: drop with "administratively
 * prohibited".
 */
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2457.14%120.00%
Eric Dumazet819.05%240.00%
Hideaki Yoshifuji / 吉藤英明511.90%120.00%
Eric W. Biedermann511.90%120.00%
Total42100.00%5100.00%

/*
 *	Allocate a dst for local (unicast / anycast) address.
 */
/* Allocate the host route (/128) for a local unicast or anycast address
 * on @idev.  Takes a reference on @idev; the route is assigned to the
 * device's l3mdev table if any, otherwise the local table.  Returns the
 * route or ERR_PTR(-ENOMEM).
 */
struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
	struct net *net = dev_net(idev->dev);
	struct net_device *dev = idev->dev;
	struct rt6_info *rt;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;

	/* local route: the "gateway" is the address itself */
	rt->rt6i_gateway = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);

	return rt;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)7137.57%414.81%
David Ahern3619.05%414.81%
Hideaki Yoshifuji / 吉藤英明3216.93%518.52%
Alexey Kuznetsov126.35%13.70%
Daniel Lezcano115.82%27.41%
Thomas Graf84.23%13.70%
Julian Anastasov73.70%13.70%
David S. Miller63.17%622.22%
Benjamin Thery31.59%13.70%
Alexey Dobriyan21.06%13.70%
Hannes Frederic Sowa10.53%13.70%
Total189100.00%27100.00%

/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;	/* restrict to routes on this device (NULL = any) */
	struct net *net;	/* namespace being walked */
	struct in6_addr *addr;	/* the address that was removed */
};
/* fib6_clean_all() callback: clear the preferred-source address of any
 * route that still names the address being removed (optionally restricted
 * to routes on one device).  Always returns 0 so the walk never deletes
 * routes, it only edits them in place.
 */
static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
	struct arg_dev_net_ip *adni = arg;
	struct net_device *dev = adni->dev;
	struct net *net = adni->net;
	struct in6_addr *addr = adni->addr;

	/* the null entry carries no prefsrc to clear */
	if (rt == net->ipv6.ip6_null_entry)
		return 0;

	/* when a device filter is given, skip routes on other devices */
	if (dev && (void *)rt->dst.dev != dev)
		return 0;

	if (ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		/* remove prefsrc entry; plen 0 marks it unset */
		rt->rt6i_prefsrc.plen = 0;
	}
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter11296.55%133.33%
David S. Miller43.45%266.67%
Total116100.00%3100.00%


/* Called when address @ifp is deleted: scrub it from the prefsrc field of
 * every route in the namespace that still references it.
 */
void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Walter62100.00%1100.00%
Total62100.00%1100.00%

/* flag combination marking a default router learned via RA */
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
/* flag combination marking a cached route that goes through a gateway */
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Remove routers and update dst entries when a gateway turns into a host. */
/* fib6_clean_all() callback: flag for deletion (return -1) any RA default
 * router route or cached gateway route whose gateway equals @arg, i.e. an
 * address that just became a local host address.
 */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;
	unsigned int flags = rt->rt6i_flags;

	/* only RA routers and cached gateway routes are affected */
	if ((flags & RTF_RA_ROUTER) != RTF_RA_ROUTER &&
	    (flags & RTF_CACHE_GATEWAY) != RTF_CACHE_GATEWAY)
		return 0;

	if (!ipv6_addr_equal(gateway, &rt->rt6i_gateway))
		return 0;

	/* -1 tells fib6_clean_all() to delete this route */
	return -1;
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong74100.00%1100.00%
Total74100.00%1100.00%


/* Remove all routes in @net that still use @gateway as a nexthop after
 * that address became local (see fib6_clean_tohost()).
 */
void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}

Contributors

PersonTokensPropCommitsCommitProp
Duan Jiong24100.00%1100.00%
Total24100.00%1100.00%

/* walk argument for fib6_ifdown() */
struct arg_dev_net {
	struct net_device *dev;	/* device going down (NULL = all devices) */
	struct net *net;
};

/* called with write lock held for table with rt */
/* fib6_clean_all() callback on interface down: flag for deletion (-1)
 * routes on the device, keeping multipath members alive while the device
 * is only down (not unregistering) and linkdown routes are to be ignored
 * rather than removed.
 */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if ((rt->dst.dev == dev || !dev) &&
	    rt != adn->net->ipv6.ip6_null_entry &&
	    (rt->rt6i_nsiblings == 0 ||
	     (dev && netdev_unregistering(dev)) ||
	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
		return -1;

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3538.46%114.29%
David Ahern2628.57%228.57%
Daniel Lezcano1617.58%114.29%
Stephen Hemminger1010.99%114.29%
David S. Miller44.40%228.57%
Total91100.00%7100.00%


/* Remove FIB routes (and flush uncached dsts) that reference @dev, which
 * is going down.  A NULL @dev matches routes on every device.
 */
void rt6_ifdown(struct net *net, struct net_device *dev)
{
	struct arg_dev_net adn = {
		.dev = dev,
		.net = net,
	};

	fib6_clean_all(net, fib6_ifdown, &adn);
	if (dev)
		rt6_uncached_list_flush_dev(net, dev);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano2548.08%228.57%
Linus Torvalds (pre-git)1528.85%228.57%
Martin KaFai Lau713.46%114.29%
Eric W. Biedermann47.69%114.29%
Thomas Graf11.92%114.29%
Total52100.00%7100.00%

/* walk argument for rt6_mtu_change_route() */
struct rt6_mtu_change_arg {
	struct net_device *dev;	/* device whose MTU changed */
	unsigned int mtu;	/* its new MTU */
};
static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg) { struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg; struct inet6_dev *idev; /* In IPv6 pmtu discovery is not optional, so that RTAX_MTU lock cannot disable it. We still use this lock to block changes caused by addrconf/ndisc. */ idev = __in6_dev_get(arg->dev); if (!idev) return 0; /* For administrative MTU increase, there is no way to discover IPv6 PMTU increase, so PMTU increase should be updated here. Since RFC 1981 doesn't include administrative MTU increase update PMTU increase is a MUST. (i.e. jumbo frame) */ /* If new MTU is less than route PMTU, this new MTU will be the lowest MTU in the path, update the route PMTU to reflect PMTU decreases; if new MTU is greater than route PMTU, and the old MTU is the lowest MTU in the path, update the route PMTU to reflect the increase. In this case if the other nodes' MTU also have the lowest MTU, TOO BIG MESSAGE will be lead to PMTU discovery. */ if (rt->dst.dev == arg->dev && dst_metric_raw(&rt->dst, RTAX_MTU) && !dst_metric_locked(&rt->dst, RTAX_MTU)) { if (rt->rt6i_flags & RTF_CACHE) { /* For RTF_CACHE with rt6i_pmtu == 0 * (i.e. a redirected route), * the metrics of its rt->dst.from has already * been updated. */ if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu) rt->rt6i_pmtu = arg->mtu; } else if (dst_mtu(&rt->dst) >= arg->mtu || (dst_mtu(&rt->dst) < arg->mtu && dst_mtu(&rt->dst) == idev->cnf.mtu6)) { dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu); } } return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5128.02%527.78%
Shirley Ma4725.82%15.56%
Martin KaFai Lau3820.88%15.56%
Alexey Kuznetsov126.59%211.11%
David S. Miller105.49%316.67%
Maciej Żenczykowski105.49%15.56%
Linus Torvalds73.85%15.56%
Herbert Xu31.65%15.56%
Simon Arlott21.10%15.56%
Jim Paris10.55%15.56%
Alexander Alemayhu10.55%15.56%
Total182100.00%18100.00%


void rt6_mtu_change(struct net_device *dev, unsigned int mtu) { struct rt6_mtu_change_arg arg = { .dev = dev, .mtu = mtu, }; fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3172.09%555.56%
Thomas Graf613.95%111.11%
Hideaki Yoshifuji / 吉藤英明36.98%111.11%
Daniel Lezcano24.65%111.11%
Eric Dumazet12.33%111.11%
Total43100.00%9100.00%

/* Validation policy for IPv6 RTM_* route attributes from userspace. */
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]           = { .len = sizeof(struct in6_addr) },
	[RTA_OIF]               = { .type = NLA_U32 },
	[RTA_IIF]               = { .type = NLA_U32 },
	[RTA_PRIORITY]          = { .type = NLA_U32 },
	[RTA_METRICS]           = { .type = NLA_NESTED },
	[RTA_MULTIPATH]         = { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]              = { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]        = { .type = NLA_U16 },
	[RTA_ENCAP]             = { .type = NLA_NESTED },
	[RTA_EXPIRES]           = { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};

/* Translate an RTM_NEWROUTE/RTM_DELROUTE netlink request into a
 * struct fib6_config.  Returns 0 on success or a negative errno;
 * cfg->fc_mx / fc_mp keep pointers into the request skb, so the skb
 * must outlive the config.
 */
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  NULL);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	/* All "unreachable-like" route types become RTF_REJECT routes;
	 * the specific type is recovered later from dst.error.
	 */
	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		/* Attribute may carry only prefix-length worth of bytes. */
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		/* Unknown router-preference values fall back to MEDIUM. */
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf23433.43%14.76%
Linus Torvalds (pre-git)15622.29%314.29%
Nicolas Dichtel598.43%314.29%
Xin Long527.43%14.76%
David Ahern517.29%314.29%
Lubomir Rintel466.57%14.76%
Roopa Prabhu355.00%14.76%
Daniel Walter162.29%14.76%
Maciej Żenczykowski142.00%14.76%
Martin KaFai Lau142.00%14.76%
Benjamin Thery101.43%14.76%
Jiri Benc60.86%14.76%
Hideaki Yoshifuji / 吉藤英明30.43%14.76%
Eric W. Biedermann20.29%14.76%
Johannes Berg20.29%14.76%
Total700100.00%21100.00%

/* Per-nexthop scratch entry used while building a multipath route
 * before insertion (see ip6_route_multipath_add()).
 */
struct rt6_nh {
	struct rt6_info *rt6_info;
	struct fib6_config r_cfg;
	struct mx6_config mxc;
	struct list_head next;
};

/* Warn about every nexthop of a failed multipath replace so the admin
 * can audit which routes actually ended up installed.
 */
static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu3466.67%133.33%
Nicolas Dichtel1631.37%133.33%
David Ahern11.96%133.33%
Total51100.00%3100.00%


static int ip6_route_info_append(struct list_head *rt6_nh_list, struct rt6_info *rt, struct fib6_config *r_cfg) { struct rt6_nh *nh; int err = -EEXIST; list_for_each_entry(nh, rt6_nh_list, next) { /* check if rt6_info already exists */ if (rt6_duplicate_nexthop(nh->rt6_info, rt)) return err; } nh = kzalloc(sizeof(*nh), GFP_KERNEL); if (!nh) return -ENOMEM; nh->rt6_info = rt; err = ip6_convert_metrics(&nh->mxc, r_cfg); if (err) { kfree(nh); return err; } memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg)); list_add_tail(&nh->next, rt6_nh_list); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu13797.86%150.00%
David Ahern32.14%150.00%
Total140100.00%2100.00%


static void ip6_route_mpath_notify(struct rt6_info *rt, struct rt6_info *rt_last, struct nl_info *info, __u16 nlflags) { /* if this is an APPEND route, then rt points to the first route * inserted and rt_last points to last route inserted. Userspace * wants a consistent dump of the route which starts at the first * nexthop. Since sibling routes are always added at the end of * the list, find the first sibling of the last route appended */ if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) { rt = list_first_entry(&rt_last->rt6i_siblings, struct rt6_info, rt6i_siblings); } if (rt) inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags); }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern71100.00%1100.00%
Total71100.00%1100.00%


/* Insert a multipath route: parse each RTA_MULTIPATH nexthop into an
 * rt6_info, insert them one by one, send a single combined
 * notification, and unwind already-inserted nexthops on error.
 */
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct rt6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct rt6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * rt6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		rt = ip6_route_info_create(&r_cfg, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}

		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
		if (err) {
			dst_release_immediate(&rt->dst);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		rt_last = nh->rt6_info;
		err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
		/* save reference to first route for notification */
		if (!rt_notif && !err)
			rt_notif = nh->rt6_info;

		/* nh->rt6_info is used or freed at this point, reset to NULL*/
		nh->rt6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by first new, the rest should
		 * be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->rt6_info)
			dst_release_immediate(&nh->rt6_info->dst);
		kfree(nh->mxc.mx);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu45276.35%228.57%
David Ahern11920.10%228.57%
Nicolas Dichtel183.04%114.29%
Wei Wang20.34%114.29%
Jiri Benc10.17%114.29%
Total592100.00%7100.00%


static int ip6_route_multipath_del(struct fib6_config *cfg, struct netlink_ext_ack *extack) { struct fib6_config r_cfg; struct rtnexthop *rtnh; int remaining; int attrlen; int err = 1, last_err = 0; remaining = cfg->fc_mp_len; rtnh = (struct rtnexthop *)cfg->fc_mp; /* Parse a Multipath Entry */ while (rtnh_ok(rtnh, remaining)) { memcpy(&r_cfg, cfg, sizeof(*cfg)); if (rtnh->rtnh_ifindex) r_cfg.fc_ifindex = rtnh->rtnh_ifindex; attrlen = rtnh_attrlen(rtnh); if (attrlen > 0) { struct nlattr *nla, *attrs = rtnh_attrs(rtnh); nla = nla_find(attrs, attrlen, RTA_GATEWAY); if (nla) { nla_memcpy(&r_cfg.fc_gateway, nla, 16); r_cfg.fc_flags |= RTF_GATEWAY; } } err = ip6_route_del(&r_cfg, extack); if (err) last_err = err; rtnh = rtnh_next(rtnh, &remaining); } return last_err; }

Contributors

PersonTokensPropCommitsCommitProp
Roopa Prabhu15279.58%250.00%
Nicolas Dichtel3216.75%125.00%
David Ahern73.66%125.00%
Total191100.00%4100.00%


static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct fib6_config cfg; int err; err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err; if (cfg.fc_mp) return ip6_route_multipath_del(&cfg, extack); else { cfg.fc_delete_all_nh = 1; return ip6_route_del(&cfg, extack); } }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3136.90%220.00%
David Ahern1922.62%330.00%
Thomas Graf1922.62%220.00%
Nicolas Dichtel1315.48%110.00%
Patrick McHardy11.19%110.00%
Roopa Prabhu11.19%110.00%
Total84100.00%10100.00%


static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { struct fib6_config cfg; int err; err = rtm_to_fib6_config(skb, nlh, &cfg, extack); if (err < 0) return err; if (cfg.fc_mp) return ip6_route_multipath_add(&cfg, extack); else return ip6_route_add(&cfg, extack); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3140.79%330.00%
Thomas Graf1925.00%220.00%
Nicolas Dichtel1317.11%110.00%
David Ahern1114.47%220.00%
Patrick McHardy11.32%110.00%
Roopa Prabhu11.32%110.00%
Total76100.00%10100.00%


static size_t rt6_nlmsg_size(struct rt6_info *rt) { int nexthop_len = 0; if (rt->rt6i_nsiblings) { nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */ + NLA_ALIGN(sizeof(struct rtnexthop)) + nla_total_size(16) /* RTA_GATEWAY */ + lwtunnel_get_encap_size(rt->dst.lwtstate); nexthop_len *= rt->rt6i_nsiblings; } return NLMSG_ALIGN(sizeof(struct rtmsg)) + nla_total_size(16) /* RTA_SRC */ + nla_total_size(16) /* RTA_DST */ + nla_total_size(16) /* RTA_GATEWAY */ + nla_total_size(16) /* RTA_PREFSRC */ + nla_total_size(4) /* RTA_TABLE */ + nla_total_size(4) /* RTA_IIF */ + nla_total_size(4) /* RTA_OIF */ + nla_total_size(4) /* RTA_PRIORITY */ + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */ + nla_total_size(sizeof(struct rta_cacheinfo)) + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */ + nla_total_size(1) /* RTA_PREF */ + lwtunnel_get_encap_size(rt->dst.lwtstate) + nexthop_len; }

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf7245.00%114.29%
David Ahern5534.38%114.29%
Roopa Prabhu106.25%114.29%
Noriaki Takamiya85.00%114.29%
Daniel Borkmann63.75%114.29%
Lubomir Rintel63.75%114.29%
Jiri Benc31.88%114.29%
Total160100.00%7100.00%


static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt, unsigned int *flags, bool skip_oif) { if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) { *flags |= RTNH_F_LINKDOWN; if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown) *flags |= RTNH_F_DEAD; } if (rt->rt6i_flags & RTF_GATEWAY) { if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0) goto nla_put_failure; } if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD) *flags |= RTNH_F_OFFLOAD; /* not needed for multipath encoding b/c it has a rtnexthop struct */ if (!skip_oif && rt->dst.dev && nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex)) goto nla_put_failure; if (rt->dst.lwtstate && lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0) goto nla_put_failure; return 0; nla_put_failure: return -EMSGSIZE; }

Contributors

PersonTokensPropCommitsCommitProp
David Ahern16092.49%250.00%
Ido Schimmel137.51%250.00%
Total173100.00%4100.00%

/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	rtnh->rtnh_hops = 0;
	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;

	/* skip_oif=true: the ifindex already lives in the rtnexthop. */
	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
David Ahern12298.39%250.00%
Thomas Graf10.81%125.00%
Roopa Prabhu10.81%125.00%
Total124100.00%4100.00%


/* Serialize route @rt into a netlink RTM message appended to @skb.
 * @dst/@src, when non-NULL, are the exact addresses of a route lookup
 * (RTM_GETROUTE) and force full-length prefixes in the dump.
 * Returns 0 on success or -EMSGSIZE; on failure the partially-built
 * message is cancelled.
 */
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	if (rt->rt6i_flags & RTF_REJECT) {
		/* Recover the user-visible route type from dst.error,
		 * mirroring the mapping done in rtm_to_fib6_config().
		 */
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	} else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_flags & RTF_ANYCAST)
		rtm->rtm_type = RTN_ANYCAST;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			/* ip6mr_get_route() fills the message itself:
			 * 0 means done, <0 is a hard error, >0 falls
			 * through to the normal cacheinfo tail below.
			 */
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;

		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	/* A per-route PMTU overrides the generic RTAX_MTU metric. */
	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)34139.74%715.91%
David Ahern10211.89%49.09%
David S. Miller738.51%49.09%
Hideaki Yoshifuji / 吉藤英明627.23%511.36%
Thomas Graf546.29%36.82%
Nicolas Dichtel475.48%24.55%
Martin KaFai Lau394.55%12.27%
Daniel Walter313.61%12.27%
Patrick McHardy222.56%24.55%
Lubomir Rintel192.21%12.27%
Maciej Żenczykowski151.75%12.27%
Brian Haley131.52%24.55%
Eric Dumazet111.28%24.55%
Jiri Benc60.70%12.27%
Jamal Hadi Salim50.58%12.27%
Li Wei50.58%12.27%
Mathew Richardson40.47%12.27%
Johannes Berg30.35%12.27%
Benjamin Thery20.23%12.27%
Eric W. Biedermann20.23%12.27%
Alexey Dobriyan10.12%12.27%
Nikolay Aleksandrov10.12%12.27%
Total858100.00%44100.00%


int rt6_dump_route(struct rt6_info *rt, void *p_arg) { struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg; struct net *net = arg->net; if (rt == net->ipv6.ip6_null_entry) return 0; if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) { struct rtmsg *rtm = nlmsg_data(arg->cb->nlh); /* user wants prefix routes only */ if (rtm->rtm_flags & RTM_F_PREFIX && !(rt->rt6i_flags & RTF_PREFIX_RT)) { /* success since this is not a prefix route */ return 1; } } return rt6_fill_node(net, arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE, NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq, NLM_F_MULTI); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)6041.10%325.00%
David Ahern4027.40%216.67%
Ville Nuorvala2114.38%18.33%
Krishna Kumar149.59%18.33%
Thomas Graf42.74%18.33%
Brian Haley42.74%18.33%
Mathew Richardson10.68%18.33%
Eric W. Biedermann10.68%18.33%
Jamal Hadi Salim10.68%18.33%
Total146100.00%12100.00%


/* RTM_GETROUTE handler: perform a route lookup described by the
 * request (input-path lookup when RTA_IIF is given, output-path
 * otherwise; FIB-entry lookup when RTM_F_FIB_MATCH is set) and unicast
 * the result back to the requester.
 */
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		rcu_read_lock();

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			rcu_read_unlock();
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		if (!fibmatch)
			dst = ip6_route_input_lookup(net, dev, &fl6, flags);
		else
			dst = ip6_route_lookup(net, &fl6, 0);

		rcu_read_unlock();
	} else {
		fl6.flowi6_oif = oif;

		if (!fibmatch)
			dst = ip6_route_output(net, NULL, &fl6);
		else
			dst = ip6_route_lookup(net, &fl6, 0);
	}

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* The skb takes over the route reference. */
	skb_dst_set(skb, &rt->dst);
	if (fibmatch)
		err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)16022.82%616.22%
Roopa Prabhu13118.69%12.70%
Thomas Graf13018.54%38.11%
Shmulik Ladkani649.13%25.41%
Lorenzo Colitti567.99%25.41%
Américo Wang273.85%25.41%
David S. Miller243.42%410.81%
Hannes Frederic Sowa202.85%12.70%
James Morris172.43%12.70%
Arnd Bergmann152.14%12.70%
Alexey Dobriyan142.00%12.70%
Florian Westphal101.43%12.70%
Denis V. Lunev101.43%25.41%
David Ahern60.86%12.70%
Eric Dumazet40.57%12.70%
Hideaki Yoshifuji / 吉藤英明30.43%12.70%
Eric W. Biedermann30.43%25.41%
Daniel Lezcano20.29%12.70%
Brian Haley20.29%12.70%
Mathew Richardson10.14%12.70%
Krishna Kumar10.14%12.70%
Johannes Berg10.14%12.70%
Total701100.00%37100.00%


void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info, unsigned int nlm_flags) { struct sk_buff *skb; struct net *net = info->nl_net; u32 seq; int err; err = -ENOBUFS; seq = info->nlh ? info->nlh->nlmsg_seq : 0; skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any()); if (!skb) goto errout; err = rt6_fill_node(net, skb, rt, NULL, NULL, 0, event, info->portid, seq, nlm_flags); if (err < 0) { /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */ WARN_ON(err == -EMSGSIZE); kfree_skb(skb); goto errout; } rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE, info->nlh, gfp_any()); return; errout: if (err < 0) rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5833.72%419.05%
Thomas Graf4425.58%314.29%
Patrick McHardy2112.21%29.52%
Denis V. Lunev169.30%29.52%
Daniel Lezcano116.40%14.76%
Roopa Prabhu84.65%29.52%
Jamal Hadi Salim42.33%14.76%
Mathew Richardson31.74%14.76%
Brian Haley21.16%14.76%
Eric W. Biedermann21.16%14.76%
Pablo Neira Ayuso10.58%14.76%
David S. Miller10.58%14.76%
Krishna Kumar10.58%14.76%
Total172100.00%21100.00%


static int ip6_route_dev_notify(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = netdev_notifier_info_to_dev(ptr); struct net *net = dev_net(dev); if (!(dev->flags & IFF_LOOPBACK)) return NOTIFY_OK; if (event == NETDEV_REGISTER) { net->ipv6.ip6_null_entry->dst.dev = dev; net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES net->ipv6.ip6_prohibit_entry->dst.dev = dev; net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev); net->ipv6.ip6_blk_hole_entry->dst.dev = dev; net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev); #endif } else if (event == NETDEV_UNREGISTER && dev->reg_state != NETREG_UNREGISTERED) { /* NETDEV_UNREGISTER could be fired for multiple times by * netdev_wait_allrefs(). Make sure we only call this once. */ in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev); #ifdef CONFIG_IPV6_MULTIPLE_TABLES in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev); in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev); #endif } return NOTIFY_OK; }

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano12662.69%116.67%
Américo Wang6130.35%233.33%
Eric Dumazet62.99%116.67%
Jiri Pirko52.49%116.67%
Hideaki Yoshifuji / 吉藤英明31.49%116.67%
Total201100.00%6100.00%

/*
 *	/proc
 */

#ifdef CONFIG_PROC_FS

/* /proc/net/ipv6_route: full route table dump (open hook defined
 * elsewhere in this file).
 */
static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

/* Emit the one-line per-netns FIB statistics for /proc/net/rt6_stats. */
static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   net->ipv6.rt6_stats->fib_rt_alloc,
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3334.02%330.00%
Daniel Lezcano3232.99%220.00%
Benjamin Thery1616.49%220.00%
Randy Dunlap1515.46%220.00%
Eric Dumazet11.03%110.00%
Total97100.00%10100.00%


static int rt6_stats_seq_open(struct inode *inode, struct file *file) { return single_open_net(inode, file, rt6_stats_seq_show); }

Contributors

PersonTokensPropCommitsCommitProp
Randy Dunlap1973.08%120.00%
Pavel Emelyanov415.38%240.00%
Daniel Lezcano27.69%120.00%
Linus Torvalds (pre-git)13.85%120.00%
Total26100.00%5100.00%

static const struct file_operations rt6_stats_seq_fops = { .owner = THIS_MODULE, .open = rt6_stats_seq_open, .read = seq_read, .llseek = seq_lseek, .release = single_release_net, }; #endif /* CONFIG_PROC_FS */ #ifdef CONFIG_SYSCTL
static int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct net *net; int delay; if (!write) return -EINVAL; net = (struct net *)ctl->extra1; delay = net->ipv6.sysctl.flush_delay; proc_dointvec(ctl, write, buffer, lenp, ppos); fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4342.16%111.11%
Lucian Adrian Grijincu2423.53%111.11%
Daniel Lezcano1817.65%222.22%
Linus Torvalds65.88%111.11%
Michal Kubeček54.90%111.11%
Hideaki Yoshifuji / 吉藤英明43.92%111.11%
Joe Perches10.98%111.11%
Al Viro10.98%111.11%
Total102100.00%9100.00%

struct ctl_table ipv6_route_table_template[] = { { .procname = "flush", .data = &init_net.ipv6.sysctl.flush_delay, .maxlen = sizeof(int), .mode = 0200, .proc_handler = ipv6_sysctl_rtcache_flush }, { .procname = "gc_thresh", .data = &ip6_dst_ops_template.gc_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "max_size", .data = &init_net.ipv6.sysctl.ip6_rt_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "gc_min_interval", .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "gc_timeout", .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "gc_interval", .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "gc_elasticity", .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "mtu_expires", .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "min_adv_mss", .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "gc_min_interval_ms", .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_ms_jiffies, }, { } };
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net) { struct ctl_table *table; table = kmemdup(ipv6_route_table_template, sizeof(ipv6_route_table_template), GFP_KERNEL); if (table) { table[0].data = &net->ipv6.sysctl.flush_delay; table[0].extra1 = net; table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh; table[2].data = &net->ipv6.sysctl.ip6_rt_max_size; table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout; table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval; table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity; table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires; table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss; table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval; /* Don't export sysctls to unprivileged users */ if (net->user_ns != &init_user_ns) table[0].procname = NULL; } return table; }

Contributors

PersonTokensPropCommitsCommitProp
Hideaki Yoshifuji / 吉藤英明13458.52%111.11%
Daniel Lezcano4519.65%222.22%
Eric W. Biedermann198.30%111.11%
Alexey Dobriyan187.86%333.33%
Lucian Adrian Grijincu93.93%111.11%
Benjamin Thery41.75%111.11%
Total229100.00%9100.00%

#endif
/*
 * Per-namespace setup of core IPv6 routing state: the dst_ops, the
 * always-present null (blackhole) route template, the optional
 * prohibit/blackhole templates for policy routing, and the routing
 * sysctl defaults.
 *
 * Returns 0 on success or -ENOMEM; on failure every allocation made so
 * far is unwound via the goto chain at the bottom.
 */
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	/* Every namespace gets its own copy of the null-route template;
	 * its dst must point back at this namespace's dst_ops.
	 */
	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;	/* terminates path walks on itself */
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	/* Defaults for the net.ipv6.route.* sysctls (see the template
	 * table above); jiffies-based values are set from HZ.
	 */
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	/* IPV6_MIN_MTU minus IPv6 (40) and TCP (20) header bytes */
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

	/* Error unwind: free in reverse order of allocation. */
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano23447.08%320.00%
Peter Zijlstra12324.75%213.33%
David S. Miller489.66%213.33%
Benjamin Thery448.85%213.33%
Eric Dumazet306.04%16.67%
Vincent Bernat81.61%16.67%
Alexey Dobriyan81.61%213.33%
Denis V. Lunev10.20%16.67%
Pavel Emelyanov10.20%16.67%
Total497100.00%15100.00%


/*
 * Per-namespace teardown counterpart of ip6_route_net_init(): release
 * the template route copies and the dst entry accounting.
 */
static void __net_exit ip6_route_net_exit(struct net *net)
{
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	/* Policy-routing templates only exist with multiple tables. */
	kfree(net->ipv6.ip6_blk_hole_entry);
	kfree(net->ipv6.ip6_prohibit_entry);
#endif
	kfree(net->ipv6.ip6_null_entry);
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano4379.63%360.00%
Xiaotian Feng1018.52%120.00%
Alexey Dobriyan11.85%120.00%
Total54100.00%5100.00%


/*
 * Late per-namespace init: create the /proc/net entries once the
 * routing state from ip6_route_net_init() is in place.  Always
 * returns 0 (proc entries are best-effort).
 */
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf3879.17%150.00%
Gao Feng1020.83%150.00%
Total48100.00%2100.00%


/* Remove the /proc/net entries created by ip6_route_net_init_late(). */
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}

Contributors

PersonTokensPropCommitsCommitProp
Thomas Graf2571.43%150.00%
Gao Feng1028.57%150.00%
Total35100.00%2100.00%

/* Per-network-namespace setup/teardown of the core IPv6 routing state. */
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
/*
 * Allocate and initialize the per-namespace inet_peer tree root used by
 * IPv6.  Returns 0 on success or -ENOMEM if the root cannot be
 * allocated.
 */
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *base;

	base = kmalloc(sizeof(*base), GFP_KERNEL);
	if (!base)
		return -ENOMEM;

	inet_peer_base_init(base);
	net->ipv6.peers = base;
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller53100.00%1100.00%
Total53100.00%1100.00%


/*
 * Tear down the per-namespace IPv6 inet_peer tree: detach it from the
 * namespace first, drop all cached peers, then free the root itself.
 */
static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *base = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(base);
	kfree(base);
}

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller41100.00%2100.00%
Total41100.00%2100.00%

/* Per-namespace setup/teardown of the IPv6 inetpeer storage. */
static struct pernet_operations ipv6_inetpeer_ops = {
	.init	=	ipv6_inetpeer_init,
	.exit	=	ipv6_inetpeer_exit,
};

/* Late-stage per-namespace hooks (procfs entries). */
static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

/* Netdevice event notifier; priority is set relative to
 * ADDRCONF_NOTIFY_PRIORITY so it runs after addrconf's handler.
 */
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
/*
 * Attach init_net's special route templates to the loopback device.
 * Each template takes its own in6_dev reference on loopback.
 */
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * the loopback reference in rt6_info will not be taken, do it
	 * manually for init_net */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
  #endif
}

Contributors

PersonTokensPropCommitsCommitProp
Américo Wang101100.00%1100.00%
Total101100.00%1100.00%


/*
 * Module init for IPv6 routing: create the dst slab cache, register the
 * per-namespace operations, bring up FIB/xfrm/rules, hook up the
 * RTM_{NEW,DEL,GET}ROUTE netlink handlers and the netdevice notifier,
 * and initialize the per-CPU uncached-route lists.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * registered so far is unwound via the goto chain at the bottom
 * (labels are taken in reverse order of setup).
 */
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	/* blackhole dsts share the same slab cache as regular dsts */
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	/* Netlink route handlers; GETROUTE may run without the RTNL lock. */
	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, 0) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, 0) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL,
			    RTNL_FLAG_DOIT_UNLOCKED))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	/* Per-CPU lists of routes not inserted into the FIB. */
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}

Contributors

PersonTokensPropCommitsCommitProp
Daniel Lezcano11937.54%625.00%
Thomas Graf5918.61%312.50%
Martin KaFai Lau3811.99%14.17%
Linus Torvalds (pre-git)299.15%416.67%
David S. Miller299.15%312.50%
Eric Dumazet237.26%14.17%
Arnaud Ebalard82.52%14.17%
Hideaki Yoshifuji / 吉藤英明30.95%14.17%
Greg Rose30.95%14.17%
Florian Westphal30.95%28.33%
Benjamin Thery30.95%14.17%
Total317100.00%24100.00%


/*
 * Module teardown: undo everything set up by ip6_route_init(), largely
 * in reverse registration order (notifier and late pernet ops first,
 * slab cache last).
 */
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)1324.53%428.57%
Daniel Lezcano1120.75%214.29%
Thomas Graf611.32%17.14%
David S. Miller611.32%17.14%
Xiaotian Feng611.32%17.14%
Daniel McNeil59.43%17.14%
Benjamin Thery23.77%17.14%
Hideaki Yoshifuji / 吉藤英明23.77%17.14%
Kazunori Miyazawa11.89%17.14%
Eric W. Biederman11.89%17.14%
Total53100.00%14100.00%


Overall Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)311614.53%275.17%
Hideaki Yoshifuji / 吉藤英明223510.42%8215.71%
Martin KaFai Lau219010.21%214.02%
David S. Miller20849.72%499.39%
David Ahern19419.05%346.51%
Thomas Graf17478.14%173.26%
Roopa Prabhu11715.46%71.34%
Daniel Lezcano11615.41%214.02%
Duan Jiong5582.60%40.77%
Nicolas Dichtel4582.14%91.72%
Daniel Walter3061.43%10.19%
Jakub Sitnicki3051.42%30.57%
Hannes Frederic Sowa2901.35%81.53%
Florian Westphal2821.31%91.72%
Américo Wang2271.06%61.15%
Eric Dumazet2211.03%152.87%
Julian Anastasov2020.94%40.77%
Benjamin Thery1400.65%61.15%
Steffen Klassert1320.62%71.34%
Lorenzo Colitti1320.62%50.96%
Wei Wang1290.60%91.72%
Gao Feng1270.59%50.96%
Shmulik Ladkani1260.59%20.38%
Peter Zijlstra1230.57%20.38%
Daniel Borkmann1140.53%40.77%
Jiri Benc1070.50%81.53%
Xin Long970.45%40.77%
Alexey Kuznetsov920.43%30.57%
Alexey Dobriyan910.42%50.96%
Lubomir Rintel820.38%10.19%
Denis V. Lunev800.37%71.34%
Eric W. Biederman770.36%91.72%
Kamala R730.34%10.19%
Patrick McHardy710.33%71.34%
Randy Dunlap680.32%40.77%
Maciej Żenczykowski650.30%20.38%
Shirley Ma610.28%30.57%
Matti Vaittinen600.28%10.19%
Pavel Emelyanov480.22%71.34%
Vincent Bernat470.22%20.38%
Al Viro470.22%30.57%
Tom Herbert450.21%20.38%
James Morris420.20%30.57%
Zheng Yan390.18%10.19%
Andy Gospodarek390.18%10.19%
Ville Nuorvala330.15%20.38%
Lucian Adrian Grijincu330.15%10.19%
Tom Tucker290.14%10.19%
Brian Haley280.13%40.77%
Mathew Richardson280.13%10.19%
Herbert Xu270.13%61.15%
Paolo Abeni210.10%30.57%
Krishna Kumar200.09%20.38%
Lv Liangying200.09%10.19%
Roland Dreier170.08%10.19%
Xiaotian Feng160.07%10.19%
Kazunori Miyazawa150.07%20.38%
Arnd Bergmann150.07%10.19%
Mantas M150.07%10.19%
Jean-Mickael Guerin150.07%10.19%
Nikola Forró140.07%10.19%
Linus Torvalds140.07%30.57%
Alexander Aring140.07%10.19%
Jamal Hadi Salim140.07%20.38%
Michal Kubeček130.06%30.57%
Ido Schimmel130.06%20.38%
Joe Perches120.06%30.57%
Liping Zhang110.05%10.19%
Paul Marks100.05%10.19%
Sabrina Dubroca100.05%10.19%
Stephen Hemminger100.05%10.19%
Li RongQing100.05%40.77%
Arnaldo Carvalho de Melo100.05%30.57%
Arnaud Ebalard80.04%10.19%
Noriaki Takamiya80.04%10.19%
Erik Nordmark70.03%20.38%
Johannes Berg60.03%20.38%
Dave Craig60.03%10.19%
Simon Horman60.03%10.19%
David McCullough50.02%10.19%
Jiri Pirko50.02%10.19%
Li Wei50.02%10.19%
Daniel McNeil50.02%10.19%
Mahesh Bandewar50.02%10.19%
Madalin Bucur40.02%10.19%
Held Bernhard40.02%10.19%
Jens Rosenboom40.02%10.19%
Paul Gortmaker30.01%10.19%
Tejun Heo30.01%10.19%
Matthias Schiffer30.01%10.19%
Changli Gao30.01%10.19%
Greg Rose30.01%10.19%
Simon Arlott20.01%10.19%
Stephen Rothwell20.01%10.19%
Jiri Olsa20.01%10.19%
Min Zhang20.01%10.19%
Alexander Alemayhu20.01%10.19%
Dave Jones20.01%20.38%
Rami Rosen20.01%10.19%
Adrian Bunk20.01%20.38%
Nikolay Aleksandrov10.00%10.19%
Arjan van de Ven10.00%10.19%
Jim Paris10.00%10.19%
Ian Morris10.00%10.19%
Michael Büsch10.00%10.19%
Pablo Neira Ayuso10.00%10.19%
Total21450100.00%522100.00%
Directory: net/ipv6
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.
Created with cregit.