Release 4.14 net/ipv4/fib_semantics.c
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IPv4 Forwarding Information Base: semantics.
*
* Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/netlink.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/fib_notifier.h>
#include "fib_lookup.h"
static DEFINE_SPINLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_info_hash_size;
static unsigned int fib_info_cnt;
#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
#ifdef CONFIG_IP_ROUTE_MULTIPATH
#define for_nexthops(fi) { \
int nhsel; const struct fib_nh *nh; \
for (nhsel = 0, nh = (fi)->fib_nh; \
nhsel < (fi)->fib_nhs; \
nh++, nhsel++)
#define change_nexthops(fi) { \
int nhsel; struct fib_nh *nexthop_nh; \
for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
nhsel < (fi)->fib_nhs; \
nexthop_nh++, nhsel++)
#else /* CONFIG_IP_ROUTE_MULTIPATH */
/* Hope that gcc will optimize it to get rid of the dummy loop */
#define for_nexthops(fi) { \
int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
for (nhsel = 0; nhsel < 1; nhsel++)
#define change_nexthops(fi) { \
int nhsel; \
struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
for (nhsel = 0; nhsel < 1; nhsel++)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
#define endfor_nexthops(fi) }
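/* A minimal usage sketch of the iterators above: each for_nexthops() /
* change_nexthops() opens a block that must be closed with
* endfor_nexthops(), e.g.
*
*	for_nexthops(fi) {
*		if (nh->nh_flags & RTNH_F_DEAD)
*			continue;
*	} endfor_nexthops(fi);
*
* With CONFIG_IP_ROUTE_MULTIPATH the body runs once per fi->fib_nhs entry;
* without it, it runs exactly once on fi->fib_nh[0].
*/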
const struct fib_prop fib_props[RTN_MAX + 1] = {
[RTN_UNSPEC] = {
.error = 0,
.scope = RT_SCOPE_NOWHERE,
},
[RTN_UNICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_LOCAL] = {
.error = 0,
.scope = RT_SCOPE_HOST,
},
[RTN_BROADCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
},
[RTN_ANYCAST] = {
.error = 0,
.scope = RT_SCOPE_LINK,
},
[RTN_MULTICAST] = {
.error = 0,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_BLACKHOLE] = {
.error = -EINVAL,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_UNREACHABLE] = {
.error = -EHOSTUNREACH,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_PROHIBIT] = {
.error = -EACCES,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_THROW] = {
.error = -EAGAIN,
.scope = RT_SCOPE_UNIVERSE,
},
[RTN_NAT] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
},
[RTN_XRESOLVE] = {
.error = -EINVAL,
.scope = RT_SCOPE_NOWHERE,
},
};
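/* A rough reading of the table above: fib_props[] is indexed by route type
* (RTN_*). .error is what a FIB lookup returns when it hits a route of that
* type (0 means a usable route; e.g. -EHOSTUNREACH for RTN_UNREACHABLE,
* -EACCES for RTN_PROHIBIT), and .scope is, loosely, the widest scope a
* route of that type may be configured with, enforced when the route is
* created.
*/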
static void rt_fibinfo_free(struct rtable __rcu **rtp)
{
struct rtable *rt = rcu_dereference_protected(*rtp, 1);
if (!rt)
return;
/* RCU_INIT_POINTER(*rtp, NULL) is not even needed here,
* because we waited for an RCU grace period before calling
* free_fib_info_rcu().
*/
dst_dev_put(&rt->dst);
dst_release_immediate(&rt->dst);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 40 | 81.63% | 1 | 33.33% |
Wei Wang | 9 | 18.37% | 2 | 66.67% |
Total | 49 | 100.00% | 3 | 100.00% |
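/* For orientation: nh->nh_exceptions (freed below) is an FNHE_HASH_SIZE
* bucket hash of fib_nh_exception entries, which cache per-destination
* state such as PMTU values and learnt redirect gateways; each entry may
* also hold cached input/output routes, released via rt_fibinfo_free().
*/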
static void free_nh_exceptions(struct fib_nh *nh)
{
struct fnhe_hash_bucket *hash;
int i;
hash = rcu_dereference_protected(nh->nh_exceptions, 1);
if (!hash)
return;
for (i = 0; i < FNHE_HASH_SIZE; i++) {
struct fib_nh_exception *fnhe;
fnhe = rcu_dereference_protected(hash[i].chain, 1);
while (fnhe) {
struct fib_nh_exception *next;
next = rcu_dereference_protected(fnhe->fnhe_next, 1);
rt_fibinfo_free(&fnhe->fnhe_rth_input);
rt_fibinfo_free(&fnhe->fnhe_rth_output);
kfree(fnhe);
fnhe = next;
}
}
kfree(hash);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 90 | 73.77% | 2 | 40.00% |
Eric Dumazet | 23 | 18.85% | 2 | 40.00% |
Timo Teräs | 9 | 7.38% | 1 | 20.00% |
Total | 122 | 100.00% | 5 | 100.00% |
static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
{
int cpu;
if (!rtp)
return;
for_each_possible_cpu(cpu) {
struct rtable *rt;
rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
if (rt) {
dst_dev_put(&rt->dst);
dst_release_immediate(&rt->dst);
}
}
free_percpu(rtp);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 64 | 84.21% | 1 | 20.00% |
Wei Wang | 11 | 14.47% | 3 | 60.00% |
David S. Miller | 1 | 1.32% | 1 | 20.00% |
Total | 76 | 100.00% | 5 | 100.00% |
/* Release a nexthop info record */
static void free_fib_info_rcu(struct rcu_head *head)
{
struct fib_info *fi = container_of(head, struct fib_info, rcu);
struct dst_metrics *m;
change_nexthops(fi) {
if (nexthop_nh->nh_dev)
dev_put(nexthop_nh->nh_dev);
lwtstate_put(nexthop_nh->nh_lwtstate);
free_nh_exceptions(nexthop_nh);
rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
rt_fibinfo_free(&nexthop_nh->nh_rth_input);
} endfor_nexthops(fi);
m = fi->fib_metrics;
if (m != &dst_default_metrics && refcount_dec_and_test(&m->refcnt))
kfree(m);
kfree(fi);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Zheng Yan | 41 | 36.28% | 1 | 9.09% |
Yanmin Zhang | 24 | 21.24% | 1 | 9.09% |
Eric Dumazet | 22 | 19.47% | 3 | 27.27% |
David S. Miller | 19 | 16.81% | 4 | 36.36% |
Roopa Prabhu | 6 | 5.31% | 1 | 9.09% |
Nicolas Dichtel | 1 | 0.88% | 1 | 9.09% |
Total | 113 | 100.00% | 11 | 100.00% |
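/* Teardown is two-stage: free_fib_info() below may only be called once
* fi->fib_dead has been set (it warns and bails out otherwise), and it
* defers the real release to free_fib_info_rcu() via call_rcu(), so that
* lockless readers still traversing the fib_info under RCU finish first.
*/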
void free_fib_info(struct fib_info *fi)
{
if (fi->fib_dead == 0) {
pr_warn("Freeing alive fib_info %p\n", fi);
return;
}
fib_info_cnt--;
#ifdef CONFIG_IP_ROUTE_CLASSID
change_nexthops(fi) {
if (nexthop_nh->nh_tclassid)
fi->fib_net->ipv4.fib_num_tclassid_users--;
} endfor_nexthops(fi);
#endif
call_rcu(&fi->rcu, free_fib_info_rcu);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 34 | 47.22% | 1 | 16.67% |
David S. Miller | 31 | 43.06% | 2 | 33.33% |
Zheng Yan | 5 | 6.94% | 1 | 16.67% |
Joe Perches | 1 | 1.39% | 1 | 16.67% |
Lai Jiangshan | 1 | 1.39% | 1 | 16.67% |
Total | 72 | 100.00% | 6 | 100.00% |
EXPORT_SYMBOL_GPL(free_fib_info);
void fib_release_info(struct fib_info *fi)
{
spin_lock_bh(&fib_info_lock);
if (fi && --fi->fib_treeref == 0) {
hlist_del(&fi->fib_hash);
if (fi->fib_prefsrc)
hlist_del(&fi->fib_lhash);
change_nexthops(fi) {
if (!nexthop_nh->nh_dev)
continue;
hlist_del(&nexthop_nh->nh_hash);
} endfor_nexthops(fi)
fi->fib_dead = 1;
fib_info_put(fi);
}
spin_unlock_bh(&fib_info_lock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 47 | 50.00% | 2 | 28.57% |
David S. Miller | 38 | 40.43% | 3 | 42.86% |
Christian Ehrhardt | 7 | 7.45% | 1 | 14.29% |
Stephen Hemminger | 2 | 2.13% | 1 | 14.29% |
Total | 94 | 100.00% | 7 | 100.00% |
static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
{
const struct fib_nh *onh = ofi->fib_nh;
for_nexthops(fi) {
if (nh->nh_oif != onh->nh_oif ||
nh->nh_gw != onh->nh_gw ||
nh->nh_scope != onh->nh_scope ||
#ifdef CONFIG_IP_ROUTE_MULTIPATH
nh->nh_weight != onh->nh_weight ||
#endif
#ifdef CONFIG_IP_ROUTE_CLASSID
nh->nh_tclassid != onh->nh_tclassid ||
#endif
lwtunnel_cmp_encap(nh->nh_lwtstate, onh->nh_lwtstate) ||
((nh->nh_flags ^ onh->nh_flags) & ~RTNH_COMPARE_MASK))
return -1;
onh++;
} endfor_nexthops(fi);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 113 | 88.28% | 3 | 37.50% |
Roopa Prabhu | 11 | 8.59% | 1 | 12.50% |
Eric Dumazet | 1 | 0.78% | 1 | 12.50% |
Andy Gospodarek | 1 | 0.78% | 1 | 12.50% |
Patrick McHardy | 1 | 0.78% | 1 | 12.50% |
Linus Torvalds | 1 | 0.78% | 1 | 12.50% |
Total | 128 | 100.00% | 8 | 100.00% |
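/* A worked example of the xor fold below, with DEVINDEX_HASHBITS == 8:
* for ifindex 0x12345,
*	(0x12345 ^ (0x12345 >> 8) ^ (0x12345 >> 16)) & 0xff
*	= (0x12345 ^ 0x123 ^ 0x1) & 0xff = 0x67,
* so successive 8-bit slices of the ifindex are folded into one of the
* DEVINDEX_HASHSIZE (256) device-hash buckets.
*/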
static inline unsigned int fib_devindex_hashfn(unsigned int val)
{
unsigned int mask = DEVINDEX_HASHSIZE - 1;
return (val ^
(val >> DEVINDEX_HASHBITS) ^
(val >> (DEVINDEX_HASHBITS * 2))) & mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 43 | 100.00% | 1 | 100.00% |
Total | 43 | 100.00% | 1 | 100.00% |
static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
{
unsigned int mask = (fib_info_hash_size - 1);
unsigned int val = fi->fib_nhs;
val ^= (fi->fib_protocol << 8) | fi->fib_scope;
val ^= (__force u32)fi->fib_prefsrc;
val ^= fi->fib_priority;
for_nexthops(fi) {
val ^= fib_devindex_hashfn(nh->nh_oif);
} endfor_nexthops(fi)
return (val ^ (val >> 7) ^ (val >> 12)) & mask;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 96 | 96.00% | 4 | 80.00% |
Al Viro | 4 | 4.00% | 1 | 20.00% |
Total | 100 | 100.00% | 5 | 100.00% |
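/* fib_find_info() lets identical routes share one fib_info (it is used by
* the fib_info creation code later in this file): a candidate nfi matches
* an existing fi only if they live in the same netns and agree on nexthop
* count, protocol, scope, prefsrc, priority, type, the full metrics array
* and, via nh_comp(), every per-nexthop attribute, ignoring the bits in
* RTNH_COMPARE_MASK.
*/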
static struct fib_info *fib_find_info(const struct fib_info *nfi)
{
struct hlist_head *head;
struct fib_info *fi;
unsigned int hash;
hash = fib_info_hashfn(nfi);
head = &fib_info_hash[hash];
hlist_for_each_entry(fi, head, fib_hash) {
if (!net_eq(fi->fib_net, nfi->fib_net))
continue;
if (fi->fib_nhs != nfi->fib_nhs)
continue;
if (nfi->fib_protocol == fi->fib_protocol &&
nfi->fib_scope == fi->fib_scope &&
nfi->fib_prefsrc == fi->fib_prefsrc &&
nfi->fib_priority == fi->fib_priority &&
nfi->fib_type == fi->fib_type &&
memcmp(nfi->fib_metrics, fi->fib_metrics,
sizeof(u32) * RTAX_MAX) == 0 &&
!((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_COMPARE_MASK) &&
(nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
return fi;
}
return NULL;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 93 | 51.67% | 3 | 27.27% |
David S. Miller | 54 | 30.00% | 2 | 18.18% |
Eric Dumazet | 11 | 6.11% | 2 | 18.18% |
Denis V. Lunev | 10 | 5.56% | 1 | 9.09% |
Linus Torvalds | 5 | 2.78% | 1 | 9.09% |
Octavian Purdila | 5 | 2.78% | 1 | 9.09% |
Andy Gospodarek | 2 | 1.11% | 1 | 9.09% |
Total | 180 | 100.00% | 11 | 100.00% |
/* Check that the gateway is already configured.
* Used only by the redirect accept routine.
*/
int ip_fib_check_default(__be32 gw, struct net_device *dev)
{
struct hlist_head *head;
struct fib_nh *nh;
unsigned int hash;
spin_lock(&fib_info_lock);
hash = fib_devindex_hashfn(dev->ifindex);
head = &fib_info_devhash[hash];
hlist_for_each_entry(nh, head, nh_hash) {
if (nh->nh_dev == dev &&
nh->nh_gw == gw &&
!(nh->nh_flags & RTNH_F_DEAD)) {
spin_unlock(&fib_info_lock);
return 0;
}
}
spin_unlock(&fib_info_lock);
return -1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 56 | 53.85% | 2 | 40.00% |
David S. Miller | 44 | 42.31% | 1 | 20.00% |
Stephen Hemminger | 3 | 2.88% | 1 | 20.00% |
Al Viro | 1 | 0.96% | 1 | 20.00% |
Total | 104 | 100.00% | 5 | 100.00% |
static inline size_t fib_nlmsg_size(struct fib_info *fi)
{
size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
+ nla_total_size(4) /* RTA_TABLE */
+ nla_total_size(4) /* RTA_DST */
+ nla_total_size(4) /* RTA_PRIORITY */
+ nla_total_size(4) /* RTA_PREFSRC */
+ nla_total_size(TCP_CA_NAME_MAX); /* RTAX_CC_ALGO */
/* space for nested metrics */
payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
if (fi->fib_nhs) {
size_t nh_encapsize = 0;
/* Also handles the special case fib_nhs == 1 */
/* each nexthop is packed in an attribute */
size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
/* may contain flow and gateway attribute */
nhsize += 2 * nla_total_size(4);
/* grab encap info */
for_nexthops(fi) {
if (nh->nh_lwtstate) {
/* RTA_ENCAP_TYPE */
nh_encapsize += lwtunnel_get_encap_size(
nh->nh_lwtstate);
/* RTA_ENCAP */
nh_encapsize += nla_total_size(2);
}
} endfor_nexthops(fi);
/* all nexthops are packed in a nested attribute */
payload += nla_total_size((fi->fib_nhs * nhsize) +
nh_encapsize);
}
return payload;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Thomas Graf | 109 | 66.87% | 1 | 33.33% |
Roopa Prabhu | 47 | 28.83% | 1 | 33.33% |
Daniel Borkmann | 7 | 4.29% | 1 | 33.33% |
Total | 163 | 100.00% | 3 | 100.00% |
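/* rtmsg_fib() notifies userspace about a route change: it sizes the skb
* with fib_nlmsg_size() above, fills it via fib_dump_info() and multicasts
* it to the RTNLGRP_IPV4_ROUTE group; -EMSGSIZE from fib_dump_info() would
* mean fib_nlmsg_size() under-estimated, hence the WARN_ON.
*/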
void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
int dst_len, u32 tb_id, const struct nl_info *info,
unsigned int nlm_flags)
{
struct sk_buff *skb;
u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
int err = -ENOBUFS;
skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
if (!skb)
goto errout;
err = fib_dump_info(skb, info->portid, seq, event, tb_id,
fa->fa_type, key, dst_len,
fa->fa_tos, fa->fa_info, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in fib_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
kfree_skb(skb);
goto errout;
}
rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
info->nlh, GFP_KERNEL);
return;
errout:
if (err < 0)
rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 83 | 45.86% | 1 | 6.25% |
Thomas Graf | 56 | 30.94% | 3 | 18.75% |
Patrick McHardy | 22 | 12.15% | 3 | 18.75% |
Denis V. Lunev | 8 | 4.42% | 2 | 12.50% |
Milan Kocian | 5 | 2.76% | 1 | 6.25% |
Eric W. Biedermann | 2 | 1.10% | 1 | 6.25% |
Al Viro | 1 | 0.55% | 1 | 6.25% |
Pablo Neira Ayuso | 1 | 0.55% | 1 | 6.25% |
Joe Perches | 1 | 0.55% | 1 | 6.25% |
Ian Morris | 1 | 0.55% | 1 | 6.25% |
Jamal Hadi Salim | 1 | 0.55% | 1 | 6.25% |
Total | 181 | 100.00% | 16 | 100.00% |
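/* fib_detect_death() backs default-route selection later in this file: it
* peeks at the ARP neighbour state of the first nexthop's gateway and
* returns 1 when that gateway looks dead (neither REACHABLE nor otherwise
* usable), optionally remembering fi as a last-resort candidate; 0 means
* "assume alive", including when no neighbour entry exists yet.
*/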
static int fib_detect_death(struct fib_info *fi, int order,
struct fib_info **last_resort, int *last_idx,
int dflt)
{
struct neighbour *n;
int state = NUD_NONE;
n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
if (n) {
state = n->nud_state;
neigh_release(n);
} else {
return 0;
}
if (state == NUD_REACHABLE)
return 0;
if ((state & NUD_VALID) && order != dflt)
return 0;
if ((state & NUD_VALID) ||
(*last_idx < 0 && order > dflt && state != NUD_INCOMPLETE)) {
*last_resort = fi;
*last_idx = order;
}
return 1;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Robert Olsson | 133 | 92.36% | 1 | 33.33% |
Julian Anastasov | 10 | 6.94% | 1 | 33.33% |
Stephen Hemminger | 1 | 0.69% | 1 | 33.33% |
Total | 144 | 100.00% | 3 | 100.00% |
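/* Multipath parsing helpers: a userspace RTA_MULTIPATH attribute carries a
* sequence of struct rtnexthop entries, each optionally followed by nested
* attributes (RTA_GATEWAY, RTA_FLOW, RTA_ENCAP, ...). fib_count_nexthops()
* below counts the entries, rejecting trailing garbage, and fib_get_nhs()
* copies them into the fib_nh array of a fib_info being built.
*/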
#ifdef CONFIG_IP_ROUTE_MULTIPATH
static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining,
struct netlink_ext_ack *extack)
{
int nhs = 0;
while (rtnh_ok(rtnh, remaining)) {
nhs++;
rtnh = rtnh_next(rtnh, &remaining);
}
/* leftover implies invalid nexthop configuration, discard it */
if (remaining > 0) {
NL_SET_ERR_MSG(extack,
"Invalid nexthop configuration - extra data after nexthops");
nhs = 0;
}
return nhs;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 30 | 42.25% | 1 | 25.00% |
David Ahern | 21 | 29.58% | 2 | 50.00% |
Thomas Graf | 20 | 28.17% | 1 | 25.00% |
Total | 71 | 100.00% | 4 | 100.00% |
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
int ret;
change_nexthops(fi) {
int attrlen;
if (!rtnh_ok(rtnh, remaining)) {
NL_SET_ERR_MSG(extack,
"Invalid nexthop configuration - extra data after nexthop");
return -EINVAL;
}
if (rtnh->rtnh_flags & (RTNH_F_DEAD | RTNH_F_LINKDOWN)) {
NL_SET_ERR_MSG(extack,
"Invalid flags for nexthop - can not contain DEAD or LINKDOWN");
return -EINVAL;
}
nexthop_nh->nh_flags =
(cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
nexthop_nh->nh_gw = nla ? nla_get_in_addr(nla) : 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
if (nexthop_nh->nh_tclassid)
fi->fib_net->ipv4.fib_num_tclassid_users++;
#endif
nla = nla_find(attrs, attrlen, RTA_ENCAP);
if (nla) {
struct lwtunnel_state *lwtstate;
struct nlattr *nla_entype;
nla_entype = nla_find(attrs, attrlen,
RTA_ENCAP_TYPE);
if (!nla_entype) {
NL_SET_BAD_ATTR(extack, nla);
NL_SET_ERR_MSG(extack,
"Encap type is missing");
goto err_inval;
}
ret = lwtunnel_build_state(nla_get_u16(
nla_entype),
nla, AF_INET, cfg,
&lwtstate, extack);
if (ret)
goto errout;
nexthop_nh->nh_lwtstate =
lwtstate_get(lwtstate);
}
}
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
return 0;
err_inval:
ret = -EINVAL;
errout:
return ret;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 98 | 28.08% | 3 | 18.75% |
Roopa Prabhu | 89 | 25.50% | 1 | 6.25% |
Thomas Graf | 76 | 21.78% | 1 | 6.25% |
David Ahern | 41 | 11.75% | 3 | 18.75% |
David S. Miller | 20 | 5.73% | 3 | 18.75% |
Julian Anastasov | 16 | 4.58% | 1 | 6.25% |
Tom Herbert | 4 | 1.15% | 1 | 6.25% |
Nicolas Dichtel | 3 | 0.86% | 1 | 6.25% |
Patrick McHardy | 1 | 0.29% | 1 | 6.25% |
Jiri Benc | 1 | 0.29% | 1 | 6.25% |
Total | 349 | 100.00% | 16 | 100.00% |
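/* fib_rebalance() implements hash-threshold multipath: each usable nexthop
* gets an upper bound proportional to its cumulative weight, scaled to the
* 31-bit flow-hash space. A sketch with arbitrarily chosen weights: two
* nexthops of weight 1 and 2 give total = 3 and bounds of
*	DIV_ROUND_CLOSEST_ULL(1ULL << 31, 3) - 1 = 715827882	(first hop)
*	DIV_ROUND_CLOSEST_ULL(3ULL << 31, 3) - 1 = 2147483647	(second hop)
* so a flow hash below roughly 2^31/3 selects the first hop and anything
* above it the second; dead or ignored-linkdown hops get bound -1 and are
* never selected.
*/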
static void fib_rebalance(struct fib_info *fi)
{
int total;
int w;
struct in_device *in_dev;
if (fi->fib_nhs < 2)
return;
total = 0;
for_nexthops(fi) {
if (nh->nh_flags & RTNH_F_DEAD)
continue;
in_dev = __in_dev_get_rtnl(nh->nh_dev);
if (in_dev &&
IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
nh->nh_flags & RTNH_F_LINKDOWN)
continue;
total += nh->nh_weight;
} endfor_nexthops(fi);
w = 0;
change_nexthops(fi) {
int upper_bound;
in_dev = __in_dev_get_rtnl(nexthop_nh->nh_dev);
if (nexthop_nh->nh_flags & RTNH_F_DEAD) {
upper_bound = -1;
} else if (in_dev &&
IN_DEV_IGNORE_ROUTES_WITH_LINKDOWN(in_dev) &&
nexthop_nh->nh_flags & RTNH_F_LINKDOWN) {
upper_bound = -1;
} else {
w += nexthop_nh->nh_weight;
upper_bound = DIV_ROUND_CLOSEST_ULL((u64)w << 31,
total) - 1;
}
atomic_set(&nexthop_nh->nh_upper_bound, upper_bound);
} endfor_nexthops(fi);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Christensen | 184 | 98.92% | 2 | 66.67% |
David Ahern | 2 | 1.08% | 1 | 33.33% |
Total | 186 | 100.00% | 3 | 100.00% |
static inline void fib_add_weight(struct fib_info *fi,
const struct fib_nh *nh)
{
fi->fib_weight += nh->nh_weight;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Christensen | 26 | 100.00% | 1 | 100.00% |
Total | 26 | 100.00% | 1 | 100.00% |
#else /* CONFIG_IP_ROUTE_MULTIPATH */
#define fib_rebalance(fi) do { } while (0)
#define fib_add_weight(fi, nh) do { } while (0)
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
static int fib_encap_match(u16 encap_type,
struct nlattr *encap,
const struct fib_nh *nh,
const struct fib_config *cfg,
struct netlink_ext_ack *extack)
{
struct lwtunnel_state *lwtstate;
int ret, result = 0;
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
ret = lwtunnel_build_state(encap_type, encap, AF_INET,
cfg, &lwtstate, extack);
if (!ret) {
result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
lwtstate_free(lwtstate);
}
return result;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Roopa Prabhu | 63 | 65.62% | 1 | 20.00% |
Jiri Benc | 15 | 15.62% | 1 | 20.00% |
Tom Herbert | 10 | 10.42% | 1 | 20.00% |
David Ahern | 7 | 7.29% | 1 | 20.00% |
Ying Xue | 1 | 1.04% | 1 | 20.00% |
Total | 96 | 100.00% | 5 | 100.00% |
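/* fib_nh_match() is, in effect, the "does this request describe fi?" check
* used when deleting routes: it returns 0 when cfg's priority, its single
* nexthop (oif/gateway, plus encap if given) or its full multipath list
* agrees with fi, and nonzero when they differ, so callers can skip
* non-matching fib_info entries.
*/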
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi,
struct netlink_ext_ack *extack)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
int remaining;
#endif
if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
return 1;
if (cfg->fc_oif || cfg->fc_gw) {
if (cfg->fc_encap) {
if (fib_encap_match(cfg->fc_encap_type, cfg->fc_encap,
fi->fib_nh, cfg, extack))
return 1;
}
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
(!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
return 0;
return 1;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (!cfg->fc_mp)
return 0;
rtnh = cfg->fc_mp;
remaining = cfg->fc_mp_len;
for_nexthops(fi) {
int attrlen;
if (!rtnh_ok(rtnh, remaining))
return -EINVAL;
if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
return 1;
attrlen = rtnh_attrlen(rtnh);
if (attrlen > 0) {
struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
nla = nla_find(attrs, attrlen, RTA_GATEWAY);
if (nla && nla_get_in_addr(nla) != nh->nh_gw)
return 1;
#ifdef CONFIG_IP_ROUTE_CLASSID
nla = nla_find(attrs, attrlen, RTA_FLOW);
if (nla && nla_get_u32(nla) != nh->nh_tclassid)
return 1;
#endif
}
rtnh = rtnh_next(rtnh, &remaining);
} endfor_nexthops(fi);
#endif
return 0;
}