/* Release 4.11: net/ipv4/route.c */
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* ROUTE - implementation of the IP router.
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Alan Cox, <gw4pts@gw4pts.ampr.org>
* Linus Torvalds, <Linus.Torvalds@helsinki.fi>
* Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
*
* Fixes:
* Alan Cox : Verify area fixes.
* Alan Cox : cli() protects routing changes
* Rui Oliveira : ICMP routing table updates
* (rco@di.uminho.pt) Routing table insertion and update
* Linus Torvalds : Rewrote bits to be sensible
* Alan Cox : Added BSD route gw semantics
* Alan Cox : Super /proc >4K
* Alan Cox : MTU in route table
* Alan Cox : MSS actually. Also added the window
* clamper.
* Sam Lantinga : Fixed route matching in rt_del()
* Alan Cox : Routing cache support.
* Alan Cox : Removed compatibility cruft.
* Alan Cox : RTF_REJECT support.
* Alan Cox : TCP irtt support.
* Jonathan Naylor : Added Metric support.
* Miquel van Smoorenburg : BSD API fixes.
* Miquel van Smoorenburg : Metrics.
* Alan Cox : Use __u32 properly
* Alan Cox : Aligned routing errors more closely with BSD;
* our system is still very different.
* Alan Cox : Faster /proc handling
* Alexey Kuznetsov : Massive rework to support tree based routing,
* routing caches and better behaviour.
*
* Olaf Erb : irtt wasn't being copied right.
* Bjorn Ekwall : Kerneld route support.
* Alan Cox : Multicast fixed (I hope)
* Pavel Krauz : Limited broadcast fixed
* Mike McLagan : Routing by source
* Alexey Kuznetsov : End of old history. Split to fib.c and
* route.c and rewritten from scratch.
* Andi Kleen : Load-limit warning messages.
* Vitaly E. Lavrov : Transparent proxy revived after a year-long coma.
* Vitaly E. Lavrov : Race condition in ip_route_input_slow.
* Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
* Vladimir V. Ivanov : IP rule info (flowid) is really useful.
* Marc Boucher : routing by fwmark
* Robert Olsson : Added rt_cache statistics
* Arnaldo C. Melo : Convert proc stuff to seq_file
* Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
* Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
* Ilia Sotnikov : Removed TOS from hash calculations
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#define pr_fmt(fmt) "IPv4: " fmt
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
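/*
 * Reduce a flowi4 TOS to the bits that matter for route lookup, while
 * keeping the RTO_ONLINK flag that callers fold into the low bit of
 * the tos field.
 */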
#define RT_FL_TOS(oldflp4) \
((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
#define RT_GC_TIMEOUT (300*HZ)
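/*
 * Tunables.  Most of these back the sysctls registered later in this
 * file under /proc/sys/net/ipv4/route/; e.g. ip_rt_mtu_expires appears
 * there as "mtu_expires" and bounds how long a learned PMTU is honoured.
 */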
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly = 9;
static int ip_rt_redirect_load __read_mostly = HZ / 50;
static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly = HZ;
static int ip_rt_error_burst __read_mostly = 5 * HZ;
static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
/*
* Interface to generic destination cache.
*/
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);
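/*
 * IPv4 routes take their metrics from the FIB entry they were created
 * from and never copy-on-write them through the generic dst layer, so
 * this handler should be unreachable: WARN and return NULL rather than
 * hand back writable metrics.
 */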
static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
WARN_ON(1);
return NULL;
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
.family = AF_INET,
.check = ipv4_dst_check,
.default_advmss = ipv4_default_advmss,
.mtu = ipv4_mtu,
.cow_metrics = ipv4_cow_metrics,
.destroy = ipv4_dst_destroy,
.negative_advice = ipv4_negative_advice,
.link_failure = ipv4_link_failure,
.update_pmtu = ip_rt_update_pmtu,
.redirect = ip_do_redirect,
.local_out = __ip_local_out,
.neigh_lookup = ipv4_neigh_lookup,
.confirm_neigh = ipv4_confirm_neigh,
};
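/*
 * Map the IPv4 TOS field to a packet scheduler priority band.
 * ECN_OR_COST(X) expands to TC_PRIO_##X, so each odd slot gets the same
 * band as the even slot before it.  The table is indexed with the TOS
 * bits shifted down by one; cf. this helper from include/net/route.h:
 *
 *	static inline char rt_tos2priority(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos)>>1];
 *	}
 */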
#define ECN_OR_COST(class) TC_PRIO_##class
const __u8 ip_tos2prio[16] = {
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BESTEFFORT,
ECN_OR_COST(BESTEFFORT),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_BULK,
ECN_OR_COST(BULK),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE,
ECN_OR_COST(INTERACTIVE),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK),
TC_PRIO_INTERACTIVE_BULK,
ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
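/*
 * /proc/net/rt_cache is a leftover from the days of the IPv4 routing
 * cache (removed in 3.6): ->start yields SEQ_START_TOKEN exactly once
 * so ->show can print the legacy header, and ->next always ends the
 * iteration, so the file never lists any entries.
 */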
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
if (*pos)
return NULL;
return SEQ_START_TOKEN;
}
static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}
static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq, "%-127s\n",
"Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
"Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
"HHUptod\tSpecDst");
return 0;
}
static const struct seq_operations rt_cache_seq_ops = {
.start = rt_cache_seq_start,
.next = rt_cache_seq_next,
.stop = rt_cache_seq_stop,
.show = rt_cache_seq_show,
};
static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rt_cache_seq_ops);
}
static const struct file_operations rt_cache_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cache_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
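/*
 * /proc/net/stat/rt_cache iterator: position 0 is the header token,
 * position n means "resume at CPU n-1".  Impossible CPUs are skipped,
 * and *pos is left pointing one past the CPU just returned.
 */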
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu+1;
return &per_cpu(rt_cache_stat, cpu);
}
return NULL;
}
static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
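/*
 * Emit one line of counters per possible CPU.  The "entries" column
 * repeats the global dst count on every line, and the columns that
 * tracked the removed routing cache (in_hit, the gc_* family, the
 * hlist searches) are hard-wired to zero, preserving the historical
 * column layout.
 */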
static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
struct rt_cache_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
return 0;
}
seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
" %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
dst_entries_get_slow(&ipv4_dst_ops),
0, /* st->in_hit */
st->in_slow_tot,
st->in_slow_mc,
st->in_no_route,
st->in_brd,
st->in_martian_dst,
st->in_martian_src,
0, /* st->out_hit */
st->out_slow_tot,
st->out_slow_mc,
0, /* st->gc_total */
0, /* st->gc_ignored */
0, /* st->gc_goal_miss */
0, /* st->gc_dst_overflow */
0, /* st->in_hlist_search */
0 /* st->out_hlist_search */
);
return 0;
}
static const struct seq_operations rt_cpu_seq_ops = {
.start = rt_cpu_seq_start,
.next = rt_cpu_seq_next,
.stop = rt_cpu_seq_stop,
.show = rt_cpu_seq_show,
};
static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
return seq_open(file, &rt_cpu_seq_ops);
}
static const struct file_operations rt_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = rt_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
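/*
 * /proc/net/rt_acct: fold the per-CPU ip_rt_acct counters for all 256
 * routing realms into one snapshot and emit it as raw binary via
 * seq_write() -- consumers read the struct ip_rt_acct array directly
 * rather than parse text.
 */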
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
struct ip_rt_acct *dst, *src;
unsigned int i, j;
dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
if (!dst)
return -ENOMEM;
for_each_possible_cpu(i) {
src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
for (j = 0; j < 256; j++) {
dst[j].o_bytes += src[j].o_bytes;
dst[j].o_packets += src[j].o_packets;
dst[j].i_bytes += src[j].i_bytes;
dst[j].i_packets += src[j].i_packets;
}
}
seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
kfree(dst);
return 0;
}
static int rt_acct_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, rt_acct_proc_show, NULL);
}
static const struct file_operations rt_acct_proc_fops = {
.owner = THIS_MODULE,
.open = rt_acct_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
&rt_cache_seq_fops);
if (!pde)
goto err1;
pde = proc_create("rt_cache", S_IRUGO,
net->proc_net_stat, &rt_cpu_seq_fops);
if (!pde)
goto err2;
#ifdef CONFIG_IP_ROUTE_CLASSID
pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
if (!pde)
goto err3;
#endif
return 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
remove_proc_entry("rt_cache", net->proc_net);
err1:
return -ENOMEM;
}
static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
remove_proc_entry("rt_cache", net->proc_net_stat);
remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
remove_proc_entry("rt_acct", net->proc_net);
#endif
}
static struct pernet_operations ip_rt_proc_ops __net_initdata = {
.init = ip_rt_do_proc_init,
.exit = ip_rt_do_proc_exit,
};
static int __init ip_rt_proc_init(void)
{
return register_pernet_subsys(&ip_rt_proc_ops);
}
#else
static inline int ip_rt_proc_init(void)
{
return 0;
}
#endif /* CONFIG_PROC_FS */
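/*
 * Cached routes are invalidated in O(1): every rtable records the
 * namespace's IPv4 generation id at creation, and rt_cache_flush()
 * below simply bumps that id, so stale entries are detected and
 * discarded lazily on their next use.
 */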
static inline bool rt_is_expired(const struct rtable *rth)
{
return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}
void rt_cache_flush(struct net *net)
{
rt_genid_bump_ipv4(net);
}
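/*
 * Resolve the neighbour for a route: ARP for the gateway when the
 * route has one, otherwise for the destination taken from the skb (or
 * from @daddr when no skb is available).  Falls back to creating the
 * neighbour entry when the hash lookup misses.
 */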
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
struct sk_buff *skb,
const void *daddr)
{
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
struct neighbour *n;
rt = (const struct rtable *) dst;
if (rt->rt_gateway)
pkey = (const __be32 *) &rt->rt_gateway;
else if (skb)
pkey = &ip_hdr(skb)->daddr;
n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
if (n)
return n;
return neigh_create(&arp_tbl, pkey, dev);
}
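/*
 * Confirm (refresh) an existing neighbour entry on successful traffic,
 * using the same gateway-or-destination key selection as
 * ipv4_neigh_lookup() above.  Multicast, broadcast and local routes
 * have no unicast next hop to confirm, so they are skipped.
 */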
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
struct net_device *dev = dst->dev;
const __be32 *pkey = daddr;
const struct rtable *rt;
rt = (const struct rtable *)dst;
if (rt->rt_gateway)
pkey = (const __be32 *)&rt->rt_gateway;
else if (!daddr ||
(rt->rt_flags &
(RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL)))
return;
__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used: after an idle period the sequence
 * is advanced by a random amount bounded by that idle time.  This
 * makes it hard for an attacker to infer how many packets were sent
 * between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
u32 old = ACCESS_ONCE(*p_tstamp);
u32 now = (u32)jiffies;
u32 new, delta = 0;
if (old != now && cmpxchg(p_tstamp, old, now) == old)
delta = prandom_u32_max(now - old);
/* Do not use atomic_add_return() as it makes UBSAN unhappy */
do {
old = (u32)atomic_read(p_id);
new = old + delta + segs;
} while (atomic_cmpxchg(p_id, old, new) != old);
return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
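/*
 * Pick the IP ID generator for this packet by hashing (daddr, saddr,
 * protocol) with a per-boot random key and a per-namespace salt, then
 * reserve @segs consecutive IDs from that bucket.
 */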
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
static u32 ip_idents_hashrnd __read_mostly;
u32 hash, id;
net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
hash = jhash_3words((__force u32)iph->daddr,
(__force u32)iph->saddr,
iph->protocol ^ net_hash_mix(net),
ip_idents_hashrnd);
id = ip_idents_reserve(hash, segs);
iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
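/*
 * Build the flow key for a route lookup.  When a socket is given, its
 * bound device, mark, TOS and protocol override whatever was derived
 * from the packet that triggered the lookup.
 */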
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
const struct sock *sk,
const struct iphdr *iph,
int oif, u8 tos,
u8 prot, u32 mark, int flow_flags)
{
if (sk) {
const struct inet_sock *inet = inet_sk(sk);
oif = sk->sk_bound_dev_if;
mark = sk->sk_mark;
tos = RT_CONN_FLAGS(sk);
prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
}
flowi4_init_output(fl4, oif, mark, tos,
RT_SCOPE_UNIVERSE, prot,
flow_flags,
iph->daddr, iph->saddr, 0, 0,
sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
const struct sock *sk)
{
const struct net *net = dev_net(skb->dev);
const struct iphdr *iph = ip_hdr(skb);
int oif = skb->dev->ifindex;
u8 tos = RT_TOS(iph->tos);
u8 prot = iph->protocol;
u32 mark = skb->mark;
__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
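/*
 * Flow key for a connected socket.  With a source-routing IP option
 * the route must be looked up towards the first hop (faddr), not the
 * final destination.
 */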
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
const struct inet_sock *inet = inet_sk(sk);
const struct ip_options_rcu *inet_opt;
__be32 daddr = inet->inet_daddr;
rcu_read_lock();
inet_opt = rcu_dereference(inet->inet_opt);
if (inet_opt && inet_opt->opt.srr)
daddr = inet_opt->opt.faddr;
flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk),
daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
const struct sk_buff *skb)
{
if (skb)
build_skb_flow_key(fl4, skb, sk);
else
build_sk_flow_key(fl4, sk);
}
static inline void rt_free(struct rtable *rt)
{
call_rcu(&rt->dst.rcu_head, dst_rcu_free);
}
static DEFINE_SPINLOCK(fnhe_lock);
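/*
 * Per-nexthop exception (fnhe) handling.  Writers serialize on
 * fnhe_lock; lookups run under RCU.  fnhe_flush_routes() detaches the
 * input and output routes cached in an exception so they are freed
 * only after a grace period.
 */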
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
struct rtable *rt;
rt = rcu_dereference(fnhe->fnhe_rth_input);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
rt_free(rt);
}
rt = rcu_dereference(fnhe->fnhe_rth_output);
if (rt) {
RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
rt_free(rt);
}
}
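/*
 * Pick the victim when an exception chain must be recycled: scan past
 * the head for the entry with the oldest stamp and drop its cached
 * routes before it is reused.
 */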
static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
struct fib_nh_exception *fnhe, *oldest;
oldest = rcu_dereference(hash->chain);
for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
fnhe = rcu_dereference(fnhe->fnhe_next)) {
if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
oldest = fnhe;
}
fnhe_flush_routes(oldest);
return oldest;
}
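/* Hash the destination with a per-boot random key into FNHE_HASH_SHIFT bits. */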
static inline u32 fnhe_hashfun(__be32 daddr)
{
static u32 fnhe_hashrnd __read_mostly;
u32 hval;
net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
return hash_32(hval, FNHE_HASH_SHIFT);
}
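/*
 * Propagate what an exception has learned (PMTU, its expiry, and any
 * gateway installed by an ICMP redirect) into a cached route.
 */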
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
rt->rt_pmtu = fnhe->fnhe_pmtu;
rt->dst.expires = fnhe->fnhe_expires;
if (fnhe->fnhe_gw) {
rt->rt_flags |= RTCF_REDIRECTED;
rt->rt_gateway = fnhe->fnhe_gw;
rt->rt_uses_gateway = 1;
}
}
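/*
 * Record or refresh the exception for @daddr on nexthop @nh, e.g.
 * after an ICMP redirect (@gw) or a fragmentation-needed report
 * (@pmtu, @expires).  The hash table itself is allocated lazily under
 * fnhe_lock, with GFP_ATOMIC because this can run from packet
 * processing context.
 */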
static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
u32 pmtu, unsigned long expires)
{
struct fnhe_hash_bucket *hash;
struct fib_nh_exception *fnhe;
struct rtable *rt;
unsigned int i;
int depth;
u32 hval = fnhe_hashfun(daddr);
spin_lock_bh(&fnhe_lock);
hash = rcu_dereference(nh->nh_exceptions);
if (!hash) {
hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
if (!hash)
goto out_unlock;
rcu_assign_pointer(nh->nh_exceptions, hash);