cregit-Linux how code gets into the kernel

Release 4.11 net/ipv4/route.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *      Robert Olsson           :       Added rt_cache statistics
 *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
 *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
 *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
 *      Ilia Sotnikov           :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>


/* Extract the routing-relevant TOS bits from a flow key, preserving the
 * legacy RTO_ONLINK flag that may be encoded alongside them. */
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

/* Default timeout (jiffies) for garbage-collecting learned route state;
 * used below as the initial value of ip_rt_gc_timeout. */
#define RT_GC_TIMEOUT (300*HZ)


/* IPv4 routing tunables.  Values below are the compiled-in defaults;
 * NOTE(review): meanings are inferred from the names — the sysctl
 * registration is not visible in this chunk, confirm against
 * Documentation/networking/ip-sysctl.txt. */
static int ip_rt_max_size;

/* Redirect rate limiting: max redirects per peer before backing off. */
static int ip_rt_redirect_number __read_mostly	= 9;

/* Minimum interval (jiffies) between successive redirects. */
static int ip_rt_redirect_load __read_mostly	= HZ / 50;

/* Idle period after which the redirect rate-limit state resets. */
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));

/* Token cost (jiffies) charged for each ICMP error sent. */
static int ip_rt_error_cost __read_mostly	= HZ;

/* Burst allowance (jiffies) for ICMP error rate limiting. */
static int ip_rt_error_burst __read_mostly	= 5 * HZ;

/* Lifetime of a learned path-MTU value. */
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;

/* Floor for PMTU learned from ICMP: 512 + IP header + TCP header. */
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;

/* Minimum advertised MSS derived from a route's MTU. */
static int ip_rt_min_advmss __read_mostly	= 256;

/* GC timeout for cached per-nexthop exception entries. */
static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *      Interface to generic destination cache.
 */

/* Forward declarations of the dst_ops callbacks wired up in ipv4_dst_ops
 * below; the implementations live later in this file. */
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);


static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) { WARN_ON(1); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller24100.00%2100.00%
Total24100.00%2100.00%

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .mtu = ipv4_mtu, .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .negative_advice = ipv4_negative_advice, .link_failure = ipv4_link_failure, .update_pmtu = ip_rt_update_pmtu, .redirect = ip_do_redirect, .local_out = __ip_local_out, .neigh_lookup = ipv4_neigh_lookup, .confirm_neigh = ipv4_confirm_neigh, }; #define ECN_OR_COST(class) TC_PRIO_##class const __u8 ip_tos2prio[16] = { TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field) #ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos) return NULL; return SEQ_START_TOKEN; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller1037.04%114.29%
Arnaldo Carvalho de Melo829.63%114.29%
Linus Torvalds (pre-git)414.81%342.86%
Eric Dumazet311.11%114.29%
Hideaki Yoshifuji / 吉藤英明27.41%114.29%
Total27100.00%7100.00%


static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller1140.74%116.67%
Arnaldo Carvalho de Melo725.93%116.67%
Eric Dumazet414.81%116.67%
Hideaki Yoshifuji / 吉藤英明311.11%116.67%
Linus Torvalds (pre-git)27.41%233.33%
Total27100.00%6100.00%


static void rt_cache_seq_stop(struct seq_file *seq, void *v) { }

Contributors

PersonTokensPropCommitsCommitProp
Denis V. Lunev750.00%133.33%
David S. Miller535.71%133.33%
Hideaki Yoshifuji / 吉藤英明214.29%133.33%
Total14100.00%3100.00%


static int rt_cache_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" "HHUptod\tSpecDst"); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller2365.71%133.33%
Denis V. Lunev617.14%133.33%
Hideaki Yoshifuji / 吉藤英明617.14%133.33%
Total35100.00%3100.00%

static const struct seq_operations rt_cache_seq_ops = { .start = rt_cache_seq_start, .next = rt_cache_seq_next, .stop = rt_cache_seq_stop, .show = rt_cache_seq_show, };
static int rt_cache_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cache_seq_ops); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller1456.00%150.00%
Arnaldo Carvalho de Melo1144.00%150.00%
Total25100.00%2100.00%

static const struct file_operations rt_cache_seq_fops = { .owner = THIS_MODULE, .open = rt_cache_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, };
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos) { int cpu; if (*pos == 0) return SEQ_START_TOKEN; for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller4357.33%116.67%
Arnaldo Carvalho de Melo2026.67%116.67%
Eric Dumazet810.67%116.67%
Hideaki Yoshifuji / 吉藤英明22.67%116.67%
Denis V. Lunev11.33%116.67%
Linus Torvalds (pre-git)11.33%116.67%
Total75100.00%6100.00%


static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int cpu; for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3755.22%133.33%
Arnaldo Carvalho de Melo2943.28%133.33%
Eric Dumazet11.49%133.33%
Total67100.00%3100.00%


static void rt_cpu_seq_stop(struct seq_file *seq, void *v) { }

Contributors

PersonTokensPropCommitsCommitProp
Arnaldo Carvalho de Melo1285.71%150.00%
David S. Miller214.29%150.00%
Total14100.00%2100.00%


static int rt_cpu_seq_show(struct seq_file *seq, void *v) { struct rt_cache_stat *st = v; if (v == SEQ_START_TOKEN) { seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; } seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", dst_entries_get_slow(&ipv4_dst_ops), 0, /* st->in_hit */ st->in_slow_tot, st->in_slow_mc, st->in_no_route, st->in_brd, st->in_martian_dst, st->in_martian_src, 0, /* st->out_hit */ st->out_slow_tot, st->out_slow_mc, 0, /* st->gc_total */ 0, /* st->gc_ignored */ 0, /* st->gc_goal_miss */ 0, /* st->gc_dst_overflow */ 0, /* st->in_hlist_search */ 0 /* st->out_hlist_search */ ); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3833.63%112.50%
Arnaldo Carvalho de Melo2925.66%112.50%
Linus Torvalds (pre-git)2522.12%337.50%
Eric Dumazet1614.16%112.50%
Pavel Emelyanov43.54%112.50%
Joe Perches10.88%112.50%
Total113100.00%8100.00%

static const struct seq_operations rt_cpu_seq_ops = { .start = rt_cpu_seq_start, .next = rt_cpu_seq_next, .stop = rt_cpu_seq_stop, .show = rt_cpu_seq_show, };
static int rt_cpu_seq_open(struct inode *inode, struct file *file) { return seq_open(file, &rt_cpu_seq_ops); }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds1040.00%125.00%
Stephen Hemminger936.00%125.00%
Pavel Emelyanov312.00%125.00%
David S. Miller312.00%125.00%
Total25100.00%4100.00%

static const struct file_operations rt_cpu_seq_fops = { .owner = THIS_MODULE, .open = rt_cpu_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v) { struct ip_rt_acct *dst, *src; unsigned int i, j; dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL); if (!dst) return -ENOMEM; for_each_possible_cpu(i) { src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); for (j = 0; j < 256; j++) { dst[j].o_bytes += src[j].o_bytes; dst[j].o_packets += src[j].o_packets; dst[j].i_bytes += src[j].i_bytes; dst[j].i_packets += src[j].i_packets; } } seq_write(m, dst, 256 * sizeof(struct ip_rt_acct)); kfree(dst); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller13480.24%125.00%
Stephen Hemminger2615.57%125.00%
Harald Welte52.99%125.00%
Linus Torvalds21.20%125.00%
Total167100.00%4100.00%


static int rt_acct_proc_open(struct inode *inode, struct file *file) { return single_open(file, rt_acct_proc_show, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller1246.15%133.33%
Stephen Hemminger1246.15%133.33%
Linus Torvalds27.69%133.33%
Total26100.00%3100.00%

static const struct file_operations rt_acct_proc_fops = { .owner = THIS_MODULE, .open = rt_acct_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif
static int __net_init ip_rt_do_proc_init(struct net *net) { struct proc_dir_entry *pde; pde = proc_create("rt_cache", S_IRUGO, net->proc_net, &rt_cache_seq_fops); if (!pde) goto err1; pde = proc_create("rt_cache", S_IRUGO, net->proc_net_stat, &rt_cpu_seq_fops); if (!pde) goto err2; #ifdef CONFIG_IP_ROUTE_CLASSID pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops); if (!pde) goto err3; #endif return 0; #ifdef CONFIG_IP_ROUTE_CLASSID err3: remove_proc_entry("rt_cache", net->proc_net_stat); #endif err2: remove_proc_entry("rt_cache", net->proc_net); err1: return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller9976.15%114.29%
Stephen Hemminger118.46%114.29%
Ravikiran G. Thirumalai64.62%114.29%
Linus Torvalds64.62%114.29%
Gao Feng53.85%114.29%
Robert Olsson21.54%114.29%
Harald Welte10.77%114.29%
Total130100.00%7100.00%


static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); #ifdef CONFIG_IP_ROUTE_CLASSID remove_proc_entry("rt_acct", net->proc_net); #endif }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3579.55%120.00%
Robert Olsson511.36%240.00%
Linus Torvalds36.82%120.00%
Ravikiran G. Thirumalai12.27%120.00%
Total44100.00%5100.00%

static struct pernet_operations ip_rt_proc_ops __net_initdata = { .init = ip_rt_do_proc_init, .exit = ip_rt_do_proc_exit, };
static int __init ip_rt_proc_init(void) { return register_pernet_subsys(&ip_rt_proc_ops); }

Contributors

PersonTokensPropCommitsCommitProp
Arnaldo Carvalho de Melo1062.50%133.33%
David S. Miller531.25%133.33%
Stephen Hemminger16.25%133.33%
Total16100.00%3100.00%

#else
static inline int ip_rt_proc_init(void) { return 0; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller1083.33%150.00%
Arnaldo Carvalho de Melo216.67%150.00%
Total12100.00%2100.00%

#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth) { return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller2066.67%120.00%
Pavel Emelyanov516.67%120.00%
Alexey Dobriyan26.67%120.00%
Eric Dumazet26.67%120.00%
Fan Du13.33%120.00%
Total30100.00%5100.00%


void rt_cache_flush(struct net *net) { rt_genid_bump_ipv4(net); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller960.00%120.00%
Pavel Emelyanov320.00%120.00%
Alexey Dobriyan16.67%120.00%
Eric Dumazet16.67%120.00%
Fan Du16.67%120.00%
Total15100.00%5100.00%


static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct net_device *dev = dst->dev; const __be32 *pkey = daddr; const struct rtable *rt; struct neighbour *n; rt = (const struct rtable *) dst; if (rt->rt_gateway) pkey = (const __be32 *) &rt->rt_gateway; else if (skb) pkey = &ip_hdr(skb)->daddr; n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); if (n) return n; return neigh_create(&arp_tbl, pkey, dev); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10381.10%133.33%
Alexey Dobriyan1511.81%133.33%
Pavel Emelyanov97.09%133.33%
Total127100.00%3100.00%


static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr) { struct net_device *dev = dst->dev; const __be32 *pkey = daddr; const struct rtable *rt; rt = (const struct rtable *)dst; if (rt->rt_gateway) pkey = (const __be32 *)&rt->rt_gateway; else if (!daddr || (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) return; __ipv4_confirm_neigh(dev, *(__force u32 *)pkey); }

Contributors

PersonTokensPropCommitsCommitProp
Julian Anastasov101100.00%1100.00%
Total101100.00%1100.00%

#define IP_IDENTS_SZ 2048u static atomic_t *ip_idents __read_mostly; static u32 *ip_tstamps __read_mostly; /* In order to protect privacy, we add a perturbation to identifiers * if one generator is seldom used. This makes hard for an attacker * to infer how many packets were sent between two points in time. */
u32 ip_idents_reserve(u32 hash, int segs) { u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; u32 old = ACCESS_ONCE(*p_tstamp); u32 now = (u32)jiffies; u32 new, delta = 0; if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); /* Do not use atomic_add_return() as it makes UBSAN unhappy */ do { old = (u32)atomic_read(p_id); new = old + delta + segs; } while (atomic_cmpxchg(p_id, old, new) != old); return new - segs; }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet122100.00%3100.00%
Total122100.00%3100.00%

EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) { static u32 ip_idents_hashrnd __read_mostly; u32 hash, id; net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); hash = jhash_3words((__force u32)iph->daddr, (__force u32)iph->saddr, iph->protocol ^ net_hash_mix(net), ip_idents_hashrnd); id = ip_idents_reserve(hash, segs); iph->id = htons(id); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet3741.57%233.33%
David S. Miller2022.47%116.67%
Pavel Emelyanov1820.22%116.67%
Hannes Frederic Sowa1011.24%116.67%
Denis V. Lunev44.49%116.67%
Total89100.00%6100.00%

EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(const struct net *net, struct flowi4 *fl4, const struct sock *sk, const struct iphdr *iph, int oif, u8 tos, u8 prot, u32 mark, int flow_flags) { if (sk) { const struct inet_sock *inet = inet_sk(sk); oif = sk->sk_bound_dev_if; mark = sk->sk_mark; tos = RT_CONN_FLAGS(sk); prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol; } flowi4_init_output(fl4, oif, mark, tos, RT_SCOPE_UNIVERSE, prot, flow_flags, iph->daddr, iph->saddr, 0, 0, sock_net_uid(net, sk)); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller10279.69%114.29%
Lorenzo Colitti1310.16%114.29%
Linus Torvalds (pre-git)107.81%457.14%
Dipankar Sarma32.34%114.29%
Total128100.00%7100.00%


static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb, const struct sock *sk) { const struct net *net = dev_net(skb->dev); const struct iphdr *iph = ip_hdr(skb); int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; __build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller7170.30%120.00%
Lorenzo Colitti1514.85%240.00%
Linus Torvalds (pre-git)1211.88%120.00%
Dipankar Sarma32.97%120.00%
Total101100.00%5100.00%


static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, inet->inet_saddr, 0, 0, sk->sk_uid); rcu_read_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller9476.42%233.33%
Linus Torvalds (pre-git)2318.70%233.33%
Lorenzo Colitti43.25%116.67%
Linus Torvalds21.63%116.67%
Total123100.00%6100.00%


static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk, const struct sk_buff *skb) { if (skb) build_skb_flow_key(fl4, skb, sk); else build_sk_flow_key(fl4, sk); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller3170.45%266.67%
Alexey Kuznetsov1329.55%133.33%
Total44100.00%3100.00%


static inline void rt_free(struct rtable *rt) { call_rcu(&rt->dst.rcu_head, dst_rcu_free); }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller24100.00%1100.00%
Total24100.00%1100.00%

/* Serialises writers of the per-nexthop exception hash chains. */
static DEFINE_SPINLOCK(fnhe_lock);

/* Detach both cached routes (input and output) from an exception entry
 * and free them RCU-deferred via rt_free(). */
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Timo Teräs74100.00%1100.00%
Total74100.00%1100.00%


static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) { struct fib_nh_exception *fnhe, *oldest; oldest = rcu_dereference(hash->chain); for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) oldest = fnhe; } fnhe_flush_routes(oldest); return oldest; }

Contributors

PersonTokensPropCommitsCommitProp
David S. Miller6278.48%250.00%
Neil Horman1620.25%125.00%
Timo Teräs11.27%125.00%
Total79100.00%4100.00%


static inline u32 fnhe_hashfun(__be32 daddr) { static u32 fnhe_hashrnd __read_mostly; u32 hval; net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); hval = jhash_1word((__force u32) daddr, fnhe_hashrnd); return hash_32(hval, FNHE_HASH_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet3162.00%250.00%
David S. Miller1224.00%125.00%
François Romieu714.00%125.00%
Total50100.00%4100.00%


static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) { rt->rt_pmtu = fnhe->fnhe_pmtu; rt->dst.expires = fnhe->fnhe_expires; if (fnhe->fnhe_gw) { rt->rt_flags |= RTCF_REDIRECTED; rt->rt_gateway = fnhe->fnhe_gw; rt->rt_uses_gateway = 1; } }

Contributors

PersonTokensPropCommitsCommitProp
Timo Teräs62100.00%1100.00%
Total62100.00%1100.00%


static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, u32 pmtu, unsigned long expires) { struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; struct rtable *rt; unsigned int i; int depth; u32 hval = fnhe_hashfun(daddr); spin_lock_bh(&fnhe_lock); hash = rcu_dereference(nh->nh_exceptions); if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; rcu_assign_pointer(nh->nh_exceptions, hash);