cregit-Linux how code gets into the kernel

Release 4.8 net/ipv4/route.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              ROUTE - implementation of the IP router.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 *              Linus Torvalds, <Linus.Torvalds@helsinki.fi>
 *              Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Fixes:
 *              Alan Cox        :       Verify area fixes.
 *              Alan Cox        :       cli() protects routing changes
 *              Rui Oliveira    :       ICMP routing table updates
 *              (rco@di.uminho.pt)      Routing table insertion and update
 *              Linus Torvalds  :       Rewrote bits to be sensible
 *              Alan Cox        :       Added BSD route gw semantics
 *              Alan Cox        :       Super /proc >4K
 *              Alan Cox        :       MTU in route table
 *              Alan Cox        :       MSS actually. Also added the window
 *                                      clamper.
 *              Sam Lantinga    :       Fixed route matching in rt_del()
 *              Alan Cox        :       Routing cache support.
 *              Alan Cox        :       Removed compatibility cruft.
 *              Alan Cox        :       RTF_REJECT support.
 *              Alan Cox        :       TCP irtt support.
 *              Jonathan Naylor :       Added Metric support.
 *      Miquel van Smoorenburg  :       BSD API fixes.
 *      Miquel van Smoorenburg  :       Metrics.
 *              Alan Cox        :       Use __u32 properly
 *              Alan Cox        :       Aligned routing errors more closely with BSD
 *                                      our system is still very different.
 *              Alan Cox        :       Faster /proc handling
 *      Alexey Kuznetsov        :       Massive rework to support tree based routing,
 *                                      routing caches and better behaviour.
 *
 *              Olaf Erb        :       irtt wasn't being copied right.
 *              Bjorn Ekwall    :       Kerneld route support.
 *              Alan Cox        :       Multicast fixed (I hope)
 *              Pavel Krauz     :       Limited broadcast fixed
 *              Mike McLagan    :       Routing by source
 *      Alexey Kuznetsov        :       End of old history. Split to fib.c and
 *                                      route.c and rewritten from scratch.
 *              Andi Kleen      :       Load-limit warning messages.
 *      Vitaly E. Lavrov        :       Transparent proxy revived after year coma.
 *      Vitaly E. Lavrov        :       Race condition in ip_route_input_slow.
 *      Tobias Ringstrom        :       Uninitialized res.type in ip_route_output_slow.
 *      Vladimir V. Ivanov      :       IP rule info (flowid) is really useful.
 *              Marc Boucher    :       routing by fwmark
 *      Robert Olsson           :       Added rt_cache statistics
 *      Arnaldo C. Melo         :       Convert proc stuff to seq_file
 *      Eric Dumazet            :       hashed spinlocks and rt_check_expire() fixes.
 *      Ilia Sotnikov           :       Ignore TOS on PMTUD and Redirect
 *      Ilia Sotnikov           :       Removed TOS from hash calculations
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */


#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <asm/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#include <linux/kmemleak.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>


/* TOS bits (plus the legacy RTO_ONLINK flag) that take part in routing
 * decisions for a flow.
 */
#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))


/* Default expiry for learned routing state: 5 minutes, in jiffies. */
#define RT_GC_TIMEOUT (300*HZ)


/* NOTE(review): the ip_rt_* variables below look like boot-time defaults
 * for sysctl knobs registered elsewhere in this file — confirm against
 * Documentation/networking/ip-sysctl.txt before relying on the details.
 */
static int ip_rt_max_size;

/* ICMP redirect rate limiting: how many redirects to send before backing
 * off, the load factor between them, and the silence period — presumably;
 * exact semantics live in the redirect-sending code, not shown here.
 */
static int ip_rt_redirect_number __read_mostly	= 9;

static int ip_rt_redirect_load __read_mostly	= HZ / 50;

static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));

/* ICMP error rate limiting: token cost per error and burst allowance. */
static int ip_rt_error_cost __read_mostly	= HZ;

static int ip_rt_error_burst __read_mostly	= 5 * HZ;

/* Learned PMTU entries expire after 10 minutes. */
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;

/* Floor for path MTU: 512 payload + 20 (IP) + 20 (TCP) — presumably. */
static int ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;

static int ip_rt_min_advmss __read_mostly	= 256;


static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *      Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		ipv4_dst_destroy(struct dst_entry *dst);


static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old) { WARN_ON(1); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller24100.00%2100.00%
Total24100.00%2100.00%

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr); static struct dst_ops ipv4_dst_ops = { .family = AF_INET, .check = ipv4_dst_check, .default_advmss = ipv4_default_advmss, .mtu = ipv4_mtu, .cow_metrics = ipv4_cow_metrics, .destroy = ipv4_dst_destroy, .negative_advice = ipv4_negative_advice, .link_failure = ipv4_link_failure, .update_pmtu = ip_rt_update_pmtu, .redirect = ip_do_redirect, .local_out = __ip_local_out, .neigh_lookup = ipv4_neigh_lookup, }; #define ECN_OR_COST(class) TC_PRIO_##class const __u8 ip_tos2prio[16] = { TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BESTEFFORT, ECN_OR_COST(BESTEFFORT), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_BULK, ECN_OR_COST(BULK), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE, ECN_OR_COST(INTERACTIVE), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK), TC_PRIO_INTERACTIVE_BULK, ECN_OR_COST(INTERACTIVE_BULK) }; EXPORT_SYMBOL(ip_tos2prio); static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat); #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field) #ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos) { if (*pos) return NULL; return SEQ_START_TOKEN; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller1037.04%114.29%
arnaldo carvalho de meloarnaldo carvalho de melo829.63%114.29%
pre-gitpre-git414.81%342.86%
eric dumazeteric dumazet311.11%114.29%
hideaki yoshifujihideaki yoshifuji27.41%114.29%
Total27100.00%7100.00%


static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos) { ++*pos; return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller1140.74%116.67%
arnaldo carvalho de meloarnaldo carvalho de melo725.93%116.67%
eric dumazeteric dumazet414.81%116.67%
hideaki yoshifujihideaki yoshifuji311.11%116.67%
pre-gitpre-git27.41%233.33%
Total27100.00%6100.00%


static void rt_cache_seq_stop(struct seq_file *seq, void *v) { }

Contributors

PersonTokensPropCommitsCommitProp
denis v. lunevdenis v. lunev750.00%133.33%
david s. millerdavid s. miller535.71%133.33%
hideaki yoshifujihideaki yoshifuji214.29%133.33%
Total14100.00%3100.00%


static int rt_cache_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) seq_printf(seq, "%-127s\n", "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t" "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t" "HHUptod\tSpecDst"); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller2365.71%133.33%
hideaki yoshifujihideaki yoshifuji617.14%133.33%
denis v. lunevdenis v. lunev617.14%133.33%
Total35100.00%3100.00%

/* Iterator over /proc/net/rt_cache (header only, see above). */
static const struct seq_operations rt_cache_seq_ops = {
	.start	= rt_cache_seq_start,
	.next	= rt_cache_seq_next,
	.stop	= rt_cache_seq_stop,
	.show	= rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller1456.00%150.00%
arnaldo carvalho de meloarnaldo carvalho de melo1144.00%150.00%
Total25100.00%2100.00%

static const struct file_operations rt_cache_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= rt_cache_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

/* seq_file .start for per-CPU stats: position 0 yields the header token;
 * position N+1 maps to the next possible CPU at or after N.
 */
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos - 1; cpu < nr_cpu_ids; cpu++) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu + 1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller4357.33%116.67%
arnaldo carvalho de meloarnaldo carvalho de melo2026.67%116.67%
eric dumazeteric dumazet810.67%116.67%
hideaki yoshifujihideaki yoshifuji22.67%116.67%
denis v. lunevdenis v. lunev11.33%116.67%
pre-gitpre-git11.33%116.67%
Total75100.00%6100.00%


static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos) { int cpu; for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) { if (!cpu_possible(cpu)) continue; *pos = cpu+1; return &per_cpu(rt_cache_stat, cpu); } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller3755.22%133.33%
arnaldo carvalho de meloarnaldo carvalho de melo2943.28%133.33%
eric dumazeteric dumazet11.49%133.33%
Total67100.00%3100.00%


static void rt_cpu_seq_stop(struct seq_file *seq, void *v) { }

Contributors

PersonTokensPropCommitsCommitProp
arnaldo carvalho de meloarnaldo carvalho de melo1285.71%150.00%
david s. millerdavid s. miller214.29%150.00%
Total14100.00%2100.00%


static int rt_cpu_seq_show(struct seq_file *seq, void *v) { struct rt_cache_stat *st = v; if (v == SEQ_START_TOKEN) { seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n"); return 0; } seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x " " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n", dst_entries_get_slow(&ipv4_dst_ops), 0, /* st->in_hit */ st->in_slow_tot, st->in_slow_mc, st->in_no_route, st->in_brd, st->in_martian_dst, st->in_martian_src, 0, /* st->out_hit */ st->out_slow_tot, st->out_slow_mc, 0, /* st->gc_total */ 0, /* st->gc_ignored */ 0, /* st->gc_goal_miss */ 0, /* st->gc_dst_overflow */ 0, /* st->in_hlist_search */ 0 /* st->out_hlist_search */ ); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller3833.63%112.50%
arnaldo carvalho de meloarnaldo carvalho de melo2925.66%112.50%
pre-gitpre-git2522.12%337.50%
eric dumazeteric dumazet1614.16%112.50%
pavel emelianovpavel emelianov43.54%112.50%
joe perchesjoe perches10.88%112.50%
Total113100.00%8100.00%

/* Iterator over /proc/net/stat/rt_cache (per-CPU statistics). */
static const struct seq_operations rt_cpu_seq_ops = {
	.start	= rt_cpu_seq_start,
	.next	= rt_cpu_seq_next,
	.stop	= rt_cpu_seq_stop,
	.show	= rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
linus torvaldslinus torvalds1040.00%125.00%
stephen hemmingerstephen hemminger936.00%125.00%
david s. millerdavid s. miller312.00%125.00%
pavel emelianovpavel emelianov312.00%125.00%
Total25100.00%4100.00%

static const struct file_operations rt_cpu_seq_fops = { .owner = THIS_MODULE, .open = rt_cpu_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; #ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v) { struct ip_rt_acct *dst, *src; unsigned int i, j; dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL); if (!dst) return -ENOMEM; for_each_possible_cpu(i) { src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i); for (j = 0; j < 256; j++) { dst[j].o_bytes += src[j].o_bytes; dst[j].o_packets += src[j].o_packets; dst[j].i_bytes += src[j].i_bytes; dst[j].i_packets += src[j].i_packets; } } seq_write(m, dst, 256 * sizeof(struct ip_rt_acct)); kfree(dst); return 0; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller13480.24%125.00%
stephen hemmingerstephen hemminger2615.57%125.00%
harald welteharald welte52.99%125.00%
linus torvaldslinus torvalds21.20%125.00%
Total167100.00%4100.00%


static int rt_acct_proc_open(struct inode *inode, struct file *file) { return single_open(file, rt_acct_proc_show, NULL); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller1246.15%133.33%
stephen hemmingerstephen hemminger1246.15%133.33%
linus torvaldslinus torvalds27.69%133.33%
Total26100.00%3100.00%

static const struct file_operations rt_acct_proc_fops = { .owner = THIS_MODULE, .open = rt_acct_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; #endif
static int __net_init ip_rt_do_proc_init(struct net *net) { struct proc_dir_entry *pde; pde = proc_create("rt_cache", S_IRUGO, net->proc_net, &rt_cache_seq_fops); if (!pde) goto err1; pde = proc_create("rt_cache", S_IRUGO, net->proc_net_stat, &rt_cpu_seq_fops); if (!pde) goto err2; #ifdef CONFIG_IP_ROUTE_CLASSID pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops); if (!pde) goto err3; #endif return 0; #ifdef CONFIG_IP_ROUTE_CLASSID err3: remove_proc_entry("rt_cache", net->proc_net_stat); #endif err2: remove_proc_entry("rt_cache", net->proc_net); err1: return -ENOMEM; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller9976.15%114.29%
stephen hemmingerstephen hemminger118.46%114.29%
linus torvaldslinus torvalds64.62%114.29%
ravikiran g thirumalairavikiran g thirumalai64.62%114.29%
gao fenggao feng53.85%114.29%
robert olssonrobert olsson21.54%114.29%
harald welteharald welte10.77%114.29%
Total130100.00%7100.00%


static void __net_exit ip_rt_do_proc_exit(struct net *net) { remove_proc_entry("rt_cache", net->proc_net_stat); remove_proc_entry("rt_cache", net->proc_net); #ifdef CONFIG_IP_ROUTE_CLASSID remove_proc_entry("rt_acct", net->proc_net); #endif }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller3579.55%120.00%
robert olssonrobert olsson511.36%240.00%
linus torvaldslinus torvalds36.82%120.00%
ravikiran g thirumalairavikiran g thirumalai12.27%120.00%
Total44100.00%5100.00%

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

/* Hook the /proc setup/teardown into network-namespace lifetime. */
static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

Contributors

PersonTokensPropCommitsCommitProp
arnaldo carvalho de meloarnaldo carvalho de melo1062.50%133.33%
david s. millerdavid s. miller531.25%133.33%
stephen hemmingerstephen hemminger16.25%133.33%
Total16100.00%3100.00%

#else
static inline int ip_rt_proc_init(void) { return 0; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller1083.33%150.00%
arnaldo carvalho de meloarnaldo carvalho de melo216.67%150.00%
Total12100.00%2100.00%

#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth) { return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev)); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller2066.67%120.00%
pavel emelianovpavel emelianov516.67%120.00%
eric dumazeteric dumazet26.67%120.00%
alexey dobriyanalexey dobriyan26.67%120.00%
fan dufan du13.33%120.00%
Total30100.00%5100.00%


void rt_cache_flush(struct net *net) { rt_genid_bump_ipv4(net); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller960.00%120.00%
pavel emelianovpavel emelianov320.00%120.00%
alexey dobriyanalexey dobriyan16.67%120.00%
fan dufan du16.67%120.00%
eric dumazeteric dumazet16.67%120.00%
Total15100.00%5100.00%


static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst, struct sk_buff *skb, const void *daddr) { struct net_device *dev = dst->dev; const __be32 *pkey = daddr; const struct rtable *rt; struct neighbour *n; rt = (const struct rtable *) dst; if (rt->rt_gateway) pkey = (const __be32 *) &rt->rt_gateway; else if (skb) pkey = &ip_hdr(skb)->daddr; n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey); if (n) return n; return neigh_create(&arp_tbl, pkey, dev); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller10381.10%133.33%
alexey dobriyanalexey dobriyan1511.81%133.33%
pavel emelianovpavel emelianov97.09%133.33%
Total127100.00%3100.00%

#define IP_IDENTS_SZ 2048u static atomic_t *ip_idents __read_mostly; static u32 *ip_tstamps __read_mostly; /* In order to protect privacy, we add a perturbation to identifiers * if one generator is seldom used. This makes hard for an attacker * to infer how many packets were sent between two points in time. */
u32 ip_idents_reserve(u32 hash, int segs) { u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ; atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ; u32 old = ACCESS_ONCE(*p_tstamp); u32 now = (u32)jiffies; u32 new, delta = 0; if (old != now && cmpxchg(p_tstamp, old, now) == old) delta = prandom_u32_max(now - old); /* Do not use atomic_add_return() as it makes UBSAN unhappy */ do { old = (u32)atomic_read(p_id); new = old + delta + segs; } while (atomic_cmpxchg(p_id, old, new) != old); return new - segs; }

Contributors

PersonTokensPropCommitsCommitProp
eric dumazeteric dumazet122100.00%3100.00%
Total122100.00%3100.00%

EXPORT_SYMBOL(ip_idents_reserve);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs) { static u32 ip_idents_hashrnd __read_mostly; u32 hash, id; net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd)); hash = jhash_3words((__force u32)iph->daddr, (__force u32)iph->saddr, iph->protocol ^ net_hash_mix(net), ip_idents_hashrnd); id = ip_idents_reserve(hash, segs); iph->id = htons(id); }

Contributors

PersonTokensPropCommitsCommitProp
eric dumazeteric dumazet3741.57%233.33%
david s. millerdavid s. miller2022.47%116.67%
pavel emelianovpavel emelianov1820.22%116.67%
hannes frederic sowahannes frederic sowa1011.24%116.67%
denis v. lunevdenis v. lunev44.49%116.67%
Total89100.00%6100.00%

EXPORT_SYMBOL(__ip_select_ident);

/* Fill @fl4 from the supplied header fields; when a socket is given its
 * own oif/mark/tos/protocol take precedence over the packet-derived ones.
 */
static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos, RT_SCOPE_UNIVERSE, prot,
			   flow_flags, iph->daddr, iph->saddr, 0, 0);
}

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller10288.70%116.67%
pre-gitpre-git108.70%466.67%
dipankar sarmadipankar sarma32.61%116.67%
Total115100.00%6100.00%


static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb, const struct sock *sk) { const struct iphdr *iph = ip_hdr(skb); int oif = skb->dev->ifindex; u8 tos = RT_TOS(iph->tos); u8 prot = iph->protocol; u32 mark = skb->mark; __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller7182.56%133.33%
pre-gitpre-git1213.95%133.33%
dipankar sarmadipankar sarma33.49%133.33%
Total86100.00%3100.00%


static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk) { const struct inet_sock *inet = inet_sk(sk); const struct ip_options_rcu *inet_opt; __be32 daddr = inet->inet_daddr; rcu_read_lock(); inet_opt = rcu_dereference(inet->inet_opt); if (inet_opt && inet_opt->opt.srr) daddr = inet_opt->opt.faddr; flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol, inet_sk_flowi_flags(sk), daddr, inet->inet_saddr, 0, 0); rcu_read_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller9478.99%240.00%
pre-gitpre-git2319.33%240.00%
linus torvaldslinus torvalds21.68%120.00%
Total119100.00%5100.00%


static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk, const struct sk_buff *skb) { if (skb) build_skb_flow_key(fl4, skb, sk); else build_sk_flow_key(fl4, sk); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller3170.45%266.67%
alexey kuznetsovalexey kuznetsov1329.55%133.33%
Total44100.00%3100.00%


static inline void rt_free(struct rtable *rt) { call_rcu(&rt->dst.rcu_head, dst_rcu_free); }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller24100.00%1100.00%
Total24100.00%1100.00%

static DEFINE_SPINLOCK(fnhe_lock);

/* Detach and free both routes cached on this next-hop exception; the
 * memory itself is reclaimed RCU-deferred by rt_free().
 */
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		rt_free(rt);
	}

	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		rt_free(rt);
	}
}

Contributors

PersonTokensPropCommitsCommitProp
timo terastimo teras74100.00%1100.00%
Total74100.00%1100.00%


static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash) { struct fib_nh_exception *fnhe, *oldest; oldest = rcu_dereference(hash->chain); for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) oldest = fnhe; } fnhe_flush_routes(oldest); return oldest; }

Contributors

PersonTokensPropCommitsCommitProp
david s. millerdavid s. miller6278.48%250.00%
neil hormanneil horman1620.25%125.00%
timo terastimo teras11.27%125.00%
Total79100.00%4100.00%


static inline u32 fnhe_hashfun(__be32 daddr) { static u32 fnhe_hashrnd __read_mostly; u32 hval; net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd)); hval = jhash_1word((__force u32) daddr, fnhe_hashrnd); return hash_32(hval, FNHE_HASH_SHIFT); }

Contributors

PersonTokensPropCommitsCommitProp
eric dumazeteric dumazet3162.00%250.00%
david s. millerdavid s. miller1224.00%125.00%
francois romieufrancois romieu714.00%125.00%
Total50100.00%4100.00%


static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe) { rt->rt_pmtu = fnhe->fnhe_pmtu; rt->dst.expires = fnhe->fnhe_expires; if (fnhe->fnhe_gw) { rt->rt_flags |= RTCF_REDIRECTED; rt->rt_gateway = fnhe->fnhe_gw; rt->rt_uses_gateway = 1; } }

Contributors

PersonTokensPropCommitsCommitProp
timo terastimo teras62100.00%1100.00%
Total62100.00%1100.00%


static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw, u32 pmtu, unsigned long expires) { struct fnhe_hash_bucket *hash; struct fib_nh_exception *fnhe; struct rtable *rt; unsigned int i; int depth; u32 hval = fnhe_hashfun(daddr); spin_lock_bh(&fnhe_lock); hash = rcu_dereference(nh->nh_exceptions); if (!hash) { hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC); if (!hash) goto out_unlock; rcu_assign_pointer(nh->nh_exceptions, hash); } hash += hval; depth = 0; for (fnhe = rcu_dereference(hash->chain); fnhe; fnhe = rcu_dereference(fnhe->fnhe_next)) { if (fnhe->fnhe_daddr == daddr) break; depth++; } if (fnhe) { if (gw) fnhe->fnhe_gw = gw; if (pmtu) { fnhe->fnhe_pmtu = pmtu; fnhe->fnhe_expires = max(1UL, expires); } /* Update all cached dsts too */ rt = rcu_dereference(fnhe->fnhe_rth_input); if (rt) fill_route_from_fnhe(rt, fnhe); rt = rcu_dereference(fnhe->fnhe_rth_output); if (rt) fill_route_from_fnhe(rt, fnhe); } else { if (depth > FNHE_RECLAIM_DEPTH) fnhe = fnhe_oldest(hash); else { fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC); if (!fnhe)