cregit-Linux how code gets into the kernel

Release 4.11 net/ipv4/ipmr.c

Directory: net/ipv4
/*
 *      IP multicast routing support for mrouted 3.6/3.8
 *
 *              (c) 1995 Alan Cox, <alan@lxorguk.ukuu.org.uk>
 *        Linux Consultancy and Custom Driver Development
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 *      Fixes:
 *      Michael Chastain        :       Incorrect size of copying.
 *      Alan Cox                :       Added the cache manager code
 *      Alan Cox                :       Fixed the clone/copy bug and device race.
 *      Mike McLagan            :       Routing by source
 *      Malcolm Beattie         :       Buffer handling fixes.
 *      Alexey Kuznetsov        :       Double buffer free and other fixes.
 *      SVR Anand               :       Fixed several multicast bugs and problems.
 *      Alexey Kuznetsov        :       Status, optimisations and more.
 *      Brad Parker             :       Better behaviour on mrouted upcall
 *                                      overflow.
 *      Carlos Picoto           :       PIMv1 Support
 *      Pavlin Ivanov Radoslavov:       PIMv2 Registers must checksum only PIM header
 *                                      Relax this requirement to work with older peers.
 *
 */

#include <linux/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/compat.h>
#include <linux/export.h>
#include <net/ip_tunnels.h>
#include <net/checksum.h>
#include <net/netlink.h>
#include <net/fib_rules.h>
#include <linux/netconf.h>
#include <net/nexthop.h>


/* Per-rule private data for IPMR policy routing rules.  No extra match
 * fields are needed beyond the generic fib_rule header.
 */
struct ipmr_rule {
	struct fib_rule		common;
};


/* Lookup result passed through fib_rules_lookup()'s fib_lookup_arg:
 * the multicast routing table selected by the matching rule.
 */
struct ipmr_result {
	struct mr_table		*mrt;
};

/* Big lock, protecting vif table, mrt cache and mroute socket state.
 * Note that the changes are semaphored via rtnl_lock.
 */
static DEFINE_RWLOCK(mrt_lock);

/* Multicast router control variables */

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
 * entries is changed only in process context and protected
 * with weak lock mrt_lock. Queue of unresolved entries is protected
 * with strong spinlock mfc_unres_lock.
 *
 * In this case data path is free of exclusive locks at all.
 */

/* slab cache for struct mfc_cache allocations (resolved and unresolved) */
static struct kmem_cache *mrt_cachep __read_mostly;

/* Forward declarations for helpers defined later in this file. */
static struct mr_table *ipmr_new_table(struct net *net, u32 id);
static void ipmr_free_table(struct mr_table *mrt);

static void ip_mr_forward(struct net *net, struct mr_table *mrt,
			  struct sk_buff *skb, struct mfc_cache *cache,
			  int local);
static int ipmr_cache_report(struct mr_table *mrt,
			     struct sk_buff *pkt, vifi_t vifi, int assert);
static int __ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
			      struct mfc_cache *c, struct rtmsg *rtm);
static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
				 int cmd);
static void mroute_clean_tables(struct mr_table *mrt, bool all);
static void ipmr_expire_process(unsigned long arg);

#ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES

/* Walk all multicast routing tables in @net; RCU-safe iteration. */
#define ipmr_for_each_table(mrt, net) \
	list_for_each_entry_rcu(mrt, &net->ipv4.mr_tables, list)


static struct mr_table *ipmr_get_table(struct net *net, u32 id) { struct mr_table *mrt; ipmr_for_each_table(mrt, net) { if (mrt->id == id) return mrt; } return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy2865.12%133.33%
Wang Chen1227.91%133.33%
Benjamin Thery36.98%133.33%
Total43100.00%3100.00%


static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { int err; struct ipmr_result res; struct fib_lookup_arg arg = { .result = &res, .flags = FIB_LOOKUP_NOREF, }; /* update flow if oif or iif point to device enslaved to l3mdev */ l3mdev_update_flow(net, flowi4_to_flowi(flp4)); err = fib_rules_lookup(net->ipv4.mr_rules_ops, flowi4_to_flowi(flp4), 0, &arg); if (err < 0) return err; *mrt = res.mrt; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy6567.01%116.67%
David Ahern1111.34%116.67%
Hannes Frederic Sowa99.28%116.67%
David S. Miller66.19%116.67%
Wang Chen44.12%116.67%
Stephen Hemminger22.06%116.67%
Total97100.00%6100.00%


static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, int flags, struct fib_lookup_arg *arg) { struct ipmr_result *res = arg->result; struct mr_table *mrt; switch (rule->action) { case FR_ACT_TO_TBL: break; case FR_ACT_UNREACHABLE: return -ENETUNREACH; case FR_ACT_PROHIBIT: return -EACCES; case FR_ACT_BLACKHOLE: default: return -EINVAL; } arg->table = fib_rule_get_table(rule, arg); mrt = ipmr_get_table(rule->fr_net, arg->table); if (!mrt) return -EAGAIN; res->mrt = mrt; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy10188.60%133.33%
David Ahern1210.53%133.33%
Ian Morris10.88%133.33%
Total114100.00%3100.00%


static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) { return 1; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy22100.00%1100.00%
Total22100.00%1100.00%

static const struct nla_policy ipmr_rule_policy[FRA_MAX + 1] = { FRA_GENERIC_POLICY, };
static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh, struct nlattr **tb) { return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy30100.00%1100.00%
Total30100.00%1100.00%


static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, struct nlattr **tb) { return 1; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy25100.00%1100.00%
Total25100.00%1100.00%


static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, struct fib_rule_hdr *frh) { frh->dst_len = 0; frh->src_len = 0; frh->tos = 0; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy42100.00%1100.00%
Total42100.00%1100.00%

static const struct fib_rules_ops __net_initconst ipmr_rules_ops_template = { .family = RTNL_FAMILY_IPMR, .rule_size = sizeof(struct ipmr_rule), .addr_size = sizeof(u32), .action = ipmr_rule_action, .match = ipmr_rule_match, .configure = ipmr_rule_configure, .compare = ipmr_rule_compare, .fill = ipmr_rule_fill, .nlgroup = RTNLGRP_IPV4_RULE, .policy = ipmr_rule_policy, .owner = THIS_MODULE, };
static int __net_init ipmr_rules_init(struct net *net) { struct fib_rules_ops *ops; struct mr_table *mrt; int err; ops = fib_rules_register(&ipmr_rules_ops_template, net); if (IS_ERR(ops)) return PTR_ERR(ops); INIT_LIST_HEAD(&net->ipv4.mr_tables); mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); if (IS_ERR(mrt)) { err = PTR_ERR(mrt); goto err1; } err = fib_default_rule_add(ops, 0x7fff, RT_TABLE_DEFAULT, 0); if (err < 0) goto err2; net->ipv4.mr_rules_ops = ops; return 0; err2: ipmr_free_table(mrt); err1: fib_rules_unregister(ops); return err; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy12894.12%133.33%
Nikolay Aleksandrov75.15%133.33%
Américo Wang10.74%133.33%
Total136100.00%3100.00%


static void __net_exit ipmr_rules_exit(struct net *net) { struct mr_table *mrt, *next; rtnl_lock(); list_for_each_entry_safe(mrt, next, &net->ipv4.mr_tables, list) { list_del(&mrt->list); ipmr_free_table(mrt); } fib_rules_unregister(net->ipv4.mr_rules_ops); rtnl_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy4371.67%120.00%
Eric Dumazet1016.67%120.00%
Américo Wang610.00%240.00%
Francesco Ruggeri11.67%120.00%
Total60100.00%5100.00%

#else #define ipmr_for_each_table(mrt, net) \ for (mrt = net->ipv4.mrt; mrt; mrt = NULL)
static struct mr_table *ipmr_get_table(struct net *net, u32 id) { return net->ipv4.mrt; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy23100.00%1100.00%
Total23100.00%1100.00%


static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, struct mr_table **mrt) { *mrt = net->ipv4.mrt; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy3294.12%150.00%
David S. Miller25.88%150.00%
Total34100.00%2100.00%


static int __net_init ipmr_rules_init(struct net *net) { struct mr_table *mrt; mrt = ipmr_new_table(net, RT_TABLE_DEFAULT); if (IS_ERR(mrt)) return PTR_ERR(mrt); net->ipv4.mrt = mrt; return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy2958.00%150.00%
Nikolay Aleksandrov2142.00%150.00%
Total50100.00%2100.00%


static void __net_exit ipmr_rules_exit(struct net *net) { rtnl_lock(); ipmr_free_table(net->ipv4.mrt); net->ipv4.mrt = NULL; rtnl_unlock(); }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy2057.14%133.33%
Américo Wang1440.00%133.33%
Francesco Ruggeri12.86%133.33%
Total35100.00%3100.00%

#endif
static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg, const void *ptr) { const struct mfc_cache_cmp_arg *cmparg = arg->key; struct mfc_cache *c = (struct mfc_cache *)ptr; return cmparg->mfc_mcastgrp != c->mfc_mcastgrp || cmparg->mfc_origin != c->mfc_origin; }

Contributors

PersonTokensPropCommitsCommitProp
Nikolay Aleksandrov56100.00%1100.00%
Total56100.00%1100.00%

static const struct rhashtable_params ipmr_rht_params = { .head_offset = offsetof(struct mfc_cache, mnode), .key_offset = offsetof(struct mfc_cache, cmparg), .key_len = sizeof(struct mfc_cache_cmp_arg), .nelem_hint = 3, .locks_mul = 1, .obj_cmpfn = ipmr_hash_cmp, .automatic_shrinking = true, };
static struct mr_table *ipmr_new_table(struct net *net, u32 id) { struct mr_table *mrt; /* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */ if (id != RT_TABLE_DEFAULT && id >= 1000000000) return ERR_PTR(-EINVAL); mrt = ipmr_get_table(net, id); if (mrt) return mrt; mrt = kzalloc(sizeof(*mrt), GFP_KERNEL); if (!mrt) return ERR_PTR(-ENOMEM); write_pnet(&mrt->net, net); mrt->id = id; rhltable_init(&mrt->mfc_hash, &ipmr_rht_params); INIT_LIST_HEAD(&mrt->mfc_cache_list); INIT_LIST_HEAD(&mrt->mfc_unres_queue); setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process, (unsigned long)mrt); mrt->mroute_reg_vif_num = -1; #ifdef CONFIG_IP_MROUTE_MULTIPLE_TABLES list_add_tail_rcu(&mrt->list, &net->ipv4.mr_tables); #endif return mrt; }

Contributors

PersonTokensPropCommitsCommitProp
Patrick McHardy13378.70%240.00%
Nikolay Aleksandrov3520.71%240.00%
Ian Morris10.59%120.00%
Total169100.00%5100.00%


static void ipmr_free_table(struct mr_table *mrt) { del_timer_sync(&mrt->ipmr_expire_timer); mroute_clean_tables(mrt, true); rhltable_destroy(&mrt->mfc_hash); kfree(mrt); }

Contributors

PersonTokensPropCommitsCommitProp
Francesco Ruggeri2974.36%133.33%
Nikolay Aleksandrov1025.64%266.67%
Total39100.00%3100.00%

/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

/* Tear down a DVMRP tunnel VIF: close @dev, then ask the tunl0 fallback
 * device to delete the matching "dvmrp%d" tunnel via SIOCDELTUNNEL.
 */
static void ipmr_del_tunnel(struct net_device *dev, struct vifctl *v)
{
	struct net *net = dev_net(dev);

	dev_close(dev);

	dev = __dev_get_by_name(net, "tunl0");
	if (dev) {
		const struct net_device_ops *ops = dev->netdev_ops;
		struct ifreq ifr;
		struct ip_tunnel_parm p;

		/* Rebuild the tunnel parameters the VIF was created with */
		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (__force void __user *)&p;

		if (ops->ndo_do_ioctl) {
			/* Widen the address limit so the kernel pointer above
			 * passes the ioctl's __user access checks.
			 */
			mm_segment_t oldfs = get_fs();

			set_fs(KERNEL_DS);
			ops->ndo_do_ioctl(dev, &ifr, SIOCDELTUNNEL);
			set_fs(oldfs);
		}
	}
}

Contributors

PersonTokensPropCommitsCommitProp
Wang Chen15582.01%133.33%
Stephen Hemminger2312.17%133.33%
Benjamin Thery115.82%133.33%
Total189100.00%3100.00%

/* Initialize ipmr pimreg/tunnel in_device */
/* Prepare the IPv4 in_device of a freshly created pimreg/tunnel VIF:
 * enable all devconf options and disable reverse-path filtering.
 * Returns false if the device has no in_device.  Caller holds RTNL.
 */
static bool ipmr_init_vif_indev(const struct net_device *dev)
{
	struct in_device *idev;

	ASSERT_RTNL();

	idev = __in_dev_get_rtnl(dev);
	if (!idev)
		return false;

	ipv4_devconf_setall(idev);
	neigh_parms_data_state_setall(idev->arp_parms);
	/* RP filter would drop multicast arriving on this virtual device */
	IPV4_DEVCONF(idev->cnf, RP_FILTER) = 0;

	return true;
}

Contributors

PersonTokensPropCommitsCommitProp
Nikolay Aleksandrov61100.00%1100.00%
Total61100.00%1100.00%


static struct net_device *ipmr_new_tunnel(struct net *net, struct vifctl *v) { struct net_device *dev; dev = __dev_get_by_name(net, "tunl0"); if (dev) { const struct net_device_ops *ops = dev->netdev_ops; int err; struct ifreq ifr; struct ip_tunnel_parm p; memset(&p, 0, sizeof(p)); p.iph.daddr = v->vifc_rmt_addr.s_addr; p.iph.saddr = v->vifc_lcl_addr.s_addr; p.iph.version = 4; p.iph.ihl = 5; p.iph.protocol = IPPROTO_IPIP; sprintf(p.name, "dvmrp%d", v->vifc_vifi); ifr.ifr_ifru.ifru_data = (__force void __user *)&p; if (ops->ndo_do_ioctl) { mm_segment_t oldfs = get_fs(); set_fs(KERNEL_DS); err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL); set_fs(oldfs); } else { err = -EOPNOTSUPP; } dev = NULL; if (err == 0 && (dev = __dev_get_by_name(net, p.name)) != NULL) { dev->flags |= IFF_MULTICAST; if (!ipmr_init_vif_indev(dev)) goto failure; if (dev_open(dev)) goto failure; dev_hold(dev); } } return dev; failure: unregister_netdevice(dev); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)21380.08%535.71%
Stephen Hemminger3111.65%214.29%
Benjamin Thery72.63%17.14%
Wang Chen51.88%17.14%
Nikolay Aleksandrov51.88%214.29%
Eric Dumazet20.75%17.14%
Eric W. Biedermann20.75%17.14%
Ian Morris10.38%17.14%
Total266100.00%14100.00%

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
static netdev_tx_t reg_vif_xmit(struct sk_buff *skb, struct net_device *dev) { struct net *net = dev_net(dev); struct mr_table *mrt; struct flowi4 fl4 = { .flowi4_oif = dev->ifindex, .flowi4_iif = skb->skb_iif ? : LOOPBACK_IFINDEX, .flowi4_mark = skb->mark, }; int err; err = ipmr_fib_lookup(net, &fl4, &mrt); if (err < 0) { kfree_skb(skb); return err; } read_lock(&mrt_lock); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; ipmr_cache_report(mrt, skb, mrt->mroute_reg_vif_num, IGMPMSG_WHOLEPKT); read_unlock(&mrt_lock); kfree_skb(skb); return NETDEV_TX_OK; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)5538.46%637.50%
Patrick McHardy5437.76%318.75%
Benjamin Thery139.09%212.50%
Ben Greear74.90%16.25%
David S. Miller64.20%16.25%
Pavel Emelyanov42.80%16.25%
Cong Wang32.10%16.25%
Stephen Hemminger10.70%16.25%
Total143100.00%16100.00%


static int reg_vif_get_iflink(const struct net_device *dev) { return 0; }

Contributors

PersonTokensPropCommitsCommitProp
Nicolas Dichtel15100.00%1100.00%
Total15100.00%1100.00%

static const struct net_device_ops reg_vif_netdev_ops = { .ndo_start_xmit = reg_vif_xmit, .ndo_get_iflink = reg_vif_get_iflink, };
static void reg_vif_setup(struct net_device *dev) { dev->type = ARPHRD_PIMREG; dev->mtu = ETH_DATA_LEN - sizeof(struct iphdr) - 8; dev->flags = IFF_NOARP; dev->netdev_ops = &reg_vif_netdev_ops; dev->destructor = free_netdev; dev->features |= NETIF_F_NETNS_LOCAL; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)3053.57%436.36%
David S. Miller1119.64%19.09%
Stephen Hemminger712.50%327.27%
Thomas Goff610.71%19.09%
Himangi Saraogi11.79%19.09%
Kris Katterjohn11.79%19.09%
Total56100.00%11100.00%


static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) { struct net_device *dev; char name[IFNAMSIZ]; if (mrt->id == RT_TABLE_DEFAULT) sprintf(name, "pimreg"); else sprintf(name, "pimreg%u", mrt->id); dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup); if (!dev) return NULL; dev_net_set(dev, net); if (register_netdevice(dev)) { free_netdev(dev); return NULL; } if (!ipmr_init_vif_indev(dev)) goto failure; if (dev_open(dev)) goto failure; dev_hold(dev); return dev; failure: unregister_netdevice(dev); return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)4532.14%321.43%
Patrick McHardy3927.86%17.14%
Stephen Hemminger2115.00%17.14%
Thomas Goff117.86%17.14%
Raj75.00%17.14%
Wang Chen53.57%17.14%
Nikolay Aleksandrov42.86%17.14%
Eric Dumazet32.14%17.14%
Tom Gundersen21.43%17.14%
Ian Morris10.71%17.14%
Al Viro10.71%17.14%
Pavel Emelyanov10.71%17.14%
Total140100.00%14100.00%

/* called with rcu_read_lock() */
/* Decapsulate a PIM REGISTER packet: validate the inner IP header,
 * strip the outer headers and re-inject the inner packet through the
 * register VIF.  Returns 0 (NET_RX_SUCCESS) when consumed, 1 when the
 * packet should be handled/freed by the caller.
 */
static int __pim_rcv(struct mr_table *mrt, struct sk_buff *skb,
		     unsigned int pimlen)
{
	struct net_device *reg_dev = NULL;
	struct iphdr *encap;

	encap = (struct iphdr *)(skb_transport_header(skb) + pimlen);
	/* Check that:
	 * a. packet is really sent to a multicast group
	 * b. packet is not a NULL-REGISTER
	 * c. packet is not truncated
	 */
	if (!ipv4_is_multicast(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + pimlen > skb->len)
		return 1;

	/* mrt_lock guards the register VIF slot against deletion */
	read_lock(&mrt_lock);
	if (mrt->mroute_reg_vif_num >= 0)
		reg_dev = mrt->vif_table[mrt->mroute_reg_vif_num].dev;
	read_unlock(&mrt_lock);

	if (!reg_dev)
		return 1;

	/* Make the inner packet look freshly received on reg_dev */
	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8 *)encap - skb->data);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = CHECKSUM_NONE;

	skb_tunnel_rx(skb, reg_dev, dev_net(reg_dev));

	netif_rx(skb);

	return NET_RX_SUCCESS;
}

Contributors

PersonTokensPropCommitsCommitProp
Nikolay Aleksandrov185100.00%2100.00%
Total185100.00%2100.00%

#else
static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt) { return NULL; }

Contributors

PersonTokensPropCommitsCommitProp
Nikolay Aleksandrov21100.00%1100.00%
Total21100.00%1100.00%

#endif

/**
 * vif_delete - Delete a VIF entry
 * @mrt: multicast routing table the VIF belongs to
 * @vifi: index of the VIF to delete
 * @notify: Set to 1, if the caller is a notifier_call
 * @head: list to queue the device on for batched unregistration
 *
 * Returns 0 on success, -EADDRNOTAVAIL if @vifi is out of range or the
 * slot is already empty.
 */
static int vif_delete(struct mr_table *mrt, int vifi, int notify,
		      struct list_head *head)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= mrt->maxvif)
		return -EADDRNOTAVAIL;

	v = &mrt->vif_table[vifi];

	/* Clear the slot under the writer lock so the data path never sees
	 * a half-deleted VIF.
	 */
	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

	if (vifi == mrt->mroute_reg_vif_num)
		mrt->mroute_reg_vif_num = -1;

	/* If the highest slot was freed, shrink maxvif to the last used one */
	if (vifi + 1 == mrt->maxvif) {
		int tmp;

		for (tmp = vifi - 1; tmp >= 0; tmp--) {
			if (VIF_EXISTS(mrt, tmp))
				break;
		}
		mrt->maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	in_dev = __in_dev_get_rtnl(dev);
	if (in_dev) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		inet_netconf_notify_devconf(dev_net(dev), NETCONFA_MC_FORWARDING,
					    dev->ifindex, &in_dev->cnf);
		ip_rt_multicast_event(in_dev);
	}

	/* Devices we created (tunnel/pimreg) are unregistered unless the
	 * caller is itself reacting to the device going away.
	 */
	if (v->flags & (VIFF_TUNNEL | VIFF_REGISTER) && !notify)
		unregister_netdevice_queue(dev, head);

	dev_put(dev);
	return 0;
}

Contributors

PersonTokensPropCommitsCommitProp
Linus Torvalds (pre-git)19074.80%533.33%
Nicolas Dichtel197.48%16.67%
Benjamin Thery124.72%320.00%
Eric Dumazet124.72%213.33%
Patrick McHardy93.54%16.67%
Herbert Xu62.36%213.33%
Wang Chen62.36%16.67%
Total254100.00%15100.00%


static void ipmr_cache_free_rcu(struct rcu_head *head) { struct mfc_cache *c = container_of(head, struct mfc_cache, rcu); kmem_cache_free(mrt_cachep, c); }

Contributors

PersonTokensPropCommitsCommitProp
Eric Dumazet33100.00%1100.00%
Total33100.00%1100.00%


static inline void ipmr_cache_free(struct mfc_cache *c) { call_rcu(&c->rcu, ipmr_cache_free_rcu); }

Contributors

PersonTokensPropCommitsCommitProp
Benjamin Thery1672.73%150.00%
Eric Dumazet627.27%150.00%
Total22100.00%2100.00%

/* Destroy an unresolved cache entry, killing queued skbs * and reporting error to netlink readers. */
static void ipmr_destroy_unres(struct mr_table *mrt, struct mfc_cache *c) { struct net *net = read_pnet(&mrt->net); struct sk_buff *skb; struct nlmsgerr *e; atomic_dec(&mrt->cache_resolve_queue_len); while ((skb = skb_dequeue(&c->mfc_un.unres.unresolved))) { if (ip_hdr(skb)->version == 0) { struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr)); nlh->nlmsg_type = NLMSG_ERROR; nlh->nlmsg_len = nlmsg_msg_size(sizeof(struct nlmsgerr)); skb_trim(skb, nlh->nlmsg_len); e = nlmsg_data(nlh); e->error = -ETIMEDOUT; memset(&e->msg, 0, sizeof(e->msg)); rtnl_unicast(skb, net, NETLINK_CB(skb).portid)