Release 4.11 net/ipv4/icmp.c
/*
* NET3: Implementation of the ICMP protocol layer.
*
* Alan Cox, <alan@lxorguk.ukuu.org.uk>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Some of the function names and the icmp unreach table for this
* module were derived from [icmp.c 1.0.11 06/02/93] by
* Ross Biro, Fred N. van Kempen, Mark Evans, Alan Cox, Gerhard Koerting.
* Other than that this module is a complete rewrite.
*
* Fixes:
* Clemens Fruhwirth : introduce global icmp rate limiting
* with icmp type masking ability instead
* of broken per type icmp timeouts.
* Mike Shaver : RFC1122 checks.
* Alan Cox : Multicast ping reply as self.
* Alan Cox : Fix atomicity lockup in ip_build_xmit
* call.
* Alan Cox : Added 216,128 byte paths to the MTU
* code.
* Martin Mares : RFC1812 checks.
* Martin Mares : Can be configured to follow redirects
* if acting as a router _without_ a
* routing protocol (RFC 1812).
* Martin Mares : Echo requests may be configured to
* be ignored (RFC 1812).
* Martin Mares : Limitation of ICMP error message
* transmit rate (RFC 1812).
* Martin Mares : TOS and Precedence set correctly
* (RFC 1812).
* Martin Mares : Now copying as much data from the
* original packet as we can without
* exceeding 576 bytes (RFC 1812).
* Willy Konynenberg : Transparent proxying support.
* Keith Owens : RFC1191 correction for 4.2BSD based
* path MTU bug.
* Thomas Quinot : ICMP Dest Unreach codes up to 15 are
* valid (RFC 1812).
* Andi Kleen : Check all packet lengths properly
* and moved all kfree_skb() up to
* icmp_rcv.
* Andi Kleen : Move the rate limit bookkeeping
* into the dest entry and use a token
* bucket filter (thanks to ANK). Make
* the rates sysctl configurable.
* Yu Tianli : Fixed two ugly bugs in icmp_send
* - IP option length was accounted wrongly
* - ICMP header length was not accounted
* at all.
* Tristan Greaves : Added sysctl option to ignore bogus
* broadcast responses from broken routers.
*
* To Fix:
*
* - Should use skb_pull() instead of all the manual checking.
* This would also greatly simplify some upper layer error handlers. --AK
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/netfilter_ipv4.h>
#include <linux/slab.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/protocol.h>
#include <net/icmp.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <net/ping.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/uaccess.h>
#include <net/checksum.h>
#include <net/xfrm.h>
#include <net/inet_common.h>
#include <net/ip_fib.h>
#include <net/l3mdev.h>
/*
* Build xmit assembly blocks
*/
struct icmp_bxm {
struct sk_buff *skb;
int offset;
int data_len;
struct {
struct icmphdr icmph;
__be32 times[3];
} data;
int head_len;
struct ip_options_data replyopts;
};
/* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
const struct icmp_err icmp_err_convert[] = {
{
.errno = ENETUNREACH, /* ICMP_NET_UNREACH */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNREACH */
.fatal = 0,
},
{
.errno = ENOPROTOOPT, /* ICMP_PROT_UNREACH */
.fatal = 1,
},
{
.errno = ECONNREFUSED, /* ICMP_PORT_UNREACH */
.fatal = 1,
},
{
.errno = EMSGSIZE, /* ICMP_FRAG_NEEDED */
.fatal = 0,
},
{
.errno = EOPNOTSUPP, /* ICMP_SR_FAILED */
.fatal = 0,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNKNOWN */
.fatal = 1,
},
{
.errno = EHOSTDOWN, /* ICMP_HOST_UNKNOWN */
.fatal = 1,
},
{
.errno = ENONET, /* ICMP_HOST_ISOLATED */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_ANO */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_ANO */
.fatal = 1,
},
{
.errno = ENETUNREACH, /* ICMP_NET_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_HOST_UNR_TOS */
.fatal = 0,
},
{
.errno = EHOSTUNREACH, /* ICMP_PKT_FILTERED */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_VIOLATION */
.fatal = 1,
},
{
.errno = EHOSTUNREACH, /* ICMP_PREC_CUTOFF */
.fatal = 1,
},
};
EXPORT_SYMBOL(icmp_err_convert);
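For context, upper-layer error handlers typically consume this table by indexing it with the code of a received Destination Unreachable message to recover an errno and a transient/fatal flag. A minimal sketch, not part of this file; example_unreach_to_errno() is a hypothetical helper name:

/* Illustrative sketch only: map an ICMP_DEST_UNREACH code to an errno the
 * way transport error handlers (e.g. TCP's ICMP handler) use this table.
 * example_unreach_to_errno() is a hypothetical helper, not kernel API.
 */
static int example_unreach_to_errno(u8 code, bool *fatal)
{
	if (code > NR_ICMP_UNREACH)	/* the table covers codes 0..15 */
		return EINVAL;
	*fatal = icmp_err_convert[code].fatal;
	return icmp_err_convert[code].errno;
}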
/*
* ICMP control array. This specifies what to do with each ICMP.
*/
struct icmp_control {
bool (*handler)(struct sk_buff *skb);
short error; /* This ICMP is classed as an error message */
};
static const struct icmp_control icmp_pointers[NR_ICMP_TYPES+1];
/*
* The ICMP socket(s). This is the most convenient way to flow control
* our ICMP output as well as maintain a clean interface throughout
* all layers. All Socketless IP sends will soon be gone.
*
* On SMP we have one ICMP socket per-cpu.
*/
static struct sock *icmp_sk(struct net *net)
{
return *this_cpu_ptr(net->ipv4.icmp_sk);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denis V. Lunev | 19 | 79.17% | 3 | 60.00% |
Eric Dumazet | 4 | 16.67% | 1 | 20.00% |
David S. Miller | 1 | 4.17% | 1 | 20.00% |
Total | 24 | 100.00% | 5 | 100.00% |
/* Called with BH disabled */
static inline struct sock *icmp_xmit_lock(struct net *net)
{
struct sock *sk;
sk = icmp_sk(net);
if (unlikely(!spin_trylock(&sk->sk_lock.slock))) {
/* This can happen if the output path signals a
* dst_link_failure() for an outgoing ICMP packet.
*/
return NULL;
}
return sk;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Denis V. Lunev | 22 | 43.14% | 2 | 28.57% |
Linus Torvalds (pre-git) | 17 | 33.33% | 1 | 14.29% |
David S. Miller | 10 | 19.61% | 2 | 28.57% |
Arnaldo Carvalho de Melo | 1 | 1.96% | 1 | 14.29% |
Eric Dumazet | 1 | 1.96% | 1 | 14.29% |
Total | 51 | 100.00% | 7 | 100.00% |
static inline void icmp_xmit_unlock(struct sock *sk)
{
spin_unlock(&sk->sk_lock.slock);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 13 | 59.09% | 1 | 16.67% |
Denis V. Lunev | 5 | 22.73% | 1 | 16.67% |
Arnaldo Carvalho de Melo | 1 | 4.55% | 1 | 16.67% |
Eric Dumazet | 1 | 4.55% | 1 | 16.67% |
Jesper Dangaard Brouer | 1 | 4.55% | 1 | 16.67% |
David S. Miller | 1 | 4.55% | 1 | 16.67% |
Total | 22 | 100.00% | 6 | 100.00% |
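For reference, the expected calling pattern around this lock pair mirrors what icmp_reply() and icmp_send() below do: disable bottom halves first, take the per-cpu socket with the trylock, and release both in reverse order. A sketch under that assumption; example_icmp_tx() is a hypothetical name:

/* Illustrative only: calling pattern for the lock pair above, modelled on
 * the senders later in this file.  example_icmp_tx() is hypothetical.
 */
static void example_icmp_tx(struct net *net)
{
	struct sock *sk;

	local_bh_disable();		/* icmp_xmit_lock() expects BH disabled */
	sk = icmp_xmit_lock(net);
	if (sk) {
		/* ... build and push the ICMP message here ... */
		icmp_xmit_unlock(sk);
	}
	local_bh_enable();
}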
int sysctl_icmp_msgs_per_sec __read_mostly = 1000;
int sysctl_icmp_msgs_burst __read_mostly = 50;
static struct {
spinlock_t lock;
u32 credit;
u32 stamp;
} icmp_global = {
.lock = __SPIN_LOCK_UNLOCKED(icmp_global.lock),
};
/**
* icmp_global_allow - Are we allowed to send one more ICMP message?
*
* Uses a token bucket to limit our ICMP messages to sysctl_icmp_msgs_per_sec.
* Returns false if we reached the limit and cannot send another packet.
* Note: called with BH disabled
*/
bool icmp_global_allow(void)
{
u32 credit, delta, incr = 0, now = (u32)jiffies;
bool rc = false;
/* Check if token bucket is empty and cannot be refilled
* without taking the spinlock.
*/
if (!icmp_global.credit) {
delta = min_t(u32, now - icmp_global.stamp, HZ);
if (delta < HZ / 50)
return false;
}
spin_lock(&icmp_global.lock);
delta = min_t(u32, now - icmp_global.stamp, HZ);
if (delta >= HZ / 50) {
incr = sysctl_icmp_msgs_per_sec * delta / HZ;
if (incr)
icmp_global.stamp = now;
}
credit = min_t(u32, icmp_global.credit + incr, sysctl_icmp_msgs_burst);
if (credit) {
credit--;
rc = true;
}
icmp_global.credit = credit;
spin_unlock(&icmp_global.lock);
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 160 | 100.00% | 1 | 100.00% |
Total | 160 | 100.00% | 1 | 100.00% |
EXPORT_SYMBOL(icmp_global_allow);
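As a worked example of the refill arithmetic above (assuming the default sysctls and HZ = 1000; the comment below is illustrative, not additional code in this file):

/* Illustrative arithmetic only, not part of this file.
 * With sysctl_icmp_msgs_per_sec = 1000, sysctl_icmp_msgs_burst = 50 and
 * HZ = 1000, a gap of delta = 20 jiffies (HZ / 50) since the last refill
 * yields incr = 1000 * 20 / 1000 = 20 credits, and the bucket is then
 * capped at min(credit + 20, 50).  An empty bucket with a gap shorter
 * than HZ / 50 is rejected early, without taking the spinlock.
 */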
static bool icmpv4_mask_allow(struct net *net, int type, int code)
{
if (type > NR_ICMP_TYPES)
return true;
/* Don't limit PMTU discovery. */
if (type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED)
return true;
/* Limit if icmp type is enabled in ratemask. */
if (!((1 << type) & net->ipv4.sysctl_icmp_ratemask))
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 64 | 100.00% | 1 | 100.00% |
Total | 64 | 100.00% | 1 | 100.00% |
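The ratemask is a plain bitmap over ICMP types; a sketch of how the documented default (net.ipv4.icmp_ratemask = 6168) decomposes, assuming the standard type values:

/* Illustrative only, not part of this file.  The default icmp_ratemask is
 *	(1 << ICMP_DEST_UNREACH) | (1 << ICMP_SOURCE_QUENCH) |
 *	(1 << ICMP_TIME_EXCEEDED) | (1 << ICMP_PARAMETERPROB)
 * = 0x1818 = 6168, so only types 3, 4, 11 and 12 are rate limited by
 * default; a type whose bit is clear is passed by icmpv4_mask_allow()
 * without any limiting.
 */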
static bool icmpv4_global_allow(struct net *net, int type, int code)
{
if (icmpv4_mask_allow(net, type, code))
return true;
if (icmp_global_allow())
return true;
return false;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Jesper Dangaard Brouer | 42 | 100.00% | 1 | 100.00% |
Total | 42 | 100.00% | 1 | 100.00% |
/*
* Send an ICMP frame.
*/
static bool icmpv4_xrlim_allow(struct net *net, struct rtable *rt,
struct flowi4 *fl4, int type, int code)
{
struct dst_entry *dst = &rt->dst;
struct inet_peer *peer;
bool rc = true;
int vif;
if (icmpv4_mask_allow(net, type, code))
goto out;
/* No rate limit on loopback */
if (dst->dev && (dst->dev->flags&IFF_LOOPBACK))
goto out;
vif = l3mdev_master_ifindex(dst->dev);
peer = inet_getpeer_v4(net->ipv4.peers, fl4->daddr, vif, 1);
rc = inet_peer_xrlim_allow(peer, net->ipv4.sysctl_icmp_ratelimit);
if (peer)
inet_putpeer(peer);
out:
return rc;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 51 | 36.69% | 3 | 20.00% |
David S. Miller | 29 | 20.86% | 3 | 20.00% |
Jesper Dangaard Brouer | 16 | 11.51% | 1 | 6.67% |
Arnaldo Carvalho de Melo | 15 | 10.79% | 1 | 6.67% |
Pavel Emelyanov | 9 | 6.47% | 2 | 13.33% |
David Ahern | 9 | 6.47% | 2 | 13.33% |
Neal Cardwell | 4 | 2.88% | 1 | 6.67% |
Eric Dumazet | 3 | 2.16% | 1 | 6.67% |
Linus Torvalds | 3 | 2.16% | 1 | 6.67% |
Total | 139 | 100.00% | 15 | 100.00% |
/*
* Maintain the counters used in the SNMP statistics for outgoing ICMP
*/
void icmp_out_count(struct net *net, unsigned char type)
{
ICMPMSGOUT_INC_STATS(net, type);
ICMP_INC_STATS(net, ICMP_MIB_OUTMSGS);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 14 | 50.00% | 2 | 25.00% |
Pavel Emelyanov | 9 | 32.14% | 3 | 37.50% |
David L Stevens | 3 | 10.71% | 1 | 12.50% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 3.57% | 1 | 12.50% |
Ravikiran G. Thirumalai | 1 | 3.57% | 1 | 12.50% |
Total | 28 | 100.00% | 8 | 100.00% |
/*
* Checksum each fragment, and on the first include the headers and final
* checksum.
*/
static int icmp_glue_bits(void *from, char *to, int offset, int len, int odd,
struct sk_buff *skb)
{
struct icmp_bxm *icmp_param = (struct icmp_bxm *)from;
__wsum csum;
csum = skb_copy_and_csum_bits(icmp_param->skb,
icmp_param->offset + offset,
to, len, 0);
skb->csum = csum_block_add(skb->csum, csum, odd);
if (icmp_pointers[icmp_param->data.icmph.type].error)
nf_ct_attach(skb, icmp_param->skb);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 44 | 41.51% | 6 | 46.15% |
Alexey Kuznetsov | 29 | 27.36% | 2 | 15.38% |
Patrick McHardy | 24 | 22.64% | 1 | 7.69% |
Linus Torvalds | 6 | 5.66% | 1 | 7.69% |
Al Viro | 1 | 0.94% | 1 | 7.69% |
Arnaldo Carvalho de Melo | 1 | 0.94% | 1 | 7.69% |
Adrian Bunk | 1 | 0.94% | 1 | 7.69% |
Total | 106 | 100.00% | 13 | 100.00% |
static void icmp_push_reply(struct icmp_bxm *icmp_param,
struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rt)
{
struct sock *sk;
struct sk_buff *skb;
sk = icmp_sk(dev_net((*rt)->dst.dev));
if (ip_append_data(sk, fl4, icmp_glue_bits, icmp_param,
icmp_param->data_len+icmp_param->head_len,
icmp_param->head_len,
ipc, rt, MSG_DONTWAIT) < 0) {
__ICMP_INC_STATS(sock_net(sk), ICMP_MIB_OUTERRORS);
ip_flush_pending_frames(sk);
} else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
struct icmphdr *icmph = icmp_hdr(skb);
__wsum csum = 0;
struct sk_buff *skb1;
skb_queue_walk(&sk->sk_write_queue, skb1) {
csum = csum_add(csum, skb1->csum);
}
csum = csum_partial_copy_nocheck((void *)&icmp_param->data,
(char *)icmph,
icmp_param->head_len, csum);
icmph->checksum = csum_fold(csum);
skb->ip_summed = CHECKSUM_NONE;
ip_push_pending_frames(sk, fl4);
}
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Alexey Kuznetsov | 113 | 53.81% | 1 | 5.56% |
Linus Torvalds (pre-git) | 32 | 15.24% | 3 | 16.67% |
Denis V. Lunev | 17 | 8.10% | 3 | 16.67% |
Eric Dumazet | 16 | 7.62% | 3 | 16.67% |
Patrick McHardy | 10 | 4.76% | 1 | 5.56% |
David S. Miller | 9 | 4.29% | 2 | 11.11% |
Linus Torvalds | 5 | 2.38% | 1 | 5.56% |
Arnaldo Carvalho de Melo | 4 | 1.90% | 2 | 11.11% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 1.43% | 1 | 5.56% |
Al Viro | 1 | 0.48% | 1 | 5.56% |
Total | 210 | 100.00% | 18 | 100.00% |
/*
* Driving logic for building and sending ICMP messages.
*/
static void icmp_reply(struct icmp_bxm *icmp_param, struct sk_buff *skb)
{
struct ipcm_cookie ipc;
struct rtable *rt = skb_rtable(skb);
struct net *net = dev_net(rt->dst.dev);
struct flowi4 fl4;
struct sock *sk;
struct inet_sock *inet;
__be32 daddr, saddr;
u32 mark = IP4_REPLY_MARK(net, skb->mark);
int type = icmp_param->data.icmph.type;
int code = icmp_param->data.icmph.code;
if (ip_options_echo(&icmp_param->replyopts.opt.opt, skb))
return;
/* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
/* global icmp_msgs_per_sec */
if (!icmpv4_global_allow(net, type, code))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
if (!sk)
goto out_bh_enable;
inet = inet_sk(sk);
icmp_param->data.icmph.checksum = 0;
inet->tos = ip_hdr(skb)->tos;
sk->sk_mark = mark;
daddr = ipc.addr = ip_hdr(skb)->saddr;
saddr = fib_compute_spec_dst(skb);
ipc.opt = NULL;
ipc.tx_flags = 0;
ipc.ttl = 0;
ipc.tos = -1;
if (icmp_param->replyopts.opt.opt.optlen) {
ipc.opt = &icmp_param->replyopts.opt;
if (ipc.opt->opt.srr)
daddr = icmp_param->replyopts.opt.opt.faddr;
}
memset(&fl4, 0, sizeof(fl4));
fl4.daddr = daddr;
fl4.saddr = saddr;
fl4.flowi4_mark = mark;
fl4.flowi4_uid = sock_net_uid(net, NULL);
fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
fl4.flowi4_proto = IPPROTO_ICMP;
fl4.flowi4_oif = l3mdev_master_ifindex(skb->dev);
security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
rt = ip_route_output_key(net, &fl4);
if (IS_ERR(rt))
goto out_unlock;
if (icmpv4_xrlim_allow(net, rt, &fl4, type, code))
icmp_push_reply(icmp_param, &fl4, &ipc, &rt);
ip_rt_put(rt);
out_unlock:
icmp_xmit_unlock(sk);
out_bh_enable:
local_bh_enable();
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 137 | 32.16% | 12 | 24.00% |
David S. Miller | 81 | 19.01% | 11 | 22.00% |
Jesper Dangaard Brouer | 50 | 11.74% | 2 | 4.00% |
Denis V. Lunev | 43 | 10.09% | 4 | 8.00% |
Lorenzo Colitti | 35 | 8.22% | 2 | 4.00% |
Eric Dumazet | 20 | 4.69% | 3 | 6.00% |
Francesco Fusco | 13 | 3.05% | 1 | 2.00% |
David Ahern | 11 | 2.58% | 2 | 4.00% |
Venkat Yekkirala | 7 | 1.64% | 1 | 2.00% |
Changli Gao | 6 | 1.41% | 1 | 2.00% |
Arnaldo Carvalho de Melo | 5 | 1.17% | 2 | 4.00% |
Patrick Ohly | 5 | 1.17% | 1 | 2.00% |
Linus Torvalds | 3 | 0.70% | 1 | 2.00% |
Hideaki Yoshifuji / 吉藤英明 | 3 | 0.70% | 1 | 2.00% |
Pavel Emelyanov | 2 | 0.47% | 1 | 2.00% |
Al Viro | 1 | 0.23% | 1 | 2.00% |
Simon Horman | 1 | 0.23% | 1 | 2.00% |
Oliver Hartkopp | 1 | 0.23% | 1 | 2.00% |
Ian Morris | 1 | 0.23% | 1 | 2.00% |
Alexey Kuznetsov | 1 | 0.23% | 1 | 2.00% |
Total | 426 | 100.00% | 50 | 100.00% |
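For context, a handler such as the echo responder fills a struct icmp_bxm from the received packet and hands it to icmp_reply(). A condensed sketch modelled on the echo path elsewhere in this file; example_echo_reply() is a hypothetical name, and the real handler also honours the echo-ignore sysctls:

/* Condensed, illustrative caller of icmp_reply(), modelled on the echo
 * handler elsewhere in this file; not a complete handler.
 */
static void example_echo_reply(struct sk_buff *skb)
{
	struct icmp_bxm icmp_param;

	icmp_param.data.icmph	   = *icmp_hdr(skb);
	icmp_param.data.icmph.type = ICMP_ECHOREPLY;
	icmp_param.skb		   = skb;
	icmp_param.offset	   = 0;
	icmp_param.data_len	   = skb->len;
	icmp_param.head_len	   = sizeof(struct icmphdr);
	icmp_reply(&icmp_param, skb);
}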
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Source and destination are swapped. See ip_multipath_icmp_hash */
static int icmp_multipath_hash_skb(const struct sk_buff *skb)
{
const struct iphdr *iph = ip_hdr(skb);
return fib_multipath_hash(iph->daddr, iph->saddr);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Peter Christensen | 35 | 100.00% | 1 | 100.00% |
Total | 35 | 100.00% | 1 | 100.00% |
#else
#define icmp_multipath_hash_skb(skb) (-1)
#endif
static struct rtable *icmp_route_lookup(struct net *net,
struct flowi4 *fl4,
struct sk_buff *skb_in,
const struct iphdr *iph,
__be32 saddr, u8 tos, u32 mark,
int type, int code,
struct icmp_bxm *param)
{
struct rtable *rt, *rt2;
struct flowi4 fl4_dec;
int err;
memset(fl4, 0, sizeof(*fl4));
fl4->daddr = (param->replyopts.opt.opt.srr ?
param->replyopts.opt.opt.faddr : iph->saddr);
fl4->saddr = saddr;
fl4->flowi4_mark = mark;
fl4->flowi4_uid = sock_net_uid(net, NULL);
fl4->flowi4_tos = RT_TOS(tos);
fl4->flowi4_proto = IPPROTO_ICMP;
fl4->fl4_icmp_type = type;
fl4->fl4_icmp_code = code;
fl4->flowi4_oif = l3mdev_master_ifindex(skb_dst(skb_in)->dev);
security_skb_classify_flow(skb_in, flowi4_to_flowi(fl4));
rt = __ip_route_output_key_hash(net, fl4,
icmp_multipath_hash_skb(skb_in));
if (IS_ERR(rt))
return rt;
/* No need to clone since we're just using its address. */
rt2 = rt;
rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
flowi4_to_flowi(fl4), NULL, 0);
if (!IS_ERR(rt)) {
if (rt != rt2)
return rt;
} else if (PTR_ERR(rt) == -EPERM) {
rt = NULL;
} else
return rt;
err = xfrm_decode_session_reverse(skb_in, flowi4_to_flowi(&fl4_dec), AF_INET);
if (err)
goto relookup_failed;
if (inet_addr_type_dev_table(net, skb_dst(skb_in)->dev,
fl4_dec.saddr) == RTN_LOCAL) {
rt2 = __ip_route_output_key(net, &fl4_dec);
if (IS_ERR(rt2))
err = PTR_ERR(rt2);
} else {
struct flowi4 fl4_2 = {};
unsigned long orefdst;
fl4_2.daddr = fl4_dec.saddr;
rt2 = ip_route_output_key(net, &fl4_2);
if (IS_ERR(rt2)) {
err = PTR_ERR(rt2);
goto relookup_failed;
}
/* Ugh! */
orefdst = skb_in->_skb_refdst; /* save old refdst */
skb_dst_set(skb_in, NULL);
err = ip_route_input(skb_in, fl4_dec.daddr, fl4_dec.saddr,
RT_TOS(tos), rt2->dst.dev);
dst_release(&rt2->dst);
rt2 = skb_rtable(skb_in);
skb_in->_skb_refdst = orefdst; /* restore old refdst */
}
if (err)
goto relookup_failed;
rt2 = (struct rtable *) xfrm_lookup(net, &rt2->dst,
flowi4_to_flowi(&fl4_dec), NULL,
XFRM_LOOKUP_ICMP);
if (!IS_ERR(rt2)) {
dst_release(&rt->dst);
memcpy(fl4, &fl4_dec, sizeof(*fl4));
rt = rt2;
} else if (PTR_ERR(rt2) == -EPERM) {
if (rt)
dst_release(&rt->dst);
return rt2;
} else {
err = PTR_ERR(rt2);
goto relookup_failed;
}
return rt;
relookup_failed:
if (rt)
return rt;
return ERR_PTR(err);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
David S. Miller | 437 | 74.57% | 9 | 25.00% |
Linus Torvalds (pre-git) | 43 | 7.34% | 4 | 11.11% |
David Ahern | 22 | 3.75% | 4 | 11.11% |
Lorenzo Colitti | 20 | 3.41% | 2 | 5.56% |
Eric Dumazet | 10 | 1.71% | 3 | 8.33% |
Patrick McHardy | 10 | 1.71% | 1 | 2.78% |
J. Simonetti | 8 | 1.37% | 1 | 2.78% |
Linus Torvalds | 7 | 1.19% | 1 | 2.78% |
Thomas Graf | 7 | 1.19% | 1 | 2.78% |
Peter Christensen | 6 | 1.02% | 1 | 2.78% |
Denis V. Lunev | 6 | 1.02% | 3 | 8.33% |
Arnaldo Carvalho de Melo | 6 | 1.02% | 3 | 8.33% |
Hideaki Yoshifuji / 吉藤英明 | 2 | 0.34% | 1 | 2.78% |
Eric W. Biedermann | 1 | 0.17% | 1 | 2.78% |
Pavel Emelyanov | 1 | 0.17% | 1 | 2.78% |
Total | 586 | 100.00% | 36 | 100.00% |
/*
* Send an ICMP message in response to a situation
*
* RFC 1122: 3.2.2 MUST send at least the IP header and 8 bytes of header.
* MAY send more (we do).
* MUST NOT change this header information.
* MUST NOT reply to a multicast/broadcast IP address.
* MUST NOT reply to a multicast/broadcast MAC address.
* MUST reply to only the first fragment.
*/
void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
{
struct iphdr *iph;
int room;
struct icmp_bxm icmp_param;
struct rtable *rt = skb_rtable(skb_in);
struct ipcm_cookie ipc;
struct flowi4 fl4;
__be32 saddr;
u8 tos;
u32 mark;
struct net *net;
struct sock *sk;
if (!rt)
goto out;
net = dev_net(rt->dst.dev);
/*
* Find the original header. It is expected to be valid, of course.
* Check this; icmp_send is called from the most obscure devices
* sometimes.
*/
iph = ip_hdr(skb_in);
if ((u8 *)iph < skb_in->head ||
(skb_network_header(skb_in) + sizeof(*iph)) >
skb_tail_pointer(skb_in))
goto out;
/*
* No replies to physical multicast/broadcast
*/
if (skb_in->pkt_type != PACKET_HOST)
goto out;
/*
* Now check at the protocol level
*/
if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto out;
/*
* Only reply to fragment 0. We byte re-order the constant
* mask for efficiency.
*/
if (iph->frag_off & htons(IP_OFFSET))
goto out;
/*
* If we send an ICMP error to an ICMP error, a mess would result.
*/
if (icmp_pointers[type].error) {
/*
* We are an error, check if we are replying to an
* ICMP error
*/
if (iph->protocol == IPPROTO_ICMP) {
u8 _inner_type, *itp;
itp = skb_header_pointer(skb_in,
skb_network_header(skb_in) +
(iph->ihl << 2) +
offsetof(struct icmphdr,
type) -
skb_in->data,
sizeof(_inner_type),
&_inner_type);
if (!itp)
goto out;
/*
* Assume any unknown ICMP type is an error. This
* isn't specified by the RFC, but think about it.
*/
if (*itp > NR_ICMP_TYPES ||
icmp_pointers[*itp].error)
goto out;
}
}
/* Needed by both icmp_global_allow and icmp_xmit_lock */
local_bh_disable();
/* Check global sysctl_icmp_msgs_per_sec ratelimit */
if (!icmpv4_global_allow(net, type, code))
goto out_bh_enable;
sk = icmp_xmit_lock(net);
if (!sk)
goto out_bh_enable;
/*
* Construct source address and options.
*/
saddr = iph->daddr;
if (!(rt->rt_flags & RTCF_LOCAL)) {
struct net_device *dev = NULL;
rcu_read_lock();
if (rt_is_input_route(rt) &&
net->ipv4.sysctl_icmp_errors_use_inbound_ifaddr)
dev = dev_get_by_index_rcu(net, inet_iif(skb_in));
if (dev)
saddr = inet_select_addr(dev, 0, RT_SCOPE_LINK);
else
saddr = 0;
rcu_read_unlock();
}
tos = icmp_pointers[type].error ? ((iph->tos & IPTOS_TOS_MASK) |
IPTOS_PREC_INTERNETCONTROL) :
iph->tos;
mark = IP4_REPLY_MARK(