Release 4.11 net/ipv6/ip6_input.c
/*
* IPv6 input
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
* Ian P. Morris <I.P.Morris@soton.ac.uk>
*
* Based in linux/net/ipv4/ip_input.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
/* Changes
*
* Mitsuru KANDA @USAGI and
* YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>
/*
 * Continuation of IPv6 receive after the NF_INET_PRE_ROUTING hook.
 * Gives an L3 master device (if any) first crack at the skb, optionally
 * performs transport-layer early demux, resolves the input route when no
 * destination is attached yet, and finally dispatches via dst_input().
 */
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	const struct inet6_protocol *ipprot;

	/* An enslaved ingress device hands the skb to its L3 master
	 * handler, which may consume it entirely.
	 */
	skb = l3mdev_ip6_rcv(skb);
	if (skb == NULL)
		return NET_RX_SUCCESS;

	/* Early demux: let the transport protocol attach a socket/dst
	 * before routing, when enabled and nothing is attached yet.
	 */
	if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && !skb->sk) {
		ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
		if (ipprot && ipprot->early_demux)
			ipprot->early_demux(skb);
	}

	/* Perform the routing decision unless early demux supplied one. */
	if (!skb_valid_dst(skb))
		ip6_route_input(skb);

	return dst_input(skb);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Eric Dumazet | 51 | 45.13% | 2 | 20.00% |
Linus Torvalds (pre-git) | 24 | 21.24% | 1 | 10.00% |
David Ahern | 16 | 14.16% | 1 | 10.00% |
Holger Eitzenberger | 6 | 5.31% | 1 | 10.00% |
David S. Miller | 5 | 4.42% | 1 | 10.00% |
Eric W. Biedermann | 5 | 4.42% | 1 | 10.00% |
Nikolay Borisov | 4 | 3.54% | 1 | 10.00% |
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.88% | 1 | 10.00% |
Wei-Chun Chao | 1 | 0.88% | 1 | 10.00% |
Total | 113 | 100.00% | 10 | 100.00% |
/*
 * Main IPv6 receive routine (the packet_type handler for IPv6 frames).
 * Validates the fixed header against RFC 4291 addressing rules, updates
 * SNMP statistics, trims link-layer padding, parses hop-by-hop options
 * when present, and hands the packet to the NF_INET_PRE_ROUTING
 * netfilter hook, continuing in ip6_rcv_finish() on accept.
 * Returns NET_RX_DROP on validation failure, otherwise the hook's verdict.
 */
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
const struct ipv6hdr *hdr;
u32 pkt_len;
struct inet6_dev *idev;
struct net *net = dev_net(skb->dev);
/* Frame was addressed to another host at the link layer: ignore. */
if (skb->pkt_type == PACKET_OTHERHOST) {
kfree_skb(skb);
return NET_RX_DROP;
}
/* RCU section covers idev and the protocol/route lookups below;
 * it is dropped on every exit path (drop/err labels and success).
 */
rcu_read_lock();
idev = __in6_dev_get(skb->dev);
__IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);
/* Unshare the skb; discard if that fails, the device has no IPv6
 * state, or IPv6 is administratively disabled on it.
 */
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
!idev || unlikely(idev->cnf.disable_ipv6)) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
goto drop;
}
/* Start with a clean IPv6 control block for this skb. */
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));
/*
 * Store incoming device index. When the packet will
 * be queued, we cannot refer to skb->dev anymore.
 *
 * BTW, when we send a packet for our own local address on a
 * non-loopback interface (e.g. ethX), it is being delivered
 * via the loopback interface (lo) here; skb->dev = loopback_dev.
 * It, however, should be considered as if it is being
 * arrived via the sending interface (ethX), because of the
 * nature of scoping architecture. --yoshfuji
 */
IP6CB(skb)->iif = skb_valid_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;
/* Ensure the fixed 40-byte IPv6 header is in linear skb data. */
if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
goto err;
hdr = ipv6_hdr(skb);
if (hdr->version != 6)
goto err;
/* ECN accounting; weight by gso_segs so aggregated (GRO) skbs
 * count as the number of wire packets they represent.
 */
__IP6_ADD_STATS(net, idev,
IPSTATS_MIB_NOECTPKTS +
(ipv6_get_dsfield(hdr) & INET_ECN_MASK),
max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
/*
 * RFC4291 2.5.3
 * The loopback address must not be used as the source address in IPv6
 * packets that are sent outside of a single node. [..]
 * A packet received on an interface with a destination address
 * of loopback must be dropped.
 */
if ((ipv6_addr_loopback(&hdr->saddr) ||
ipv6_addr_loopback(&hdr->daddr)) &&
!(dev->flags & IFF_LOOPBACK))
goto err;
/* RFC4291 Errata ID: 3480
 * Interface-Local scope spans only a single interface on a
 * node and is useful only for loopback transmission of
 * multicast. Packets with interface-local scope received
 * from another node must be discarded.
 */
if (!(skb->pkt_type == PACKET_LOOPBACK ||
dev->flags & IFF_LOOPBACK) &&
ipv6_addr_is_multicast(&hdr->daddr) &&
IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
goto err;
/* If enabled, drop unicast packets that were encapsulated in link-layer
 * multicast or broadcast to protect against the so-called "hole-196"
 * attack in 802.11 wireless.
 */
if (!ipv6_addr_is_multicast(&hdr->daddr) &&
(skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST) &&
idev->cnf.drop_unicast_in_l2_multicast)
goto err;
/* RFC4291 2.7
 * Nodes must not originate a packet to a multicast address whose scope
 * field contains the reserved value 0; if such a packet is received, it
 * must be silently dropped.
 */
if (ipv6_addr_is_multicast(&hdr->daddr) &&
IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
goto err;
/*
 * RFC4291 2.7
 * Multicast addresses must not be used as source addresses in IPv6
 * packets or appear in any Routing header.
 */
if (ipv6_addr_is_multicast(&hdr->saddr))
goto err;
/* The transport header starts right after the fixed IPv6 header;
 * record where the nexthdr byte lives for later extension parsing.
 */
skb->transport_header = skb->network_header + sizeof(*hdr);
IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);
pkt_len = ntohs(hdr->payload_len);
/* pkt_len may be zero if Jumbo payload option is present */
if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
/* Truncated: claims more payload than the skb actually holds. */
if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
__IP6_INC_STATS(net,
idev, IPSTATS_MIB_INTRUNCATEDPKTS);
goto drop;
}
/* Trim link-layer padding and keep the checksum state valid. */
if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
goto drop;
}
/* Trimming may have reallocated header data: refetch pointer. */
hdr = ipv6_hdr(skb);
}
/* Parse hop-by-hop options in place; failure frees the skb itself,
 * so only the statistics and unlock remain to be done here.
 */
if (hdr->nexthdr == NEXTHDR_HOP) {
if (ipv6_parse_hopopts(skb) < 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
rcu_read_unlock();
return NET_RX_DROP;
}
}
rcu_read_unlock();
/* Must drop socket now because of tproxy. */
skb_orphan(skb);
return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
net, NULL, skb, dev, NULL,
ip6_rcv_finish);
err:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
rcu_read_unlock();
kfree_skb(skb);
return NET_RX_DROP;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 177 | 29.80% | 7 | 14.58% |
Hideaki Yoshifuji / 吉藤英明 | 90 | 15.15% | 7 | 14.58% |
Hannes Frederic Sowa | 63 | 10.61% | 2 | 4.17% |
Eric Dumazet | 38 | 6.40% | 5 | 10.42% |
Johannes Berg | 35 | 5.89% | 1 | 2.08% |
Linus Torvalds | 29 | 4.88% | 1 | 2.08% |
Denis V. Lunev | 24 | 4.04% | 1 | 2.08% |
Patrick McHardy | 22 | 3.70% | 2 | 4.17% |
Florian Westphal | 18 | 3.03% | 1 | 2.08% |
Guillaume Chazarain | 16 | 2.69% | 1 | 2.08% |
Arnaldo Carvalho de Melo | 14 | 2.36% | 3 | 6.25% |
Brian Haley | 14 | 2.36% | 1 | 2.08% |
Herbert Xu | 12 | 2.02% | 3 | 6.25% |
Mitsuru Chinen | 9 | 1.52% | 1 | 2.08% |
David S. Miller | 7 | 1.18% | 2 | 4.17% |
Shirley Ma | 6 | 1.01% | 1 | 2.08% |
Neil Horman | 5 | 0.84% | 1 | 2.08% |
Nivedita Singhvi | 5 | 0.84% | 1 | 2.08% |
Eric W. Biedermann | 3 | 0.51% | 2 | 4.17% |
Mark Smith | 3 | 0.51% | 1 | 2.08% |
Daniel Lezcano | 1 | 0.17% | 1 | 2.08% |
Wei-Chun Chao | 1 | 0.17% | 1 | 2.08% |
Jan Engelhardt | 1 | 0.17% | 1 | 2.08% |
Jesper Nilsson | 1 | 0.17% | 1 | 2.08% |
Total | 594 | 100.00% | 48 | 100.00% |
/*
* Deliver the packet to the host
*/
/*
 * Deliver the packet to the host
 *
 * Walks the extension-header chain, delivering a copy to matching raw
 * sockets and then to the registered inet6 protocol handler for each
 * nexthdr value.  Extension-header handlers return > 0 to request
 * reprocessing (resubmit); once a FINAL protocol has been seen, only
 * further FINAL protocols are accepted (e.g. foo-over-UDP decap).
 * Always returns 0; the skb is consumed on every path.
 */
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
const struct inet6_protocol *ipprot;
struct inet6_dev *idev;
unsigned int nhoff;
int nexthdr;
bool raw;
bool have_final = false;
/*
 * Parse extension headers
 */
rcu_read_lock();
resubmit:
idev = ip6_dst_idev(skb_dst(skb));
/* Advance skb->data to the current transport header. */
if (!pskb_pull(skb, skb_transport_offset(skb)))
goto discard;
/* nhoff was recorded by the previous parsing stage (ipv6_rcv or an
 * extension-header handler); read the next protocol number from it.
 */
nhoff = IP6CB(skb)->nhoff;
nexthdr = skb_network_header(skb)[nhoff];
resubmit_final:
/* Clone to any matching raw sockets; true if at least one got a copy. */
raw = raw6_local_deliver(skb, nexthdr);
ipprot = rcu_dereference(inet6_protos[nexthdr]);
if (ipprot) {
int ret;
if (have_final) {
if (!(ipprot->flags & INET6_PROTO_FINAL)) {
/* Once we've seen a final protocol don't
 * allow encapsulation on any non-final
 * ones. This allows foo in UDP encapsulation
 * to work.
 */
goto discard;
}
} else if (ipprot->flags & INET6_PROTO_FINAL) {
const struct ipv6hdr *hdr;
/* Only do this once for first final protocol */
have_final = true;
/* Free reference early: we don't need it any more,
 and it may hold ip_conntrack module loaded
 indefinitely. */
nf_reset(skb);
/* Fold the pulled network headers out of the checksum. */
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
hdr = ipv6_hdr(skb);
/* Drop multicast we are not a member of, unless it is MLD
 * (which must be processed even without membership).
 */
if (ipv6_addr_is_multicast(&hdr->daddr) &&
!ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
&hdr->saddr) &&
!ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb)))
goto discard;
}
/* IPsec policy check unless the protocol opted out. */
if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
ret = ipprot->handler(skb);
if (ret > 0) {
if (ipprot->flags & INET6_PROTO_FINAL) {
/* Not an extension header, most likely UDP
 * encapsulation. Use return value as nexthdr
 * protocol not nhoff (which presumably is
 * not set by handler).
 */
nexthdr = ret;
goto resubmit_final;
} else {
goto resubmit;
}
} else if (ret == 0) {
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
}
} else {
/* No protocol handler registered for this nexthdr. */
if (!raw) {
/* Nobody wanted it: report unknown next header via
 * ICMPv6 parameter problem (subject to xfrm policy).
 */
if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
__IP6_INC_STATS(net, idev,
IPSTATS_MIB_INUNKNOWNPROTOS);
icmpv6_send(skb, ICMPV6_PARAMPROB,
ICMPV6_UNK_NEXTHDR, nhoff);
}
kfree_skb(skb);
} else {
/* Raw sockets consumed it: count as delivered. */
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
consume_skb(skb);
}
}
rcu_read_unlock();
return 0;
discard:
__IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
rcu_read_unlock();
kfree_skb(skb);
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 77 | 18.29% | 4 | 9.76% |
Tom Herbert | 61 | 14.49% | 2 | 4.88% |
Linus Torvalds (pre-git) | 60 | 14.25% | 6 | 14.63% |
David L Stevens | 48 | 11.40% | 1 | 2.44% |
Tom Lendacky | 33 | 7.84% | 1 | 2.44% |
David S. Miller | 30 | 7.13% | 4 | 9.76% |
Arnaldo Carvalho de Melo | 15 | 3.56% | 4 | 9.76% |
Linus Torvalds | 13 | 3.09% | 1 | 2.44% |
Stephen Hemminger | 12 | 2.85% | 1 | 2.44% |
Patrick McHardy | 12 | 2.85% | 2 | 4.88% |
Eric Dumazet | 10 | 2.38% | 4 | 9.76% |
Neil Horman | 8 | 1.90% | 1 | 2.44% |
Denis V. Lunev | 8 | 1.90% | 1 | 2.44% |
Nivedita Singhvi | 7 | 1.66% | 1 | 2.44% |
Yasuyuki Kozakai | 6 | 1.43% | 1 | 2.44% |
Eric W. Biedermann | 5 | 1.19% | 1 | 2.44% |
Pavel Emelyanov | 4 | 0.95% | 1 | 2.44% |
Ian Morris | 4 | 0.95% | 1 | 2.44% |
Dipankar Sarma | 3 | 0.71% | 1 | 2.44% |
Shirley Ma | 3 | 0.71% | 1 | 2.44% |
Alexey Dobriyan | 1 | 0.24% | 1 | 2.44% |
Herbert Xu | 1 | 0.24% | 1 | 2.44% |
Total | 421 | 100.00% | 41 | 100.00% |
/*
 * Entry point for locally-destined IPv6 packets: run the
 * NF_INET_LOCAL_IN netfilter hook and, on accept, continue in
 * ip6_input_finish() for upper-layer demultiplexing.
 */
int ip6_input(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;

	return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
		       dev_net(dev), NULL, skb, dev, NULL,
		       ip6_input_finish);
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 26 | 70.27% | 1 | 20.00% |
Eric W. Biedermann | 7 | 18.92% | 1 | 20.00% |
David S. Miller | 2 | 5.41% | 1 | 20.00% |
Patrick McHardy | 1 | 2.70% | 1 | 20.00% |
Jan Engelhardt | 1 | 2.70% | 1 | 20.00% |
Total | 37 | 100.00% | 5 | 100.00% |
EXPORT_SYMBOL_GPL(ip6_input);
/*
 * Receive path for multicast IPv6 packets.  Decides whether the packet
 * is delivered locally (group membership or MLD) and, when multicast
 * routing is compiled in and active, forwards a copy via ip6_mr_input().
 * Always returns 0; the skb is consumed on every path.
 */
int ip6_mc_input(struct sk_buff *skb)
{
const struct ipv6hdr *hdr;
bool deliver;
__IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
skb->len);
hdr = ipv6_hdr(skb);
/* Deliver locally if this host is a member of the destination group. */
deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);
#ifdef CONFIG_IPV6_MROUTE
/*
 * IPv6 multicast router mode is now supported ;)
 */
if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
!(ipv6_addr_type(&hdr->daddr) &
(IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
/*
 * Okay, we try to forward - split and duplicate
 * packets.
 */
struct sk_buff *skb2;
struct inet6_skb_parm *opt = IP6CB(skb);
/* Check for MLD */
if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
/* Check if this is a mld message */
u8 nexthdr = hdr->nexthdr;
__be16 frag_off;
int offset;
/* Check if the value of Router Alert
 * is for MLD (0x0000).
 */
if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
/* Only deliver locally if it really is MLD; a router
 * alert for MLD on a non-MLD packet is not delivered.
 */
deliver = false;
if (!ipv6_ext_hdr(nexthdr)) {
/* BUG */
goto out;
}
offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
&nexthdr, &frag_off);
if (offset < 0)
goto out;
if (ipv6_is_mld(skb, nexthdr, offset))
deliver = true;
/* MLD messages are never forwarded. */
goto out;
}
/* unknown RA - process it normally */
}
/* Forwarding path: clone when we also deliver locally,
 * otherwise hand the original skb to the mroute code and
 * clear our reference so it is not freed again below.
 */
if (deliver)
skb2 = skb_clone(skb, GFP_ATOMIC);
else {
skb2 = skb;
skb = NULL;
}
if (skb2) {
ip6_mr_input(skb2);
}
}
out:
#endif
if (likely(deliver))
ip6_input(skb);
else {
/* discard */
kfree_skb(skb);
}
return 0;
}
Contributors
Person | Tokens | Prop | Commits | CommitProp |
Hideaki Yoshifuji / 吉藤英明 | 194 | 62.99% | 6 | 26.09% |
Linus Torvalds (pre-git) | 54 | 17.53% | 4 | 17.39% |
Thomas Goff | 13 | 4.22% | 1 | 4.35% |
Eric Dumazet | 11 | 3.57% | 4 | 17.39% |
David L Stevens | 8 | 2.60% | 2 | 8.70% |
Denis V. Lunev | 7 | 2.27% | 1 | 4.35% |
Jesse Gross | 6 | 1.95% | 1 | 4.35% |
Neil Horman | 5 | 1.62% | 1 | 4.35% |
Hannes Frederic Sowa | 4 | 1.30% | 1 | 4.35% |
Angga | 3 | 0.97% | 1 | 4.35% |
Arnaldo Carvalho de Melo | 3 | 0.97% | 1 | 4.35% |
Total | 308 | 100.00% | 23 | 100.00% |
Overall Contributors
Person | Tokens | Prop | Commits | CommitProp |
Linus Torvalds (pre-git) | 399 | 25.69% | 8 | 8.99% |
Hideaki Yoshifuji / 吉藤英明 | 365 | 23.50% | 14 | 15.73% |
Eric Dumazet | 113 | 7.28% | 7 | 7.87% |
Hannes Frederic Sowa | 67 | 4.31% | 3 | 3.37% |
Tom Herbert | 61 | 3.93% | 2 | 2.25% |
David L Stevens | 56 | 3.61% | 2 | 2.25% |
David S. Miller | 44 | 2.83% | 5 | 5.62% |
Linus Torvalds | 42 | 2.70% | 1 | 1.12% |
Denis V. Lunev | 39 | 2.51% | 1 | 1.12% |
Tom Lendacky | 36 | 2.32% | 1 | 1.12% |
Johannes Berg | 35 | 2.25% | 1 | 1.12% |
Patrick McHardy | 35 | 2.25% | 3 | 3.37% |
Arnaldo Carvalho de Melo | 32 | 2.06% | 6 | 6.74% |
David Ahern | 21 | 1.35% | 2 | 2.25% |
Eric W. Biedermann | 20 | 1.29% | 3 | 3.37% |
Neil Horman | 18 | 1.16% | 2 | 2.25% |
Florian Westphal | 18 | 1.16% | 1 | 1.12% |
Guillaume Chazarain | 16 | 1.03% | 1 | 1.12% |
Brian Haley | 14 | 0.90% | 1 | 1.12% |
Thomas Goff | 13 | 0.84% | 1 | 1.12% |
Herbert Xu | 13 | 0.84% | 3 | 3.37% |
Stephen Hemminger | 12 | 0.77% | 1 | 1.12% |
Nivedita Singhvi | 12 | 0.77% | 1 | 1.12% |
Shirley Ma | 9 | 0.58% | 1 | 1.12% |
Mitsuru Chinen | 9 | 0.58% | 1 | 1.12% |
Yasuyuki Kozakai | 6 | 0.39% | 1 | 1.12% |
Jesse Gross | 6 | 0.39% | 1 | 1.12% |
Holger Eitzenberger | 6 | 0.39% | 1 | 1.12% |
Wei-Chun Chao | 5 | 0.32% | 1 | 1.12% |
Ian Morris | 5 | 0.32% | 2 | 2.25% |
Pavel Emelyanov | 4 | 0.26% | 1 | 1.12% |
Nikolay Borisov | 4 | 0.26% | 1 | 1.12% |
Dipankar Sarma | 3 | 0.19% | 1 | 1.12% |
Mark Smith | 3 | 0.19% | 1 | 1.12% |
Angga | 3 | 0.19% | 1 | 1.12% |
Tejun Heo | 3 | 0.19% | 1 | 1.12% |
Jan Engelhardt | 2 | 0.13% | 1 | 1.12% |
Alexey Dobriyan | 1 | 0.06% | 1 | 1.12% |
Jesper Nilsson | 1 | 0.06% | 1 | 1.12% |
Daniel Lezcano | 1 | 0.06% | 1 | 1.12% |
Adrian Bunk | 1 | 0.06% | 1 | 1.12% |
Total | 1553 | 100.00% | 89 | 100.00% |
Information contained on this website is for historical information purposes only and does not indicate or represent copyright ownership.