cregit-Linux: how code gets into the kernel

Release 4.15: net/ipv6/ip6_input.c

Directory: net/ipv6
/*
 *      IPv6 input
 *      Linux INET6 implementation
 *
 *      Authors:
 *      Pedro Roque             <roque@di.fc.ul.pt>
 *      Ian P. Morris           <I.P.Morris@soton.ac.uk>
 *
 *      Based on linux/net/ipv4/ip_input.c
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 */
/* Changes
 *
 *      Mitsuru KANDA @USAGI and
 *      YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs().
 */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/icmpv6.h>
#include <linux/mroute6.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/sock.h>
#include <net/snmp.h>

#include <net/ipv6.h>
#include <net/protocol.h>
#include <net/transp_v6.h>
#include <net/rawv6.h>
#include <net/ndisc.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/xfrm.h>
#include <net/inet_ecn.h>
#include <net/dst_metadata.h>


int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        void (*edemux)(struct sk_buff *skb);

        /* if ingress device is enslaved to an L3 master device pass the
         * skb to its handler for processing
         */
        skb = l3mdev_ip6_rcv(skb);
        if (!skb)
                return NET_RX_SUCCESS;

        if (net->ipv4.sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) {
                const struct inet6_protocol *ipprot;

                ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]);
                if (ipprot && (edemux = READ_ONCE(ipprot->early_demux)))
                        edemux(skb);
        }
        if (!skb_valid_dst(skb))
                ip6_route_input(skb);

        return dst_input(skb);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Eric Dumazet | 48 | 36.92% | 2 | 18.18%
Linus Torvalds (pre-git) | 24 | 18.46% | 1 | 9.09%
Subash Abhinov Kasiviswanathan | 20 | 15.38% | 1 | 9.09%
David Ahern | 16 | 12.31% | 1 | 9.09%
Holger Eitzenberger | 6 | 4.62% | 1 | 9.09%
David S. Miller | 5 | 3.85% | 1 | 9.09%
Eric W. Biedermann | 5 | 3.85% | 1 | 9.09%
Nikolay Borisov | 4 | 3.08% | 1 | 9.09%
Wei-Chun Chao | 1 | 0.77% | 1 | 9.09%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.77% | 1 | 9.09%
Total | 130 | 100.00% | 11 | 100.00%

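The early-demux branch in ip6_rcv_finish() above is gated by net->ipv4.sysctl_ip_early_demux; in this kernel the IPv6 receive path reuses the IPv4-namespaced knob, which is exposed to user space as net.ipv4.ip_early_demux. A minimal user-space sketch (assuming only the standard procfs location of that sysctl) that reports whether the branch can run:

/* Minimal sketch: report whether early demux is enabled, by reading the
 * procfs view of the net.ipv4.ip_early_demux sysctl that ip6_rcv_finish()
 * consults (via net->ipv4.sysctl_ip_early_demux) even on the IPv6 path.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        const char *path = "/proc/sys/net/ipv4/ip_early_demux";
        FILE *f = fopen(path, "r");
        int val;

        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        if (fscanf(f, "%d", &val) != 1) {
                fclose(f);
                fprintf(stderr, "unexpected contents in %s\n", path);
                return EXIT_FAILURE;
        }
        fclose(f);

        printf("early demux is %s\n", val ? "enabled" : "disabled");
        return EXIT_SUCCESS;
}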

int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
             struct net_device *orig_dev)
{
        const struct ipv6hdr *hdr;
        u32 pkt_len;
        struct inet6_dev *idev;
        struct net *net = dev_net(skb->dev);

        if (skb->pkt_type == PACKET_OTHERHOST) {
                kfree_skb(skb);
                return NET_RX_DROP;
        }

        rcu_read_lock();

        idev = __in6_dev_get(skb->dev);

        __IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_IN, skb->len);

        if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL ||
            !idev || unlikely(idev->cnf.disable_ipv6)) {
                __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
                goto drop;
        }

        memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm));

        /*
         * Store incoming device index. When the packet will
         * be queued, we cannot refer to skb->dev anymore.
         *
         * BTW, when we send a packet for our own local address on a
         * non-loopback interface (e.g. ethX), it is being delivered
         * via the loopback interface (lo) here; skb->dev = loopback_dev.
         * It, however, should be considered as if it arrived via the
         * sending interface (ethX), because of the nature of scoping
         * architecture. --yoshfuji
         */
        IP6CB(skb)->iif = skb_valid_dst(skb) ?
                ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex;

        if (unlikely(!pskb_may_pull(skb, sizeof(*hdr))))
                goto err;

        hdr = ipv6_hdr(skb);

        if (hdr->version != 6)
                goto err;

        __IP6_ADD_STATS(net, idev,
                        IPSTATS_MIB_NOECTPKTS +
                                (ipv6_get_dsfield(hdr) & INET_ECN_MASK),
                        max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));
        /*
         * RFC4291 2.5.3
         * The loopback address must not be used as the source address in IPv6
         * packets that are sent outside of a single node. [..]
         * A packet received on an interface with a destination address
         * of loopback must be dropped.
         */
        if ((ipv6_addr_loopback(&hdr->saddr) ||
             ipv6_addr_loopback(&hdr->daddr)) &&
            !(dev->flags & IFF_LOOPBACK))
                goto err;

        /* RFC4291 Errata ID: 3480
         * Interface-Local scope spans only a single interface on a
         * node and is useful only for loopback transmission of
         * multicast. Packets with interface-local scope received
         * from another node must be discarded.
         */
        if (!(skb->pkt_type == PACKET_LOOPBACK ||
              dev->flags & IFF_LOOPBACK) &&
            ipv6_addr_is_multicast(&hdr->daddr) &&
            IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1)
                goto err;

        /* If enabled, drop unicast packets that were encapsulated in link-layer
         * multicast or broadcast to protect against the so-called "hole-196"
         * attack in 802.11 wireless.
         */
        if (!ipv6_addr_is_multicast(&hdr->daddr) &&
            (skb->pkt_type == PACKET_BROADCAST ||
             skb->pkt_type == PACKET_MULTICAST) &&
            idev->cnf.drop_unicast_in_l2_multicast)
                goto err;

        /* RFC4291 2.7
         * Nodes must not originate a packet to a multicast address whose scope
         * field contains the reserved value 0; if such a packet is received, it
         * must be silently dropped.
         */
        if (ipv6_addr_is_multicast(&hdr->daddr) &&
            IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0)
                goto err;

        /*
         * RFC4291 2.7
         * Multicast addresses must not be used as source addresses in IPv6
         * packets or appear in any Routing header.
         */
        if (ipv6_addr_is_multicast(&hdr->saddr))
                goto err;

        skb->transport_header = skb->network_header + sizeof(*hdr);
        IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr);

        pkt_len = ntohs(hdr->payload_len);

        /* pkt_len may be zero if Jumbo payload option is present */
        if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) {
                if (pkt_len + sizeof(struct ipv6hdr) > skb->len) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS);
                        goto drop;
                }
                if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        goto drop;
                }
                hdr = ipv6_hdr(skb);
        }

        if (hdr->nexthdr == NEXTHDR_HOP) {
                if (ipv6_parse_hopopts(skb) < 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
                        rcu_read_unlock();
                        return NET_RX_DROP;
                }
        }

        rcu_read_unlock();

        /* Must drop socket now because of tproxy. */
        skb_orphan(skb);

        return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING,
                       net, NULL, skb, dev, NULL,
                       ip6_rcv_finish);
err:
        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INHDRERRORS);
drop:
        rcu_read_unlock();
        kfree_skb(skb);
        return NET_RX_DROP;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 177 | 29.80% | 7 | 14.58%
Hideaki Yoshifuji / 吉藤英明 | 90 | 15.15% | 7 | 14.58%
Hannes Frederic Sowa | 63 | 10.61% | 2 | 4.17%
Eric Dumazet | 38 | 6.40% | 5 | 10.42%
Johannes Berg | 35 | 5.89% | 1 | 2.08%
Linus Torvalds | 29 | 4.88% | 1 | 2.08%
Denis V. Lunev | 24 | 4.04% | 1 | 2.08%
Patrick McHardy | 22 | 3.70% | 2 | 4.17%
Florian Westphal | 18 | 3.03% | 1 | 2.08%
Guillaume Chazarain | 16 | 2.69% | 1 | 2.08%
Arnaldo Carvalho de Melo | 14 | 2.36% | 3 | 6.25%
Brian Haley | 14 | 2.36% | 1 | 2.08%
Herbert Xu | 12 | 2.02% | 3 | 6.25%
Mitsuru Chinen | 9 | 1.52% | 1 | 2.08%
David S. Miller | 7 | 1.18% | 2 | 4.17%
Shirley Ma | 6 | 1.01% | 1 | 2.08%
Nivedita Singhvi | 5 | 0.84% | 1 | 2.08%
Neil Horman | 5 | 0.84% | 1 | 2.08%
Eric W. Biedermann | 3 | 0.51% | 2 | 4.17%
Mark Smith | 3 | 0.51% | 1 | 2.08%
Wei-Chun Chao | 1 | 0.17% | 1 | 2.08%
Daniel Lezcano | 1 | 0.17% | 1 | 2.08%
Jesper Nilsson | 1 | 0.17% | 1 | 2.08%
Jan Engelhardt | 1 | 0.17% | 1 | 2.08%
Total | 594 | 100.00% | 48 | 100.00%

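Every check in ipv6_rcv() above is made against the 40-byte fixed IPv6 header: the version field must be 6, loopback and multicast source addresses are rejected, and payload_len must be consistent with the bytes actually received (zero only together with a Hop-by-Hop header, as used by the Jumbo Payload option). The following user-space sketch mirrors a subset of those checks with the standard struct ip6_hdr from <netinet/ip6.h>; the buffer is assumed to start at the IPv6 header, e.g. as read from a raw socket, and the function name is illustrative:

/* Minimal user-space sketch of the header sanity checks ipv6_rcv() applies:
 * version == 6, no loopback source/destination, no multicast source, and a
 * payload length consistent with the bytes actually received.
 */
#include <stdbool.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */
#include <netinet/in.h>         /* IN6_IS_ADDR_*, IPPROTO_* */
#include <netinet/ip6.h>        /* struct ip6_hdr */

static bool ipv6_header_ok(const unsigned char *buf, size_t len)
{
        struct ip6_hdr hdr;
        size_t pkt_len;

        if (len < sizeof(hdr))                   /* pskb_may_pull() equivalent */
                return false;
        memcpy(&hdr, buf, sizeof(hdr));          /* avoid unaligned access */

        if ((hdr.ip6_vfc >> 4) != 6)             /* hdr->version != 6 */
                return false;

        if (IN6_IS_ADDR_LOOPBACK(&hdr.ip6_src) ||
            IN6_IS_ADDR_LOOPBACK(&hdr.ip6_dst))  /* RFC 4291 2.5.3 */
                return false;

        if (IN6_IS_ADDR_MULTICAST(&hdr.ip6_src)) /* RFC 4291 2.7 */
                return false;

        pkt_len = ntohs(hdr.ip6_plen);

        /* pkt_len may be zero when a Hop-by-Hop (Jumbo Payload) header follows */
        if (pkt_len != 0 || hdr.ip6_nxt != IPPROTO_HOPOPTS) {
                /* truncated-packet check (IPSTATS_MIB_INTRUNCATEDPKTS) */
                if (pkt_len + sizeof(hdr) > len)
                        return false;
        }

        return true;
}

int main(void)
{
        /* A minimal, empty-payload IPv6 header: version 6, next header
         * "no next header", unspecified (all-zero) addresses.
         */
        unsigned char pkt[sizeof(struct ip6_hdr)] = { 0x60 };

        pkt[6] = IPPROTO_NONE;                   /* next header field */
        return ipv6_header_ok(pkt, sizeof(pkt)) ? 0 : 1;
}
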
/*
 *      Deliver the packet to the host
 */
static int ip6_input_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
        const struct inet6_protocol *ipprot;
        struct inet6_dev *idev;
        unsigned int nhoff;
        int nexthdr;
        bool raw;
        bool have_final = false;

        /*
         *      Parse extension headers
         */

        rcu_read_lock();
resubmit:
        idev = ip6_dst_idev(skb_dst(skb));
        if (!pskb_pull(skb, skb_transport_offset(skb)))
                goto discard;
        nhoff = IP6CB(skb)->nhoff;
        nexthdr = skb_network_header(skb)[nhoff];

resubmit_final:
        raw = raw6_local_deliver(skb, nexthdr);
        ipprot = rcu_dereference(inet6_protos[nexthdr]);
        if (ipprot) {
                int ret;

                if (have_final) {
                        if (!(ipprot->flags & INET6_PROTO_FINAL)) {
                                /* Once we've seen a final protocol don't
                                 * allow encapsulation on any non-final
                                 * ones. This allows foo in UDP encapsulation
                                 * to work.
                                 */
                                goto discard;
                        }
                } else if (ipprot->flags & INET6_PROTO_FINAL) {
                        const struct ipv6hdr *hdr;

                        /* Only do this once for first final protocol */
                        have_final = true;

                        /* Free reference early: we don't need it any more,
                         * and it may hold ip_conntrack module loaded
                         * indefinitely.
                         */
                        nf_reset(skb);

                        skb_postpull_rcsum(skb, skb_network_header(skb),
                                           skb_network_header_len(skb));
                        hdr = ipv6_hdr(skb);
                        if (ipv6_addr_is_multicast(&hdr->daddr) &&
                            !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr,
                                                 &hdr->saddr) &&
                            !ipv6_is_mld(skb, nexthdr,
                                         skb_network_header_len(skb)))
                                goto discard;
                }
                if (!(ipprot->flags & INET6_PROTO_NOPOLICY) &&
                    !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
                        goto discard;

                ret = ipprot->handler(skb);
                if (ret > 0) {
                        if (ipprot->flags & INET6_PROTO_FINAL) {
                                /* Not an extension header, most likely UDP
                                 * encapsulation. Use return value as nexthdr
                                 * protocol not nhoff (which presumably is
                                 * not set by handler).
                                 */
                                nexthdr = ret;
                                goto resubmit_final;
                        } else {
                                goto resubmit;
                        }
                } else if (ret == 0) {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
                }
        } else {
                if (!raw) {
                        if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
                                __IP6_INC_STATS(net, idev,
                                                IPSTATS_MIB_INUNKNOWNPROTOS);
                                icmpv6_send(skb, ICMPV6_PARAMPROB,
                                            ICMPV6_UNK_NEXTHDR, nhoff);
                        }
                        kfree_skb(skb);
                } else {
                        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDELIVERS);
                        consume_skb(skb);
                }
        }
        rcu_read_unlock();
        return 0;

discard:
        __IP6_INC_STATS(net, idev, IPSTATS_MIB_INDISCARDS);
        rcu_read_unlock();
        kfree_skb(skb);
        return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Hideaki Yoshifuji / 吉藤英明 | 77 | 18.29% | 4 | 9.76%
Tom Herbert | 61 | 14.49% | 2 | 4.88%
Linus Torvalds (pre-git) | 60 | 14.25% | 6 | 14.63%
David L Stevens | 48 | 11.40% | 1 | 2.44%
Tom Lendacky | 33 | 7.84% | 1 | 2.44%
David S. Miller | 30 | 7.13% | 4 | 9.76%
Arnaldo Carvalho de Melo | 15 | 3.56% | 4 | 9.76%
Linus Torvalds | 13 | 3.09% | 1 | 2.44%
Patrick McHardy | 12 | 2.85% | 2 | 4.88%
Stephen Hemminger | 12 | 2.85% | 1 | 2.44%
Eric Dumazet | 10 | 2.38% | 4 | 9.76%
Denis V. Lunev | 8 | 1.90% | 1 | 2.44%
Neil Horman | 8 | 1.90% | 1 | 2.44%
Nivedita Singhvi | 7 | 1.66% | 1 | 2.44%
Yasuyuki Kozakai | 6 | 1.43% | 1 | 2.44%
Eric W. Biedermann | 5 | 1.19% | 1 | 2.44%
Ian Morris | 4 | 0.95% | 1 | 2.44%
Pavel Emelyanov | 4 | 0.95% | 1 | 2.44%
Dipankar Sarma | 3 | 0.71% | 1 | 2.44%
Shirley Ma | 3 | 0.71% | 1 | 2.44%
Herbert Xu | 1 | 0.24% | 1 | 2.44%
Alexey Dobriyan | 1 | 0.24% | 1 | 2.44%
Total | 421 | 100.00% | 41 | 100.00%

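ip6_input_finish() above dispatches on the Next Header value through the inet6_protos[] array, which transport protocols fill in with inet6_add_protocol(). The sketch below shows what such a registration looks like as a hypothetical out-of-tree module; the module name, handler, and the experimental protocol number 253 (reserved for testing by RFC 3692) are illustrative assumptions, not part of this file:

/* Hypothetical module registering a handler in inet6_protos[], the table
 * ip6_input_finish() dispatches through.  Protocol number 253 is one of the
 * values reserved for experimentation (RFC 3692) and is used here only for
 * illustration.
 */
#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/protocol.h>

#define DEMO_IPPROTO 253

static int demo_rcv(struct sk_buff *skb)
{
        /* Consume the packet; returning 0 makes ip6_input_finish() count it
         * as delivered (IPSTATS_MIB_INDELIVERS).  A positive return value
         * would instead be treated as the next protocol to resubmit to.
         */
        kfree_skb(skb);
        return 0;
}

static const struct inet6_protocol demo_protocol = {
        .handler = demo_rcv,
        /* INET6_PROTO_FINAL: a terminal protocol, not an extension header;
         * INET6_PROTO_NOPOLICY skips the xfrm policy check seen in
         * ip6_input_finish().
         */
        .flags   = INET6_PROTO_NOPOLICY | INET6_PROTO_FINAL,
};

static int __init demo_init(void)
{
        return inet6_add_protocol(&demo_protocol, DEMO_IPPROTO);
}

static void __exit demo_exit(void)
{
        inet6_del_protocol(&demo_protocol, DEMO_IPPROTO);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");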

int ip6_input(struct sk_buff *skb)
{
        return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN,
                       dev_net(skb->dev), NULL, skb, skb->dev, NULL,
                       ip6_input_finish);
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 26 | 70.27% | 1 | 20.00%
Eric W. Biedermann | 7 | 18.92% | 1 | 20.00%
David S. Miller | 2 | 5.41% | 1 | 20.00%
Patrick McHardy | 1 | 2.70% | 1 | 20.00%
Jan Engelhardt | 1 | 2.70% | 1 | 20.00%
Total | 37 | 100.00% | 5 | 100.00%

EXPORT_SYMBOL_GPL(ip6_input);
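
ip6_input() above is simply the NF_INET_LOCAL_IN netfilter hook wrapped around ip6_input_finish(), so any hook registered at that point sees locally delivered IPv6 packets before the protocol dispatch runs. A minimal, hypothetical observer module for that hook point (the names are illustrative):

/* Hypothetical module observing packets at NF_INET_LOCAL_IN, the hook point
 * that ip6_input() traverses before ip6_input_finish() runs.
 */
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/net_namespace.h>
#include <net/ipv6.h>

static unsigned int nfdemo_local_in(void *priv, struct sk_buff *skb,
                                    const struct nf_hook_state *state)
{
        const struct ipv6hdr *hdr = ipv6_hdr(skb);

        pr_debug("ipv6 local-in: nexthdr=%u payload_len=%u\n",
                 hdr->nexthdr, ntohs(hdr->payload_len));
        return NF_ACCEPT;       /* purely observational: never steal or drop */
}

static const struct nf_hook_ops nfdemo_ops = {
        .hook     = nfdemo_local_in,
        .pf       = NFPROTO_IPV6,
        .hooknum  = NF_INET_LOCAL_IN,
        .priority = NF_IP6_PRI_FIRST,
};

static int __init nfdemo_init(void)
{
        return nf_register_net_hook(&init_net, &nfdemo_ops);
}

static void __exit nfdemo_exit(void)
{
        nf_unregister_net_hook(&init_net, &nfdemo_ops);
}

module_init(nfdemo_init);
module_exit(nfdemo_exit);
MODULE_LICENSE("GPL");
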
int ip6_mc_input(struct sk_buff *skb)
{
        const struct ipv6hdr *hdr;
        bool deliver;

        __IP6_UPD_PO_STATS(dev_net(skb_dst(skb)->dev),
                           ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST,
                           skb->len);

        hdr = ipv6_hdr(skb);
        deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL);

#ifdef CONFIG_IPV6_MROUTE
        /*
         *      IPv6 multicast router mode is now supported ;)
         */
        if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding &&
            !(ipv6_addr_type(&hdr->daddr) &
              (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&
            likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {
                /*
                 * Okay, we try to forward - split and duplicate
                 * packets.
                 */
                struct sk_buff *skb2;
                struct inet6_skb_parm *opt = IP6CB(skb);

                /* Check for MLD */
                if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
                        /* Check if this is a mld message */
                        u8 nexthdr = hdr->nexthdr;
                        __be16 frag_off;
                        int offset;

                        /* Check if the value of Router Alert
                         * is for MLD (0x0000).
                         */
                        if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) {
                                deliver = false;

                                if (!ipv6_ext_hdr(nexthdr)) {
                                        /* BUG */
                                        goto out;
                                }
                                offset = ipv6_skip_exthdr(skb, sizeof(*hdr),
                                                          &nexthdr, &frag_off);
                                if (offset < 0)
                                        goto out;

                                if (ipv6_is_mld(skb, nexthdr, offset))
                                        deliver = true;

                                goto out;
                        }
                        /* unknown RA - process it normally */
                }

                if (deliver)
                        skb2 = skb_clone(skb, GFP_ATOMIC);
                else {
                        skb2 = skb;
                        skb = NULL;
                }

                if (skb2) {
                        ip6_mr_input(skb2);
                }
        }
out:
#endif
        if (likely(deliver))
                ip6_input(skb);
        else {
                /* discard */
                kfree_skb(skb);
        }

        return 0;
}

Contributors

Person | Tokens | Tokens % | Commits | Commits %
Hideaki Yoshifuji / 吉藤英明 | 194 | 62.99% | 6 | 26.09%
Linus Torvalds (pre-git) | 54 | 17.53% | 4 | 17.39%
Thomas Goff | 13 | 4.22% | 1 | 4.35%
Eric Dumazet | 11 | 3.57% | 4 | 17.39%
David L Stevens | 8 | 2.60% | 2 | 8.70%
Denis V. Lunev | 7 | 2.27% | 1 | 4.35%
Jesse Gross | 6 | 1.95% | 1 | 4.35%
Neil Horman | 5 | 1.62% | 1 | 4.35%
Hannes Frederic Sowa | 4 | 1.30% | 1 | 4.35%
Angga | 3 | 0.97% | 1 | 4.35%
Arnaldo Carvalho de Melo | 3 | 0.97% | 1 | 4.35%
Total | 308 | 100.00% | 23 | 100.00%


Overall Contributors

Person | Tokens | Tokens % | Commits | Commits %
Linus Torvalds (pre-git) | 399 | 25.41% | 8 | 8.89%
Hideaki Yoshifuji / 吉藤英明 | 365 | 23.25% | 14 | 15.56%
Eric Dumazet | 110 | 7.01% | 7 | 7.78%
Hannes Frederic Sowa | 67 | 4.27% | 3 | 3.33%
Tom Herbert | 61 | 3.89% | 2 | 2.22%
David L Stevens | 56 | 3.57% | 2 | 2.22%
David S. Miller | 44 | 2.80% | 5 | 5.56%
Linus Torvalds | 42 | 2.68% | 1 | 1.11%
Denis V. Lunev | 39 | 2.48% | 1 | 1.11%
Tom Lendacky | 36 | 2.29% | 1 | 1.11%
Patrick McHardy | 35 | 2.23% | 3 | 3.33%
Johannes Berg | 35 | 2.23% | 1 | 1.11%
Arnaldo Carvalho de Melo | 32 | 2.04% | 6 | 6.67%
David Ahern | 21 | 1.34% | 2 | 2.22%
Eric W. Biedermann | 20 | 1.27% | 3 | 3.33%
Subash Abhinov Kasiviswanathan | 20 | 1.27% | 1 | 1.11%
Florian Westphal | 18 | 1.15% | 1 | 1.11%
Neil Horman | 18 | 1.15% | 2 | 2.22%
Guillaume Chazarain | 16 | 1.02% | 1 | 1.11%
Brian Haley | 14 | 0.89% | 1 | 1.11%
Thomas Goff | 13 | 0.83% | 1 | 1.11%
Herbert Xu | 13 | 0.83% | 3 | 3.33%
Nivedita Singhvi | 12 | 0.76% | 1 | 1.11%
Stephen Hemminger | 12 | 0.76% | 1 | 1.11%
Mitsuru Chinen | 9 | 0.57% | 1 | 1.11%
Shirley Ma | 9 | 0.57% | 1 | 1.11%
Jesse Gross | 6 | 0.38% | 1 | 1.11%
Holger Eitzenberger | 6 | 0.38% | 1 | 1.11%
Yasuyuki Kozakai | 6 | 0.38% | 1 | 1.11%
Ian Morris | 5 | 0.32% | 2 | 2.22%
Wei-Chun Chao | 5 | 0.32% | 1 | 1.11%
Nikolay Borisov | 4 | 0.25% | 1 | 1.11%
Pavel Emelyanov | 4 | 0.25% | 1 | 1.11%
Angga | 3 | 0.19% | 1 | 1.11%
Dipankar Sarma | 3 | 0.19% | 1 | 1.11%
Tejun Heo | 3 | 0.19% | 1 | 1.11%
Mark Smith | 3 | 0.19% | 1 | 1.11%
Jan Engelhardt | 2 | 0.13% | 1 | 1.11%
Jesper Nilsson | 1 | 0.06% | 1 | 1.11%
Daniel Lezcano | 1 | 0.06% | 1 | 1.11%
Adrian Bunk | 1 | 0.06% | 1 | 1.11%
Alexey Dobriyan | 1 | 0.06% | 1 | 1.11%
Total | 1570 | 100.00% | 90 | 100.00%
Created with cregit.