Release 4.11 net/ipv6/ip6_output.c
/*
* IPv6 output functions
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Based on linux/net/ipv4/ip_output.c
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
* A.N.Kuznetsov : arithmetic in fragmentation.
* extension headers are implemented.
* route changes now work.
* ip6_forward does not confuse sniffers.
* etc.
*
* H. von Brand : Added missing #include <linux/string.h>
* Imran Patel : frag id should be in NBO
* Kazunori MIYAZAWA @USAGI
* : add ip6_append_data and related functions
* for datagram xmit
*/
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/bpf-cgroup.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/checksum.h>
#include <linux/mroute6.h>
#include <net/l3mdev.h>
#include <net/lwtunnel.h>
static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct net_device *dev = dst->dev;
struct neighbour *neigh;
struct in6_addr *nexthop;
int ret;
skb->protocol = htons(ETH_P_IPV6);
skb->dev = dev;
if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(sk) &&
((mroute6_socket(net, skb) &&
!(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr))) {
struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
/* Do not check for IFF_ALLMULTI; multicast routing
is not supported in any case.
*/
if (newskb)
NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
net, sk, newskb, NULL, newskb->dev,
dev_loopback_xmit);
if (ipv6_hdr(skb)->hop_limit == 0) {
IP6_INC_STATS(net, idev,
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
}
IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, skb->len);
if (IPV6_ADDR_MC_SCOPE(&ipv6_hdr(skb)->daddr) <=
IPV6_ADDR_SCOPE_NODELOCAL &&
!(dev->flags & IFF_LOOPBACK)) {
kfree_skb(skb);
return 0;
}
}
if (lwtunnel_xmit_redirect(dst->lwtstate)) {
int res = lwtunnel_xmit(skb);
if (res < 0 || res == LWTUNNEL_XMIT_DONE)
return res;
}
rcu_read_lock_bh();
nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
if (!IS_ERR(neigh)) {
sock_confirm_neigh(skb, neigh);
ret = neigh_output(neigh, skb);
rcu_read_unlock_bh();
return ret;
}
rcu_read_unlock_bh();
IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
kfree_skb(skb);
return -EINVAL;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds (pre-git) | 131 | 31.26% | 7 | 16.67% |
Hideaki Yoshifuji / 吉藤英明 | 93 | 22.20% | 5 | 11.90% |
Hannes Frederic Sowa | 35 | 8.35% | 2 | 4.76% |
Roopa Prabhu | 32 | 7.64% | 1 | 2.38% |
David S. Miller | 32 | 7.64% | 6 | 14.29% |
Jan Engelhardt | 22 | 5.25% | 2 | 4.76% |
Arnaldo Carvalho de Melo | 12 | 2.86% | 1 | 2.38% |
Eric W. Biedermann | 11 | 2.63% | 3 | 7.14% |
Martin KaFai Lau | 8 | 1.91% | 1 | 2.38% |
Julian Anastasov | 8 | 1.91% | 2 | 4.76% |
Neil Horman | 6 | 1.43% | 1 | 2.38% |
Eric Dumazet | 6 | 1.43% | 1 | 2.38% |
Shirley Ma | 5 | 1.19% | 2 | 4.76% |
David L Stevens | 5 | 1.19% | 1 | 2.38% |
Octavian Purdila | 4 | 0.95% | 1 | 2.38% |
Patrick McHardy | 3 | 0.72% | 2 | 4.76% |
Benjamin Thery | 2 | 0.48% | 1 | 2.38% |
Denis V. Lunev | 2 | 0.48% | 1 | 2.38% |
Herbert Xu | 1 | 0.24% | 1 | 2.38% |
Michel Machado | 1 | 0.24% | 1 | 2.38% |
Total | 419 | 100.00% | 42 | 100.00% |
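The node-local drop at the end of the multicast branch relies on how an IPv6 multicast address encodes its scope: the low four bits of the second byte (the "X" in ff0X::). A minimal userspace sketch of that check, with the scope constant written out by hand rather than taken from kernel headers, might look like this:

/* Standalone sketch of the multicast-scope test in ip6_finish_output2().
 * Scope values follow RFC 4291: 1 = interface-local (node-local),
 * 2 = link-local, 5 = site-local, 0xe = global.
 */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#define MC_SCOPE_NODELOCAL 0x01	/* stands in for IPV6_ADDR_SCOPE_NODELOCAL */

static int mc_scope(const struct in6_addr *a)
{
	return a->s6_addr[1] & 0x0f;	/* the "X" in ff0X::/16 */
}

int main(void)
{
	struct in6_addr node_local, link_local;

	inet_pton(AF_INET6, "ff01::1", &node_local);	/* all-nodes, interface-local */
	inet_pton(AF_INET6, "ff02::1", &link_local);	/* all-nodes, link-local */

	/* A node-local destination must never leave the host, which is why
	 * ip6_finish_output2() frees such packets unless the device is loopback. */
	printf("ff01::1 scope=%d node-local? %s\n", mc_scope(&node_local),
	       mc_scope(&node_local) <= MC_SCOPE_NODELOCAL ? "yes" : "no");
	printf("ff02::1 scope=%d node-local? %s\n", mc_scope(&link_local),
	       mc_scope(&link_local) <= MC_SCOPE_NODELOCAL ? "yes" : "no");
	return 0;
}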
static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
int ret;
ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
if (ret) {
kfree_skb(skb);
return ret;
}
if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
dst_allfrag(skb_dst(skb)) ||
(IP6CB(skb)->frag_max_size && skb->len > IP6CB(skb)->frag_max_size))
return ip6_fragment(net, sk, skb, ip6_finish_output2);
else
return ip6_finish_output2(net, sk, skb);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Jan Engelhardt | 53 | 45.30% | 1 | 16.67% |
Daniel Mack | 26 | 22.22% | 1 | 16.67% |
Jiri Pirko | 20 | 17.09% | 1 | 16.67% |
David S. Miller | 9 | 7.69% | 1 | 16.67% |
Eric W. Biedermann | 9 | 7.69% | 2 | 33.33% |
Total | 117 | 100.00% | 6 | 100.00% |
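The branch above can be read as a single predicate: fragment when the packet exceeds the path MTU and is not GSO, when the destination demands a fragment header on every packet (dst_allfrag), or when conntrack defragmentation recorded a smaller incoming fragment size. A self-contained restatement of that decision, with an invented pkt_state struct standing in for skb and dst state:

/* Illustrative restatement of the ip6_finish_output() fragmentation
 * decision; the struct and its fields are invented for the sketch,
 * only the logic mirrors the kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct pkt_state {
	unsigned int len;		/* skb->len */
	unsigned int mtu;		/* ip6_skb_dst_mtu(skb) */
	bool is_gso;			/* skb_is_gso(skb) */
	bool dst_allfrag;		/* dst_allfrag(skb_dst(skb)) */
	unsigned int frag_max_size;	/* IP6CB(skb)->frag_max_size, 0 if unset */
};

static bool needs_fragmentation(const struct pkt_state *p)
{
	if (p->len > p->mtu && !p->is_gso)
		return true;
	if (p->dst_allfrag)
		return true;
	if (p->frag_max_size && p->len > p->frag_max_size)
		return true;
	return false;
}

int main(void)
{
	struct pkt_state p = { .len = 1600, .mtu = 1500 };

	printf("fragment? %s\n", needs_fragmentation(&p) ? "yes" : "no");
	return 0;
}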
int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
struct net_device *dev = skb_dst(skb)->dev;
struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
if (unlikely(idev->cnf.disable_ipv6)) {
IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return 0;
}
return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
net, sk, skb, NULL, dev,
ip6_finish_output,
!(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Hideaki Yoshifuji / 吉藤英明 | 38 | 35.51% | 1 | 8.33% |
Jan Engelhardt | 34 | 31.78% | 2 | 16.67% |
Kazunori Miyazawa | 14 | 13.08% | 1 | 8.33% |
Eric Dumazet | 8 | 7.48% | 2 | 16.67% |
Eric W. Biedermann | 8 | 7.48% | 3 | 25.00% |
David S. Miller | 2 | 1.87% | 1 | 8.33% |
Herbert Xu | 2 | 1.87% | 1 | 8.33% |
Denis V. Lunev | 1 | 0.93% | 1 | 8.33% |
Total | 107 | 100.00% | 12 | 100.00% |
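Roughly, NF_HOOK_COND() runs the netfilter hooks only when its condition holds and otherwise passes the packet straight to the output callback; here the condition excludes packets flagged IP6SKB_REROUTED, which have typically already traversed POST_ROUTING once. A standalone sketch of that control flow, with invented function names:

/* Sketch of the conditional-hook flow ip6_output() relies on.
 * run_post_routing_hooks() and finish_output() are invented stand-ins
 * for the netfilter hook traversal and ip6_finish_output(). */
#include <stdbool.h>
#include <stdio.h>

static int finish_output(const char *pkt)
{
	printf("finish_output(%s)\n", pkt);
	return 0;
}

static int run_post_routing_hooks(const char *pkt)
{
	printf("POST_ROUTING hooks for %s\n", pkt);
	return 1;	/* 1 = accepted, mirroring the nf_hook() convention */
}

static int hook_cond(const char *pkt, bool not_rerouted)
{
	/* Run the hooks only when the condition holds; call the output
	 * callback when the hooks are skipped or accept the packet. */
	if (!not_rerouted || run_post_routing_hooks(pkt) == 1)
		return finish_output(pkt);
	return -1;	/* stolen or dropped by a hook */
}

int main(void)
{
	hook_cond("normal packet", true);
	hook_cond("rerouted packet", false);
	return 0;
}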
/*
* xmit an sk_buff (used by TCP, SCTP and DCCP)
* Note: the socket lock is not held for SYNACK packets, but the socket
* might be modified by calls to skb_set_owner_w() and ipv6_local_error(),
* which use proper atomic operations or spinlocks.
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
__u32 mark, struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
struct in6_addr *first_hop = &fl6->daddr;
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr;
u8 proto = fl6->flowi6_proto;
int seg_len = skb->len;
int hlimit = -1;
u32 mtu;
if (opt) {
unsigned int head_room;
/* First: exthdrs may take lots of space (~8K for now);
MAX_HEADER is not enough.
*/
head_room = opt->opt_nflen + opt->opt_flen;
seg_len += head_room;
head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
if (skb_headroom(skb) < head_room) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
if (!skb2) {
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUTDISCARDS);
kfree_skb(skb);
return -ENOBUFS;
}
consume_skb(skb);
skb = skb2;
/* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
* it is safe to call in our context (socket lock not held)
*/
skb_set_owner_w(skb, (struct sock *)sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
if (opt->opt_nflen)
ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop,
&fl6->saddr);
}
skb_push(skb, sizeof(struct ipv6hdr));
skb_reset_network_header(skb);
hdr = ipv6_hdr(skb);
/*
* Fill in the IPv6 header
*/
if (np)
hlimit = np->hop_limit;
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
np->autoflowlabel, fl6));
hdr->payload_len = htons(seg_len);
hdr->nexthdr = proto;
hdr->hop_limit = hlimit;
hdr->saddr = fl6->saddr;
hdr->daddr = *first_hop;
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = mark;
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_OUT, skb->len);
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
*/
skb = l3mdev_ip6_out((struct sock *)sk, skb);
if (unlikely(!skb))
return 0;
/* hooks should never assume socket lock is held.
* we promote our socket to non const
*/
return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
net, (struct sock *)sk, skb, NULL, dst->dev,
dst_output);
}
skb->dev = dst->dev;
/* ipv6_local_error() does not require socket lock,
* we promote our socket to non const
*/
ipv6_local_error((struct sock *)sk, EMSGSIZE, fl6, mtu);
IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds (pre-git) | 273 | 50.84% | 9 | 16.36% |
Hideaki Yoshifuji / 吉藤英明 | 46 | 8.57% | 7 | 12.73% |
Eric Dumazet | 36 | 6.70% | 4 | 7.27% |
David Ahern | 26 | 4.84% | 1 | 1.82% |
David S. Miller | 17 | 3.17% | 5 | 9.09% |
Denis V. Lunev | 16 | 2.98% | 1 | 1.82% |
Tom Herbert | 13 | 2.42% | 2 | 3.64% |
Arnaldo Carvalho de Melo | 12 | 2.23% | 2 | 3.64% |
Sridhar Samudrala | 11 | 2.05% | 1 | 1.82% |
Alexey Kuznetsov | 11 | 2.05% | 1 | 1.82% |
Shirley Ma | 10 | 1.86% | 1 | 1.82% |
Patrick McHardy | 9 | 1.68% | 2 | 3.64% |
Hannes Frederic Sowa | 9 | 1.68% | 1 | 1.82% |
Neil Horman | 6 | 1.12% | 1 | 1.82% |
Herbert Xu | 6 | 1.12% | 3 | 5.45% |
David Lebrun | 5 | 0.93% | 1 | 1.82% |
Tóth László Attila | 5 | 0.93% | 1 | 1.82% |
Gerrit Renker | 4 | 0.74% | 1 | 1.82% |
Pablo Neira Ayuso | 4 | 0.74% | 1 | 1.82% |
Steffen Klassert | 4 | 0.74% | 1 | 1.82% |
Alexey Dobriyan | 3 | 0.56% | 1 | 1.82% |
Eric W. Biedermann | 3 | 0.56% | 2 | 3.64% |
Kazunori Miyazawa | 2 | 0.37% | 1 | 1.82% |
Wei Yongjun | 2 | 0.37% | 1 | 1.82% |
Ian Morris | 1 | 0.19% | 1 | 1.82% |
Américo Wang | 1 | 0.19% | 1 | 1.82% |
Jan Engelhardt | 1 | 0.19% | 1 | 1.82% |
Chuck Lever | 1 | 0.19% | 1 | 1.82% |
Total | 537 | 100.00% | 55 | 100.00% |
EXPORT_SYMBOL(ip6_xmit);
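The headroom handling at the top of ip6_xmit() is plain arithmetic: the extension headers requested through ipv6_txoptions grow the payload length, while the reallocation threshold also has to cover the fixed 40-byte IPv6 header and the link-layer reserve of the egress device. A worked sketch with illustrative numbers (LL_RESERVE is an assumed example value standing in for LL_RESERVED_SPACE(dst->dev)):

/* Worked example of the ip6_xmit() headroom check; all values are
 * illustrative and the macro names are local to this sketch. */
#include <stdio.h>

#define IPV6_HDR_LEN 40	/* sizeof(struct ipv6hdr) */
#define LL_RESERVE   16	/* assumed link-layer reserve for the example */

int main(void)
{
	unsigned int opt_nflen = 24;	/* e.g. a routing header */
	unsigned int opt_flen = 0;	/* no fragmentable extension headers */
	unsigned int headroom_avail = 64;

	unsigned int head_room = opt_nflen + opt_flen;
	unsigned int seg_len_extra = head_room;	/* added to seg_len/payload_len */

	head_room += IPV6_HDR_LEN + LL_RESERVE;

	printf("payload grows by %u bytes\n", seg_len_extra);
	printf("need %u bytes of headroom, have %u -> %s\n",
	       head_room, headroom_avail,
	       headroom_avail < head_room ? "skb_realloc_headroom()" : "push in place");
	return 0;
}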
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
struct ip6_ra_chain *ra;
struct sock *last = NULL;
read_lock(&ip6_ra_lock);
for (ra = ip6_ra_chain; ra; ra = ra->next) {
struct sock *sk = ra->sk;
if (sk && ra->sel == sel &&
(!sk->sk_bound_dev_if ||
sk->sk_bound_dev_if == skb->dev->ifindex)) {
if (last) {
struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
if (skb2)
rawv6_rcv(last, skb2);
}
last = sk;
}
}
if (last) {
rawv6_rcv(last, skb);
read_unlock(&ip6_ra_lock);
return 1;
}
read_unlock(&ip6_ra_lock);
return 0;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds (pre-git) | 132 | 88.00% | 3 | 60.00% |
Andrew McDonald | 17 | 11.33% | 1 | 20.00% |
Adrian Bunk | 1 | 0.67% | 1 | 20.00% |
Total | 150 | 100.00% | 5 | 100.00% |
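The loop above uses a common delivery pattern: every matching Router Alert listener except the last receives a clone of the packet, and the final listener consumes the original, so no more copies are made than necessary. A hedged userspace sketch of the same pattern over a plain array, with strings standing in for sk_buffs and deliver() standing in for rawv6_rcv():

/* Sketch of the "clone for all but the last receiver" pattern used by
 * ip6_call_ra_chain(); everything here is invented for the illustration. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void deliver(const char *who, char *pkt)
{
	printf("%s got %s\n", who, pkt);
	free(pkt);	/* the receiver consumes the buffer */
}

int main(void)
{
	const char *listeners[] = { "sock-a", "sock-b", "sock-c" };
	char *pkt = strdup("RA-packet");
	const char *last = NULL;

	for (size_t i = 0; i < 3; i++) {
		if (last)
			deliver(last, strdup(pkt));	/* earlier matches get a copy */
		last = listeners[i];
	}
	if (last)
		deliver(last, pkt);	/* the last match consumes the original */
	else
		free(pkt);		/* no listener matched */
	return 0;
}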
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
struct ipv6hdr *hdr = ipv6_hdr(skb);
u8 nexthdr = hdr->nexthdr;
__be16 frag_off;
int offset;
if (ipv6_ext_hdr(nexthdr)) {
offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off);
if (offset < 0)
return 0;
} else
offset = sizeof(struct ipv6hdr);
if (nexthdr == IPPROTO_ICMPV6) {
struct icmp6hdr *icmp6;
if (!pskb_may_pull(skb, (skb_network_header(skb) +
offset + 1 - skb->data)))
return 0;
icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);
switch (icmp6->icmp6_type) {
case NDISC_ROUTER_SOLICITATION:
case NDISC_ROUTER_ADVERTISEMENT:
case NDISC_NEIGHBOUR_SOLICITATION:
case NDISC_NEIGHBOUR_ADVERTISEMENT:
case NDISC_REDIRECT:
/* For reaction involving unicast neighbor discovery
* message destined to the proxied address, pass it to
* input function.
*/
return 1;
default:
break;
}
}
/*
* The proxying router can't forward traffic sent to a link-local
* address, so signal the sender and discard the packet. This
* behavior is clarified by the MIPv6 specification.
*/
if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
dst_link_failure(skb);
return -1;
}
return 0;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Ville Nuorvala | 174 | 91.10% | 2 | 40.00% |
Arnaldo Carvalho de Melo | 11 | 5.76% | 2 | 40.00% |
Jesse Gross | 6 | 3.14% | 1 | 20.00% |
Total | 191 | 100.00% | 5 | 100.00% |
static inline int ip6_forward_finish(struct net *net, struct sock *sk,
struct sk_buff *skb)
{
return dst_output(net, sk, skb);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds (pre-git) | 17 | 53.12% | 1 | 20.00% |
Eric W. Biedermann | 8 | 25.00% | 3 | 60.00% |
David S. Miller | 7 | 21.88% | 1 | 20.00% |
Total | 32 | 100.00% | 5 | 100.00% |
static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
{
unsigned int mtu;
struct inet6_dev *idev;
if (dst_metric_locked(dst, RTAX_MTU)) {
mtu = dst_metric_raw(dst, RTAX_MTU);
if (mtu)
return mtu;
}
mtu = IPV6_MIN_MTU;
rcu_read_lock();
idev = __in6_dev_get(dst->dev);
if (idev)
mtu = idev->cnf.mtu6;
rcu_read_unlock();
return mtu;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Hannes Frederic Sowa | 83 | 100.00% | 1 | 100.00% |
Total | 83 | 100.00% | 1 | 100.00% |
static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
{
if (skb->len <= mtu)
return false;
/* ipv6 conntrack defrag sets max_frag_size + ignore_df */
if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
return true;
if (skb->ignore_df)
return false;
if (skb_is_gso(skb) && skb_gso_validate_mtu(skb, mtu))
return false;
return true;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Florian Westphal | 73 | 93.59% | 2 | 50.00% |
Marcelo Ricardo Leitner | 3 | 3.85% | 1 | 25.00% |
Américo Wang | 2 | 2.56% | 1 | 25.00% |
Total | 78 | 100.00% | 4 | 100.00% |
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct ipv6hdr *hdr = ipv6_hdr(skb);
struct inet6_skb_parm *opt = IP6CB(skb);
struct net *net = dev_net(dst->dev);
u32 mtu;
if (net->ipv6.devconf_all->forwarding == 0)
goto error;
if (skb->pkt_type != PACKET_HOST)
goto drop;
if (unlikely(skb->sk))
goto drop;
if (skb_warn_if_lro(skb))
goto drop;
if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
skb_forward_csum(skb);
/*
* We DO NOT make any processing on
* RA packets, pushing them to user level AS IS
* without any WARRANTY that application will be able
* to interpret them. The reason is that we
* cannot make anything clever here.
*
* We are not end-node, so that if packet contains
* AH/ESP, we cannot make anything.
* Defragmentation also would be a mistake, RA packets
* cannot be fragmented, because there is no warranty
* that different fragments will go along one path. --ANK
*/
if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) {
if (ip6_call_ra_chain(skb, ntohs(opt->ra)))
return 0;
}
/*
* check and decrement ttl
*/
if (hdr->hop_limit <= 1) {
/* Force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INHDRERRORS);
kfree_skb(skb);
return -ETIMEDOUT;
}
/* XXX: idev->cnf.proxy_ndp? */
if (net->ipv6.devconf_all->proxy_ndp &&
pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
int proxied = ip6_forward_proxy_check(skb);
if (proxied > 0)
return ip6_input(skb);
else if (proxied < 0) {
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
}
if (!xfrm6_route_forward(skb)) {
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INDISCARDS);
goto drop;
}
dst = skb_dst(skb);
/* IPv6 specs say nothing about it, but it is clear that we cannot
send redirects to source routed frames.
We don't send redirects to frames decapsulated from IPsec.
*/
if (skb->dev == dst->dev && opt->srcrt == 0 && !skb_sec_path(skb)) {
struct in6_addr *target = NULL;
struct inet_peer *peer;
struct rt6_info *rt;
/*
* incoming and outgoing devices are the same
* send a redirect.
*/
rt = (struct rt6_info *) dst;
if (rt->rt6i_flags & RTF_GATEWAY)
target = &rt->rt6i_gateway;
else
target = &hdr->daddr;
peer = inet_getpeer_v6(net->ipv6.peers, &hdr->daddr, 1);
/* Limit redirects both by destination (here)
and by source (inside ndisc_send_redirect)
*/
if (inet_peer_xrlim_allow(peer, 1*HZ))
ndisc_send_redirect(skb, target);
if (peer)
inet_putpeer(peer);
} else {
int addrtype = ipv6_addr_type(&hdr->saddr);
/* This check is security critical. */
if (addrtype == IPV6_ADDR_ANY ||
addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
goto error;
if (addrtype & IPV6_ADDR_LINKLOCAL) {
icmpv6_send(skb, ICMPV6_DEST_UNREACH,
ICMPV6_NOT_NEIGHBOUR, 0);
goto error;
}
}
mtu = ip6_dst_mtu_forward(dst);
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_INTOOBIGERRORS);
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_FRAGFAILS);
kfree_skb(skb);
return -EMSGSIZE;
}
if (skb_cow(skb, dst->dev->hard_header_len)) {
__IP6_INC_STATS(net, ip6_dst_idev(dst),
IPSTATS_MIB_OUTDISCARDS);
goto drop;
}
hdr = ipv6_hdr(skb);
/* Mangling hops number delayed to point after skb COW */
hdr->hop_limit--;
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
__IP6_ADD_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD,
net, NULL, skb, skb->dev, dst->dev,
ip6_forward_finish);
error:
__IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
kfree_skb(skb);
return -EINVAL;
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Linus Torvalds (pre-git) | 288 | 40.91% | 8 | 15.69% |
Hideaki Yoshifuji / 吉藤英明 | 88 | 12.50% | 8 | 15.69% |
Ville Nuorvala | 56 | 7.95% | 2 | 3.92% |
David S. Miller | 36 | 5.11% | 5 | 9.80% |
David L Stevens | 31 | 4.40% | 1 | 1.96% |
Tom Lendacky | 26 | 3.69% | 1 | 1.96% |
Ulrich Weber | 20 | 2.84% | 1 | 1.96% |
Shirley Ma | 18 | 2.56% | 1 | 1.96% |
Denis V. Lunev | 18 | 2.56% | 2 | 3.92% |
Eric Dumazet | 16 | 2.27% | 2 | 3.92% |
Vincent Bernat | 15 | 2.13% | 1 | 1.96% |
Hannes Frederic Sowa | 13 | 1.85% | 2 | 3.92% |
Li RongQing | 11 | 1.56% | 1 | 1.96% |
Linus Torvalds | 10 | 1.42% | 1 | 1.96% |
Daniel Lezcano | 10 | 1.42% | 1 | 1.96% |
Ben Hutchings | 10 | 1.42% | 1 | 1.96% |
Herbert Xu | 7 | 0.99% | 2 | 3.92% |
Arnaldo Carvalho de Melo | 6 | 0.85% | 1 | 1.96% |
Eric W. Biedermann | 4 | 0.57% | 2 | 3.92% |
Wei Dong | 4 | 0.57% | 1 | 1.96% |
Patrick McHardy | 4 | 0.57% | 2 | 3.92% |
Masahide Nakamura | 4 | 0.57% | 1 | 1.96% |
Florian Westphal | 3 | 0.43% | 1 | 1.96% |
Alexey Dobriyan | 3 | 0.43% | 1 | 1.96% |
Martin KaFai Lau | 2 | 0.28% | 1 | 1.96% |
Jan Engelhardt | 1 | 0.14% | 1 | 1.96% |
Total | 704 | 100.00% | 51 | 100.00% |
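The else branch marked "security critical" runs when the packet is leaving on a different interface than it arrived on: forwarding is refused for source addresses that can never belong to a reachable host (unspecified, multicast, loopback), and link-local sources get an ICMPv6 Destination Unreachable instead. A minimal sketch of that classification using the standard IN6_IS_ADDR_* macros in place of ipv6_addr_type():

/* Sketch of the source-address sanity check in ip6_forward(); the
 * verdict strings are informal summaries, not kernel identifiers. */
#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

static const char *forward_verdict(const struct in6_addr *saddr)
{
	if (IN6_IS_ADDR_UNSPECIFIED(saddr) ||
	    IN6_IS_ADDR_MULTICAST(saddr) ||
	    IN6_IS_ADDR_LOOPBACK(saddr))
		return "drop silently (error path)";
	if (IN6_IS_ADDR_LINKLOCAL(saddr))
		return "drop, send ICMPV6_DEST_UNREACH / ICMPV6_NOT_NEIGHBOUR";
	return "forward";
}

int main(void)
{
	const char *samples[] = { "::", "ff02::1", "::1", "fe80::1", "2001:db8::1" };
	struct in6_addr a;

	for (int i = 0; i < 5; i++) {
		inet_pton(AF_INET6, samples[i], &a);
		printf("%-12s -> %s\n", samples[i], forward_verdict(&a));
	}
	return 0;
}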
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_set(to, dst_clone(skb_dst(from)));
to->dev = from->dev;
to->mark = from->mark;
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
skb_copy_secmark(to, from);
}
Contributors
Person | Tokens | Token Proportion | Commits | Commit Proportion |
Kazunori Miyazawa | 70 | 69.31% | 1 | 14.29% |
Eric Dumazet | 8 | 7.92% | 1 | 14.29% |
Thomas Graf | 8 | 7.92% | 1 | 14.29% |
James Morris | 7 | 6.93% | 1 | 14.29% |
Yasuyuki Kozakai | 4 | 3.96% | 2 | 28.57% |
Hideaki Yoshifuji / 吉藤英明 | 4 | 3.96% | 1 | 14.29% |
Total | 101 | 100.00% | 7 | 100.00% |
int ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *))
{
struct sk_buff *frag;
struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
inet6_sk(skb->sk) : NULL;
struct ipv6hdr *tmp_hdr;
struct frag_hdr *fh;
unsigned int mtu, hlen, left, len;
int hroom, troom;
__be32 frag_id;
int ptr, offset = 0, err = 0;
u8 *prevhdr, nexthdr = 0;
hlen = ip6_find_1stfragopt(skb, &prevhdr);
nexthdr = *prevhdr;
mtu = ip6_skb_dst_mtu(skb);
/* We must not fragment if the socket is set to force MTU discovery
* or if the skb was not generated by a local socket.
*/
if (unlikely(!skb->ignore_df && skb->len > mtu))
goto fail_toobig;
if (IP6CB(skb)->frag_max_size) {
if (IP6CB(skb)->frag_max_size > mtu)
goto fail_toobig;
/* don't send fragments larger than what we received */
mtu = IP6CB(skb)->frag_max_size;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
}
if (np && np->frag_size < mtu) {
if (np->frag_size)
mtu = np->frag_size;
}
if (mtu < hlen + sizeof(struct frag_hdr) + 8)
goto fail_toobig;
mtu -= hlen + sizeof(struct frag_hdr);
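/* Worked example with illustrative numbers: for a 1500-byte path MTU and
 * a bare 40-byte IPv6 header (hlen = 40, no unfragmentable extension
 * headers), mtu becomes 1500 - 40 - 8 = 1452 after this subtraction.
 * That is the largest fragmentable payload any single fragment may carry;
 * every fragment except the last is further trimmed to a multiple of
 * 8 bytes (1448 here), as required by the fragment-offset encoding.
 */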
frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
&ipv6_hdr(skb)->saddr);
if (skb->ip_summed == CHECKSUM_PARTIAL &&
(err = skb_checksum_help(skb)))
goto fail;
hroom = LL_RESERVED_SPACE(rt->dst.dev);
if (skb_has_frag_list(skb)) {
unsigned int first_len = skb_pagelen(skb);
struct sk_buff *frag2;
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
skb_cloned