cregit-Linux: how code gets into the kernel

Release 4.14: net/ipv4/ip_output.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The Internet Protocol (IP) output module.
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *              Donald Becker, <becker@super.org>
 *              Alan Cox, <Alan.Cox@linux.org>
 *              Richard Underwood
 *              Stefan Becker, <stefanb@yello.ping.de>
 *              Jorge Cwik, <jorge@laser.satlink.net>
 *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *              Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *      See ip_input.c for original log
 *
 *      Fixes:
 *              Alan Cox        :       Missing nonblock feature in ip_build_xmit.
 *              Mike Kilburn    :       htons() missing in ip_build_xmit.
 *              Bradford Johnson:       Fix faulty handling of some frames when
 *                                      no route is found.
 *              Alexander Demenshin:    Missing sk/skb free in ip_queue_xmit
 *                                      (in case the packet is not accepted
 *                                      by output firewall rules)
 *              Mike McLagan    :       Routing by source
 *              Alexey Kuznetsov:       use new route cache
 *              Andi Kleen:             Fix broken PMTU recovery and remove
 *                                      some redundant tests.
 *              Vitaly E. Lavrov:       Transparent proxy revived after a year-long coma.
 *              Andi Kleen      :       Replace ip_reply with ip_send_reply.
 *              Andi Kleen      :       Split fast and slow ip_build_xmit path
 *                                      for decreased register pressure on x86
 *                                      and better readability.
 *              Marc Boucher    :       When call_out_firewall returns FW_QUEUE,
 *                                      silently drop skb instead of failing with -EPERM.
 *              Detlev Wengorz  :       Copy protocol for fragments.
 *              Hirokazu Takahashi:     HW checksumming for outgoing UDP
 *                                      datagrams.
 *              Hirokazu Takahashi:     sendfile() on UDP works now.
 */

#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/lwtunnel.h>
#include <linux/bpf-cgroup.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/netlink.h>
#include <linux/tcp.h>

static int
ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
	    unsigned int mtu,
	    int (*output)(struct net *, struct sock *, struct sk_buff *));

/* Generate a checksum for an outgoing IP datagram. */

void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 34 | 100.00% | 1 | 100.00%
Total | 34 | 100.00% | 1 | 100.00%

EXPORT_SYMBOL(ip_send_check);
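
The computation behind ip_fast_csum() is the RFC 1071 one's complement checksum over the header's 16-bit words. A minimal userspace sketch of the same arithmetic, assuming a hypothetical 20-byte sample header with the checksum bytes zeroed beforehand, just as ip_send_check() does:

#include <stdint.h>
#include <stdio.h>

/* One's complement sum over the IPv4 header, folded to 16 bits: the
 * arithmetic that ip_fast_csum() performs, minus the asm optimizations.
 * ihl is the header length in 32-bit words, as in iph->ihl. */
static uint16_t ip_checksum(const uint8_t *hdr, unsigned int ihl)
{
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 4; i += 2)	/* 16-bit words, wire order */
		sum += ((uint32_t)hdr[i] << 8) | hdr[i + 1];
	while (sum >> 16)			/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;			/* one's complement */
}

int main(void)
{
	/* hypothetical header, checksum field (bytes 10-11) already zeroed */
	uint8_t iph[20] = { 0x45, 0x00, 0x00, 0x54, 0x1c, 0x46, 0x40, 0x00,
			    0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
			    0xc0, 0xa8, 0x00, 0xc7 };
	uint16_t check = ip_checksum(iph, 5);

	iph[10] = check >> 8;		/* store in wire order */
	iph[11] = check & 0xff;
	printf("check  = 0x%04x\n", check);
	/* over the completed header the sum folds to 0xffff, so the
	 * complement is 0: this is how receivers verify the header */
	printf("verify = 0x%04x\n", ip_checksum(iph, 5));
	return 0;
}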
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);

	/* if egress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_out(sk, skb);
	if (unlikely(!skb))
		return 0;

	skb->protocol = htons(ETH_P_IP);

	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
		       net, sk, skb, NULL, skb_dst(skb)->dev,
		       dst_output);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 47 | 46.53% | 1 | 9.09%
David Ahern | 21 | 20.79% | 1 | 9.09%
David S. Miller | 10 | 9.90% | 1 | 9.09%
Eli Cooper | 9 | 8.91% | 1 | 9.09%
Eric W. Biedermann | 9 | 8.91% | 4 | 36.36%
Eric Dumazet | 3 | 2.97% | 1 | 9.09%
Jan Engelhardt | 1 | 0.99% | 1 | 9.09%
Patrick McHardy | 1 | 0.99% | 1 | 9.09%
Total | 101 | 100.00% | 11 | 100.00%


int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(net, sk, skb);
	if (likely(err == 1))
		err = dst_output(net, sk, skb);

	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 36 | 63.16% | 1 | 11.11%
Eric W. Biedermann | 14 | 24.56% | 7 | 77.78%
Eric Dumazet | 7 | 12.28% | 1 | 11.11%
Total | 57 | 100.00% | 9 | 100.00%

EXPORT_SYMBOL_GPL(ip_local_out);
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = ip4_dst_hoplimit(dst);
	return ttl;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
David S. Miller | 39 | 97.50% | 2 | 66.67%
Arnaldo Carvalho de Melo | 1 | 2.50% | 1 | 33.33%
Total | 40 | 100.00% | 3 | 100.00%

/*
 *	Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->daddr    = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
	iph->saddr    = saddr;
	iph->protocol = sk->sk_protocol;
	if (ip_dont_fragment(sk, &rt->dst)) {
		iph->frag_off = htons(IP_DF);
		iph->id = 0;
	} else {
		iph->frag_off = 0;
		__ip_select_ident(net, iph, 1);
	}

	if (opt && opt->opt.optlen) {
		iph->ihl += opt->opt.optlen>>2;
		ip_options_build(skb, &opt->opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	if (!skb->mark)
		skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(net, skb->sk, skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 141 | 48.45% | 9 | 32.14%
Eric Dumazet | 55 | 18.90% | 3 | 10.71%
David S. Miller | 35 | 12.03% | 3 | 10.71%
Arnaldo Carvalho de Melo | 17 | 5.84% | 4 | 14.29%
Eric W. Biedermann | 17 | 5.84% | 3 | 10.71%
Tóth László Attila | 8 | 2.75% | 1 | 3.57%
Jamal Hadi Salim | 7 | 2.41% | 1 | 3.57%
Alexey Kuznetsov | 7 | 2.41% | 1 | 3.57%
Al Viro | 2 | 0.69% | 1 | 3.57%
Herbert Xu | 1 | 0.34% | 1 | 3.57%
Hannes Frederic Sowa | 1 | 0.34% | 1 | 3.57%
Total | 291 | 100.00% | 28 | 100.00%

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
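
For illustration, the same field assignments can be mirrored in userspace against glibc's struct iphdr. This is only a layout sketch of the header that ip_build_and_send_pkt() produces, not kernel code; the TTL, protocol, and RFC 5737 documentation addresses are illustrative:

#include <arpa/inet.h>		/* htons, inet_addr */
#include <netinet/ip.h>		/* glibc's struct iphdr */
#include <stdio.h>
#include <string.h>

#ifndef IP_DF
#define IP_DF 0x4000		/* don't-fragment flag, RFC 791 */
#endif

int main(void)
{
	struct iphdr iph;
	size_t payload_len = 100;	/* illustrative payload size */

	memset(&iph, 0, sizeof(iph));
	/* mirrors ip_build_and_send_pkt: a fixed 20-byte header (ihl = 5)
	 * unless IP options are appended afterwards */
	iph.version  = 4;
	iph.ihl      = 5;
	iph.tos      = 0;
	iph.tot_len  = htons(sizeof(iph) + payload_len);
	iph.frag_off = htons(IP_DF);	/* DF set, so id may stay 0 */
	iph.id       = 0;
	iph.ttl      = 64;		/* a common default hop limit */
	iph.protocol = IPPROTO_UDP;
	iph.saddr    = inet_addr("192.0.2.1");	/* documentation addresses */
	iph.daddr    = inet_addr("192.0.2.2");

	/* version in the high nibble, ihl in the low: 0x45 on the wire */
	printf("first byte on the wire: 0x%02x\n", ((unsigned char *)&iph)[0]);
	return 0;
}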
static int ip_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);
	struct neighbour *neigh;
	u32 nexthop;

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(net, IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (!skb2) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		consume_skb(skb);
		skb = skb2;
	}

	if (lwtunnel_xmit_redirect(dst->lwtstate)) {
		int res = lwtunnel_xmit(skb);

		if (res < 0 || res == LWTUNNEL_XMIT_DONE)
			return res;
	}

	rcu_read_lock_bh();
	nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
	neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
	if (unlikely(!neigh))
		neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
	if (!IS_ERR(neigh)) {
		int res;

		sock_confirm_neigh(skb, neigh);
		res = neigh_output(neigh, skb);

		rcu_read_unlock_bh();
		return res;
	}
	rcu_read_unlock_bh();

	net_dbg_ratelimited("%s: No header cache and no neighbour!\n", __func__);
	kfree_skb(skb);
	return -EINVAL;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 89 | 27.38% | 2 | 6.90%
David S. Miller | 62 | 19.08% | 6 | 20.69%
Linus Torvalds (pre-git) | 37 | 11.38% | 4 | 13.79%
Mitsuru Chinen | 35 | 10.77% | 1 | 3.45%
Roopa Prabhu | 32 | 9.85% | 1 | 3.45%
Julian Anastasov | 20 | 6.15% | 3 | 10.34%
Eric Dumazet | 16 | 4.92% | 3 | 10.34%
Neil Horman | 14 | 4.31% | 1 | 3.45%
Eric W. Biedermann | 7 | 2.15% | 2 | 6.90%
Vasiliy Kulikov | 4 | 1.23% | 1 | 3.45%
Joe Perches | 4 | 1.23% | 1 | 3.45%
Pavel Emelyanov | 2 | 0.62% | 1 | 3.45%
Ian Morris | 1 | 0.31% | 1 | 3.45%
Stephen Hemminger | 1 | 0.31% | 1 | 3.45%
Chuck Lever | 1 | 0.31% | 1 | 3.45%
Total | 325 | 100.00% | 29 | 100.00%


static int ip_finish_output_gso(struct net *net, struct sock *sk,
				struct sk_buff *skb, unsigned int mtu)
{
	netdev_features_t features;
	struct sk_buff *segs;
	int ret = 0;

	/* common case: seglen is <= mtu */
	if (skb_gso_validate_mtu(skb, mtu))
		return ip_finish_output2(net, sk, skb);

	/* Slowpath - GSO segment length exceeds the egress MTU.
	 *
	 * This can happen in several cases:
	 *  - Forwarding of a TCP GRO skb, when DF flag is not set.
	 *  - Forwarding of an skb that arrived on a virtualization interface
	 *    (virtio-net/vhost/tap) with TSO/GSO size set by other network
	 *    stack.
	 *  - Local GSO skb transmitted on an NETIF_F_TSO tunnel stacked over an
	 *    interface with a smaller MTU.
	 *  - Arriving GRO skb (or GSO skb in a virtualized environment) that is
	 *    bridged to a NETIF_F_TSO tunnel stacked over an interface with an
	 *    insufficient MTU.
	 */
	features = netif_skb_features(skb);
	BUILD_BUG_ON(sizeof(*IPCB(skb)) > SKB_SGO_CB_OFFSET);
	segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
	if (IS_ERR_OR_NULL(segs)) {
		kfree_skb(skb);
		return -ENOMEM;
	}

	consume_skb(skb);

	do {
		struct sk_buff *nskb = segs->next;
		int err;

		segs->next = NULL;
		err = ip_fragment(net, sk, segs, mtu, ip_finish_output2);

		if (err && ret == 0)
			ret = err;
		segs = nskb;
	} while (segs);

	return ret;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Florian Westphal | 138 | 78.86% | 3 | 33.33%
Konstantin Khlebnikov | 14 | 8.00% | 1 | 11.11%
Eric W. Biedermann | 9 | 5.14% | 1 | 11.11%
David S. Miller | 9 | 5.14% | 1 | 11.11%
Marcelo Ricardo Leitner | 3 | 1.71% | 1 | 11.11%
Lance Richardson | 2 | 1.14% | 2 | 22.22%
Total | 175 | 100.00% | 9 | 100.00%


static int ip_finish_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	unsigned int mtu;
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(net, sk, skb);
	}
#endif
	mtu = ip_skb_dst_mtu(sk, skb);
	if (skb_is_gso(skb))
		return ip_finish_output_gso(net, sk, skb, mtu);

	if (skb->len > mtu || (IPCB(skb)->flags & IPSKB_FRAG_PMTU))
		return ip_fragment(net, sk, skb, mtu, ip_finish_output2);

	return ip_finish_output2(net, sk, skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 48 | 28.92% | 3 | 17.65%
Florian Westphal | 40 | 24.10% | 3 | 17.65%
Daniel Mack | 26 | 15.66% | 1 | 5.88%
Linus Torvalds (pre-git) | 19 | 11.45% | 2 | 11.76%
Eric W. Biedermann | 14 | 8.43% | 4 | 23.53%
David S. Miller | 13 | 7.83% | 1 | 5.88%
Eric Dumazet | 3 | 1.81% | 1 | 5.88%
Shmulik Ladkani | 2 | 1.20% | 1 | 5.88%
Adrian Bunk | 1 | 0.60% | 1 | 5.88%
Total | 166 | 100.00% | 17 | 100.00%


static int ip_mc_finish_output(struct net *net, struct sock *sk,
			       struct sk_buff *skb)
{
	int ret;

	ret = BPF_CGROUP_RUN_PROG_INET_EGRESS(sk, skb);
	if (ret) {
		kfree_skb(skb);
		return ret;
	}

	return dev_loopback_xmit(net, sk, skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Daniel Mack | 57 | 100.00% | 1 | 100.00%
Total | 57 | 100.00% | 1 | 100.00%


int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					net, sk, newskb, NULL, newskb->dev,
					ip_mc_finish_output);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
				net, sk, newskb, NULL, newskb->dev,
				ip_mc_finish_output);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 172 | 67.19% | 11 | 35.48%
Patrick McHardy | 24 | 9.38% | 3 | 9.68%
Eric W. Biedermann | 15 | 5.86% | 3 | 9.68%
David S. Miller | 8 | 3.12% | 2 | 6.45%
Eric Dumazet | 8 | 3.12% | 2 | 6.45%
Linus Torvalds | 8 | 3.12% | 1 | 3.23%
Neil Horman | 5 | 1.95% | 1 | 3.23%
Alexey Kuznetsov | 4 | 1.56% | 1 | 3.23%
Jan Engelhardt | 3 | 1.17% | 1 | 3.23%
Arnaldo Carvalho de Melo | 3 | 1.17% | 1 | 3.23%
Daniel Mack | 2 | 0.78% | 1 | 3.23%
Hideaki Yoshifuji / 吉藤英明 | 1 | 0.39% | 1 | 3.23%
Octavian Purdila | 1 | 0.39% | 1 | 3.23%
Herbert Xu | 1 | 0.39% | 1 | 3.23%
Pavel Emelyanov | 1 | 0.39% | 1 | 3.23%
Total | 256 | 100.00% | 31 | 100.00%
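
The sk_mc_loop(sk) test above is what the IP_MULTICAST_LOOP socket option feeds: while it is enabled (the default), ip_mc_output() clones the skb and loops a copy back through dev_loopback_xmit(). A hedged userspace sketch of a sender disabling that loopback; the group address and port are placeholders from the administratively scoped 239.0.0.0/8 range:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	unsigned char loop = 0;	/* 0: don't loop our datagrams back locally */
	struct sockaddr_in grp;

	if (fd < 0 || setsockopt(fd, IPPROTO_IP, IP_MULTICAST_LOOP,
				 &loop, sizeof(loop)) < 0) {
		perror("IP_MULTICAST_LOOP");
		return 1;
	}

	memset(&grp, 0, sizeof(grp));
	grp.sin_family = AF_INET;
	grp.sin_port = htons(5000);			/* placeholder port */
	inet_pton(AF_INET, "239.1.2.3", &grp.sin_addr);	/* placeholder group */

	/* sk_mc_loop() now reports 0 for this socket, so ip_mc_output()
	 * skips the skb_clone + loopback branch for our datagrams */
	sendto(fd, "hi", 2, 0, (struct sockaddr *)&grp, sizeof(grp));
	close(fd);
	return 0;
}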


int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
			    net, sk, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Patrick McHardy | 38 | 42.22% | 3 | 16.67%
Linus Torvalds (pre-git) | 15 | 16.67% | 3 | 16.67%
Eric W. Biedermann | 10 | 11.11% | 3 | 16.67%
Eric Dumazet | 8 | 8.89% | 2 | 11.11%
Alexey Kuznetsov | 7 | 7.78% | 1 | 5.56%
Neil Horman | 5 | 5.56% | 1 | 5.56%
Ananda Raju | 2 | 2.22% | 1 | 5.56%
David S. Miller | 2 | 2.22% | 1 | 5.56%
Jan Engelhardt | 1 | 1.11% | 1 | 5.56%
Pavel Emelyanov | 1 | 1.11% | 1 | 5.56%
Herbert Xu | 1 | 1.11% | 1 | 5.56%
Total | 90 | 100.00% | 18 | 100.00%

/*
 * copy saddr and daddr, possibly using 64bit load/stores
 * Equivalent to :
 *   iph->saddr = fl4->saddr;
 *   iph->daddr = fl4->daddr;
 */
static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
	BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
		     offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
	memcpy(&iph->saddr, &fl4->saddr,
	       sizeof(fl4->saddr) + sizeof(fl4->daddr));
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Eric Dumazet | 76 | 100.00% | 1 | 100.00%
Total | 76 | 100.00% | 1 | 100.00%
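
The BUILD_BUG_ON above pins the precondition: daddr must immediately follow saddr in struct flowi4, so that one memcpy (which the compiler can lower to a single 8-byte store) writes both addresses. The destination fields in struct iphdr are adjacent in the same way. A compile-time sketch of the same check in userspace C11, using glibc's struct iphdr; the address values are opaque examples:

#include <netinet/ip.h>		/* glibc's struct iphdr */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* the copy is only valid if the two 4-byte addresses are contiguous,
 * exactly what the kernel's BUILD_BUG_ON enforces for struct flowi4 */
_Static_assert(offsetof(struct iphdr, daddr) ==
	       offsetof(struct iphdr, saddr) + sizeof(uint32_t),
	       "saddr/daddr must be contiguous");

int main(void)
{
	struct iphdr iph;
	uint32_t pair[2] = { 0xc0000201, 0xc0000202 };	/* example values */

	/* one 8-byte copy writes both saddr and daddr */
	memcpy(&iph.saddr, pair, sizeof(pair));
	printf("daddr = 0x%08x\n", iph.daddr);
	return 0;
}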

/* Note: skb->sk can be different from sk, in case of tunnels */
int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options_rcu *inet_opt;
	struct flowi4 *fl4;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	fl4 = &fl->u.ip4;
	rt = skb_rtable(skb);
	if (rt)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (!rt) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (inet_opt && inet_opt->opt.srr)
			daddr = inet_opt->opt.faddr;

		/* If this fails, retransmit mechanism of transport layer will
		 * keep trying until route appears or the connection times
		 * itself out.
		 */
		rt = ip_route_output_ports(net, fl4, sk,
					   daddr, inet->inet_saddr,
					   inet->inet_dport,
					   inet->inet_sport,
					   sk->sk_protocol,
					   RT_CONN_FLAGS(sk),
					   sk->sk_bound_dev_if);
		if (IS_ERR(rt))
			goto no_route;
		sk_setup_caps(sk, &rt->dst);
	}
	skb_dst_set_noref(skb, &rt->dst);

packet_routed:
	if (inet_opt && inet_opt->opt.is_strictroute && rt->rt_uses_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (inet_opt ? inet_opt->opt.optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->dst) && !skb->ignore_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->dst);
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);

	/* Transport layer set skb->h.foo itself. */

	if (inet_opt && inet_opt->opt.optlen) {
		iph->ihl += inet_opt->opt.optlen >> 2;
		ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_segs(net, skb, sk,
			     skb_shinfo(skb)->gso_segs ?: 1);

	/* TODO : should we use skb->sk here instead of sk ? */
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(net, sk, skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 206 | 42.56% | 3 | 5.56%
Linus Torvalds (pre-git) | 76 | 15.70% | 4 | 7.41%
Eric Dumazet | 73 | 15.08% | 8 | 14.81%
David S. Miller | 62 | 12.81% | 10 | 18.52%
Eric W. Biedermann | 18 | 3.72% | 4 | 7.41%
Arnaldo Carvalho de Melo | 16 | 3.31% | 5 | 9.26%
Tóth László Attila | 8 | 1.65% | 1 | 1.85%
Herbert Xu | 4 | 0.83% | 2 | 3.70%
Hideaki Yoshifuji / 吉藤英明 | 3 | 0.62% | 2 | 3.70%
Al Viro | 2 | 0.41% | 2 | 3.70%
Shan Wei | 2 | 0.41% | 1 | 1.85%
Venkat Yekkirala | 2 | 0.41% | 1 | 1.85%
Sridhar Samudrala | 2 | 0.41% | 1 | 1.85%
Denis V. Lunev | 2 | 0.41% | 2 | 3.70%
Ansis Atteka | 1 | 0.21% | 1 | 1.85%
Julian Anastasov | 1 | 0.21% | 1 | 1.85%
Ian Morris | 1 | 0.21% | 1 | 1.85%
Hannes Frederic Sowa | 1 | 0.21% | 1 | 1.85%
Atis Elsts | 1 | 0.21% | 1 | 1.85%
Américo Wang | 1 | 0.21% | 1 | 1.85%
Pavel Emelyanov | 1 | 0.21% | 1 | 1.85%
Linus Torvalds | 1 | 0.21% | 1 | 1.85%
Total | 484 | 100.00% | 54 | 100.00%

EXPORT_SYMBOL(ip_queue_xmit);
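
The line *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff)) initializes version, ihl, and tos with one 16-bit store: version occupies the top nibble of the first byte, ihl the bottom nibble, and tos the second byte. A small sketch verifying the packing, with an arbitrary example TOS value:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t tos = 0x10;	/* example TOS value */
	uint16_t word = htons((4 << 12) | (5 << 8) | tos);
	uint8_t *b = (uint8_t *)&word;

	/* b[0] = 0x45: version 4 in the high nibble, ihl 5 in the low;
	 * b[1] = tos. htons() puts the bytes in wire order on any host. */
	printf("%02x %02x\n", b[0], b[1]);	/* prints "45 10" */
	return 0;
}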
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_copy(to, from);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if IS_ENABLED(CONFIG_IP_VS)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 82 | 65.08% | 1 | 9.09%
Julian Anastasov | 14 | 11.11% | 1 | 9.09%
Thomas Graf | 8 | 6.35% | 1 | 9.09%
James Morris | 7 | 5.56% | 1 | 9.09%
Eric Dumazet | 5 | 3.97% | 2 | 18.18%
Patrick McHardy | 4 | 3.17% | 1 | 9.09%
Jozsef Kadlecsik | 3 | 2.38% | 2 | 18.18%
Yasuyuki Kozakai | 2 | 1.59% | 1 | 9.09%
Javier Martinez Canillas | 1 | 0.79% | 1 | 9.09%
Total | 126 | 100.00% | 11 | 100.00%


static int ip_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		       unsigned int mtu,
		       int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph = ip_hdr(skb);

	if ((iph->frag_off & htons(IP_DF)) == 0)
		return ip_do_fragment(net, sk, skb, output);

	if (unlikely(!skb->ignore_df ||
		     (IPCB(skb)->frag_max_size &&
		      IPCB(skb)->frag_max_size > mtu))) {
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(mtu));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	return ip_do_fragment(net, sk, skb, output);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 67 | 43.79% | 2 | 14.29%
Florian Westphal | 21 | 13.73% | 2 | 14.29%
Patrick McHardy | 18 | 11.76% | 1 | 7.14%
Eric W. Biedermann | 14 | 9.15% | 2 | 14.29%
Andy Zhou | 13 | 8.50% | 1 | 7.14%
David S. Miller | 9 | 5.88% | 1 | 7.14%
Wei Dong | 5 | 3.27% | 1 | 7.14%
Eric Dumazet | 2 | 1.31% | 1 | 7.14%
Hannes Frederic Sowa | 2 | 1.31% | 1 | 7.14%
Pavel Emelyanov | 1 | 0.65% | 1 | 7.14%
Américo Wang | 1 | 0.65% | 1 | 7.14%
Total | 153 | 100.00% | 14 | 100.00%

/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
		   int (*output)(struct net *, struct sock *, struct sk_buff *))
{
	struct iphdr *iph;
	int ptr;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	/* for offloaded checksums cleanup checksum before fragmentation */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (err = skb_checksum_help(skb)))
		goto fail;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	mtu = ip_skb_dst_mtu(sk, skb);
	if (IPCB(skb)->frag_max_size && IPCB(skb)->frag_max_size < mtu)
		mtu = IPCB(skb)->frag_max_size;

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = mtu - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
	ll_rs = LL_RESERVED_SPACE(rt->dst.dev);

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frag_list(skb)) {
		struct sk_buff *frag, *frag2;
		unsigned int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    ip_is_fragment(iph) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < ll_rs)
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen + ll_rs)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(net, sk, skb);

			if (!err)
				IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	iph = ip_hdr(skb);

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}

		/* Allocate buffer */
		skb2 = alloc_skb(len + hlen + ll_rs, GFP_ATOMIC);
		if (!skb2) {
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		if (IPCB(skb)->flags & IPSKB_FRAG_PMTU)
			iph->frag_off |= htons(IP_DF);

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(net, sk, skb2);
		if (err)
			goto fail;

		IP_INC_STATS(net, IPSTATS_MIB_FRAGCREATES);
	}
	consume_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(net, IPSTATS_MIB_FRAGFAILS);
	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 644 | 62.83% | 1 | 2.78%
Andy Zhou | 90 | 8.78% | 1 | 2.78%
Herbert Xu | 48 | 4.68% | 4 | 11.11%
Eric Dumazet | 47 | 4.59% | 2 | 5.56%
Florian Westphal | 47 | 4.59% | 1 | 2.78%
Arnaldo Carvalho de Melo | 31 | 3.02% | 8 | 22.22%
Hannes Frederic Sowa | 22 | 2.15% | 2 | 5.56%
Vasily Averin | 20 | 1.95% | 1 | 2.78%
Eric W. Biedermann | 19 | 1.85% | 2 | 5.56%
Wei Dong | 15 | 1.46% | 1 | 2.78%
David S. Miller | 10 | 0.98% | 3 | 8.33%
Pravin B Shelar | 7 | 0.68% | 1 | 2.78%
Joe Perches | 6 | 0.59% | 1 | 2.78%
Pavel Emelyanov | 6 | 0.59% | 1 | 2.78%
Hideaki Yoshifuji / 吉藤英明 | 4 | 0.39% | 1 | 2.78%
Shmulik Ladkani | 2 | 0.20% | 1 | 2.78%
Bart De Schuymer | 2 | 0.20% | 1 | 2.78%
Paul Gortmaker | 2 | 0.20% | 1 | 2.78%
Alexey Dobriyan | 1 | 0.10% | 1 | 2.78%
Patrick McHardy | 1 | 0.10% | 1 | 2.78%
Lucas De Marchi | 1 | 0.10% | 1 | 2.78%
Total | 1025 | 100.00% | 36 | 100.00%

EXPORT_SYMBOL(ip_do_fragment);
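
Both fragmentation paths rely on the same arithmetic: every fragment except the last must carry a multiple of 8 data bytes (len &= ~7), the 13-bit frag_off field stores the byte offset divided by 8 (offset >> 3), and IP_MF marks every fragment but the last. A standalone sketch of the slow-path slicing, assuming a 4000-byte payload, a 1500-byte MTU, and a 20-byte header with no options:

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 1500, hlen = 20;	/* assumed MTU, no IP options */
	unsigned int data_space = mtu - hlen;	/* 1480 data bytes per frame */
	unsigned int left = 4000, offset = 0;	/* example datagram payload */

	while (left > 0) {
		unsigned int len = left;

		if (len > data_space)
			len = data_space;
		if (len < left)		/* not the last piece: 8-byte align */
			len &= ~7u;

		/* frag_off on the wire is in 8-byte units; MF set unless last */
		printf("frag: offset=%u bytes (field=%u) len=%u MF=%d\n",
		       offset, offset >> 3, len, len < left);

		offset += len;
		left -= len;
	}
	return 0;
}

With these inputs the sketch emits two 1480-byte fragments with MF set and a final 1040-byte fragment with MF clear, mirroring what ip_do_fragment() would queue.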
int ip_generic_getfrag(void *from, char *to, int offset,
		       int len, int odd, struct sk_buff *skb)
{
	struct msghdr *msg = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (!copy_from_iter_full(to, len, &msg->msg_iter))
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (!csum_and_copy_from_iter_full(to, len, &csum, &msg->msg_iter))
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 92 | 82.88% | 2 | 25.00%
Al Viro | 18 | 16.22% | 5 | 62.50%
Patrick McHardy | 1 | 0.90% | 1 | 12.50%
Total | 111 | 100.00% | 8 | 100.00%

EXPORT_SYMBOL(ip_generic_getfrag);
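
csum_block_add() folds a block's partial checksum into a running sum; the odd argument matters because a block that starts at an odd byte offset contributes its bytes shifted by one position within the 16-bit checksum words, which is equivalent to byte-swapping its standalone sum. A userspace sketch of that identity, using big-endian word accumulation over hypothetical buffers:

#include <stdint.h>
#include <stdio.h>

static uint16_t fold(uint32_t sum)
{
	while (sum >> 16)		/* fold carries into the low 16 bits */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* RFC 1071 one's complement sum, big-endian word accumulation */
static uint16_t csum16(const uint8_t *p, size_t n)
{
	uint32_t sum = 0;

	for (; n > 1; p += 2, n -= 2)
		sum += ((uint32_t)p[0] << 8) | p[1];
	if (n)
		sum += (uint32_t)p[0] << 8;	/* pad the odd tail byte */
	return fold(sum);
}

int main(void)
{
	uint8_t whole[] = { 0x12, 0x34, 0x56, 0x78, 0x9a };
	uint16_t a = csum16(whole, 3);		/* first block, odd length */
	uint16_t b = csum16(whole + 3, 2);	/* second block, odd offset */
	uint16_t swapped = (uint16_t)((b << 8) | (b >> 8));

	/* combining at an odd offset requires byte-swapping the second sum,
	 * which is what csum_block_add's rotate-by-8 implements */
	printf("whole    = 0x%04x\n", csum16(whole, sizeof(whole)));
	printf("combined = 0x%04x\n", fold((uint32_t)a + swapped));
	return 0;
}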
static inline __wsum csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 40 | 75.47% | 1 | 11.11%
Linus Torvalds (pre-git) | 11 | 20.75% | 7 | 77.78%
Al Viro | 2 | 3.77% | 1 | 11.11%
Total | 53 | 100.00% | 9 | 100.00%


static int __ip_append_data(struct sock *sk,
			    struct flowi4 *fl4,
			    struct sk_buff_head *queue,
			    struct inet_cork *cork,
			    struct page_frag *pfrag,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = cork->opt;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen, maxnonfragsize;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt = (struct rtable *)cork->dst;
	u32 tskey = 0;

	skb = skb_peek_tail(queue);

	exthdrlen = !skb ? rt->dst.header_len : 0;
	mtu = cork->fragsize;
	if (cork->tx_flags & SKBTX_ANY_SW_TSTAMP &&
	    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
		tskey = sk->sk_tskey++;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + length > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->dst.dev->features & (NETIF_F_HW_CSUM | NETIF_F_IP_CSUM) &&
	    !(flags & MSG_MORE) &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	cork->length += length;

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = fraglen;

			alloclen += exthdrlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (refcount_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/* only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = cork->tx_flags;
			cork->tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen + exthdrlen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			if ((flags & MSG_CONFIRM) && !skb_prev)
				skb_set_dst_pending_confirm(skb, 1);

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			refcount_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 454 | 39.21% | 6 | 12.50%
Alexey Kuznetsov | 283 | 24.44% | 3 | 6.25%
Linus Torvalds (pre-git) | 114 | 9.84% | 8 | 16.67%
Eric Dumazet | 96 | 8.29% | 4 | 8.33%
Willem de Bruijn | 46 | 3.97% | 2 | 4.17%
David S. Miller | 30 | 2.59% | 4 | 8.33%
Hannes Frederic Sowa | 29 | 2.50% | 4 | 8.33%
Hideaki Yoshifuji / 吉藤英明 | 26 | 2.25% | 2 | 4.17%
Julian Anastasov | 18 | 1.55% | 1 | 2.08%
Steffen Klassert | 17 | 1.47% | 2 | 4.17%
Ananda Raju | 13 | 1.12% | 1 | 2.08%
Krishna Kumar | 10 | 0.86% | 1 | 2.08%
Patrick McHardy | 7 | 0.60% | 2 | 4.17%
Tom Herbert | 5 | 0.43% | 1 | 2.08%
Ian Morris | 2 | 0.17% | 1 | 2.08%
Elena Reshetova | 2 | 0.17% | 1 | 2.08%
Ian Campbell | 2 | 0.17% | 1 | 2.08%
Arnaldo Carvalho de Melo | 1 | 0.09% | 1 | 2.08%
Américo Wang | 1 | 0.09% | 1 | 2.08%
Linus Torvalds | 1 | 0.09% | 1 | 2.08%
Kostya Belezko | 1 | 0.09% | 1 | 2.08%
Total | 1158 | 100.00% | 48 | 100.00%


static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
			 struct ipcm_cookie *ipc, struct rtable **rtp)
{
	struct ip_options_rcu *opt;
	struct rtable *rt;

	/*
	 * setup for corking.
	 */
	opt = ipc->opt;
	if (opt) {
		if (!cork->opt) {
			cork->opt = kmalloc(sizeof(struct ip_options) + 40,
					    sk->sk_allocation);
			if (unlikely(!cork->opt))
				return -ENOBUFS;
		}
		memcpy(cork->opt, &opt->opt, sizeof(struct ip_options) + opt->opt.optlen);
		cork->flags |= IPCORK_OPT;
		cork->addr = ipc->addr;
	}
	rt = *rtp;
	if (unlikely(!rt))
		return -EFAULT;
	/*
	 * We steal reference to this route, caller should not release it
	 */
	*rtp = NULL;
	cork->fragsize = ip_sk_use_pmtu(sk) ?
			 dst_mtu(&rt->dst) : rt->dst.dev->mtu;
	cork->dst = &rt->dst;
	cork->length = 0;
	cork->ttl = ipc->ttl;
	cork->tos = ipc->tos;
	cork->priority = ipc->priority;
	cork->tx_flags = ipc->tx_flags;

	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 183 | 80.26% | 1 | 16.67%
Francesco Fusco | 24 | 10.53% | 1 | 16.67%
Hannes Frederic Sowa | 12 | 5.26% | 1 | 16.67%
Eric Dumazet | 6 | 2.63% | 1 | 16.67%
Ian Morris | 2 | 0.88% | 1 | 16.67%
Steffen Klassert | 1 | 0.44% | 1 | 16.67%
Total | 228 | 100.00% | 6 | 100.00%

/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece will be held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int err;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		err = ip_setup_cork(sk, &inet->cork.base, ipc, rtp);
		if (err)
			return err;
	} else {
		transhdrlen = 0;
	}

	return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base,
				sk_page_frag(sk), getfrag,
				from, length, transhdrlen, flags);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 134 | 80.24% | 2 | 16.67%
David S. Miller | 11 | 6.59% | 2 | 16.67%
Linus Torvalds (pre-git) | 10 | 5.99% | 5 | 41.67%
Alexey Kuznetsov | 6 | 3.59% | 1 | 8.33%
Eric Dumazet | 5 | 2.99% | 1 | 8.33%
Pavel Emelyanov | 1 | 0.60% | 1 | 8.33%
Total | 167 | 100.00% | 12 | 100.00%
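
ip_append_data() is the engine behind UDP corking: while a socket is corked, successive sends keep appending to the socket's pending queue, and the accumulated pieces go out as one datagram when the cork is removed. A hedged userspace illustration using the UDP_CORK socket option; the destination address and port are placeholders:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <netinet/udp.h>	/* UDP_CORK */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int on = 1, off = 0;
	struct sockaddr_in dst;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(9999);			/* placeholder port */
	inet_pton(AF_INET, "192.0.2.2", &dst.sin_addr);	/* placeholder addr */

	if (fd < 0 || connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("socket/connect");
		return 1;
	}

	/* while corked, each send lands on the socket's pending queue via
	 * ip_append_data(); no datagram leaves yet */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "part1-", 6, 0);
	send(fd, "part2", 5, 0);

	/* removing the cork pushes the pending frames: the pieces leave
	 * as a single 11-byte UDP datagram */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));
	close(fd);
	return 0;
}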


ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	struct inet_cork *cork;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap, maxnonfragsize;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	cork = &inet->cork.base;
	rt = (struct rtable *)cork->dst;
	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (!(rt->dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);
	mtu = cork->fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
	maxnonfragsize = ip_sk_ignore_df(sk) ? 0xFFFF : mtu;

	if (cork->length + size > maxnonfragsize - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       mtu - (opt ? opt->optlen : 0));
		return -EMSGSIZE;
	}

	skb = skb_peek_tail(&sk->sk_write_queue);
	if (!skb)
		return -EINVAL;

	cork->length += size;

	while (size > 0) {
		/* Check if the remaining data fits into current packet. */
		len = mtu - skb->len;
		if (len < size)
			len = maxfraglen - skb->len;

		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (len > size)
			len = size;

		if (skb_append_pagefrags(skb, page, offset, len)) {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		refcount_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	cork->length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 258 | 40.12% | 2 | 4.17%
Linus Torvalds (pre-git) | 145 | 22.55% | 14 | 29.17%
David S. Miller | 104 | 16.17% | 6 | 12.50%
Hannes Frederic Sowa | 30 | 4.67% | 5 | 10.42%
Hideaki Yoshifuji / 吉藤英明 | 28 | 4.35% | 3 | 6.25%
Herbert Xu | 23 | 3.58% | 3 | 6.25%
Arnaldo Carvalho de Melo | 22 | 3.42% | 6 | 12.50%
Patrick McHardy | 16 | 2.49% | 1 | 2.08%
Pavel Emelyanov | 5 | 0.78% | 1 | 2.08%
Ian Morris | 5 | 0.78% | 1 | 2.08%
Eric Dumazet | 3 | 0.47% | 2 | 4.17%
Américo Wang | 1 | 0.16% | 1 | 2.08%
Linus Torvalds | 1 | 0.16% | 1 | 2.08%
Al Viro | 1 | 0.16% | 1 | 2.08%
Elena Reshetova | 1 | 0.16% | 1 | 2.08%
Total | 643 | 100.00% | 48 | 100.00%


static void ip_cork_release(struct inet_cork *cork)
{
	cork->flags &= ~IPCORK_OPT;
	kfree(cork->opt);
	cork->opt = NULL;
	dst_release(cork->dst);
	cork->dst = NULL;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Pavel Emelyanov | 34 | 77.27% | 1 | 33.33%
Herbert Xu | 7 | 15.91% | 1 | 33.33%
Hideaki Yoshifuji / 吉藤英明 | 3 | 6.82% | 1 | 33.33%
Total | 44 | 100.00% | 3 | 100.00%

/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
struct sk_buff *__ip_make_skb(struct sock *sk,
			      struct flowi4 *fl4,
			      struct sk_buff_head *queue,
			      struct inet_cork *cork)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)cork->dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;

	skb = __skb_dequeue(queue);
	if (!skb)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter, what transforms
	 * how transforms change size of the packet, it will come out.
	 */
	skb->ignore_df = ip_sk_ignore_df(sk);

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If ignore_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    inet->pmtudisc == IP_PMTUDISC_PROBE ||
	    (skb->len <= dst_mtu(&rt->dst) &&
	     ip_dont_fragment(sk, &rt->dst)))
		df = htons(IP_DF);

	if (cork->flags & IPCORK_OPT)
		opt = cork->opt;

	if (cork->ttl != 0)
		ttl = cork->ttl;
	else if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->dst);

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = 5;
	iph->tos = (cork->tos != -1) ? cork->tos : inet->tos;
	iph->frag_off = df;
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	ip_copy_addrs(iph, fl4);
	ip_select_ident(net, skb, sk);

	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, cork->addr, rt, 0);
	}

	skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	cork->dst = NULL;
	skb_dst_set(skb, &rt->dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	ip_cork_release(cork);
out:
	return skb;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 173 | 33.21% | 3 | 6.98%
Linus Torvalds (pre-git) | 82 | 15.74% | 7 | 16.28%
David S. Miller | 64 | 12.28% | 4 | 9.30%
Francesco Fusco | 41 | 7.87% | 1 | 2.33%
Herbert Xu | 40 | 7.68% | 3 | 6.98%
David L Stevens | 25 | 4.80% | 1 | 2.33%
Eric Dumazet | 20 | 3.84% | 4 | 9.30%
Linus Torvalds | 14 | 2.69% | 4 | 9.30%
Hannes Frederic Sowa | 12 | 2.30% | 3 | 6.98%
Arnaldo Carvalho de Melo | 12 | 2.30% | 5 | 11.63%
Pavel Emelyanov | 12 | 2.30% | 1 | 2.33%
Tóth László Attila | 7 | 1.34% | 1 | 2.33%
Hideaki Yoshifuji / 吉藤英明 | 6 | 1.15% | 1 | 2.33%
Ian Morris | 5 | 0.96% | 1 | 2.33%
Ansis Atteka | 4 | 0.77% | 2 | 4.65%
Américo Wang | 3 | 0.58% | 1 | 2.33%
Alexey Dobriyan | 1 | 0.19% | 1 | 2.33%
Total | 521 | 100.00% | 43 | 100.00%


int ip_send_skb(struct net *net, struct sk_buff *skb)
{
	int err;

	err = ip_local_out(net, skb->sk, skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	}

	return err;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 21 | 32.81% | 1 | 10.00%
Herbert Xu | 17 | 26.56% | 2 | 20.00%
Linus Torvalds (pre-git) | 12 | 18.75% | 2 | 20.00%
Eric W. Biedermann | 6 | 9.38% | 2 | 20.00%
Eric Dumazet | 5 | 7.81% | 1 | 10.00%
Pavel Emelyanov | 2 | 3.12% | 1 | 10.00%
Hideaki Yoshifuji / 吉藤英明 | 1 | 1.56% | 1 | 10.00%
Total | 64 | 100.00% | 10 | 100.00%


int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4)
{
	struct sk_buff *skb;

	skb = ip_finish_skb(sk, fl4);
	if (!skb)
		return 0;

	/* Netfilter gets the whole, not yet fragmented skb. */
	return ip_send_skb(sock_net(sk), skb);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 37 | 75.51% | 2 | 50.00%
David S. Miller | 7 | 14.29% | 1 | 25.00%
Eric Dumazet | 5 | 10.20% | 1 | 25.00%
Total | 49 | 100.00% | 4 | 100.00%

/*
 *	Throw away all pending data on the socket.
 */
static void __ip_flush_pending_frames(struct sock *sk,
				      struct sk_buff_head *queue,
				      struct inet_cork *cork)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(cork);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 25 | 51.02% | 1 | 20.00%
Herbert Xu | 17 | 34.69% | 1 | 20.00%
Linus Torvalds (pre-git) | 6 | 12.24% | 2 | 40.00%
Pavel Emelyanov | 1 | 2.04% | 1 | 20.00%
Total | 49 | 100.00% | 5 | 100.00%


void ip_flush_pending_frames(struct sock *sk)
{
	__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 21 | 70.00% | 1 | 20.00%
Pavel Emelyanov | 3 | 10.00% | 1 | 20.00%
Alexey Kuznetsov | 3 | 10.00% | 1 | 20.00%
David S. Miller | 2 | 6.67% | 1 | 20.00%
Linus Torvalds (pre-git) | 1 | 3.33% | 1 | 20.00%
Total | 30 | 100.00% | 5 | 100.00%


struct sk_buff *ip_make_skb(struct sock *sk,
			    struct flowi4 *fl4,
			    int getfrag(void *from, char *to, int offset,
					int len, int odd, struct sk_buff *skb),
			    void *from, int length, int transhdrlen,
			    struct ipcm_cookie *ipc, struct rtable **rtp,
			    unsigned int flags)
{
	struct inet_cork cork;
	struct sk_buff_head queue;
	int err;

	if (flags & MSG_PROBE)
		return NULL;

	__skb_queue_head_init(&queue);

	cork.flags = 0;
	cork.addr = 0;
	cork.opt = NULL;
	err = ip_setup_cork(sk, &cork, ipc, rtp);
	if (err)
		return ERR_PTR(err);

	err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
			       from, length, transhdrlen, flags);
	if (err) {
		__ip_flush_pending_frames(sk, &queue, &cork);
		return ERR_PTR(err);
	}

	return __ip_make_skb(sk, fl4, &queue, &cork);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Herbert Xu | 171 | 84.24% | 1 | 16.67%
David S. Miller | 27 | 13.30% | 4 | 66.67%
Eric Dumazet | 5 | 2.46% | 1 | 16.67%
Total | 203 | 100.00% | 6 | 100.00%

/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 45 | 70.31% | 1 | 25.00%
Alexey Kuznetsov | 18 | 28.12% | 2 | 50.00%
Al Viro | 1 | 1.56% | 1 | 25.00%
Total | 64 | 100.00% | 4 | 100.00%

/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send some TCP resets/acks so far.
 */
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
			   const struct ip_options *sopt,
			   __be32 daddr, __be32 saddr,
			   const struct ip_reply_arg *arg,
			   unsigned int len)
{
	struct ip_options_data replyopts;
	struct ipcm_cookie ipc;
	struct flowi4 fl4;
	struct rtable *rt = skb_rtable(skb);
	struct net *net = sock_net(sk);
	struct sk_buff *nskb;
	int err;
	int oif;

	if (__ip_options_echo(net, &replyopts.opt.opt, skb, sopt))
		return;

	ipc.addr = daddr;
	ipc.opt = NULL;
	ipc.tx_flags = 0;
	ipc.ttl = 0;
	ipc.tos = -1;

	if (replyopts.opt.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (replyopts.opt.opt.srr)
			daddr = replyopts.opt.opt.faddr;
	}

	oif = arg->bound_dev_if;
	if (!oif && netif_index_is_l3_master(net, skb->skb_iif))
		oif = skb->skb_iif;

	flowi4_init_output(&fl4, oif,
			   IP4_REPLY_MARK(net, skb->mark),
			   RT_TOS(arg->tos),
			   RT_SCOPE_UNIVERSE, ip_hdr(skb)->protocol,
			   ip_reply_arg_flowi_flags(arg),
			   daddr, saddr,
			   tcp_hdr(skb)->source, tcp_hdr(skb)->dest,
			   arg->uid);
	security_skb_classify_flow(skb, flowi4_to_flowi(&fl4));
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt))
		return;

	inet_sk(sk)->tos = arg->tos;

	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	sk->sk_sndbuf = sysctl_wmem_default;
	sk->sk_mark = fl4.flowi4_mark;
	err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
			     len, 0, &ipc, &rt, MSG_DONTWAIT);
	if (unlikely(err)) {
		ip_flush_pending_frames(sk);
		goto out;
	}

	nskb = skb_peek(&sk->sk_write_queue);
	if (nskb) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(nskb) +
			  arg->csumoffset) = csum_fold(csum_add(nskb->csum,
								arg->csum));
		nskb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk, &fl4);
	}
out:
	ip_rt_put(rt);
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 137 | 29.78% | 8 | 17.02%
Eric Dumazet | 67 | 14.57% | 7 | 14.89%
David S. Miller | 63 | 13.70% | 9 | 19.15%
Alexey Kuznetsov | 62 | 13.48% | 2 | 4.26%
David Ahern | 30 | 6.52% | 3 | 6.38%
Vasily Averin | 24 | 5.22% | 1 | 2.13%
Arnaldo Carvalho de Melo | 14 | 3.04% | 4 | 8.51%
Francesco Fusco | 13 | 2.83% | 1 | 2.13%
Lorenzo Colitti | 12 | 2.61% | 2 | 4.26%
Patrick McHardy | 8 | 1.74% | 1 | 2.13%
Pau Espin Pedrol | 8 | 1.74% | 1 | 2.13%
Venkat Yekkirala | 7 | 1.52% | 1 | 2.13%
Patrick Ohly | 5 | 1.09% | 1 | 2.13%
Changli Gao | 2 | 0.43% | 1 | 2.13%
Denis V. Lunev | 2 | 0.43% | 1 | 2.13%
KOVACS Krisztian | 2 | 0.43% | 1 | 2.13%
Paolo Abeni | 2 | 0.43% | 1 | 2.13%
Al Viro | 1 | 0.22% | 1 | 2.13%
Oliver Hartkopp | 1 | 0.22% | 1 | 2.13%
Total | 460 | 100.00% | 47 | 100.00%


void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST)
	igmp_mc_init();
#endif
}

Contributors

Person | Tokens | Prop | Commits | CommitProp
Linus Torvalds (pre-git) | 16 | 64.00% | 5 | 55.56%
Stephen Hemminger | 5 | 20.00% | 1 | 11.11%
David L Stevens | 2 | 8.00% | 1 | 11.11%
Hideaki Yoshifuji / 吉藤英明 | 1 | 4.00% | 1 | 11.11%
Américo Wang | 1 | 4.00% | 1 | 11.11%
Total | 25 | 100.00% | 9 | 100.00%


Overall Contributors

Person | Tokens | Prop | Commits | CommitProp
Alexey Kuznetsov | 2089 | 27.79% | 7 | 2.38%
Herbert Xu | 1247 | 16.59% | 17 | 5.78%
Linus Torvalds (pre-git) | 1158 | 15.41% | 32 | 10.88%
David S. Miller | 567 | 7.54% | 38 | 12.93%
Eric Dumazet | 533 | 7.09% | 26 | 8.84%
Florian Westphal | 250 | 3.33% | 4 | 1.36%
Patrick McHardy | 173 | 2.30% | 13 | 4.42%
Eric W. Biedermann | 162 | 2.16% | 17 | 5.78%
Andy Zhou | 135 | 1.80% | 1 | 0.34%
Arnaldo Carvalho de Melo | 128 | 1.70% | 20 | 6.80%
Hannes Frederic Sowa | 109 | 1.45% | 10 | 3.40%
Daniel Mack | 88 | 1.17% | 1 | 0.34%
Francesco Fusco | 78 | 1.04% | 1 | 0.34%
Hideaki Yoshifuji / 吉藤英明 | 74 | 0.98% | 8 | 2.72%
Pavel Emelyanov | 70 | 0.93% | 3 | 1.02%
Julian Anastasov | 53 | 0.71% | 5 | 1.70%
David Ahern | 51 | 0.68% | 4 | 1.36%
Willem de Bruijn | 46 | 0.61% | 2 | 0.68%
Vasily Averin | 44 | 0.59% | 2 | 0.68%
Mitsuru Chinen | 35 | 0.47% | 1 | 0.34%
Roopa Prabhu | 35 | 0.47% | 1 | 0.34%
Al Viro | 30 | 0.40% | 11 | 3.74%
David L Stevens | 27 | 0.36% | 2 | 0.68%
Linus Torvalds | 26 | 0.35% | 6 | 2.04%
Neil Horman | 24 | 0.32% | 1 | 0.34%
Tóth László Attila | 23 | 0.31% | 1 | 0.34%
Wei Dong | 20 | 0.27% | 2 | 0.68%
Steffen Klassert | 18 | 0.24% | 2 | 0.68%
Ian Morris | 16 | 0.21% | 1 | 0.34%
Ananda Raju | 15 | 0.20% | 1 | 0.34%
Konstantin Khlebnikov | 14 | 0.19% | 1 | 0.34%
Lorenzo Colitti | 12 | 0.16% | 2 | 0.68%
Krishna Kumar | 10 | 0.13% | 1 | 0.34%
Joe Perches | 10 | 0.13% | 2 | 0.68%
Eli Cooper | 9 | 0.12% | 1 | 0.34%
Venkat Yekkirala | 9 | 0.12% | 1 | 0.34%
Thomas Graf | 8 | 0.11% | 1 | 0.34%
Américo Wang | 8 | 0.11% | 2 | 0.68%
Pau Espin Pedrol | 8 | 0.11% | 1 | 0.34%
Jamal Hadi Salim | 7 | 0.09% | 1 | 0.34%
Pravin B Shelar | 7 | 0.09% | 1 | 0.34%
James Morris | 7 | 0.09% | 1 | 0.34%
Stephen Hemminger | 6 | 0.08% | 2 | 0.68%
Jan Engelhardt | 5 | 0.07% | 1 | 0.34%
Patrick Ohly | 5 | 0.07% | 1 | 0.34%
Tom Herbert | 5 | 0.07% | 1 | 0.34%
Bart De Schuymer | 5 | 0.07% | 1 | 0.34%
Ansis Atteka | 5 | 0.07% | 2 | 0.68%
Denis V. Lunev | 4 | 0.05% | 3 | 1.02%
Vasiliy Kulikov | 4 | 0.05% | 1 | 0.34%
Shmulik Ladkani | 4 | 0.05% | 1 | 0.34%
Tejun Heo | 3 | 0.04% | 1 | 0.34%
Jozsef Kadlecsik | 3 | 0.04% | 2 | 0.68%
Marcelo Ricardo Leitner | 3 | 0.04% | 1 | 0.34%
Elena Reshetova | 3 | 0.04% | 1 | 0.34%
Alexey Dobriyan | 2 | 0.03% | 2 | 0.68%
KOVACS Krisztian | 2 | 0.03% | 1 | 0.34%
Paul Gortmaker | 2 | 0.03% | 1 | 0.34%
Yasuyuki Kozakai | 2 | 0.03% | 1 | 0.34%
Lance Richardson | 2 | 0.03% | 2 | 0.68%
Sridhar Samudrala | 2 | 0.03% | 1 | 0.34%
Shan Wei | 2 | 0.03% | 1 | 0.34%
Ian Campbell | 2 | 0.03% | 1 | 0.34%
Changli Gao | 2 | 0.03% | 1 | 0.34%
Adrian Bunk | 2 | 0.03% | 2 | 0.68%
Paolo Abeni | 2 | 0.03% | 1 | 0.34%
Lucas De Marchi | 1 | 0.01% | 1 | 0.34%
Kostya Belezko | 1 | 0.01% | 1 | 0.34%
Chuck Lever | 1 | 0.01% | 1 | 0.34%
Atis Elsts | 1 | 0.01% | 1 | 0.34%
Octavian Purdila | 1 | 0.01% | 1 | 0.34%
Oliver Hartkopp | 1 | 0.01% | 1 | 0.34%
Javier Martinez Canillas | 1 | 0.01% | 1 | 0.34%
Total | 7517 | 100.00% | 294 | 100.00%