cregit-Linux: how code gets into the kernel

Release 4.11 net/ipv4/ip_fragment.c

Directory: net/ipv4
/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              The IP fragmentation functionality.
 *
 * Authors:     Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG>
 *              Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 * Fixes:
 *              Alan Cox        :       Split from ip.c , see ip_input.c for history.
 *              David S. Miller :       Begin massive cleanup...
 *              Andi Kleen      :       Add sysctls.
 *              xxxx            :       Overlapfrag bug.
 *              Ultima          :       ip_expire() kernel panic.
 *              Bill Hawes      :       Frag accounting and evictor fixes.
 *              John McDonald   :       0 length frag bug.
 *              Alexey Kuznetsov:       SMP races, threading, cleanup.
 *              Patrick McHardy :       LRU queue of frag heads for evictor.
 */


#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/jiffies.h>
#include <linux/skbuff.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/icmp.h>
#include <linux/netdevice.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <net/route.h>
#include <net/dst.h>
#include <net/sock.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <net/inet_frag.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/inet.h>
#include <linux/netfilter_ipv4.h>
#include <net/inet_ecn.h>
#include <net/l3mdev.h>

/* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6
 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c
 * as well. Or notify me, at least. --ANK
 */

static const char ip_frag_cache_name[] = "ip4-frags";


struct ipfrag_skb_cb
{
	struct inet_skb_parm	h;
	int			offset;
};

#define FRAG_CB(skb)	((struct ipfrag_skb_cb *)((skb)->cb))
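
The FRAG_CB() cast is the usual sk_buff control-buffer idiom: ipfrag_skb_cb must fit inside the fixed skb->cb[] scratch area. A minimal userspace sketch of the same pattern, with hypothetical fake_skb/frag_cb names standing in for the kernel types:

/* Userspace sketch (not kernel code) of the FRAG_CB() pattern:
 * per-packet scratch state lives in a fixed char array inside the
 * packet struct and is reinterpreted through a cast. */
#include <stdio.h>

struct fake_skb { char cb[48]; };	/* stands in for sk_buff::cb */
struct frag_cb  { int offset; };	/* per-fragment scratch data */

#define FAKE_FRAG_CB(skb) ((struct frag_cb *)((skb)->cb))

int main(void)
{
	struct fake_skb skb = { { 0 } };

	FAKE_FRAG_CB(&skb)->offset = 1480;	/* record reassembly offset */
	printf("offset=%d\n", FAKE_FRAG_CB(&skb)->offset);
	return 0;
}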

/* Describe an entry in the "incomplete datagrams" queue. */

struct ipq {
	struct inet_frag_queue q;

	u32		user;
	__be32		saddr;
	__be32		daddr;
	__be16		id;
	u8		protocol;
	u8		ecn; /* RFC3168 support */
	u16		max_df_size; /* largest frag with DF set seen */
	int             iif;
	int             vif;   /* L3 master device index */
	unsigned int    rid;
	struct inet_peer *peer;
};


static u8 ip4_frag_ecn(u8 tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Eric Dumazet                  18  100.00%        2     100.00%
Total                         18  100.00%        2     100.00%
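
ip4_frag_ecn() maps the two ECN bits of the TOS byte to a one-hot bitmap so the ECN states of all fragments can be OR-ed into qp->ecn and validated once at reassembly (via ip_frag_ecn_table, used in ip_frag_reasm() below). A standalone sketch of the mapping, assuming the kernel's INET_ECN_MASK value of 3:

/* Standalone sketch: map the 2 ECN bits of tos to a one-hot bitmap,
 * mirroring ip4_frag_ecn() above. */
#include <stdio.h>

#define INET_ECN_MASK 3	/* low two bits of the TOS byte */

static unsigned char frag_ecn_bit(unsigned char tos)
{
	return 1 << (tos & INET_ECN_MASK);
}

int main(void)
{
	/* tos low bits: 0 = Not-ECT, 1 = ECT(1), 2 = ECT(0), 3 = CE */
	for (int tos = 0; tos < 4; tos++)
		printf("ecn=%d -> bitmap 0x%x\n", tos,
		       (unsigned)frag_ecn_bit(tos));
	return 0;
}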

static struct inet_frags ip4_frags;

int ip_frag_mem(struct net *net)
{
	return sum_frag_mem_limit(&net->ipv4.frags);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Pavel Emelyanov               20   95.24%        2      66.67%
Jesper Dangaard Brouer         1    4.76%        1      33.33%
Total                         21  100.00%        3     100.00%

static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev);

struct ip4_create_arg {
	struct iphdr *iph;
	u32 user;
	int vif;
};
static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
{
	net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
	return jhash_3words((__force u32)id << 16 | prot,
			    (__force u32)saddr, (__force u32)daddr,
			    ip4_frags.rnd);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)      21   32.81%        2      33.33%
Hannes Frederic Sowa          15   23.44%        1      16.67%
David S. Miller               13   20.31%        1      16.67%
Al Viro                       12   18.75%        1      16.67%
Pavel Emelyanov                3    4.69%        1      16.67%
Total                         64  100.00%        6     100.00%
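
ipqhashfn() folds the 16-bit IP ID and the 8-bit protocol into a single 32-bit word before mixing it with the two addresses. jhash_3words() and net_get_random_once() are kernel-internal, so this sketch only demonstrates the packing step, with hypothetical values shown in host byte order for readability:

/* Sketch of the key packing done by ipqhashfn() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t id = 0xbeef;	/* IP identification field */
	uint8_t prot = 17;	/* IPPROTO_UDP */
	uint32_t word = (uint32_t)id << 16 | prot;

	printf("packed key word: 0x%08x\n", word);	/* 0xbeef0011 */
	return 0;
}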


static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
{
	const struct ipq *ipq;

	ipq = container_of(q, struct ipq, q);
	return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
David S. Miller               23   45.10%        1      25.00%
Pavel Emelyanov               21   41.18%        1      25.00%
Yasuyuki Kozakai               5    9.80%        1      25.00%
Florian Westphal               2    3.92%        1      25.00%
Total                         51  100.00%        4     100.00%


static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
{
	const struct ipq *qp;
	const struct ip4_create_arg *arg = a;

	qp = container_of(q, struct ipq, q);
	return	qp->id == arg->iph->id &&
		qp->saddr == arg->iph->saddr &&
		qp->daddr == arg->iph->daddr &&
		qp->protocol == arg->iph->protocol &&
		qp->user == arg->user &&
		qp->vif == arg->vif;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Pavel Emelyanov               87   87.00%        1      25.00%
David Ahern                    8    8.00%        1      25.00%
Florian Westphal               4    4.00%        1      25.00%
Eric Dumazet                   1    1.00%        1      25.00%
Total                        100  100.00%        4     100.00%


static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
	struct ipq *qp = container_of(q, struct ipq, q);
	struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
					       frags);
	struct net *net = container_of(ipv4, struct net, ipv4);

	const struct ip4_create_arg *arg = a;

	qp->protocol = arg->iph->protocol;
	qp->id = arg->iph->id;
	qp->ecn = ip4_frag_ecn(arg->iph->tos);
	qp->saddr = arg->iph->saddr;
	qp->daddr = arg->iph->daddr;
	qp->vif = arg->vif;
	qp->user = arg->user;
	qp->peer = q->net->max_dist ?
		inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
		NULL;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Pavel Emelyanov              102   58.96%        1      11.11%
Gao Feng                      34   19.65%        1      11.11%
Eric Dumazet                  13    7.51%        1      11.11%
David Ahern                   12    6.94%        2      22.22%
Nikolay Borisov                5    2.89%        1      11.11%
David S. Miller                5    2.89%        2      22.22%
Florian Westphal               2    1.16%        1      11.11%
Total                        173  100.00%        9     100.00%


static void ip4_frag_free(struct inet_frag_queue *q)
{
	struct ipq *qp;

	qp = container_of(q, struct ipq, q);
	if (qp->peer)
		inet_putpeer(qp->peer);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Pavel Emelyanov               25   60.98%        1      20.00%
Linus Torvalds (pre-git)       9   21.95%        2      40.00%
David S. Miller                6   14.63%        1      20.00%
Linus Torvalds                 1    2.44%        1      20.00%
Total                         41  100.00%        5     100.00%

/* Destruction primitives. */
static void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)      14   63.64%        2      40.00%
Pavel Emelyanov                8   36.36%        3      60.00%
Total                         22  100.00%        5     100.00%

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)      18   81.82%        2      50.00%
Pavel Emelyanov                4   18.18%        2      50.00%
Total                         22  100.00%        4     100.00%


static bool frag_expire_skip_icmp(u32 user)
{
	return user == IP_DEFRAG_AF_PACKET ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
					 __IP_DEFRAG_CONNTRACK_IN_END) ||
	       ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_BRIDGE_IN,
					 __IP_DEFRAG_CONNTRACK_BRIDGE_IN);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Andy Zhou                     32  100.00%        2     100.00%
Total                         32  100.00%        2     100.00%

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	rcu_read_lock();
	spin_lock(&qp->q.lock);

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);
	__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);

	if (!inet_frag_evicting(&qp->q)) {
		struct sk_buff *clone, *head = qp->q.fragments;
		const struct iphdr *iph;
		int err;

		__IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);

		if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
			goto out;

		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out;

		/* skb has no dst, perform route lookup again */
		iph = ip_hdr(head);
		err = ip_route_input_noref(head, iph->daddr, iph->saddr,
					   iph->tos, head->dev);
		if (err)
			goto out;

		/* Only an end host needs to send an ICMP
		 * "Fragment Reassembly Timeout" message, per RFC792.
		 */
		if (frag_expire_skip_icmp(qp->user) &&
		    (skb_rtable(head)->rt_type != RTN_LOCAL))
			goto out;

		clone = skb_clone(head, GFP_ATOMIC);

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		if (clone) {
			spin_unlock(&qp->q.lock);
			icmp_send(clone, ICMP_TIME_EXCEEDED,
				  ICMP_EXC_FRAGTIME, 0);
			consume_skb(clone);
			goto out_rcu_unlock;
		}
	}
out:
	spin_unlock(&qp->q.lock);
out_rcu_unlock:
	rcu_read_unlock();
	ipq_put(qp);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)      95   30.55%        7      24.14%
Eric Dumazet                  73   23.47%        5      17.24%
Shan Wei                      48   15.43%        1       3.45%
Pavel Emelyanov               47   15.11%        4      13.79%
Nikolay Aleksandrov           36   11.58%        3      10.34%
Andy Zhou                      3    0.96%        1       3.45%
Vasily Averin                  2    0.64%        1       3.45%
David S. Miller                2    0.64%        2       6.90%
Denis V. Lunev                 1    0.32%        1       3.45%
Eric W. Biedermann             1    0.32%        1       3.45%
Joe Perches                    1    0.32%        1       3.45%
Patrick McHardy                1    0.32%        1       3.45%
Hideaki Yoshifuji / 吉藤英明      1    0.32%        1       3.45%
Total                        311  100.00%       29     100.00%
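
ip_expire() recovers its ipq from the timer argument via container_of(), the idiom used throughout this file. A userspace sketch of how container_of() walks back from a member pointer to the enclosing struct (entry/queue are illustrative names; the kernel's real macro adds type checking):

#include <stdio.h>
#include <stddef.h>

/* Simplified container_of(): subtract the member's offset from the
 * member pointer to get the address of the enclosing struct. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct queue { int id; };
struct entry { int user; struct queue q; };	/* illustrative */

int main(void)
{
	struct entry e = { .user = 7, .q = { .id = 42 } };
	struct queue *qp = &e.q;
	struct entry *back = container_of(qp, struct entry, q);

	printf("user=%d id=%d\n", back->user, back->q.id);
	return 0;
}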

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 */
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
			   u32 user, int vif)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;
	arg.vif = vif;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Pavel Emelyanov               67   52.76%        4      36.36%
Linus Torvalds (pre-git)      23   18.11%        3      27.27%
Hannes Frederic Sowa          16   12.60%        1       9.09%
David Ahern                    9    7.09%        1       9.09%
Patrick McHardy                7    5.51%        1       9.09%
Herbert Xu                     5    3.94%        1       9.09%
Total                        127  100.00%       11     100.00%

/* Is the fragment too far ahead to be part of ipq? */
static int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = qp->q.net->max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Herbert Xu                    94   72.87%        1      20.00%
Pavel Emelyanov               27   20.93%        2      40.00%
Nikolay Borisov                7    5.43%        1      20.00%
Eric Dumazet                   1    0.78%        1      20.00%
Total                        129  100.00%        5     100.00%
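
The (end - start) > max test relies on unsigned wrap-around: the per-peer rid counter may overflow, but the unsigned difference still counts how many fragments from that peer arrived since this queue last matched. A small sketch with hypothetical values:

/* Sketch: wrap-safe distance between two samples of a 32-bit counter,
 * as in ip_frag_too_far() above. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xfffffff0u;	/* rid when this queue last hit */
	uint32_t end   = 0x00000010u;	/* rid now, after the counter wrapped */
	uint32_t max   = 64;		/* e.g. the ipfrag_max_dist sysctl */

	printf("distance=%u too_far=%d\n",
	       end - start, (end - start) > max);	/* distance=32 */
	return 0;
}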


static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;
	unsigned int sum_truesize = 0;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	} while (fp);
	sub_frag_mem_limit(qp->q.net, sum_truesize);

	qp->q.flags = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->q.fragments_tail = NULL;
	qp->iif = 0;
	qp->ecn = 0;

	return 0;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Herbert Xu                   106   63.47%        1      10.00%
Jesper Dangaard Brouer        22   13.17%        1      10.00%
Pavel Emelyanov               22   13.17%        4      40.00%
Changli Gao                    8    4.79%        1      10.00%
Eric Dumazet                   6    3.59%        1      10.00%
Florian Westphal               2    1.20%        1      10.00%
Nikolay Aleksandrov            1    0.60%        1      10.00%
Total                        167  100.00%       10     100.00%

/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	unsigned int fragsize;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;
	u8 ecn;

	if (qp->q.flags & INET_FRAG_COMPLETE)
		goto err;

	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	ecn = ip4_frag_ecn(ip_hdr(skb)->tos);
	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - skb_network_offset(skb) - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.flags |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		if (end & 7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.flags & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	if (end == offset)
		goto err;

	err = -ENOMEM;
	if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = qp->q.fragments_tail;
	if (!prev || FRAG_CB(prev)->offset < offset) {
		next = NULL;
		goto found;
	}
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

found:
	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			sub_frag_mem_limit(qp->q.net, free_it->truesize);
			kfree_skb(free_it);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (!next)
		qp->q.fragments_tail = skb;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	qp->ecn |= ecn;
	add_frag_mem_limit(qp->q.net, skb->truesize);
	if (offset == 0)
		qp->q.flags |= INET_FRAG_FIRST_IN;

	fragsize = skb->len + ihl;

	if (fragsize > qp->q.max_size)
		qp->q.max_size = fragsize;

	if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
	    fragsize > qp->max_df_size)
		qp->max_df_size = fragsize;

	if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len) {
		unsigned long orefdst = skb->_skb_refdst;

		skb->_skb_refdst = 0UL;
		err = ip_frag_reasm(qp, prev, dev);
		skb->_skb_refdst = orefdst;
		return err;
	}

	skb_dst_drop(skb);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}

Contributors

Person                    Tokens     Prop  Commits  CommitProp
Linus Torvalds (pre-git)     501   54.16%       13      36.11%
Herbert Xu                   126   13.62%        2       5.56%
Eric Dumazet                  57    6.16%        3       8.33%
Pavel Emelyanov               48    5.19%        2       5.56%
Changli Gao                   46    4.97%        1       2.78%
Linus Torvalds                41    4.43%        1       2.78%
Florian Westphal              38    4.11%        2       5.56%
Patrick McHardy               23    2.49%        2       5.56%
Jesper Dangaard Brouer        12    1.30%        1       2.78%
Edward Hyunkoo Jee            10    1.08%        1       2.78%
Joe Perches                    7    0.76%        1       2.78%
Nikolay Aleksandrov            6    0.65%        1       2.78%
Arnaldo Carvalho de Melo       6    0.65%        2       5.56%
Justin P. Mattock              1    0.11%        1       2.78%
Stephen Hemminger              1    0.11%        1       2.78%
Peter Zijlstra                 1    0.11%        1       2.78%
Ian Morris                     1    0.11%        1       2.78%
Total                        925  100.00%       36     100.00%
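
At the top of ip_frag_queue(), both the flag bits and the byte offset come out of the 16-bit frag_off field: the low 13 bits count 8-byte units, the high bits carry DF/MF. A standalone sketch of that extraction (frag_off shown in host order for clarity; the kernel converts with ntohs() first):

/* Sketch: split an IPv4 frag_off field into flags and a byte offset,
 * as at the top of ip_frag_queue() above. */
#include <stdio.h>
#include <stdint.h>

#define IP_OFFSET 0x1FFF	/* low 13 bits: offset in 8-byte units */
#define IP_MF     0x2000	/* "more fragments" */
#define IP_DF     0x4000	/* "don't fragment" */

int main(void)
{
	uint16_t frag_off = 0x2005;	/* hypothetical: MF set, offset 5 */
	int flags  = frag_off & ~IP_OFFSET;
	int offset = (frag_off & IP_OFFSET) << 3;	/* 8-byte chunks */

	printf("MF=%d DF=%d offset=%d bytes\n",
	       !!(flags & IP_MF), !!(flags & IP_DF), offset);	/* 40 bytes */
	return 0;
}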

/* Build a new IP datagram from all its fragments. */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;
	u8 ecn;

	ipq_kill(qp);

	ecn = ip_frag_ecn_table[qp->ecn];
	if (unlikely(ecn == 0xff)) {
		err = -EINVAL;
		goto out_fail;
	}
	/* Make the one we just received the head. */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		if (!fp->next)
			qp->q.fragments_tail = fp;
		prev->next = fp;

		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		consume_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(!head);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_unclone(head, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments.
	 */
	if (skb_has_frag_list(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		clone = alloc_skb(0, GFP_ATOMIC);
		if (!clone)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
			plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		add_frag_mem_limit(qp->q.net, clone->truesize);
	}

	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));

	for (fp = head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
	}
	sub_frag_mem_limit(qp->q.net, head->truesize);

	head->