Release 4.15: net/ipv4/inet_fragment.c

/*
 * inet fragments management
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 *              Authors:        Pavel Emelyanov <xemul@openvz.org>
 *                              Started as consolidation of ipv4/ip_fragment.c,
 *                              ipv6/reassembly.c, and ipv6 nf conntrack reassembly
 */

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>

#include <net/sock.h>
#include <net/inet_frag.h>
#include <net/inet_ecn.h>


#define INETFRAGS_EVICT_BUCKETS   128
#define INETFRAGS_EVICT_MAX	  512

/* don't rebuild inetfrag table with new secret more often than this */
#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)

/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
 * Value : 0xff if frame should be dropped.
 *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
 */
const u8 ip_frag_ecn_table[16] = {
	/* at least one fragment had CE, and others ECT_0 or ECT_1 */
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1]			= INET_ECN_CE,
	[IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1]	= INET_ECN_CE,

	/* invalid combinations : drop frame */
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_1] = 0xff,
	[IPFRAG_ECN_NOT_ECT | IPFRAG_ECN_CE | IPFRAG_ECN_ECT_0 | IPFRAG_ECN_ECT_1] = 0xff,
};

EXPORT_SYMBOL(ip_frag_ecn_table);
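
In the reassembly paths that consume this table, each fragment's ECN bits are ORed into the queue's ecn field as fragments arrive, and the table is consulted once when the datagram is rebuilt. A simplified sketch of that final step, modeled on ip_frag_reasm() in net/ipv4/ip_fragment.c (abridged, not the verbatim caller):

	u8 ecn = ip_frag_ecn_table[qp->ecn];	/* qp->ecn is the OR of IPFRAG_ECN_* bits */

	if (unlikely(ecn == 0xff))		/* invalid ECN combination: drop the frame */
		goto out_fail;
	iph->tos |= ecn;			/* otherwise fold 0 or INET_ECN_CE into the TOS */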


static unsigned int inet_frag_hashfn(const struct inet_frags *f,
				     const struct inet_frag_queue *q)
{
	return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         33      100.00%  1        100.00%
Total                    33      100.00%  1        100.00%


static bool inet_frag_may_rebuild(struct inet_frags *f)
{
	return time_after(jiffies,
			  f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         23      100.00%  1        100.00%
Total                    23      100.00%  1        100.00%


static void inet_frag_secret_rebuild(struct inet_frags *f)
{
	int i;

	write_seqlock_bh(&f->rnd_seqlock);

	if (!inet_frag_may_rebuild(f))
		goto out;

	get_random_bytes(&f->rnd, sizeof(u32));

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb;
		struct inet_frag_queue *q;
		struct hlist_node *n;

		hb = &f->hash[i];
		spin_lock(&hb->chain_lock);

		hlist_for_each_entry_safe(q, n, &hb->chain, list) {
			unsigned int hval = inet_frag_hashfn(f, q);

			if (hval != i) {
				struct inet_frag_bucket *hb_dest;

				hlist_del(&q->list);

				/* Relink to new hash chain. */
				hb_dest = &f->hash[hval];

				/* This is the only place where we take
				 * another chain_lock while already holding
				 * one.  As this will not run concurrently,
				 * we cannot deadlock on hb_dest lock below, if its
				 * already locked it will be released soon since
				 * other caller cannot be waiting for hb lock
				 * that we've taken above.
				 */
				spin_lock_nested(&hb_dest->chain_lock,
						 SINGLE_DEPTH_NESTING);
				hlist_add_head(&q->list, &hb_dest->chain);
				spin_unlock(&hb_dest->chain_lock);
			}
		}
		spin_unlock(&hb->chain_lock);
	}

	f->rebuild = false;
	f->last_rebuild_jiffies = jiffies;
out:
	write_sequnlock_bh(&f->rnd_seqlock);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          104     49.29%   1        16.67%
Florian Westphal         74      35.07%   4        66.67%
Jesper Dangaard Brouer   33      15.64%   1        16.67%
Total                    211     100.00%  6        100.00%


static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
{
	return q->net->low_thresh == 0 ||
	       frag_mem_limit(q->net) >= q->net->low_thresh;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         34      100.00%  1        100.00%
Total                    34      100.00%  1        100.00%


static unsigned int
inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
{
	struct inet_frag_queue *fq;
	struct hlist_node *n;
	unsigned int evicted = 0;
	HLIST_HEAD(expired);

	spin_lock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
		if (!inet_fragq_should_evict(fq))
			continue;

		if (!del_timer(&fq->timer))
			continue;

		hlist_add_head(&fq->list_evictor, &expired);
		++evicted;
	}

	spin_unlock(&hb->chain_lock);

	hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
		f->frag_expire(&fq->timer);

	return evicted;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         121     97.58%   3        75.00%
Kees Cook                3       2.42%    1        25.00%
Total                    124     100.00%  4        100.00%


static void inet_frag_worker(struct work_struct *work)
{
	unsigned int budget = INETFRAGS_EVICT_BUCKETS;
	unsigned int i, evicted = 0;
	struct inet_frags *f;

	f = container_of(work, struct inet_frags, frags_work);

	BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);

	local_bh_disable();

	for (i = READ_ONCE(f->next_bucket); budget; --budget) {
		evicted += inet_evict_bucket(f, &f->hash[i]);
		i = (i + 1) & (INETFRAGS_HASHSZ - 1);
		if (evicted > INETFRAGS_EVICT_MAX)
			break;
	}

	f->next_bucket = i;

	local_bh_enable();

	if (f->rebuild && inet_frag_may_rebuild(f))
		inet_frag_secret_rebuild(f);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         130     99.24%   3        75.00%
Mark Rutland             1       0.76%    1        25.00%
Total                    131     100.00%  4        100.00%


static void inet_frag_schedule_worker(struct inet_frags *f)
{
	if (unlikely(!work_pending(&f->frags_work)))
		schedule_work(&f->frags_work);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         25      75.76%   1        50.00%
Pavel Emelyanov          8       24.24%   1        50.00%
Total                    33      100.00%  2        100.00%


int inet_frags_init(struct inet_frags *f)
{
	int i;

	INIT_WORK(&f->frags_work, inet_frag_worker);

	for (i = 0; i < INETFRAGS_HASHSZ; i++) {
		struct inet_frag_bucket *hb = &f->hash[i];

		spin_lock_init(&hb->chain_lock);
		INIT_HLIST_HEAD(&hb->chain);
	}

	seqlock_init(&f->rnd_seqlock);
	f->last_rebuild_jiffies = 0;
	f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
					    NULL);
	if (!f->frags_cachep)
		return -ENOMEM;

	return 0;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          44      37.93%   2        28.57%
Nikolay Aleksandrov      36      31.03%   1        14.29%
Jesper Dangaard Brouer   22      18.97%   1        14.29%
Florian Westphal         14      12.07%   3        42.86%
Total                    116     100.00%  7        100.00%

EXPORT_SYMBOL(inet_frags_init);
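
A protocol fills in its struct inet_frags callbacks before calling inet_frags_init(). The sketch below is modeled on ipfrag_init() in net/ipv4/ip_fragment.c at this release (abridged; field order and error handling simplified):

	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.frags_cache_name = ip_frag_cache_name;	/* "ip4-frags" */
	if (inet_frags_init(&ip4_frags))
		panic("IP: failed to allocate ip4_frags cache\n");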
void inet_frags_fini(struct inet_frags *f)
{
	cancel_work_sync(&f->frags_work);
	kmem_cache_destroy(f->frags_cachep);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          10      40.00%   2        50.00%
Florian Westphal         8       32.00%   1        25.00%
Nikolay Aleksandrov      7       28.00%   1        25.00%
Total                    25      100.00%  4        100.00%

EXPORT_SYMBOL(inet_frags_fini);
void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
{
	unsigned int seq;
	int i;

	nf->low_thresh = 0;

evict_again:
	local_bh_disable();
	seq = read_seqbegin(&f->rnd_seqlock);

	for (i = 0; i < INETFRAGS_HASHSZ; i++)
		inet_evict_bucket(f, &f->hash[i]);

	local_bh_enable();
	cond_resched();

	if (read_seqretry(&f->rnd_seqlock, seq) ||
	    sum_frag_mem_limit(nf))
		goto evict_again;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         67      70.53%   3        42.86%
Pavel Emelyanov          25      26.32%   1        14.29%
Jesper Dangaard Brouer   1       1.05%    1        14.29%
David S. Miller          1       1.05%    1        14.29%
Américo Wang             1       1.05%    1        14.29%
Total                    95      100.00%  7        100.00%

EXPORT_SYMBOL(inet_frags_exit_net);
static struct inet_frag_bucket *
get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
__acquires(hb->chain_lock)
{
	struct inet_frag_bucket *hb;
	unsigned int seq, hash;

restart:
	seq = read_seqbegin(&f->rnd_seqlock);

	hash = inet_frag_hashfn(f, fq);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	if (read_seqretry(&f->rnd_seqlock, seq)) {
		spin_unlock(&hb->chain_lock);
		goto restart;
	}

	return hb;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Jesper Dangaard Brouer   41      40.20%   1        25.00%
Florian Westphal         38      37.25%   2        50.00%
Pavel Emelyanov          23      22.55%   1        25.00%
Total                    102     100.00%  4        100.00%


static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
{
	struct inet_frag_bucket *hb;

	hb = get_frag_bucket_locked(fq, f);
	hlist_del(&fq->list);
	fq->flags |= INET_FRAG_COMPLETE;
	spin_unlock(&hb->chain_lock);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Florian Westphal         47      88.68%   2        66.67%
Pavel Emelyanov          6       11.32%   1        33.33%
Total                    53      100.00%  3        100.00%


void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
{
	if (del_timer(&fq->timer))
		refcount_dec(&fq->refcnt);

	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq_unlink(fq, f);
		refcount_dec(&fq->refcnt);
	}
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          57      93.44%   1        25.00%
Elena Reshetova          2       3.28%    1        25.00%
Nikolay Aleksandrov      1       1.64%    1        25.00%
Joe Perches              1       1.64%    1        25.00%
Total                    61      100.00%  4        100.00%

EXPORT_SYMBOL(inet_frag_kill);
void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
{
	struct sk_buff *fp;
	struct netns_frags *nf;
	unsigned int sum, sum_truesize = 0;

	WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
	WARN_ON(del_timer(&q->timer) != 0);

	/* Release all fragment data. */
	fp = q->fragments;
	nf = q->net;
	while (fp) {
		struct sk_buff *xp = fp->next;

		sum_truesize += fp->truesize;
		kfree_skb(fp);
		fp = xp;
	}
	sum = sum_truesize + f->qsize;

	if (f->destructor)
		f->destructor(q);
	kmem_cache_free(f->frags_cachep, q);

	sub_frag_mem_limit(nf, sum);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          95      68.84%   3        30.00%
Jesper Dangaard Brouer   22      15.94%   1        10.00%
Florian Westphal         8       5.80%    2        20.00%
Ilpo Järvinen            6       4.35%    1        10.00%
Nikolay Aleksandrov      6       4.35%    2        20.00%
Joe Perches              1       0.72%    1        10.00%
Total                    138     100.00%  10       100.00%

EXPORT_SYMBOL(inet_frag_destroy);
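
inet_frag_destroy() is normally reached through the refcount helper inet_frag_put() in include/net/inet_frag.h, which at this release is essentially the following (reproduced for context):

static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
{
	if (refcount_dec_and_test(&q->refcnt))
		inet_frag_destroy(q, f);
}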
static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
						struct inet_frag_queue *qp_in,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
	struct inet_frag_queue *qp;

#ifdef CONFIG_SMP
	/* With SMP race we have to recheck hash table, because
	 * such entry could have been created on other cpu before
	 * we acquired hash bucket lock.
	 */
	hlist_for_each_entry(qp, &hb->chain, list) {
		if (qp->net == nf && f->match(qp, arg)) {
			refcount_inc(&qp->refcnt);
			spin_unlock(&hb->chain_lock);
			qp_in->flags |= INET_FRAG_COMPLETE;
			inet_frag_put(qp_in, f);
			return qp;
		}
	}
#endif
	qp = qp_in;
	if (!mod_timer(&qp->timer, jiffies + nf->timeout))
		refcount_inc(&qp->refcnt);

	refcount_inc(&qp->refcnt);
	hlist_add_head(&qp->list, &hb->chain);

	spin_unlock(&hb->chain_lock);

	return qp;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          135     78.49%   5        50.00%
Jesper Dangaard Brouer   24      13.95%   1        10.00%
Florian Westphal         8       4.65%    1        10.00%
Elena Reshetova          3       1.74%    1        10.00%
Nikolay Aleksandrov      1       0.58%    1        10.00%
Joe Perches              1       0.58%    1        10.00%
Total                    172     100.00%  10       100.00%


static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
					       struct inet_frags *f,
					       void *arg)
{
	struct inet_frag_queue *q;

	if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
		inet_frag_schedule_worker(f);
		return NULL;
	}

	q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
	if (!q)
		return NULL;

	q->net = nf;
	f->constructor(q, arg);
	add_frag_mem_limit(nf, f->qsize);

	timer_setup(&q->timer, f->frag_expire, 0);
	spin_lock_init(&q->lock);
	refcount_set(&q->refcnt, 1);

	return q;
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          89      67.94%   3        21.43%
Florian Westphal         22      16.79%   3        21.43%
Gao Feng                 6       4.58%    1        7.14%
Michal Kubeček           5       3.82%    1        7.14%
Jesper Dangaard Brouer   2       1.53%    1        7.14%
Kees Cook                2       1.53%    1        7.14%
Nikolay Aleksandrov      2       1.53%    1        7.14%
Ian Morris               1       0.76%    1        7.14%
Konstantin Khlebnikov    1       0.76%    1        7.14%
Elena Reshetova          1       0.76%    1        7.14%
Total                    131     100.00%  14       100.00%


static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
						struct inet_frags *f,
						void *arg)
{
	struct inet_frag_queue *q;

	q = inet_frag_alloc(nf, f, arg);
	if (!q)
		return NULL;

	return inet_frag_intern(nf, q, f, arg);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          57      98.28%   4        80.00%
Ian Morris               1       1.72%    1        20.00%
Total                    58      100.00%  5        100.00%


struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
				       struct inet_frags *f, void *key,
				       unsigned int hash)
{
	struct inet_frag_bucket *hb;
	struct inet_frag_queue *q;
	int depth = 0;

	if (frag_mem_limit(nf) > nf->low_thresh)
		inet_frag_schedule_worker(f);

	hash &= (INETFRAGS_HASHSZ - 1);
	hb = &f->hash[hash];

	spin_lock(&hb->chain_lock);
	hlist_for_each_entry(q, &hb->chain, list) {
		if (q->net == nf && f->match(q, key)) {
			refcount_inc(&q->refcnt);
			spin_unlock(&hb->chain_lock);
			return q;
		}
		depth++;
	}
	spin_unlock(&hb->chain_lock);

	if (depth <= INETFRAGS_MAXDEPTH)
		return inet_frag_create(nf, f, key);

	if (inet_frag_may_rebuild(f)) {
		if (!f->rebuild)
			f->rebuild = true;
		inet_frag_schedule_worker(f);
	}

	return ERR_PTR(-ENOBUFS);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          78      40.84%   2        20.00%
Florian Westphal         51      26.70%   5        50.00%
Jesper Dangaard Brouer   40      20.94%   1        10.00%
Hannes Frederic Sowa     21      10.99%   1        10.00%
Elena Reshetova          1       0.52%    1        10.00%
Total                    191     100.00%  10       100.00%

EXPORT_SYMBOL(inet_frag_find);
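
Callers hash their protocol-specific key themselves and treat the ERR_PTR return as a rate-limited drop. A sketch of the IPv4 lookup, modeled on ip_find() in net/ipv4/ip_fragment.c at this release (abridged):

	struct ip4_create_arg arg = { .iph = iph, .user = user, .vif = vif };
	struct inet_frag_queue *q;
	unsigned int hash;

	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (IS_ERR_OR_NULL(q)) {
		inet_frag_maybe_warn_overflow(q, pr_fmt());
		return NULL;
	}
	return container_of(q, struct ipq, q);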
void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
				   const char *prefix)
{
	static const char msg[] = "inet_frag_find: Fragment hash bucket"
		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
		". Dropping fragment.\n";

	if (PTR_ERR(q) == -ENOBUFS)
		net_dbg_ratelimited("%s%s", prefix, msg);
}

Contributors

Person                   Tokens  Prop     Commits  CommitProp
Hannes Frederic Sowa     47      97.92%   1        50.00%
Joe Perches              1       2.08%    1        50.00%
Total                    48      100.00%  2        100.00%

EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);

Overall Contributors

Person                   Tokens  Prop     Commits  CommitProp
Pavel Emelyanov          789     39.99%   14       32.56%
Florian Westphal         716     36.29%   9        20.93%
Hannes Frederic Sowa     188     9.53%    2        4.65%
Jesper Dangaard Brouer   185     9.38%    3        6.98%
Nikolay Aleksandrov      53      2.69%    2        4.65%
Elena Reshetova          7       0.35%    1        2.33%
Gao Feng                 6       0.30%    1        2.33%
Ilpo Järvinen            6       0.30%    1        2.33%
Michal Kubeček           5       0.25%    1        2.33%
Kees Cook                5       0.25%    1        2.33%
Joe Perches              4       0.20%    2        4.65%
Tejun Heo                3       0.15%    1        2.33%
Ian Morris               2       0.10%    1        2.33%
David S. Miller          1       0.05%    1        2.33%
Konstantin Khlebnikov    1       0.05%    1        2.33%
Américo Wang             1       0.05%    1        2.33%
Mark Rutland             1       0.05%    1        2.33%
Total                    1973    100.00%  43       100.00%