cregit-Linux: how code gets into the kernel

Release 4.11 net/core/flow.c

Directory: net/core
/* flow.c: Generic flow cache.
 *
 * Copyright (C) 2003 Alexey N. Kuznetsov (kuznet@ms2.inr.ac.ru)
 * Copyright (C) 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/completion.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/mutex.h>
#include <net/flow.h>
#include <linux/atomic.h>
#include <linux/security.h>
#include <net/net_namespace.h>


struct flow_cache_entry {
        union {
                struct hlist_node       hlist;
                struct list_head        gc_list;
        } u;
        struct net                      *net;
        u16                             family;
        u8                              dir;
        u32                             genid;
        struct flowi                    key;
        struct flow_cache_object        *object;
};


struct flow_flush_info {
        struct flow_cache               *cache;
        atomic_t                        cpuleft;
        struct completion               completion;
};


static struct kmem_cache *flow_cachep __read_mostly;


#define flow_cache_hash_size(cache)	(1 << (cache)->hash_shift)

#define FLOW_HASH_RND_PERIOD		(10 * 60 * HZ)
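
Read together with flow_cache_init() below (which sets hash_shift to 10 and derives the watermarks from the hash size), these macros fix the cache geometry. A worked reading, as a comment sketch:

/* Worked example, assuming fc->hash_shift == 10 as set in
 * flow_cache_init() below:
 *
 *      flow_cache_hash_size(fc) == 1 << 10 == 1024 buckets per cpu
 *      low_watermark  == 2 * 1024 == 2048 entries
 *      high_watermark == 4 * 1024 == 4096 entries
 *
 * FLOW_HASH_RND_PERIOD == 10 * 60 * HZ, i.e. each cpu is asked to
 * pick a fresh jhash seed every ten minutes.
 */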


static void flow_cache_new_hashrnd(unsigned long arg)
{
        struct flow_cache *fc = (void *) arg;
        int i;

        for_each_possible_cpu(i)
                per_cpu_ptr(fc->percpu, i)->hash_rnd_recalc = 1;

        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
David S. Miller             31      52.54%  2        33.33%
Timo Teräs                  24      40.68%  1        16.67%
Herbert Xu                  2       3.39%   1        16.67%
Kamezawa Hiroyuki           1       1.69%   1        16.67%
Rusty Russell               1       1.69%   1        16.67%
Total                       59      100.00% 6        100.00%


static int flow_entry_valid(struct flow_cache_entry *fle,
                            struct netns_xfrm *xfrm)
{
        if (atomic_read(&xfrm->flow_cache_genid) != fle->genid)
                return 0;
        if (fle->object && !fle->object->ops->check(fle->object))
                return 0;
        return 1;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  52      88.14%  1        50.00%
Fan Du                      7       11.86%  1        50.00%
Total                       59      100.00% 2        100.00%
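
The genid test above is the cache's cheap global-invalidation hook. A minimal sketch of the assumed writer side (the counter lives in struct netns_xfrm; in-tree, the xfrm policy code bumps it when policies change):

/* Assumed writer-side pattern: one atomic increment makes
 * flow_entry_valid() fail for every entry cached so far, so each
 * flow is re-resolved on its next lookup.
 */
atomic_inc(&net->xfrm.flow_cache_genid);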


static void flow_entry_kill(struct flow_cache_entry *fle,
                            struct netns_xfrm *xfrm)
{
        if (fle->object)
                fle->object->ops->delete(fle->object);
        kmem_cache_free(flow_cachep, fle);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
James Morris                27      64.29%  1        20.00%
Timo Teräs                  10      23.81%  3        60.00%
Fan Du                      5       11.90%  1        20.00%
Total                       42      100.00% 5        100.00%


static void flow_cache_gc_task(struct work_struct *work)
{
        struct list_head gc_list;
        struct flow_cache_entry *fce, *n;
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                               flow_cache_gc_work);

        INIT_LIST_HEAD(&gc_list);
        spin_lock_bh(&xfrm->flow_cache_gc_lock);
        list_splice_tail_init(&xfrm->flow_cache_gc_list, &gc_list);
        spin_unlock_bh(&xfrm->flow_cache_gc_lock);

        list_for_each_entry_safe(fce, n, &gc_list, u.gc_list) {
                flow_entry_kill(fce, xfrm);
                atomic_dec(&xfrm->flow_cache_gc_count);
        }
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  65      66.33%  1        33.33%
Fan Du                      23      23.47%  1        33.33%
Steffen Klassert            10      10.20%  1        33.33%
Total                       98      100.00% 3        100.00%


static void flow_cache_queue_garbage(struct flow_cache_percpu *fcp,
                                     int deleted, struct list_head *gc_list,
                                     struct netns_xfrm *xfrm)
{
        if (deleted) {
                atomic_add(deleted, &xfrm->flow_cache_gc_count);
                fcp->hash_count -= deleted;
                spin_lock_bh(&xfrm->flow_cache_gc_lock);
                list_splice_tail(gc_list, &xfrm->flow_cache_gc_list);
                spin_unlock_bh(&xfrm->flow_cache_gc_lock);
                schedule_work(&xfrm->flow_cache_gc_work);
        }
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  55      68.75%  2        40.00%
Fan Du                      13      16.25%  1        20.00%
Steffen Klassert            10      12.50%  1        20.00%
James Morris                2       2.50%   1        20.00%
Total                       80      100.00% 5        100.00%


static void __flow_cache_shrink(struct flow_cache *fc,
                                struct flow_cache_percpu *fcp,
                                int shrink_to)
{
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                               flow_cache_global);

        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                int saved = 0;

                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (saved < shrink_to &&
                            flow_entry_valid(fle, xfrm)) {
                                saved++;
                        } else {
                                deleted++;
                                hlist_del(&fle->u.hlist);
                                list_add_tail(&fle->u.gc_list, &gc_list);
                        }
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  78      52.00%  3        50.00%
David S. Miller             53      35.33%  2        33.33%
Fan Du                      19      12.67%  1        16.67%
Total                       150     100.00% 6        100.00%


static void flow_cache_shrink(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        int shrink_to = fc->low_watermark / flow_cache_hash_size(fc);

        __flow_cache_shrink(fc, fcp, shrink_to);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  19      51.35%  1        50.00%
David S. Miller             18      48.65%  1        50.00%
Total                       37      100.00% 2        100.00%


static void flow_new_hash_rnd(struct flow_cache *fc,
                              struct flow_cache_percpu *fcp)
{
        get_random_bytes(&fcp->hash_rnd, sizeof(u32));
        fcp->hash_rnd_recalc = 0;
        __flow_cache_shrink(fc, fcp, 0);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
David S. Miller             26      59.09%  1        50.00%
Timo Teräs                  18      40.91%  1        50.00%
Total                       44      100.00% 2        100.00%


static u32 flow_hash_code(struct flow_cache *fc,
                          struct flow_cache_percpu *fcp,
                          const struct flowi *key,
                          size_t keysize)
{
        const u32 *k = (const u32 *) key;
        const u32 length = keysize * sizeof(flow_compare_t) / sizeof(u32);

        return jhash2(k, length, fcp->hash_rnd)
                & (flow_cache_hash_size(fc) - 1);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
David S. Miller             40      54.05%  2        50.00%
Timo Teräs                  17      22.97%  1        25.00%
David Ward                  17      22.97%  1        25.00%
Total                       74      100.00% 4        100.00%

/* I hear what you're saying, use memcmp.  But memcmp cannot make
 * important assumptions that we can here, such as alignment.
 */
static int flow_key_compare(const struct flowi *key1,
                            const struct flowi *key2, size_t keysize)
{
        const flow_compare_t *k1, *k1_lim, *k2;

        k1 = (const flow_compare_t *) key1;
        k1_lim = k1 + keysize;

        k2 = (const flow_compare_t *) key2;

        do {
                if (*k1++ != *k2++)
                        return 1;
        } while (k1 < k1_lim);

        return 0;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
David S. Miller             78      95.12%  2        66.67%
David Ward                  4       4.88%   1        33.33%
Total                       82      100.00% 3        100.00%
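
For reference, the loop above is a word-at-a-time equivalent of memcmp(); a sketch of the assumptions it relies on (flow_compare_t is a word-sized type defined, together with flow_key_size(), in include/net/flow.h):

/* Semantically this is just (a sketch, not a drop-in replacement):
 *
 *      memcmp(key1, key2, keysize * sizeof(flow_compare_t)) != 0
 *
 * but the open-coded loop may compare a whole word at a time because
 * the flowi keys are word-aligned and flow_key_size() reports their
 * length in whole flow_compare_t words.
 */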


struct flow_cache_object *
flow_cache_lookup(struct net *net, const struct flowi *key, u16 family,
                  u8 dir, flow_resolve_t resolver, void *ctx)
{
        struct flow_cache *fc = &net->xfrm.flow_cache_global;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle, *tfle;
        struct flow_cache_object *flo;
        size_t keysize;
        unsigned int hash;

        local_bh_disable();
        fcp = this_cpu_ptr(fc->percpu);

        fle = NULL;
        flo = NULL;

        keysize = flow_key_size(family);
        if (!keysize)
                goto nocache;

        /* Packet really early in init?  Making flow_cache_init a
         * pre-smp initcall would solve this.  --RR
         */
        if (!fcp->hash_table)
                goto nocache;

        if (fcp->hash_rnd_recalc)
                flow_new_hash_rnd(fc, fcp);

        hash = flow_hash_code(fc, fcp, key, keysize);
        hlist_for_each_entry(tfle, &fcp->hash_table[hash], u.hlist) {
                if (tfle->net == net &&
                    tfle->family == family &&
                    tfle->dir == dir &&
                    flow_key_compare(key, &tfle->key, keysize) == 0) {
                        fle = tfle;
                        break;
                }
        }

        if (unlikely(!fle)) {
                if (fcp->hash_count > fc->high_watermark)
                        flow_cache_shrink(fc, fcp);

                if (atomic_read(&net->xfrm.flow_cache_gc_count) >
                    2 * num_online_cpus() * fc->high_watermark) {
                        flo = ERR_PTR(-ENOBUFS);
                        goto ret_object;
                }

                fle = kmem_cache_alloc(flow_cachep, GFP_ATOMIC);
                if (fle) {
                        fle->net = net;
                        fle->family = family;
                        fle->dir = dir;
                        memcpy(&fle->key, key, keysize * sizeof(flow_compare_t));
                        fle->object = NULL;
                        hlist_add_head(&fle->u.hlist,
                                       &fcp->hash_table[hash]);
                        fcp->hash_count++;
                }
        } else if (likely(fle->genid == atomic_read(&net->xfrm.flow_cache_genid))) {
                flo = fle->object;
                if (!flo)
                        goto ret_object;
                flo = flo->ops->get(flo);
                if (flo)
                        goto ret_object;
        } else if (fle->object) {
                flo = fle->object;
                flo->ops->delete(flo);
                fle->object = NULL;
        }

nocache:
        flo = NULL;
        if (fle) {
                flo = fle->object;
                fle->object = NULL;
        }
        flo = resolver(net, key, family, dir, flo, ctx);
        if (fle) {
                fle->genid = atomic_read(&net->xfrm.flow_cache_genid);
                if (!IS_ERR(flo))
                        fle->object = flo;
                else
                        fle->genid--;
        } else {
                if (!IS_ERR_OR_NULL(flo))
                        flo->ops->delete(flo);
        }
ret_object:
        local_bh_enable();
        return flo;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  227     43.32%  3        15.79%
David S. Miller             118     22.52%  3        15.79%
Herbert Xu                  82      15.65%  2        10.53%
David Ward                  37      7.06%   2        10.53%
Steffen Klassert            29      5.53%   1        5.26%
Fan Du                      12      2.29%   1        5.26%
Alexey Dobriyan             7       1.34%   1        5.26%
Miroslav Urbanek            5       0.95%   1        5.26%
James Morris                3       0.57%   1        5.26%
Eric Dumazet                1       0.19%   1        5.26%
Christoph Lameter           1       0.19%   1        5.26%
Rusty Russell               1       0.19%   1        5.26%
Hideaki Yoshifuji / 吉藤英明   1       0.19%   1        5.26%
Total                       524     100.00% 19       100.00%

EXPORT_SYMBOL(flow_cache_lookup);
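
To make the contract above concrete, here is a minimal, hypothetical resolver sketch. All my_* names are illustrative, not kernel APIs; the real in-tree caller is the xfrm policy layer. The resolver runs with softirqs disabled (local_bh_disable() is held across the call), receives the previously cached object or NULL, and must either reuse or dispose of it, returning a new flow_cache_object or an ERR_PTR:

/* Hypothetical resolver showing the flow_resolve_t contract assumed
 * by flow_cache_lookup(); my_flow_state, my_flow_state_alloc and
 * my_flow_ops are made up for the sketch.
 */
static struct flow_cache_object *
example_resolver(struct net *net, const struct flowi *key, u16 family,
                 u8 dir, struct flow_cache_object *old_obj, void *ctx)
{
        struct my_flow_state *st;

        if (old_obj)
                old_obj->ops->delete(old_obj); /* stale: drop our reference */

        st = my_flow_state_alloc(net, key, family, dir, ctx);
        if (!st)
                return ERR_PTR(-ENOMEM);       /* propagated to the caller */

        st->flo.ops = &my_flow_ops;            /* must supply get/check/delete */
        return &st->flo;                       /* cached under the flow key */
}

A caller would then resolve flows with flow_cache_lookup(net, key, family, dir, example_resolver, ctx); subsequent lookups are served from the per-cpu hash until the object's ->check() fails or the genid moves on.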
static void flow_cache_flush_tasklet(unsigned long data)
{
        struct flow_flush_info *info = (void *)data;
        struct flow_cache *fc = info->cache;
        struct flow_cache_percpu *fcp;
        struct flow_cache_entry *fle;
        struct hlist_node *tmp;
        LIST_HEAD(gc_list);
        int i, deleted = 0;
        struct netns_xfrm *xfrm = container_of(fc, struct netns_xfrm,
                                               flow_cache_global);

        fcp = this_cpu_ptr(fc->percpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++) {
                hlist_for_each_entry_safe(fle, tmp,
                                          &fcp->hash_table[i], u.hlist) {
                        if (flow_entry_valid(fle, xfrm))
                                continue;

                        deleted++;
                        hlist_del(&fle->u.hlist);
                        list_add_tail(&fle->u.gc_list, &gc_list);
                }
        }

        flow_cache_queue_garbage(fcp, deleted, &gc_list, xfrm);

        if (atomic_dec_and_test(&info->cpuleft))
                complete(&info->completion);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  92      51.98%  3        37.50%
Herbert Xu                  60      33.90%  2        25.00%
Fan Du                      19      10.73%  1        12.50%
David S. Miller             5       2.82%   1        12.50%
Eric Dumazet                1       0.56%   1        12.50%
Total                       177     100.00% 8        100.00%

/*
 * Return whether a cpu needs flushing.  Conservatively, we assume
 * the presence of any entries means the core may require flushing,
 * since the flow_cache_ops.check() function may assume it's running
 * on the same core as the per-cpu cache component.
 */
static int flow_cache_percpu_empty(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp;
        int i;

        fcp = per_cpu_ptr(fc->percpu, cpu);
        for (i = 0; i < flow_cache_hash_size(fc); i++)
                if (!hlist_empty(&fcp->hash_table[i]))
                        return 0;
        return 1;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Chris Metcalf               68      98.55%  1        50.00%
Li RongQing                 1       1.45%   1        50.00%
Total                       69      100.00% 2        100.00%


static void flow_cache_flush_per_cpu(void *data)
{
        struct flow_flush_info *info = data;
        struct tasklet_struct *tasklet;

        tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;
        tasklet->data = (unsigned long)info;
        tasklet_schedule(tasklet);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Herbert Xu                  37      72.55%  2        33.33%
Timo Teräs                  7       13.73%  1        16.67%
David S. Miller             4       7.84%   1        16.67%
Li RongQing                 2       3.92%   1        16.67%
Shan Wei                    1       1.96%   1        16.67%
Total                       51      100.00% 6        100.00%


void flow_cache_flush(struct net *net)
{
        struct flow_flush_info info;
        cpumask_var_t mask;
        int i, self;

        /* Track which cpus need flushing to avoid disturbing all cores. */
        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return;
        cpumask_clear(mask);

        /* Don't want cpus going down or up during this. */
        get_online_cpus();
        mutex_lock(&net->xfrm.flow_flush_sem);
        info.cache = &net->xfrm.flow_cache_global;
        for_each_online_cpu(i)
                if (!flow_cache_percpu_empty(info.cache, i))
                        cpumask_set_cpu(i, mask);
        atomic_set(&info.cpuleft, cpumask_weight(mask));
        if (atomic_read(&info.cpuleft) == 0)
                goto done;

        init_completion(&info.completion);

        local_bh_disable();
        self = cpumask_test_and_clear_cpu(smp_processor_id(), mask);
        on_each_cpu_mask(mask, flow_cache_flush_per_cpu, &info, 0);
        if (self)
                flow_cache_flush_tasklet((unsigned long)&info);
        local_bh_enable();

        wait_for_completion(&info.completion);

done:
        mutex_unlock(&net->xfrm.flow_flush_sem);
        put_online_cpus();
        free_cpumask_var(mask);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Chris Metcalf               92      47.42%  1        8.33%
Herbert Xu                  50      25.77%  4        33.33%
Fan Du                      16      8.25%   1        8.33%
Andrew Morton               13      6.70%   2        16.67%
David S. Miller             12      6.19%   1        8.33%
Timo Teräs                  7       3.61%   1        8.33%
Gautham R. Shenoy           2       1.03%   1        8.33%
Arjan van de Ven            2       1.03%   1        8.33%
Total                       194     100.00% 12       100.00%
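
The synchronization shape of flow_cache_flush() is worth spelling out informally, as a comment sketch grounded in the code above:

/* Informal timeline of the flush:
 *
 *      caller:    cpuleft = number of cpus whose hash is non-empty
 *      each cpu:  IPI -> flow_cache_flush_per_cpu() schedules the
 *                 per-cpu tasklet -> flow_cache_flush_tasklet()
 *                 evicts stale entries, then
 *                 atomic_dec_and_test(&info->cpuleft)
 *      last cpu:  complete(&info->completion)
 *      caller:    wait_for_completion(), then drops flow_flush_sem
 */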


static void flow_cache_flush_task(struct work_struct *work)
{
        struct netns_xfrm *xfrm = container_of(work, struct netns_xfrm,
                                               flow_cache_flush_work);
        struct net *net = container_of(xfrm, struct net, xfrm);

        flow_cache_flush(net);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Fan Du                      31      67.39%  1        33.33%
Steffen Klassert            14      30.43%  1        33.33%
Miroslav Urbanek            1       2.17%   1        33.33%
Total                       46      100.00% 3        100.00%


void flow_cache_flush_deferred(struct net *net)
{
        schedule_work(&net->xfrm.flow_cache_flush_work);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Steffen Klassert            12      60.00%  1        50.00%
Fan Du                      8       40.00%  1        50.00%
Total                       20      100.00% 2        100.00%


static int flow_cache_cpu_prepare(struct flow_cache *fc, int cpu)
{
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);
        size_t sz = sizeof(struct hlist_head) * flow_cache_hash_size(fc);

        if (!fcp->hash_table) {
                fcp->hash_table = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
                if (!fcp->hash_table) {
                        pr_err("NET: failed to allocate flow cache sz %zu\n", sz);
                        return -ENOMEM;
                }
                fcp->hash_rnd_recalc = 1;
                fcp->hash_count = 0;
                tasklet_init(&fcp->flush_tasklet, flow_cache_flush_tasklet, 0);
        }
        return 0;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Eric Dumazet                56      49.12%  1        14.29%
Timo Teräs                  24      21.05%  1        14.29%
Herbert Xu                  19      16.67%  2        28.57%
David S. Miller             11      9.65%   2        28.57%
Rusty Russell               4       3.51%   1        14.29%
Total                       114     100.00% 7        100.00%


static int flow_cache_cpu_up_prep(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);

        return flow_cache_cpu_prepare(fc, cpu);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Sebastian Andrzej Siewior   18      47.37%  1        33.33%
Timo Teräs                  12      31.58%  1        33.33%
Rusty Russell               8       21.05%  1        33.33%
Total                       38      100.00% 3        100.00%


static int flow_cache_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct flow_cache *fc = hlist_entry_safe(node, struct flow_cache, node);
        struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, cpu);

        __flow_cache_shrink(fc, fcp, 0);
        return 0;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Sebastian Andrzej Siewior   26      46.43%  1        33.33%
Timo Teräs                  21      37.50%  1        33.33%
Rusty Russell               9       16.07%  1        33.33%
Total                       56      100.00% 3        100.00%


int flow_cache_init(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        if (!flow_cachep)
                flow_cachep = kmem_cache_create("flow_cache",
                                                sizeof(struct flow_cache_entry),
                                                0, SLAB_PANIC, NULL);
        spin_lock_init(&net->xfrm.flow_cache_gc_lock);
        INIT_LIST_HEAD(&net->xfrm.flow_cache_gc_list);
        INIT_WORK(&net->xfrm.flow_cache_gc_work, flow_cache_gc_task);
        INIT_WORK(&net->xfrm.flow_cache_flush_work, flow_cache_flush_task);
        mutex_init(&net->xfrm.flow_flush_sem);
        atomic_set(&net->xfrm.flow_cache_gc_count, 0);

        fc->hash_shift = 10;
        fc->low_watermark = 2 * flow_cache_hash_size(fc);
        fc->high_watermark = 4 * flow_cache_hash_size(fc);

        fc->percpu = alloc_percpu(struct flow_cache_percpu);
        if (!fc->percpu)
                return -ENOMEM;

        if (cpuhp_state_add_instance(CPUHP_NET_FLOW_PREPARE, &fc->node))
                goto err;

        setup_timer(&fc->rnd_timer, flow_cache_new_hashrnd,
                    (unsigned long) fc);
        fc->rnd_timer.expires = jiffies + FLOW_HASH_RND_PERIOD;
        add_timer(&fc->rnd_timer);

        return 0;

err:
        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;

        return -ENOMEM;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Fan Du                      87      32.46%  1        9.09%
Huajun Li                   55      20.52%  1        9.09%
Timo Teräs                  48      17.91%  1        9.09%
Eric Dumazet                26      9.70%   2        18.18%
David S. Miller             22      8.21%   1        9.09%
Steffen Klassert            12      4.48%   1        9.09%
Herbert Xu                  8       2.99%   2        18.18%
Sebastian Andrzej Siewior   6       2.24%   1        9.09%
Pavel Emelyanov             4       1.49%   1        9.09%
Total                       268     100.00% 11       100.00%

EXPORT_SYMBOL(flow_cache_init);
void flow_cache_fini(struct net *net)
{
        int i;
        struct flow_cache *fc = &net->xfrm.flow_cache_global;

        del_timer_sync(&fc->rnd_timer);

        cpuhp_state_remove_instance_nocalls(CPUHP_NET_FLOW_PREPARE, &fc->node);

        for_each_possible_cpu(i) {
                struct flow_cache_percpu *fcp = per_cpu_ptr(fc->percpu, i);

                kfree(fcp->hash_table);
                fcp->hash_table = NULL;
        }

        free_percpu(fc->percpu);
        fc->percpu = NULL;
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Steffen Klassert            85      95.51%  1        50.00%
Sebastian Andrzej Siewior   4       4.49%   1        50.00%
Total                       89      100.00% 2        100.00%

EXPORT_SYMBOL(flow_cache_fini);
void __init flow_cache_hp_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_NET_FLOW_PREPARE,
                                      "net/flow:prepare",
                                      flow_cache_cpu_up_prep,
                                      flow_cache_cpu_dead);
        WARN_ON(ret < 0);
}

Contributors

Person                      Tokens  Prop    Commits  CommitProp
Sebastian Andrzej Siewior   31      100.00% 1        100.00%
Total                       31      100.00% 1        100.00%
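
Pieced together from this file, the assumed lifecycle of the cache looks like this (a sketch; the actual call sites live elsewhere in the tree):

/* Boot:        flow_cache_hp_init();        register cpuhp callbacks
 * Per netns:   err = flow_cache_init(net);  per-cpu tables + rnd timer
 * Fast path:   flo = flow_cache_lookup(net, key, family, dir,
 *                                      resolver, ctx);
 * Invalidate:  flow_cache_flush_deferred(net);  async full flush
 * Teardown:    flow_cache_fini(net);        timer, cpuhp, per-cpu frees
 */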


Overall Contributors

Person                      Tokens  Prop    Commits  CommitProp
Timo Teräs                  801     31.30%  3        6.00%
David S. Miller             468     18.29%  4        8.00%
Herbert Xu                  297     11.61%  8        16.00%
Fan Du                      245     9.57%   1        2.00%
Steffen Klassert            177     6.92%   3        6.00%
Chris Metcalf               161     6.29%   1        2.00%
Eric Dumazet                99      3.87%   5        10.00%
Sebastian Andrzej Siewior   85      3.32%   1        2.00%
David Ward                  64      2.50%   2        4.00%
Huajun Li                   55      2.15%   1        2.00%
James Morris                32      1.25%   1        2.00%
Rusty Russell               23      0.90%   2        4.00%
Andrew Morton               13      0.51%   2        4.00%
Alexey Dobriyan             7       0.27%   1        2.00%
Miroslav Urbanek            6       0.23%   2        4.00%
Arjan van de Ven            5       0.20%   1        2.00%
Pavel Emelyanov             4       0.16%   1        2.00%
Arnaldo Carvalho de Melo    3       0.12%   1        2.00%
Linus Torvalds              3       0.12%   1        2.00%
Li RongQing                 3       0.12%   2        4.00%
Gautham R. Shenoy           2       0.08%   1        2.00%
Kamezawa Hiroyuki           1       0.04%   1        2.00%
Trent Jaeger                1       0.04%   1        2.00%
Hideaki Yoshifuji / 吉藤英明   1       0.04%   1        2.00%
Shan Wei                    1       0.04%   1        2.00%
Christoph Lameter           1       0.04%   1        2.00%
Arun Sharma                 1       0.04%   1        2.00%
Total                       2559    100.00% 50       100.00%
Created with cregit.